~ubuntu-branches/ubuntu/trusty/swift/trusty-updates

Viewing changes to swift/obj/auditor.py

  • Committer: Package Import Robot
  • Author(s): Chuck Short, James Page, Chuck Short
  • Date: 2013-08-13 10:37:13 UTC
  • mfrom: (1.2.21)
  • Revision ID: package-import@ubuntu.com-20130813103713-1ctbx4zifyljs2aq
Tags: 1.9.1-0ubuntu1
[ James Page ]
* d/control: Update VCS fields for new branch locations.

[ Chuck Short ]
* New upstream release.

@@ -15,12 +15,14 @@
 
 import os
 import time
+from gettext import gettext as _
 
 from eventlet import Timeout
 
+from swift.obj import diskfile
 from swift.obj import server as object_server
 from swift.common.utils import get_logger, audit_location_generator, \
-    ratelimit_sleep, config_true_value, dump_recon_cache
+    ratelimit_sleep, config_true_value, dump_recon_cache, list_from_csv, json
 from swift.common.exceptions import AuditException, DiskFileError, \
     DiskFileNotExist
 from swift.common.daemon import Daemon
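
The import hunk reflects the 1.9.x refactor that moves on-disk object handling out of swift.obj.server into the new swift.obj.diskfile module; the rest of this diff updates the auditor's call sites to match. As a minimal sketch, assuming a 1.9.1 tree, the renamed lookups line up as follows (the aliases are illustrative, not part of the patch):

    # Illustrative only: where the auditor's helpers now live in 1.9.1.
    from swift.obj import diskfile

    DiskFile = diskfile.DiskFile                      # was object_server.DiskFile
    read_metadata = diskfile.read_metadata            # was object_server.read_metadata
    quarantine_renamer = diskfile.quarantine_renamer  # was object_server.quarantine_renamer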
@@ -56,6 +58,10 @@
         self.recon_cache_path = conf.get('recon_cache_path',
                                          '/var/cache/swift')
         self.rcache = os.path.join(self.recon_cache_path, "object.recon")
+        self.stats_sizes = sorted(
+            [int(s) for s in list_from_csv(conf.get('object_size_stats'))])
+        self.stats_buckets = dict(
+            [(s, 0) for s in self.stats_sizes + ['OVER']])
 
     def audit_all_objects(self, mode='once'):
         self.logger.info(_('Begin object audit "%s" mode (%s)' %
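
The new stats_sizes and stats_buckets attributes come from an optional object_size_stats value in the auditor's config section. A self-contained sketch of the same setup, with a plain split() standing in for swift.common.utils.list_from_csv:

    # Stand-alone sketch of the bucket setup above (list_from_csv replaced
    # by a plain split for illustration).
    conf = {'object_size_stats': '10, 100, 1024'}  # hypothetical config value
    stats_sizes = sorted(
        int(s) for s in conf.get('object_size_stats', '').split(',') if s.strip())
    stats_buckets = dict((s, 0) for s in stats_sizes + ['OVER'])
    print(stats_sizes)    # [10, 100, 1024]
    print(stats_buckets)  # {10: 0, 100: 0, 1024: 0, 'OVER': 0}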
@@ -67,7 +73,7 @@
         total_errors = 0
         time_auditing = 0
         all_locs = audit_location_generator(self.devices,
-                                            object_server.DATADIR,
+                                            object_server.DATADIR, '.data',
                                             mount_check=self.mount_check,
                                             logger=self.logger)
         for path, device, partition in all_locs:
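
Passing '.data' into audit_location_generator pushes the suffix filter down into the directory walk itself, which is why the explicit path.endswith('.data') guard disappears from object_audit() later in this diff. A rough stand-in for that kind of suffix-filtered walk (not swift's actual generator):

    import os

    def walk_suffix(root, suffix):
        """Yield paths under `root` whose file name ends with `suffix`."""
        for dirpath, _dirs, files in os.walk(root):
            for name in files:
                if name.endswith(suffix):
                    yield os.path.join(dirpath, name)

    # e.g. walk_suffix('/srv/node/sdb1/objects', '.data')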
@@ -124,6 +130,26 @@
                 'frate': self.total_files_processed / elapsed,
                 'brate': self.total_bytes_processed / elapsed,
                 'audit': time_auditing, 'audit_rate': time_auditing / elapsed})
+        if self.stats_sizes:
+            self.logger.info(
+                _('Object audit stats: %s') % json.dumps(self.stats_buckets))
+
+    def record_stats(self, obj_size):
+        """
+        Based on config's object_size_stats will keep track of how many objects
+        fall into the specified ranges. For example with the following:
+
+        object_size_stats = 10, 100, 1024
+
+        and your system has 3 objects of sizes: 5, 20, and 10000 bytes the log
+        will look like: {"10": 1, "100": 1, "1024": 0, "OVER": 1}
+        """
+        for size in self.stats_sizes:
+            if obj_size <= size:
+                self.stats_buckets[size] += 1
+                break
+        else:
+            self.stats_buckets["OVER"] += 1
 
     def object_audit(self, path, device, partition):
         """
@@ -134,26 +160,23 @@
         :param partition: the partition the path is on
         """
         try:
-            if not path.endswith('.data'):
-                return
             try:
-                name = object_server.read_metadata(path)['name']
-            except (Exception, Timeout), exc:
+                name = diskfile.read_metadata(path)['name']
+            except (Exception, Timeout) as exc:
                 raise AuditException('Error when reading metadata: %s' % exc)
             _junk, account, container, obj = name.split('/', 3)
-            df = object_server.DiskFile(self.devices, device, partition,
-                                        account, container, obj, self.logger,
-                                        keep_data_fp=True)
+            df = diskfile.DiskFile(self.devices, device, partition,
+                                   account, container, obj, self.logger,
+                                   keep_data_fp=True)
             try:
-                if df.data_file is None:
-                    # file is deleted, we found the tombstone
-                    return
                 try:
                     obj_size = df.get_data_file_size()
-                except DiskFileError, e:
+                except DiskFileNotExist:
+                    return
+                except DiskFileError as e:
                     raise AuditException(str(e))
-                except DiskFileNotExist:
-                    return
+                if self.stats_sizes:
+                    self.record_stats(obj_size)
                 if self.zero_byte_only_at_fps and obj_size:
                     self.passes += 1
                     return
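
Besides the diskfile renames, this hunk drops the explicit df.data_file tombstone check (a missing data file now surfaces as DiskFileNotExist from get_data_file_size() and is still treated as a clean return), wires in record_stats(), and switches the except clauses from the old comma form to the `as` form, the spelling that also works under Python 3. The syntax change in isolation:

    # Old, Python 2-only spelling:
    #     except DiskFileError, e:
    # New spelling, valid on Python 2.6+ and Python 3:
    try:
        raise ValueError('example')
    except ValueError as e:  # `as` binds the exception instance to e
        print('caught: %s' % e)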
@@ -172,13 +195,13 @@
                         {'path': path})
             finally:
                 df.close(verify_file=False)
-        except AuditException, err:
+        except AuditException as err:
             self.logger.increment('quarantines')
             self.quarantines += 1
             self.logger.error(_('ERROR Object %(obj)s failed audit and will '
                                 'be quarantined: %(err)s'),
                               {'obj': path, 'err': err})
-            object_server.quarantine_renamer(
+            diskfile.quarantine_renamer(
                 os.path.join(self.devices, device), path)
             return
         except (Exception, Timeout):