/*
 * QEMU Enhanced Disk Format Consistency Check
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */
18
BdrvCheckResult *result;
19
bool fix; /* whether to fix invalid offsets */
22
uint32_t *used_clusters; /* referenced cluster bitmap */
27
static bool qed_test_bit(uint32_t *bitmap, uint64_t n) {
28
return !!(bitmap[n / 32] & (1 << (n % 32)));
31
static void qed_set_bit(uint32_t *bitmap, uint64_t n) {
32
bitmap[n / 32] |= 1 << (n % 32);
36
* Set bitmap bits for clusters
38
* @check: Check structure
39
* @offset: Starting offset in bytes
40
* @n: Number of clusters
42
static bool qed_set_used_clusters(QEDCheck *check, uint64_t offset,
45
uint64_t cluster = qed_bytes_to_clusters(check->s, offset);
46
unsigned int corruptions = 0;
49
/* Clusters should only be referenced once */
50
if (qed_test_bit(check->used_clusters, cluster)) {
54
qed_set_bit(check->used_clusters, cluster);
58
check->result->corruptions += corruptions;
59
return corruptions == 0;
65
* @ret: Number of invalid cluster offsets
67
static unsigned int qed_check_l2_table(QEDCheck *check, QEDTable *table)
69
BDRVQEDState *s = check->s;
70
unsigned int i, num_invalid = 0;
72
for (i = 0; i < s->table_nelems; i++) {
73
uint64_t offset = table->offsets[i];
75
if (qed_offset_is_unalloc_cluster(offset) ||
76
qed_offset_is_zero_cluster(offset)) {
80
/* Detect invalid cluster offset */
81
if (!qed_check_cluster_offset(s, offset)) {
83
table->offsets[i] = 0;
85
check->result->corruptions++;
92
qed_set_used_clusters(check, offset, 1);
99
* Descend tables and check each cluster is referenced once only
101
static int qed_check_l1_table(QEDCheck *check, QEDTable *table)
103
BDRVQEDState *s = check->s;
104
unsigned int i, num_invalid_l1 = 0;
105
int ret, last_error = 0;
107
/* Mark L1 table clusters used */
108
qed_set_used_clusters(check, s->header.l1_table_offset,
109
s->header.table_size);
111
for (i = 0; i < s->table_nelems; i++) {
112
unsigned int num_invalid_l2;
113
uint64_t offset = table->offsets[i];
115
if (qed_offset_is_unalloc_cluster(offset)) {
119
/* Detect invalid L2 offset */
120
if (!qed_check_table_offset(s, offset)) {
121
/* Clear invalid offset */
123
table->offsets[i] = 0;
125
check->result->corruptions++;
132
if (!qed_set_used_clusters(check, offset, s->header.table_size)) {
133
continue; /* skip an invalid table */
136
ret = qed_read_l2_table_sync(s, &check->request, offset);
138
check->result->check_errors++;
143
num_invalid_l2 = qed_check_l2_table(check,
144
check->request.l2_table->table);
146
/* Write out fixed L2 table */
147
if (num_invalid_l2 > 0 && check->fix) {
148
ret = qed_write_l2_table_sync(s, &check->request, 0,
149
s->table_nelems, false);
151
check->result->check_errors++;
158
/* Drop reference to final table */
159
qed_unref_l2_cache_entry(check->request.l2_table);
160
check->request.l2_table = NULL;
162
/* Write out fixed L1 table */
163
if (num_invalid_l1 > 0 && check->fix) {
164
ret = qed_write_l1_table_sync(s, 0, s->table_nelems);
166
check->result->check_errors++;
175
* Check for unreferenced (leaked) clusters
177
static void qed_check_for_leaks(QEDCheck *check)
179
BDRVQEDState *s = check->s;
182
for (i = s->header.header_size; i < check->nclusters; i++) {
183
if (!qed_test_bit(check->used_clusters, i)) {
184
check->result->leaks++;
189
/**
 * Run a consistency check over the whole image.
 *
 * @s: Open QED image state
 * @result: Output counters (corruptions, leaks, check_errors)
 * @fix: Whether to repair invalid offsets in place
 *
 * Returns 0 on success or a negative error from table I/O.  Leak detection
 * runs only if the table scan completed without I/O errors, since an
 * incomplete scan would misreport unvisited clusters as leaked.
 */
int qed_check(BDRVQEDState *s, BdrvCheckResult *result, bool fix)
{
    QEDCheck check = {
        .s = s,
        .result = result,
        .nclusters = qed_bytes_to_clusters(s, s->file_size),
        .request = { .l2_table = NULL },
        .fix = fix,
    };
    int ret;

    /* One bit per cluster, rounded up to whole 32-bit words, zero-filled */
    check.used_clusters = qemu_mallocz(((check.nclusters + 31) / 32) *
                                       sizeof(check.used_clusters[0]));

    ret = qed_check_l1_table(&check, s->l1_table);
    if (ret == 0) {
        /* Only check for leaks if entire image was scanned successfully */
        qed_check_for_leaks(&check);
    }

    qemu_free(check.used_clusters);
    return ret;
}