/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <zlib.h>

#include "qemu-common.h"
#include "block_int.h"
#include "block/qcow2.h"
int qcow2_grow_l1_table(BlockDriverState *bs, int min_size)
{
    BDRVQcowState *s = bs->opaque;
    int new_l1_size, new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    uint64_t new_l1_table_offset;
    uint8_t data[12];

    new_l1_size = s->l1_size;
    if (min_size <= new_l1_size)
        return 0;
    while (min_size > new_l1_size) {
        new_l1_size = (new_l1_size * 3 + 1) / 2;
    }
#ifdef DEBUG_ALLOC2
    printf("grow l1_table from %d to %d\n", s->l1_size, new_l1_size);
#endif

    new_l1_size2 = sizeof(uint64_t) * new_l1_size;
    new_l1_table = qemu_mallocz(align_offset(new_l1_size2, 512));
    memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));

    /* write new table (align to cluster) */
    new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);

    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    ret = bdrv_pwrite(s->hd, new_l1_table_offset, new_l1_table, new_l1_size2);
    if (ret != new_l1_size2)
        goto fail;
    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);

    /* set the new L1 table size and offset in the image header */
    cpu_to_be32w((uint32_t*)data, new_l1_size);
    cpu_to_be64w((uint64_t*)(data + 4), new_l1_table_offset);
    if (bdrv_pwrite(s->hd, offsetof(QCowHeader, l1_size), data,
                    sizeof(data)) != sizeof(data))
        goto fail;
    qemu_free(s->l1_table);
    qcow2_free_clusters(bs, s->l1_table_offset, s->l1_size * sizeof(uint64_t));
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    s->l1_size = new_l1_size;
    return 0;
 fail:
    /* free the new table on failure; the old one is still in use */
    qemu_free(new_l1_table);
    return -EIO;
}
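/*
 * Editor's note: the L1 table grows by roughly a factor of 1.5 per
 * iteration, so repeated growth costs O(log n) reallocations.  Starting
 * from l1_size = 20 with min_size = 40:
 *
 *     20 -> (20 * 3 + 1) / 2 = 30
 *     30 -> (30 * 3 + 1) / 2 = 45   (>= 40, loop stops)
 *
 * The byte size is then padded to a 512-byte multiple by align_offset()
 * before allocation so the table write below covers whole sectors.
 */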
void qcow2_l2_cache_reset(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;

    memset(s->l2_cache, 0, s->l2_size * L2_CACHE_SIZE * sizeof(uint64_t));
    memset(s->l2_cache_offsets, 0, L2_CACHE_SIZE * sizeof(uint64_t));
    memset(s->l2_cache_counts, 0, L2_CACHE_SIZE * sizeof(uint32_t));
}
static inline int l2_cache_new_entry(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    uint32_t min_count;
    int min_index, i;

    /* pick the least used entry for eviction */
    min_index = 0;
    min_count = 0xffffffff;
    for(i = 0; i < L2_CACHE_SIZE; i++) {
        if (s->l2_cache_counts[i] < min_count) {
            min_count = s->l2_cache_counts[i];
            min_index = i;
        }
    }
    return min_index;
}
/*
 * seek l2_offset in the l2_cache table
 * if not found, return NULL,
 * otherwise:
 *   increment the l2 cache hit count of the entry,
 *   if the counter overflows, divide all counters by two,
 *   return the pointer to the l2 cache entry
 */

static uint64_t *seek_l2_table(BDRVQcowState *s, uint64_t l2_offset)
{
    int i, j;

    for(i = 0; i < L2_CACHE_SIZE; i++) {
        if (l2_offset == s->l2_cache_offsets[i]) {
            /* increment the hit count */
            if (++s->l2_cache_counts[i] == 0xffffffff) {
                for(j = 0; j < L2_CACHE_SIZE; j++) {
                    s->l2_cache_counts[j] >>= 1;
                }
            }
            return s->l2_cache + (i << s->l2_bits);
        }
    }
    return NULL;
}
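/*
 * Editor's illustration of the aging scheme above: the hit counters
 * implement an approximate least-frequently-used policy.  When any
 * counter is about to saturate, every counter is halved, which keeps
 * the relative ordering while preventing overflow:
 *
 *     counts = { 0xfffffffe, 10, 3 }
 *     hit on entry 0  ->  ++count == 0xffffffff, so halve all:
 *     counts = { 0x7fffffff,  5, 1 }
 *
 * Entry 2 remains the eviction candidate chosen by l2_cache_new_entry().
 */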
/*
 * l2_load
 *
 * Loads an L2 table into memory. If the table is in the cache, the cache
 * is used; otherwise the L2 table is loaded from the image file.
 *
 * Returns a pointer to the L2 table on success, or NULL if the read from
 * the image file failed.
 */

static uint64_t *l2_load(BlockDriverState *bs, uint64_t l2_offset)
{
    BDRVQcowState *s = bs->opaque;
    int min_index;
    uint64_t *l2_table;

    /* seek if the table for the given offset is in the cache */

    l2_table = seek_l2_table(s, l2_offset);
    if (l2_table != NULL)
        return l2_table;

    /* not found: load a new entry in the least used one */

    min_index = l2_cache_new_entry(bs);
    l2_table = s->l2_cache + (min_index << s->l2_bits);
    if (bdrv_pread(s->hd, l2_offset, l2_table, s->l2_size * sizeof(uint64_t)) !=
        s->l2_size * sizeof(uint64_t))
        return NULL;
    s->l2_cache_offsets[min_index] = l2_offset;
    s->l2_cache_counts[min_index] = 1;

    return l2_table;
}
/*
 * Writes one sector of the L1 table to the disk (can't update single entries
 * and we really don't want bdrv_pwrite to perform a read-modify-write)
 */
#define L1_ENTRIES_PER_SECTOR (512 / 8)
static int write_l1_entry(BDRVQcowState *s, int l1_index)
{
    uint64_t buf[L1_ENTRIES_PER_SECTOR];
    int l1_start_index;
    int i;

    /* write the whole sector containing the entry, big-endian on disk */
    l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
    for (i = 0; i < L1_ENTRIES_PER_SECTOR; i++) {
        buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
    }

    if (bdrv_pwrite(s->hd, s->l1_table_offset + 8 * l1_start_index,
        buf, sizeof(buf)) != sizeof(buf))
    {
        return -1;
    }

    return 0;
}
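/*
 * Editor's example for the rounding above, assuming 512-byte sectors
 * (64 L1 entries of 8 bytes each per sector): updating l1_index = 75
 * gives l1_start_index = 75 & ~63 = 64, so entries 64..127 are written
 * in one aligned 512-byte request at byte offset l1_table_offset + 512.
 */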
/*
 * l2_allocate
 *
 * Allocate a new l2 entry in the file. If l1_index points to an already
 * used entry in the L1 table (i.e. we are doing a copy on write for the L2
 * table) copy the contents of the old L2 table into the newly allocated one.
 * Otherwise the new table is initialized with zeros.
 *
 */

static uint64_t *l2_allocate(BlockDriverState *bs, int l1_index)
{
    BDRVQcowState *s = bs->opaque;
    int min_index;
    uint64_t old_l2_offset;
    uint64_t *l2_table, l2_offset;

    old_l2_offset = s->l1_table[l1_index];

    /* allocate a new l2 entry */

    l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));

    /* update the L1 entry */

    s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
    if (write_l1_entry(s, l1_index) < 0) {
        return NULL;
    }

    /* allocate a new entry in the l2 cache */

    min_index = l2_cache_new_entry(bs);
    l2_table = s->l2_cache + (min_index << s->l2_bits);

    if (old_l2_offset == 0) {
        /* if there was no old l2 table, clear the new table */
        memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
    } else {
        /* if there was an old l2 table, read it from the disk */
        if (bdrv_pread(s->hd, old_l2_offset,
                       l2_table, s->l2_size * sizeof(uint64_t)) !=
            s->l2_size * sizeof(uint64_t))
            return NULL;
    }
    /* write the l2 table to the file */
    if (bdrv_pwrite(s->hd, l2_offset,
                    l2_table, s->l2_size * sizeof(uint64_t)) !=
        s->l2_size * sizeof(uint64_t))
        return NULL;

    /* update the l2 cache entry */

    s->l2_cache_offsets[min_index] = l2_offset;
    s->l2_cache_counts[min_index] = 1;

    return l2_table;
}
static int count_contiguous_clusters(uint64_t nb_clusters, int cluster_size,
        uint64_t *l2_table, uint64_t start, uint64_t mask)
{
    int i;
    uint64_t offset = be64_to_cpu(l2_table[0]) & ~mask;

    if (!offset)
        return 0;

    for (i = start; i < start + nb_clusters; i++)
        if (offset + i * cluster_size != (be64_to_cpu(l2_table[i]) & ~mask))
            break;

    return (i - start);
}

static int count_contiguous_free_clusters(uint64_t nb_clusters, uint64_t *l2_table)
{
    int i = 0;

    while(nb_clusters-- && l2_table[i] == 0)
        i++;

    return i;
}
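/*
 * Editor's example for the two counting helpers above, with 64 KB
 * clusters and mask = QCOW_OFLAG_COPIED (entries shown in CPU byte
 * order for readability):
 *
 *     l2_table = { 0x50000|COPIED, 0x60000, 0x70000|COPIED, 0x90000 }
 *
 * count_contiguous_clusters(4, 0x10000, l2_table, 0, COPIED) returns 3:
 * the fourth entry breaks the run because 0x50000 + 3 * 0x10000 is
 * 0x80000, not 0x90000.  count_contiguous_free_clusters() is the dual
 * for runs of zero (unallocated) entries.
 */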
/* The crypt function is compatible with the linux cryptoloop
   algorithm for < 4 GB images. NOTE: out_buf == in_buf is
   supported */
void qcow2_encrypt_sectors(BDRVQcowState *s, int64_t sector_num,
                           uint8_t *out_buf, const uint8_t *in_buf,
                           int nb_sectors, int enc,
                           const AES_KEY *key)
{
    union {
        uint64_t ll[2];
        uint8_t b[16];
    } ivec;
    int i;

    for(i = 0; i < nb_sectors; i++) {
        ivec.ll[0] = cpu_to_le64(sector_num);
        ivec.ll[1] = 0;
        AES_cbc_encrypt(in_buf, out_buf, 512, key,
                        ivec.b, enc);
        sector_num++;
        in_buf += 512;
        out_buf += 512;
    }
}
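/*
 * Editor's note on the IV scheme above: each 512-byte sector is
 * encrypted independently with AES-CBC, the IV being simply the
 * little-endian sector number in the low 64 bits and zero in the high
 * 64 bits:
 *
 *     ivec.ll[0] = cpu_to_le64(sector_num);
 *     ivec.ll[1] = 0;
 *
 * A predictable IV like this is the cryptoloop convention the comment
 * refers to; it is not semantically secure by modern standards, since
 * identical plaintext sectors at the same offset encrypt identically.
 */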
static int qcow_read(BlockDriverState *bs, int64_t sector_num,
                     uint8_t *buf, int nb_sectors)
{
    BDRVQcowState *s = bs->opaque;
    int ret, index_in_cluster, n, n1;
    uint64_t cluster_offset;

    while (nb_sectors > 0) {
        n = nb_sectors;
        cluster_offset = qcow2_get_cluster_offset(bs, sector_num << 9, &n);
        index_in_cluster = sector_num & (s->cluster_sectors - 1);
        if (!cluster_offset) {
            if (bs->backing_hd) {
                /* read from the base image */
                n1 = qcow2_backing_read1(bs->backing_hd, sector_num, buf, n);
                if (n1 > 0) {
                    ret = bdrv_read(bs->backing_hd, sector_num, buf, n1);
                    if (ret < 0)
                        return -1;
                }
            } else {
                memset(buf, 0, 512 * n);
            }
        } else if (cluster_offset & QCOW_OFLAG_COMPRESSED) {
            if (qcow2_decompress_cluster(s, cluster_offset) < 0)
                return -1;
            memcpy(buf, s->cluster_cache + index_in_cluster * 512, 512 * n);
        } else {
            ret = bdrv_pread(s->hd, cluster_offset + index_in_cluster * 512, buf, n * 512);
            if (ret != n * 512)
                return -1;
            if (s->crypt_method) {
                qcow2_encrypt_sectors(s, sector_num, buf, buf, n, 0,
                                      &s->aes_decrypt_key);
            }
        }
        nb_sectors -= n;
        sector_num += n;
        buf += n * 512;
    }
    return 0;
}
static int copy_sectors(BlockDriverState *bs, uint64_t start_sect,
                        uint64_t cluster_offset, int n_start, int n_end)
{
    BDRVQcowState *s = bs->opaque;
    int n, ret;

    n = n_end - n_start;
    if (n <= 0)
        return 0;

    /* read through the qcow2 layer so backing files and compression work */
    ret = qcow_read(bs, start_sect + n_start, s->cluster_data, n);
    if (ret < 0)
        return ret;
    if (s->crypt_method) {
        qcow2_encrypt_sectors(s, start_sect + n_start,
                              s->cluster_data,
                              s->cluster_data, n, 1,
                              &s->aes_encrypt_key);
    }
    ret = bdrv_write(s->hd, (cluster_offset >> 9) + n_start,
                     s->cluster_data, n);
    if (ret < 0)
        return ret;
    return 0;
}
/*
 * get_cluster_offset
 *
 * For a given offset of the disk image, return cluster offset in
 * qcow2 file.
 *
 * on entry, *num is the number of contiguous sectors we'd like to
 * access following offset.
 *
 * on exit, *num is the number of contiguous sectors we can read.
 *
 * Returns the cluster offset if the offset is allocated,
 * Returns 0 otherwise.
 *
 */

uint64_t qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
    int *num)
{
    BDRVQcowState *s = bs->opaque;
    int l1_index, l2_index;
    uint64_t l2_offset, *l2_table, cluster_offset;
    int l1_bits, c;
    int index_in_cluster, nb_available, nb_needed, nb_clusters;

    index_in_cluster = (offset >> 9) & (s->cluster_sectors - 1);
    nb_needed = *num + index_in_cluster;

    l1_bits = s->l2_bits + s->cluster_bits;

    /* compute how many bytes there are between the offset and
     * the end of the l1 entry
     */

    nb_available = (1 << l1_bits) - (offset & ((1 << l1_bits) - 1));

    /* compute the number of available sectors */

    nb_available = (nb_available >> 9) + index_in_cluster;

    if (nb_needed > nb_available) {
        nb_needed = nb_available;
    }

    cluster_offset = 0;

    /* seek the l2 offset in the l1 table */

    l1_index = offset >> l1_bits;
    if (l1_index >= s->l1_size)
        goto out;

    l2_offset = s->l1_table[l1_index];

    /* seek the l2 table of the given l2 offset */

    if (!l2_offset)
        goto out;

    /* load the l2 table in memory */

    l2_offset &= ~QCOW_OFLAG_COPIED;
    l2_table = l2_load(bs, l2_offset);
    if (l2_table == NULL)
        return 0;

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
    cluster_offset = be64_to_cpu(l2_table[l2_index]);
    nb_clusters = size_to_clusters(s, nb_needed << 9);

    if (!cluster_offset) {
        /* how many empty clusters ? */
        c = count_contiguous_free_clusters(nb_clusters, &l2_table[l2_index]);
    } else {
        /* how many allocated clusters ? */
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], 0, QCOW_OFLAG_COPIED);
    }

    nb_available = (c * s->cluster_sectors);
out:
    if (nb_available > nb_needed)
        nb_available = nb_needed;

    *num = nb_available - index_in_cluster;

    return cluster_offset & ~QCOW_OFLAG_COPIED;
}
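/*
 * Editor's example of the *num contract above, assuming cluster_bits =
 * 16 (64 KB clusters, cluster_sectors = 128): a request at sector 130
 * for *num = 300 sectors has index_in_cluster = 2, so nb_needed = 302
 * sectors counted from the cluster boundary.  If only two clusters are
 * contiguous (c = 2, nb_available = 256), *num is reduced to
 * 256 - 2 = 254 sectors and the caller loops to map the remainder, as
 * qcow_read() above does.
 */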
/*
 * get_cluster_table
 *
 * for a given disk offset, load (and allocate if needed)
 * the l2 table.
 *
 * the l2 table offset in the qcow2 file and the cluster index
 * in the l2 table are given to the caller.
 *
 * Returns 1 on success, 0 on failure.
 */

static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_table,
                             uint64_t *new_l2_offset,
                             int *new_l2_index)
{
    BDRVQcowState *s = bs->opaque;
    int l1_index, l2_index, ret;
    uint64_t l2_offset, *l2_table;

    /* seek the l2 offset in the l1 table */

    l1_index = offset >> (s->l2_bits + s->cluster_bits);
    if (l1_index >= s->l1_size) {
        ret = qcow2_grow_l1_table(bs, l1_index + 1);
        if (ret < 0)
            return 0;
    }
    l2_offset = s->l1_table[l1_index];

    /* seek the l2 table of the given l2 offset */

    if (l2_offset & QCOW_OFLAG_COPIED) {
        /* load the l2 table in memory */
        l2_offset &= ~QCOW_OFLAG_COPIED;
        l2_table = l2_load(bs, l2_offset);
        if (l2_table == NULL)
            return 0;
    } else {
        /* the L2 table is not writable: copy-on-write a fresh one */
        if (l2_offset)
            qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t));
        l2_table = l2_allocate(bs, l1_index);
        if (l2_table == NULL)
            return 0;
        l2_offset = s->l1_table[l1_index] & ~QCOW_OFLAG_COPIED;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);

    *new_l2_table = l2_table;
    *new_l2_offset = l2_offset;
    *new_l2_index = l2_index;

    return 1;
}
/*
 * alloc_compressed_cluster_offset
 *
 * For a given offset of the disk image, return cluster offset in
 * qcow2 file.
 *
 * If the offset is not found, allocate a new compressed cluster.
 *
 * Return the cluster offset if successful,
 * Return 0, otherwise.
 *
 */

uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
                                               uint64_t offset,
                                               int compressed_size)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index, ret;
    uint64_t l2_offset, *l2_table, cluster_offset;
    int nb_csectors;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_offset, &l2_index);
    if (ret == 0)
        return 0;

    cluster_offset = be64_to_cpu(l2_table[l2_index]);
    if (cluster_offset & QCOW_OFLAG_COPIED)
        return cluster_offset & ~QCOW_OFLAG_COPIED;

    if (cluster_offset)
        qcow2_free_any_clusters(bs, cluster_offset, 1);

    cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
    nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
                  (cluster_offset >> 9);

    cluster_offset |= QCOW_OFLAG_COMPRESSED |
                      ((uint64_t)nb_csectors << s->csize_shift);

    /* update L2 table */
    /* compressed clusters never have the copied flag */

    l2_table[l2_index] = cpu_to_be64(cluster_offset);
    if (bdrv_pwrite(s->hd,
                    l2_offset + l2_index * sizeof(uint64_t),
                    l2_table + l2_index,
                    sizeof(uint64_t)) != sizeof(uint64_t))
        return 0;

    return cluster_offset;
}
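/*
 * Editor's note: a compressed L2 entry packs the host byte offset in
 * the low bits and the number of occupied 512-byte sectors minus one
 * in the bits above csize_shift.  For example, a 3000-byte compressed
 * cluster allocated at byte offset 0x42200 spans the six sectors
 * 0x211..0x216, and the stored count is
 *
 *     nb_csectors = ((0x42200 + 3000 - 1) >> 9) - (0x42200 >> 9)
 *                 = 0x216 - 0x211 = 5
 *
 * qcow2_decompress_cluster() adds the 1 back when reading.
 */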
/*
 * Write L2 table updates to disk, writing whole sectors to avoid a
 * read-modify-write in bdrv_pwrite
 */
#define L2_ENTRIES_PER_SECTOR (512 / 8)
static int write_l2_entries(BDRVQcowState *s, uint64_t *l2_table,
    uint64_t l2_offset, int l2_index, int num)
{
    int l2_start_index = l2_index & ~(L2_ENTRIES_PER_SECTOR - 1);
    int start_offset = (8 * l2_index) & ~511;
    int end_offset = (8 * (l2_index + num) + 511) & ~511;
    size_t len = end_offset - start_offset;

    if (bdrv_pwrite(s->hd, l2_offset + start_offset, &l2_table[l2_start_index],
        len) != len)
    {
        return -1;
    }

    return 0;
}
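/*
 * Editor's example for the sector rounding above: updating num = 3
 * entries at l2_index = 100 gives start_offset = 800 & ~511 = 512 and
 * end_offset = (824 + 511) & ~511 = 1024, i.e. the single 512-byte
 * sector holding entries 64..127 is rewritten, covering all three
 * updates without a read-modify-write.
 */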
int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, uint64_t cluster_offset,
    QCowL2Meta *m)
{
    BDRVQcowState *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, start_sect, l2_offset, *l2_table;

    if (m->nb_clusters == 0)
        return 0;

    old_cluster = qemu_malloc(m->nb_clusters * sizeof(uint64_t));

    /* copy content of unmodified sectors */
    start_sect = (m->offset & ~(s->cluster_size - 1)) >> 9;
    if (m->n_start) {
        ret = copy_sectors(bs, start_sect, cluster_offset, 0, m->n_start);
        if (ret < 0)
            goto err;
    }

    if (m->nb_available & (s->cluster_sectors - 1)) {
        uint64_t end = m->nb_available & ~(uint64_t)(s->cluster_sectors - 1);
        ret = copy_sectors(bs, start_sect + end, cluster_offset + (end << 9),
                m->nb_available - end, s->cluster_sectors);
        if (ret < 0)
            goto err;
    }

    ret = -EIO;
    /* update L2 table */
    if (!get_cluster_table(bs, m->offset, &l2_table, &l2_offset, &l2_index))
        goto err;

    for (i = 0; i < m->nb_clusters; i++) {
        /* if two concurrent writes happen to the same unallocated cluster,
         * each write allocates a separate cluster and writes data
         * concurrently. The first one to complete updates the l2 table with
         * a pointer to its cluster; the second one has to do RMW (which is
         * done above by copy_sectors()), update the l2 table with its
         * cluster pointer, and free the old cluster. This is what this loop
         * does */
        if(l2_table[l2_index + i] != 0)
            old_cluster[j++] = l2_table[l2_index + i];

        l2_table[l2_index + i] = cpu_to_be64((cluster_offset +
                    (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
    }

    if (write_l2_entries(s, l2_table, l2_offset, l2_index, m->nb_clusters) < 0) {
        ret = -1;
        goto err;
    }

    for (i = 0; i < j; i++)
        qcow2_free_any_clusters(bs,
            be64_to_cpu(old_cluster[i]) & ~QCOW_OFLAG_COPIED, 1);

    ret = 0;
err:
    qemu_free(old_cluster);
    return ret;
}
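/*
 * Editor's illustration of the race handled in the loop above:
 *
 *     t0: writes A and B both find cluster X unallocated
 *     t1: A allocates cluster CA, B allocates cluster CB
 *     t2: A completes first and links CA into the L2 table
 *     t3: B completes, finds a non-zero entry (CA), saves it in
 *         old_cluster[], links CB in its place, then frees CA
 *
 * The copy_sectors() calls make sure CB already contains the sectors
 * of X that B did not overwrite.
 */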
/*
 * alloc_cluster_offset
 *
 * For a given offset of the disk image, return cluster offset in
 * qcow2 file.
 *
 * If the offset is not found, allocate a new cluster.
 *
 * Return the cluster offset if successful,
 * Return 0, otherwise.
 *
 */

uint64_t qcow2_alloc_cluster_offset(BlockDriverState *bs,
                                    uint64_t offset,
                                    int n_start, int n_end,
                                    int *num, QCowL2Meta *m)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index, ret;
    uint64_t l2_offset, *l2_table, cluster_offset;
    int nb_clusters, i = 0;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_offset, &l2_index);
    if (ret == 0)
        return 0;

    nb_clusters = size_to_clusters(s, n_end << 9);

    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    cluster_offset = be64_to_cpu(l2_table[l2_index]);

    /* We keep all QCOW_OFLAG_COPIED clusters */

    if (cluster_offset & QCOW_OFLAG_COPIED) {
        nb_clusters = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], 0, 0);

        cluster_offset &= ~QCOW_OFLAG_COPIED;
        m->nb_clusters = 0;

        goto out;
    }

    /* for the moment, multiple compressed clusters are not managed */

    if (cluster_offset & QCOW_OFLAG_COMPRESSED)
        nb_clusters = 1;

    /* how many available clusters ? */

    while (i < nb_clusters) {
        i += count_contiguous_clusters(nb_clusters - i, s->cluster_size,
                &l2_table[l2_index], i, 0);

        if(be64_to_cpu(l2_table[l2_index + i]))
            break;

        i += count_contiguous_free_clusters(nb_clusters - i,
                &l2_table[l2_index + i]);

        cluster_offset = be64_to_cpu(l2_table[l2_index + i]);

        if ((cluster_offset & QCOW_OFLAG_COPIED) ||
                (cluster_offset & QCOW_OFLAG_COMPRESSED))
            break;
    }
    nb_clusters = i;

    /* allocate a new cluster */

    cluster_offset = qcow2_alloc_clusters(bs, nb_clusters * s->cluster_size);

    /* save info needed for meta data update */
    m->offset = offset;
    m->n_start = n_start;
    m->nb_clusters = nb_clusters;

out:
    m->nb_available = MIN(nb_clusters << (s->cluster_bits - 9), n_end);

    *num = m->nb_available - n_start;

    return cluster_offset;
}
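/*
 * Editor's sketch (illustration only, kept out of the build with #if 0)
 * of how a qcow_write()-style caller is expected to pair the two calls
 * above: allocate and map clusters, write the guest data, and only then
 * link the mapping into the L2 table.  'sector_num', 'buf', 'nb_sectors'
 * and 'ret' are assumed to come from the surrounding write loop.
 */
#if 0
    QCowL2Meta l2meta;
    int index_in_cluster = sector_num & (s->cluster_sectors - 1);
    int n_end = index_in_cluster + nb_sectors;
    int n;
    uint64_t cluster_offset;

    cluster_offset = qcow2_alloc_cluster_offset(bs, sector_num << 9,
                                                index_in_cluster, n_end,
                                                &n, &l2meta);
    if (!cluster_offset)
        return -1;

    /* write the data itself ... */
    ret = bdrv_pwrite(s->hd, cluster_offset + index_in_cluster * 512,
                      buf, n * 512);

    /* ... and make the new mapping visible only after the data is down */
    if (ret != n * 512 ||
        qcow2_alloc_cluster_link_l2(bs, cluster_offset, &l2meta) < 0)
        return -1;
#endif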
static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
                             const uint8_t *buf, int buf_size)
{
    z_stream strm1, *strm = &strm1;
    int ret, out_len;

    memset(strm, 0, sizeof(*strm));

    strm->next_in = (uint8_t *)buf;
    strm->avail_in = buf_size;
    strm->next_out = out_buf;
    strm->avail_out = out_buf_size;

    ret = inflateInit2(strm, -12);
    if (ret != Z_OK)
        return -1;
    ret = inflate(strm, Z_FINISH);
    out_len = strm->next_out - out_buf;
    if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) ||
        out_len != out_buf_size) {
        inflateEnd(strm);
        return -1;
    }
    inflateEnd(strm);
    return 0;
}
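/*
 * Editor's note: the negative windowBits value (-12) selects zlib's
 * raw deflate mode with a 4 KB window, i.e. compressed clusters carry
 * no zlib header or checksum.  Z_BUF_ERROR is tolerated above because
 * inflation is stopped as soon as out_buf_size bytes have been
 * produced, possibly before zlib sees the end of the stream.
 */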
int qcow2_decompress_cluster(BDRVQcowState *s, uint64_t cluster_offset)
{
    int ret, csize, nb_csectors, sector_offset;
    uint64_t coffset;

    coffset = cluster_offset & s->cluster_offset_mask;
    if (s->cluster_cache_offset != coffset) {
        nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
        sector_offset = coffset & 511;
        csize = nb_csectors * 512 - sector_offset;
        ret = bdrv_read(s->hd, coffset >> 9, s->cluster_data, nb_csectors);
        if (ret < 0) {
            return -1;
        }
        if (decompress_buffer(s->cluster_cache, s->cluster_size,
                              s->cluster_data + sector_offset, csize) < 0) {
            return -1;
        }
        s->cluster_cache_offset = coffset;
    }
    return 0;
}
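/*
 * Editor's example for the size computation above: a descriptor whose
 * offset field is 0x42201 with a stored sector count of 5 reads
 * nb_csectors = 6 sectors starting at sector 0x42201 >> 9 = 0x211;
 * sector_offset = 0x42201 & 511 = 1, so csize = 6 * 512 - 1 = 3071
 * bytes are handed to decompress_buffer().
 */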