/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include <zlib.h>

#include "qemu-common.h"
#include "block_int.h"
#include "block/qcow2.h"
#include "trace.h"

int qcow2_grow_l1_table(BlockDriverState *bs, int min_size, bool exact_size)
{
    BDRVQcowState *s = bs->opaque;
    int new_l1_size, new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    int64_t new_l1_table_offset;
    uint8_t data[12];

    if (min_size <= s->l1_size) {
        return 0;
    }

    if (exact_size) {
        new_l1_size = min_size;
    } else {
        /* Bump size up to reduce the number of times we have to grow */
        new_l1_size = s->l1_size;
        if (new_l1_size == 0) {
            new_l1_size = 1;
        }
        while (min_size > new_l1_size) {
            new_l1_size = (new_l1_size * 3 + 1) / 2;
        }
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "grow l1_table from %d to %d\n", s->l1_size, new_l1_size);
#endif

    new_l1_size2 = sizeof(uint64_t) * new_l1_size;
    new_l1_table = g_malloc0(align_offset(new_l1_size2, 512));
    memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));

    /* write new table (align to cluster) */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
    new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
    if (new_l1_table_offset < 0) {
        g_free(new_l1_table);
        return new_l1_table_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
    for (i = 0; i < s->l1_size; i++) {
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    }
    ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset,
                           new_l1_table, new_l1_size2);
    if (ret < 0) {
        goto fail;
    }
    for (i = 0; i < s->l1_size; i++) {
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);
    }

    /* set new table */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
    cpu_to_be32w((uint32_t*)data, new_l1_size);
    cpu_to_be64wu((uint64_t*)(data + 4), new_l1_table_offset);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size),
                           data, sizeof(data));
    if (ret < 0) {
        goto fail;
    }
    g_free(s->l1_table);
    qcow2_free_clusters(bs, s->l1_table_offset, s->l1_size * sizeof(uint64_t));
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    s->l1_size = new_l1_size;
    return 0;
fail:
    g_free(new_l1_table);
    qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2);
    return ret;
}
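/*
 * Illustrative note (not part of the driver): the 3/2 growth loop above
 * produces roughly geometric L1 sizes, so growing a table repeatedly needs
 * only O(log n) reallocations. Starting from an empty table:
 *
 *     int n = 1;
 *     while (n < min_size) {
 *         n = (n * 3 + 1) / 2;    // 1, 2, 3, 5, 8, 12, 18, 27, 41, ...
 *     }
 */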
/*
 * l2_load
 *
 * Loads an L2 table into memory. If the table is in the cache, the cache
 * is used; otherwise the L2 table is loaded from the image file.
 *
 * Returns 0 on success and stores a pointer to the table in *l2_table;
 * returns -errno if reading the table from the image file failed.
 */
static int l2_load(BlockDriverState *bs, uint64_t l2_offset,
    uint64_t **l2_table)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset, (void**) l2_table);

    return ret;
}

/*
 * Writes one sector of the L1 table to the disk (can't update single entries
 * and we really don't want bdrv_pread to perform a read-modify-write)
 */
#define L1_ENTRIES_PER_SECTOR (512 / 8)
static int write_l1_entry(BlockDriverState *bs, int l1_index)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t buf[L1_ENTRIES_PER_SECTOR];
    int l1_start_index;
    int i, ret;

    l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
    for (i = 0; i < L1_ENTRIES_PER_SECTOR; i++) {
        buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
    ret = bdrv_pwrite_sync(bs->file, s->l1_table_offset + 8 * l1_start_index,
        buf, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    return 0;
}

/*
 * l2_allocate
 *
 * Allocates a new L2 table in the file. If l1_index points to an already
 * used entry in the L1 table (i.e. we are doing a copy on write for the L2
 * table), the contents of the old L2 table are copied into the newly
 * allocated one. Otherwise the new table is initialized with zeros.
 */
static int l2_allocate(BlockDriverState *bs, int l1_index, uint64_t **table)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t old_l2_offset;
    uint64_t *l2_table = NULL;
    int64_t l2_offset;
    int ret;

    old_l2_offset = s->l1_table[l1_index];

    trace_qcow2_l2_allocate(bs, l1_index);

    /* allocate a new l2 entry */

    l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
    if (l2_offset < 0) {
        return l2_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* allocate a new entry in the l2 cache */

    trace_qcow2_l2_allocate_get_empty(bs, l1_index);
    ret = qcow2_cache_get_empty(bs, s->l2_table_cache, l2_offset, (void**) table);
    if (ret < 0) {
        goto fail;
    }

    l2_table = *table;

    if (old_l2_offset == 0) {
        /* if there was no old l2 table, clear the new table */
        memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
    } else {
        uint64_t* old_table;

        /* if there was an old l2 table, read it from the disk */
        BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ);
        ret = qcow2_cache_get(bs, s->l2_table_cache, old_l2_offset,
            (void**) &old_table);
        if (ret < 0) {
            goto fail;
        }

        memcpy(l2_table, old_table, s->cluster_size);

        ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &old_table);
        if (ret < 0) {
            goto fail;
        }
    }

    /* write the l2 table to the file */
    BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE);

    trace_qcow2_l2_allocate_write_l2(bs, l1_index);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    /* update the L1 entry */
    trace_qcow2_l2_allocate_write_l1(bs, l1_index);
    s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
    ret = write_l1_entry(bs, l1_index);
    if (ret < 0) {
        goto fail;
    }

    *table = l2_table;
    trace_qcow2_l2_allocate_done(bs, l1_index, 0);
    return 0;

fail:
    trace_qcow2_l2_allocate_done(bs, l1_index, ret);
    /* Only release the cache entry if we actually acquired one above */
    if (l2_table != NULL) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) table);
    }
    s->l1_table[l1_index] = old_l2_offset;
    return ret;
}
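/*
 * Illustrative note (not part of the driver): write_l1_entry() always
 * rewrites the whole 512-byte sector containing the entry, so updating
 * l1_index == 100 writes big-endian entries 64..127 in one request:
 *
 *     l1_start_index = 100 & ~(L1_ENTRIES_PER_SECTOR - 1);   // == 64
 *     file_offset    = s->l1_table_offset + 8 * l1_start_index;
 */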
static int count_contiguous_clusters(uint64_t nb_clusters, int cluster_size,
        uint64_t *l2_table, uint64_t start, uint64_t mask)
{
    int i;
    uint64_t offset = be64_to_cpu(l2_table[0]) & ~mask;

    if (!offset) {
        return 0;
    }

    for (i = start; i < start + nb_clusters; i++) {
        if (offset + (uint64_t) i * cluster_size !=
            (be64_to_cpu(l2_table[i]) & ~mask)) {
            break;
        }
    }

    return (i - start);
}

static int count_contiguous_free_clusters(uint64_t nb_clusters,
    uint64_t *l2_table)
{
    int i = 0;

    while (nb_clusters-- && l2_table[i] == 0) {
        i++;
    }

    return i;
}

/* The crypt function is compatible with the linux cryptoloop
   algorithm for < 4 GB images. NOTE: out_buf == in_buf is
   supported */
void qcow2_encrypt_sectors(BDRVQcowState *s, int64_t sector_num,
                           uint8_t *out_buf, const uint8_t *in_buf,
                           int nb_sectors, int enc,
                           const AES_KEY *key)
{
    union {
        uint64_t ll[2];
        uint8_t b[16];
    } ivec;
    int i;

    for (i = 0; i < nb_sectors; i++) {
        ivec.ll[0] = cpu_to_le64(sector_num);
        ivec.ll[1] = 0;
        AES_cbc_encrypt(in_buf, out_buf, 512, key,
                        ivec.b, enc);
        sector_num++;
        in_buf += 512;
        out_buf += 512;
    }
}

static int coroutine_fn copy_sectors(BlockDriverState *bs,
                                     uint64_t start_sect,
                                     uint64_t cluster_offset,
                                     int n_start, int n_end)
{
    BDRVQcowState *s = bs->opaque;
    QEMUIOVector qiov;
    struct iovec iov;
    int n, ret;

    /*
     * If this is the last cluster and it is only partially used, we must only
     * copy until the end of the image, or bdrv_check_request will fail for the
     * bdrv_read/write calls below.
     */
    if (start_sect + n_end > bs->total_sectors) {
        n_end = bs->total_sectors - start_sect;
    }

    n = n_end - n_start;
    if (n <= 0) {
        return 0;
    }

    iov.iov_len = n * BDRV_SECTOR_SIZE;
    iov.iov_base = qemu_blockalign(bs, iov.iov_len);

    qemu_iovec_init_external(&qiov, &iov, 1);

    BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);

    /* Call .bdrv_co_readv() directly instead of using the public block-layer
     * interface. This avoids double I/O throttling and request tracking,
     * which can lead to deadlock when block layer copy-on-read is enabled.
     */
    ret = bs->drv->bdrv_co_readv(bs, start_sect + n_start, n, &qiov);
    if (ret < 0) {
        goto out;
    }

    if (s->crypt_method) {
        qcow2_encrypt_sectors(s, start_sect + n_start,
                              iov.iov_base, iov.iov_base, n, 1,
                              &s->aes_encrypt_key);
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_co_writev(bs->file, (cluster_offset >> 9) + n_start, n, &qiov);
    if (ret < 0) {
        goto out;
    }

    ret = 0;
out:
    qemu_vfree(iov.iov_base);
    return ret;
}
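/*
 * Illustrative note (not part of the driver): the CBC IV above is simply
 * the 512-byte sector number, little endian, zero padded to 16 bytes, so
 * guest sector 5 is encrypted with:
 *
 *     uint8_t iv[16] = { 0x05, 0, 0, 0, 0, 0, 0, 0,
 *                        0,    0, 0, 0, 0, 0, 0, 0 };
 *
 * This matches the legacy Linux cryptoloop scheme for images < 4 GB.
 */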
/*
 * get_cluster_offset
 *
 * For a given offset of the disk image, find the cluster offset in the
 * qcow2 file. The offset is stored in *cluster_offset.
 *
 * On entry, *num is the number of contiguous sectors we'd like to
 * access following offset.
 *
 * On exit, *num is the number of contiguous sectors we can read.
 *
 * Returns 0 if the offset was found, -errno otherwise.
 */
int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
    int *num, uint64_t *cluster_offset)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l1_index, l2_index;
    uint64_t l2_offset, *l2_table;
    int l1_bits, c;
    unsigned int index_in_cluster, nb_clusters;
    uint64_t nb_available, nb_needed;
    int ret;

    index_in_cluster = (offset >> 9) & (s->cluster_sectors - 1);
    nb_needed = *num + index_in_cluster;

    l1_bits = s->l2_bits + s->cluster_bits;

    /* compute how many bytes there are between the offset and
     * the end of the l1 entry
     */

    nb_available = (1ULL << l1_bits) - (offset & ((1ULL << l1_bits) - 1));

    /* compute the number of available sectors */

    nb_available = (nb_available >> 9) + index_in_cluster;

    if (nb_needed > nb_available) {
        nb_needed = nb_available;
    }

    *cluster_offset = 0;

    /* seek the l2 offset in the l1 table */

    l1_index = offset >> l1_bits;
    if (l1_index >= s->l1_size) {
        goto out;
    }

    l2_offset = s->l1_table[l1_index];

    /* seek the l2 table of the given l2 offset */

    if (!l2_offset) {
        goto out;
    }

    /* load the l2 table in memory */

    l2_offset &= ~QCOW_OFLAG_COPIED;
    ret = l2_load(bs, l2_offset, &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
    *cluster_offset = be64_to_cpu(l2_table[l2_index]);
    nb_clusters = size_to_clusters(s, nb_needed << 9);

    if (!*cluster_offset) {
        /* how many empty clusters ? */
        c = count_contiguous_free_clusters(nb_clusters, &l2_table[l2_index]);
    } else {
        /* how many allocated clusters ? */
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], 0, QCOW_OFLAG_COPIED);
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);

    nb_available = (c * s->cluster_sectors);
out:
    if (nb_available > nb_needed) {
        nb_available = nb_needed;
    }

    *num = nb_available - index_in_cluster;

    *cluster_offset &= ~QCOW_OFLAG_COPIED;
    return 0;
}
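/*
 * Illustrative usage sketch (not part of the driver): a read path maps a
 * guest offset to a host file offset roughly like this, assuming a plain
 * (uncompressed, unencrypted) allocated cluster:
 *
 *     int num = nb_sectors;
 *     uint64_t cluster_offset;
 *     ret = qcow2_get_cluster_offset(bs, offset, &num, &cluster_offset);
 *     if (ret == 0 && cluster_offset != 0) {
 *         uint64_t file_offset = cluster_offset +
 *             (offset & (s->cluster_size - 1));
 *         // read `num` sectors from bs->file starting at file_offset
 *     }
 */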
/*
 * get_cluster_table
 *
 * For a given disk offset, load (and allocate if needed)
 * the L2 table.
 *
 * A pointer to the (cached) L2 table and the cluster index
 * in that table are returned to the caller.
 *
 * Returns 0 on success, -errno on failure.
 */
static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_table,
                             int *new_l2_index)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l1_index, l2_index;
    uint64_t l2_offset;
    uint64_t *l2_table = NULL;
    int ret;

    /* seek the l2 offset in the l1 table */

    l1_index = offset >> (s->l2_bits + s->cluster_bits);
    if (l1_index >= s->l1_size) {
        ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
        if (ret < 0) {
            return ret;
        }
    }
    l2_offset = s->l1_table[l1_index];

    /* seek the l2 table of the given l2 offset */

    if (l2_offset & QCOW_OFLAG_COPIED) {
        /* load the l2 table in memory */
        l2_offset &= ~QCOW_OFLAG_COPIED;
        ret = l2_load(bs, l2_offset, &l2_table);
        if (ret < 0) {
            return ret;
        }
    } else {
        /* First allocate a new L2 table (and do COW if needed) */
        ret = l2_allocate(bs, l1_index, &l2_table);
        if (ret < 0) {
            return ret;
        }

        /* Then decrease the refcount of the old table */
        if (l2_offset) {
            qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t));
        }
        l2_offset = s->l1_table[l1_index] & ~QCOW_OFLAG_COPIED;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);

    *new_l2_table = l2_table;
    *new_l2_index = l2_index;

    return 0;
}
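/*
 * Illustrative note (not part of the driver): with the default 64 KiB
 * clusters (cluster_bits == 16), an L2 table holds 64 KiB / 8 == 8192
 * entries (l2_bits == 13), so a guest offset splits as:
 *
 *     l1_index        = offset >> (13 + 16);   // one L2 table per 512 MiB
 *     l2_index        = (offset >> 16) & (8192 - 1);
 *     byte_in_cluster = offset & 0xffff;
 */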
/*
 * alloc_compressed_cluster_offset
 *
 * For a given offset of the disk image, return the cluster offset in the
 * qcow2 file.
 *
 * If the offset is not found, allocate a new compressed cluster.
 *
 * Returns the cluster offset on success, 0 otherwise.
 */
uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
                                               uint64_t offset,
                                               int compressed_size)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index, ret;
    uint64_t *l2_table;
    int64_t cluster_offset;
    int nb_csectors;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return 0;
    }

    cluster_offset = be64_to_cpu(l2_table[l2_index]);
    if (cluster_offset & QCOW_OFLAG_COPIED) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

    if (cluster_offset) {
        qcow2_free_any_clusters(bs, cluster_offset, 1);
    }

    cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
    if (cluster_offset < 0) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

    nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
                  (cluster_offset >> 9);

    cluster_offset |= QCOW_OFLAG_COMPRESSED |
                      ((uint64_t)nb_csectors << s->csize_shift);

    /* update L2 table */

    /* compressed clusters never have the copied flag */

    BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
    l2_table[l2_index] = cpu_to_be64(cluster_offset);
    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return 0;
    }

    return cluster_offset;
}
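/*
 * Illustrative note (not part of the driver): a compressed L2 entry packs
 * the host byte offset into the low bits and the number of 512-byte sectors
 * spanned beyond the first into the bits starting at csize_shift. For
 * example, 3000 bytes of compressed data placed at byte 0x42200:
 *
 *     nb_csectors = ((0x42200 + 3000 - 1) >> 9) - (0x42200 >> 9);  // == 5
 *     entry = 0x42200 | QCOW_OFLAG_COMPRESSED
 *                     | ((uint64_t)5 << s->csize_shift);
 *
 * The reader adds one to the stored count to cover the final partial sector
 * (see qcow2_decompress_cluster() below).
 */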
int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcowState *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, start_sect, *l2_table;
    uint64_t cluster_offset = m->alloc_offset;
    bool cow = false;

    trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters);

    if (m->nb_clusters == 0) {
        return 0;
    }

    old_cluster = g_malloc(m->nb_clusters * sizeof(uint64_t));

    /* copy content of unmodified sectors */
    start_sect = (m->offset & ~(s->cluster_size - 1)) >> 9;
    if (m->n_start) {
        cow = true;
        qemu_co_mutex_unlock(&s->lock);
        ret = copy_sectors(bs, start_sect, cluster_offset, 0, m->n_start);
        qemu_co_mutex_lock(&s->lock);
        if (ret < 0) {
            goto err;
        }
    }

    if (m->nb_available & (s->cluster_sectors - 1)) {
        uint64_t end = m->nb_available & ~(uint64_t)(s->cluster_sectors - 1);
        cow = true;
        qemu_co_mutex_unlock(&s->lock);
        ret = copy_sectors(bs, start_sect + end, cluster_offset + (end << 9),
                           m->nb_available - end, s->cluster_sectors);
        qemu_co_mutex_lock(&s->lock);
        if (ret < 0) {
            goto err;
        }
    }

    /*
     * Update L2 table.
     *
     * Before we update the L2 table to actually point to the new cluster, we
     * need to be sure that the refcounts have been increased and COW was
     * handled.
     */
    if (cow) {
        qcow2_cache_depends_on_flush(s->l2_table_cache);
    }

    qcow2_cache_set_dependency(bs, s->l2_table_cache, s->refcount_block_cache);
    ret = get_cluster_table(bs, m->offset, &l2_table, &l2_index);
    if (ret < 0) {
        goto err;
    }
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);

    for (i = 0; i < m->nb_clusters; i++) {
        /* If two concurrent writes hit the same unallocated cluster, each
         * write allocates a separate cluster and writes its data
         * concurrently. The first one to complete updates the L2 table with
         * a pointer to its cluster; the second one has to do RMW (which is
         * done above by copy_sectors()), update the L2 table with its own
         * cluster pointer, and free the old cluster. This is what this loop
         * does. */
        if (l2_table[l2_index + i] != 0) {
            old_cluster[j++] = l2_table[l2_index + i];
        }

        l2_table[l2_index + i] = cpu_to_be64((cluster_offset +
                    (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
    }

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        goto err;
    }

    /*
     * If this was a COW, we need to decrease the refcount of the old cluster.
     * Also flush bs->file to get the right order for L2 and refcount update.
     */
    if (j != 0) {
        for (i = 0; i < j; i++) {
            qcow2_free_any_clusters(bs,
                be64_to_cpu(old_cluster[i]) & ~QCOW_OFLAG_COPIED, 1);
        }
    }

    ret = 0;
err:
    g_free(old_cluster);
    return ret;
}

/*
 * Returns the number of contiguous clusters that can be used for an allocating
 * write, but require COW to be performed (this includes yet unallocated space,
 * which must be copied from the backing file)
 */
static int count_cow_clusters(BDRVQcowState *s, int nb_clusters,
    uint64_t *l2_table, int l2_index)
{
    int i = 0;
    uint64_t cluster_offset;

    while (i < nb_clusters) {
        i += count_contiguous_clusters(nb_clusters - i, s->cluster_size,
                &l2_table[l2_index], i, 0);
        if ((i >= nb_clusters) || be64_to_cpu(l2_table[l2_index + i])) {
            break;
        }

        i += count_contiguous_free_clusters(nb_clusters - i,
                &l2_table[l2_index + i]);
        if (i >= nb_clusters) {
            break;
        }

        cluster_offset = be64_to_cpu(l2_table[l2_index + i]);

        if ((cluster_offset & QCOW_OFLAG_COPIED) ||
                (cluster_offset & QCOW_OFLAG_COMPRESSED)) {
            break;
        }
    }

    assert(i <= nb_clusters);
    return i;
}
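/*
 * Illustrative note (not part of the driver): given L2 entries
 *
 *     index:  0        1        2       3
 *     entry:  plain    plain    free    COPIED
 *
 * where "plain" means allocated back-to-back in the image file but without
 * QCOW_OFLAG_COPIED, count_cow_clusters() returns 3: the two plain clusters
 * and the free one all need COW for an allocating write, while the COPIED
 * cluster ends the run because it can be overwritten in place.
 */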
/*
 * Allocates new clusters for the given guest_offset.
 *
 * At most *nb_clusters are allocated, and on return *nb_clusters is updated to
 * contain the number of clusters that have been allocated and are contiguous
 * in the image file.
 *
 * If *host_offset is non-zero, it specifies the offset in the image file at
 * which the new clusters must start. *nb_clusters can be 0 on return in this
 * case if the cluster at host_offset is already in use. If *host_offset is
 * zero, the clusters can be allocated anywhere in the image file.
 *
 * *host_offset is updated to contain the offset into the image file at which
 * the first allocated cluster starts.
 *
 * Returns 0 on success and -errno in error cases. -EAGAIN means that the
 * function has been waiting for another request and the allocation must be
 * restarted, but the whole request should not be failed.
 */
static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, unsigned int *nb_clusters, uint64_t *l2_table)
{
    BDRVQcowState *s = bs->opaque;
    int64_t cluster_offset;
    QCowL2Meta *old_alloc;

    trace_qcow2_do_alloc_clusters_offset(qemu_coroutine_self(), guest_offset,
                                         *host_offset, *nb_clusters);

    /*
     * Check if there already is an AIO write request in flight which allocates
     * the same cluster. In this case we need to wait until the previous
     * request has completed and updated the L2 table accordingly.
     */
    QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {

        uint64_t start = guest_offset >> s->cluster_bits;
        uint64_t end = start + *nb_clusters;
        uint64_t old_start = old_alloc->offset >> s->cluster_bits;
        uint64_t old_end = old_start + old_alloc->nb_clusters;

        if (end < old_start || start > old_end) {
            /* No intersection */
        } else {
            if (start < old_start) {
                /* Stop at the start of a running allocation */
                *nb_clusters = old_start - start;
            } else {
                *nb_clusters = 0;
            }

            if (*nb_clusters == 0) {
                /* Wait for the dependency to complete. We need to recheck
                 * the free/allocated clusters when we continue. */
                qemu_co_mutex_unlock(&s->lock);
                qemu_co_queue_wait(&old_alloc->dependent_requests);
                qemu_co_mutex_lock(&s->lock);
                return -EAGAIN;
            }
        }
    }

    if (!*nb_clusters) {
        abort();
    }

    /* Allocate new clusters */
    trace_qcow2_cluster_alloc_phys(qemu_coroutine_self());
    if (*host_offset == 0) {
        cluster_offset = qcow2_alloc_clusters(bs, *nb_clusters * s->cluster_size);
    } else {
        cluster_offset = *host_offset;
        *nb_clusters = qcow2_alloc_clusters_at(bs, cluster_offset, *nb_clusters);
    }

    if (cluster_offset < 0) {
        return cluster_offset;
    }
    *host_offset = cluster_offset;
    return 0;
}
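/*
 * Illustrative note (not part of the driver): the overlap test above works
 * on cluster indexes. With a request covering clusters [10, 14) and an
 * in-flight allocation covering [12, 20):
 *
 *     start = 10, end = 14, old_start = 12, old_end = 20
 *     => ranges intersect, and start < old_start,
 *     => *nb_clusters = 12 - 10 = 2 (allocate only up to the running one)
 *
 * If instead start >= old_start, *nb_clusters becomes 0 and the coroutine
 * sleeps on old_alloc->dependent_requests, returning -EAGAIN afterwards.
 */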
/*
 * alloc_cluster_offset
 *
 * For a given offset on the virtual disk, find the cluster offset in the
 * qcow2 file. If the offset is not found, allocate a new cluster.
 *
 * If the cluster was already allocated, m->nb_clusters is set to 0 and the
 * other fields in m are meaningless.
 *
 * If the cluster is newly allocated, m->nb_clusters is set to the number of
 * contiguous clusters that have been allocated. In this case, the other
 * fields of m are valid and contain information about the first allocated
 * cluster.
 *
 * If the request conflicts with another write request in flight, the coroutine
 * is queued and will be reentered when the dependency has completed.
 *
 * Returns 0 on success and -errno in error cases.
 */
int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
    int n_start, int n_end, int *num, QCowL2Meta *m)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index, ret, sectors;
    uint64_t *l2_table;
    unsigned int nb_clusters, keep_clusters;
    uint64_t cluster_offset;

    trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset,
                                      n_start, n_end);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /*
     * Calculate the number of clusters to look for. We stop at L2 table
     * boundaries to keep things simple.
     */
again:
    nb_clusters = MIN(size_to_clusters(s, n_end << BDRV_SECTOR_BITS),
                      s->l2_size - l2_index);

    cluster_offset = be64_to_cpu(l2_table[l2_index]);

    /*
     * Check how many clusters are already allocated and don't need COW, and
     * how many need a new allocation.
     */
    if (cluster_offset & QCOW_OFLAG_COPIED) {
        /* We keep all QCOW_OFLAG_COPIED clusters */
        keep_clusters = count_contiguous_clusters(nb_clusters, s->cluster_size,
                                                  &l2_table[l2_index], 0, 0);
        assert(keep_clusters <= nb_clusters);
        nb_clusters -= keep_clusters;
    } else {
        /* For the moment, overwrite compressed clusters one by one */
        if (cluster_offset & QCOW_OFLAG_COMPRESSED) {
            nb_clusters = 1;
        } else {
            nb_clusters = count_cow_clusters(s, nb_clusters, l2_table, l2_index);
        }

        keep_clusters = 0;
        cluster_offset = 0;
    }

    cluster_offset &= ~QCOW_OFLAG_COPIED;

    /* If there is something left to allocate, do that now */
    *m = (QCowL2Meta) {
        .cluster_offset = cluster_offset,
        .nb_clusters    = 0,
    };
    qemu_co_queue_init(&m->dependent_requests);

    if (nb_clusters > 0) {
        uint64_t alloc_offset;
        uint64_t alloc_cluster_offset;
        uint64_t keep_bytes = keep_clusters * s->cluster_size;

        /* Calculate start and size of allocation */
        alloc_offset = offset + keep_bytes;

        if (keep_clusters == 0) {
            alloc_cluster_offset = 0;
        } else {
            alloc_cluster_offset = cluster_offset + keep_bytes;
        }

        /* Allocate, if necessary at a given offset in the image file */
        ret = do_alloc_cluster_offset(bs, alloc_offset, &alloc_cluster_offset,
                                      &nb_clusters, l2_table);
        if (ret == -EAGAIN) {
            goto again;
        } else if (ret < 0) {
            goto fail;
        }

        /* save info needed for meta data update */
        if (nb_clusters > 0) {
            int requested_sectors = n_end - keep_clusters * s->cluster_sectors;
            int avail_sectors = (keep_clusters + nb_clusters)
                                << (s->cluster_bits - BDRV_SECTOR_BITS);

            *m = (QCowL2Meta) {
                .cluster_offset = keep_clusters == 0 ?
                                  alloc_cluster_offset : cluster_offset,
                .alloc_offset   = alloc_cluster_offset,
                .offset         = alloc_offset,
                .n_start        = keep_clusters == 0 ? n_start : 0,
                .nb_clusters    = nb_clusters,
                .nb_available   = MIN(requested_sectors, avail_sectors),
            };
            qemu_co_queue_init(&m->dependent_requests);
            QLIST_INSERT_HEAD(&s->cluster_allocs, m, next_in_flight);
        }
    }

    /* Some cleanup work */
    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        goto fail_put;
    }

    sectors = (keep_clusters + nb_clusters) << (s->cluster_bits - 9);
    if (sectors > n_end) {
        sectors = n_end;
    }

    assert(sectors > n_start);
    *num = sectors - n_start;

    return 0;

fail:
    qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
fail_put:
    if (nb_clusters > 0) {
        QLIST_REMOVE(m, next_in_flight);
    }
    return ret;
}
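/*
 * Illustrative usage sketch (not part of the driver): the qcow2 write path
 * combines this function with qcow2_alloc_cluster_link_l2() roughly like:
 *
 *     QCowL2Meta l2meta;
 *     int cur_nr_sectors;
 *     ret = qcow2_alloc_cluster_offset(bs, sector_num << 9, index_in_cluster,
 *                                      n_end, &cur_nr_sectors, &l2meta);
 *     if (ret == 0) {
 *         // ... write the guest data to l2meta.alloc_offset via bs->file ...
 *         ret = qcow2_alloc_cluster_link_l2(bs, &l2meta);  // COW + L2 update
 *     }
 *
 * The L2 entries are only linked after the data (and any COW copies) have
 * been written, so a crash in between leaks clusters but never corrupts
 * guest-visible data.
 */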
static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
                             const uint8_t *buf, int buf_size)
{
    z_stream strm1, *strm = &strm1;
    int ret, out_len;

    memset(strm, 0, sizeof(*strm));

    strm->next_in = (uint8_t *)buf;
    strm->avail_in = buf_size;
    strm->next_out = out_buf;
    strm->avail_out = out_buf_size;

    ret = inflateInit2(strm, -12);
    if (ret != Z_OK) {
        return -1;
    }
    ret = inflate(strm, Z_FINISH);
    out_len = strm->next_out - out_buf;
    if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) ||
        out_len != out_buf_size) {
        inflateEnd(strm);
        return -1;
    }
    inflateEnd(strm);
    return 0;
}

int qcow2_decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset)
{
    BDRVQcowState *s = bs->opaque;
    int ret, csize, nb_csectors, sector_offset;
    uint64_t coffset;

    coffset = cluster_offset & s->cluster_offset_mask;
    if (s->cluster_cache_offset != coffset) {
        nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
        sector_offset = coffset & 511;
        csize = nb_csectors * 512 - sector_offset;
        BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
        ret = bdrv_read(bs->file, coffset >> 9, s->cluster_data, nb_csectors);
        if (ret < 0) {
            return ret;
        }
        if (decompress_buffer(s->cluster_cache, s->cluster_size,
                              s->cluster_data + sector_offset, csize) < 0) {
            return -EIO;
        }
        s->cluster_cache_offset = coffset;
    }
    return 0;
}
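/*
 * Illustrative note (not part of the driver): continuing the example from
 * alloc_compressed_cluster_offset(), an entry with byte offset 0x42200 and
 * a stored sector count of 5 is read back as:
 *
 *     coffset       = 0x42200;                 // masked byte offset
 *     nb_csectors   = 5 + 1;                   // field + 1 == 6 sectors
 *     sector_offset = 0x42200 & 511;           // == 0
 *     csize         = 6 * 512 - sector_offset; // bytes fed to inflate()
 *
 * inflateInit2(strm, -12) selects a raw deflate stream (no zlib header)
 * with a 4 KiB window, which is how qcow2 stores compressed clusters.
 */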
/*
 * This discards as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of discarded
 * clusters.
 */
static int discard_single_l2(BlockDriverState *bs, uint64_t offset,
    unsigned int nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l2_table;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_offset;

        old_offset = be64_to_cpu(l2_table[l2_index + i]);
        old_offset &= ~QCOW_OFLAG_COPIED;

        if (old_offset == 0) {
            continue;
        }

        /* First remove L2 entries */
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
        l2_table[l2_index + i] = cpu_to_be64(0);

        /* Then decrease the refcount */
        qcow2_free_any_clusters(bs, old_offset, 1);
    }

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return ret;
    }

    return nb_clusters;
}

int qcow2_discard_clusters(BlockDriverState *bs, uint64_t offset,
    int nb_sectors)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t end_offset;
    unsigned int nb_clusters;
    int ret;

    /* Widen before shifting so large requests can't overflow int */
    end_offset = offset + ((uint64_t) nb_sectors << BDRV_SECTOR_BITS);

    /* Round start up and end down */
    offset = align_offset(offset, s->cluster_size);
    end_offset &= ~(s->cluster_size - 1);

    if (offset > end_offset) {
        return 0;
    }

    nb_clusters = size_to_clusters(s, end_offset - offset);

    /* Each L2 table is handled by its own loop iteration */
    while (nb_clusters > 0) {
        ret = discard_single_l2(bs, offset, nb_clusters);
        if (ret < 0) {
            return ret;
        }

        nb_clusters -= ret;
        offset += (ret * s->cluster_size);
    }

    return 0;
}
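/*
 * Illustrative note (not part of the driver): discard only drops whole
 * clusters, so the range is shrunk to cluster boundaries. With 64 KiB
 * clusters, a discard of bytes [0x18000, 0x38000) becomes:
 *
 *     offset     = align_offset(0x18000, 0x10000);  // rounds up to 0x20000
 *     end_offset = 0x38000 & ~(0x10000 - 1);        // rounds down to 0x30000
 *
 * so exactly one cluster, [0x20000, 0x30000), is discarded; the partial
 * cluster at each end is left untouched.
 */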