/*
 * Copyright (C) 2011-2012 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm-thin-metadata.h"
#include "persistent-data/dm-btree.h"
#include "persistent-data/dm-space-map.h"
#include "persistent-data/dm-space-map-disk.h"
#include "persistent-data/dm-transaction-manager.h"

#include <linux/list.h>
#include <linux/device-mapper.h>
#include <linux/workqueue.h>

/*--------------------------------------------------------------------------
 * As far as the metadata goes, there is:
 *
 * - A superblock in block zero, taking up fewer than 512 bytes for
 *   atomic writes.
 *
 * - A space map managing the metadata blocks.
 *
 * - A space map managing the data blocks.
 *
 * - A btree mapping our internal thin dev ids onto struct disk_device_details.
 *
 * - A hierarchical btree, with 2 levels, which effectively maps (thin
 *   dev id, virtual block) -> block_time.  Block time is a 64-bit
 *   field holding the time in the low 24 bits, and the block in the
 *   top 40 bits.
 *
 * BTrees consist solely of btree_nodes that fill a block.  Some are
 * internal nodes; as such their values are a __le64 pointing to other
 * nodes.  Leaf nodes can store data of any reasonable size (i.e. much
 * smaller than the block size).  The nodes consist of the header,
 * followed by an array of keys, followed by an array of values.  We have
 * to binary search on the keys so they're all held together to help the
 * cpu cache.
 *
 * Space maps have 2 btrees:
 *
 * - One maps a uint64_t onto a struct index_entry, which points to a
 *   bitmap block and has some details about how many free entries there
 *   are etc.
 *
 * - The bitmap blocks have a header (for the checksum).  Then the rest
 *   of the block is pairs of bits, with the meaning being:
 *
 *   0 - ref count is 0
 *   1 - ref count is 1
 *   2 - ref count is 2
 *   3 - ref count is higher than 2
 *
 * - If the count is higher than 2 then the ref count is entered in a
 *   second btree that directly maps the block_address to a uint32_t ref
 *   count.
 *
 * The space map metadata variant doesn't have a bitmaps btree.  Instead
 * it has a single block's worth of index_entries.  This avoids
 * recursive issues with the bitmap btree needing to allocate space in
 * order to insert.  With a small data block size such as 64k, the
 * metadata can support data devices that are hundreds of terabytes.
 *
 * The space maps allocate space linearly from front to back.  Space that
 * is freed in a transaction is never recycled within that transaction.
 * To try and avoid fragmenting _free_ space the allocator always goes
 * back and fills in gaps.
 *
 * All metadata io is in THIN_METADATA_BLOCK_SIZE sized/aligned chunks
 * from the block manager.
 *--------------------------------------------------------------------------*/

#define DM_MSG_PREFIX   "thin metadata"

#define THIN_SUPERBLOCK_MAGIC 27022010
#define THIN_SUPERBLOCK_LOCATION 0
#define THIN_VERSION 2
#define SECTOR_TO_BLOCK_SHIFT 3

/*
 * For btree insert:
 *  3 for btree insert +
 *  2 for btree lookup used within space map
 * For btree remove:
 *  2 for shadow spine +
 *  4 for rebalance 3 child node
 */
#define THIN_MAX_CONCURRENT_LOCKS 6

/* This should be plenty */
#define SPACE_MAP_ROOT_SIZE 128
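/*
 * For reference (assuming the 4KiB THIN_METADATA_BLOCK_SIZE defined in
 * dm-thin-metadata.h): SECTOR_TO_BLOCK_SHIFT converts a count of
 * 512-byte sectors into a count of metadata blocks (8 sectors per
 * block, hence a shift of 3).  E.g. a 1GiB metadata device is 2097152
 * sectors, i.e. 2097152 >> 3 = 262144 metadata blocks.
 */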
/*
 * Little endian on-disk superblock and device details.
 */
struct thin_disk_superblock {
	__le32 csum;	/* Checksum of superblock except for this field. */
	__le32 flags;
	__le64 blocknr;	/* This block number, dm_block_t. */

	__u8 uuid[16];
	__le64 magic;
	__le32 version;
	__le32 time;

	__le64 trans_id;

	/*
	 * Root held by userspace transactions.
	 */
	__le64 held_root;

	__u8 data_space_map_root[SPACE_MAP_ROOT_SIZE];
	__u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];

	/*
	 * 2-level btree mapping (dev_id, (dev block, time)) -> data block
	 */
	__le64 data_mapping_root;

	/*
	 * Device detail root mapping dev_id -> device_details
	 */
	__le64 device_details_root;

	__le32 data_block_size;		/* In 512-byte sectors. */

	__le32 metadata_block_size;	/* In 512-byte sectors. */
	__le64 metadata_nr_blocks;

	__le32 compat_flags;
	__le32 compat_ro_flags;
	__le32 incompat_flags;
} __packed;
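/*
 * With __packed the struct above works out to 364 bytes
 * (4 + 4 + 8 + 16 + 8 + 4 + 4 + 8 + 8 + 128 + 128 + 8 + 8 + 4 + 4 + 8 +
 * 4 + 4 + 4), comfortably under the 512-byte limit enforced by the
 * BUILD_BUG_ON() in __commit_transaction().
 */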
struct disk_device_details {
	__le64 mapped_blocks;
	__le64 transaction_id;		/* When created. */
	__le32 creation_time;
	__le32 snapshotted_time;
} __packed;

struct dm_pool_metadata {
	struct hlist_node hash;

	struct block_device *bdev;
	struct dm_block_manager *bm;
	struct dm_space_map *metadata_sm;
	struct dm_space_map *data_sm;
	struct dm_transaction_manager *tm;
	struct dm_transaction_manager *nb_tm;

	/*
	 * Two-level btree.
	 * First level holds thin_dev_t.
	 * Second level holds mappings.
	 */
	struct dm_btree_info info;

	/*
	 * Non-blocking version of the above.
	 */
	struct dm_btree_info nb_info;

	/*
	 * Just the top level for deleting whole devices.
	 */
	struct dm_btree_info tl_info;

	/*
	 * Just the bottom level for creating new devices.
	 */
	struct dm_btree_info bl_info;

	/*
	 * Describes the device details btree.
	 */
	struct dm_btree_info details_info;

	struct rw_semaphore root_lock;
	uint32_t time;
	dm_block_t root;
	dm_block_t details_root;
	struct list_head thin_devices;
	uint64_t trans_id;
	unsigned long flags;
	sector_t data_block_size;

	/*
	 * Set if a transaction has to be aborted but the attempt to roll
	 * back to the previous (good) transaction failed.  The only pool
	 * metadata operation possible in this state is the closing of the
	 * device.
	 */
	bool fail_io:1;

	/*
	 * Reading the space map roots can fail, so we read them into
	 * these buffers before the superblock is locked and updated.
	 */
	__u8 data_space_map_root[SPACE_MAP_ROOT_SIZE];
	__u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
};

struct dm_thin_device {
	struct list_head list;
	struct dm_pool_metadata *pmd;
	dm_thin_id id;

	int open_count;
	bool changed:1;
	bool aborted_with_changes:1;
	uint64_t mapped_blocks;
	uint64_t transaction_id;
	uint32_t creation_time;
	uint32_t snapshotted_time;
};

/*----------------------------------------------------------------
 * superblock validator
 *--------------------------------------------------------------*/

#define SUPERBLOCK_CSUM_XOR 160774

static void sb_prepare_for_write(struct dm_block_validator *v,
				 struct dm_block *b,
				 size_t block_size)
{
	struct thin_disk_superblock *disk_super = dm_block_data(b);

	disk_super->blocknr = cpu_to_le64(dm_block_location(b));
	disk_super->csum = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
						      block_size - sizeof(__le32),
						      SUPERBLOCK_CSUM_XOR));
}

static int sb_check(struct dm_block_validator *v,
		    struct dm_block *b,
		    size_t block_size)
{
	struct thin_disk_superblock *disk_super = dm_block_data(b);
	__le32 csum_le;

	if (dm_block_location(b) != le64_to_cpu(disk_super->blocknr)) {
		DMERR("sb_check failed: blocknr %llu: wanted %llu",
		      le64_to_cpu(disk_super->blocknr),
		      (unsigned long long)dm_block_location(b));
		return -ENOTBLK;
	}

	if (le64_to_cpu(disk_super->magic) != THIN_SUPERBLOCK_MAGIC) {
		DMERR("sb_check failed: magic %llu: wanted %llu",
		      le64_to_cpu(disk_super->magic),
		      (unsigned long long)THIN_SUPERBLOCK_MAGIC);
		return -EILSEQ;
	}

	csum_le = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
					     block_size - sizeof(__le32),
					     SUPERBLOCK_CSUM_XOR));
	if (csum_le != disk_super->csum) {
		DMERR("sb_check failed: csum %u: wanted %u",
		      le32_to_cpu(csum_le), le32_to_cpu(disk_super->csum));
		return -EILSEQ;
	}

	return 0;
}

static struct dm_block_validator sb_validator = {
	.name = "superblock",
	.prepare_for_write = sb_prepare_for_write,
	.check = sb_check
};

/*----------------------------------------------------------------
 * Methods for the btree value types
 *--------------------------------------------------------------*/

static uint64_t pack_block_time(dm_block_t b, uint32_t t)
{
	return (b << 24) | t;
}

static void unpack_block_time(uint64_t v, dm_block_t *b, uint32_t *t)
{
	*b = v >> 24;
	*t = v & ((1 << 24) - 1);
}
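/*
 * Worked example of the encoding: a mapping to data block 5 created at
 * time 3 is packed as (5 << 24) | 3 == 0x0000000005000003, and
 * unpack_block_time() recovers b == 5 from the top 40 bits and t == 3
 * from the low 24 bits.
 */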
static void data_block_inc(void *context, const void *value_le)
{
	struct dm_space_map *sm = context;
	__le64 v_le;
	uint64_t b;
	uint32_t t;

	memcpy(&v_le, value_le, sizeof(v_le));
	unpack_block_time(le64_to_cpu(v_le), &b, &t);
	dm_sm_inc_block(sm, b);
}

static void data_block_dec(void *context, const void *value_le)
{
	struct dm_space_map *sm = context;
	__le64 v_le;
	uint64_t b;
	uint32_t t;

	memcpy(&v_le, value_le, sizeof(v_le));
	unpack_block_time(le64_to_cpu(v_le), &b, &t);
	dm_sm_dec_block(sm, b);
}

static int data_block_equal(void *context, const void *value1_le, const void *value2_le)
{
	__le64 v1_le, v2_le;
	uint64_t b1, b2;
	uint32_t t;

	memcpy(&v1_le, value1_le, sizeof(v1_le));
	memcpy(&v2_le, value2_le, sizeof(v2_le));
	unpack_block_time(le64_to_cpu(v1_le), &b1, &t);
	unpack_block_time(le64_to_cpu(v2_le), &b2, &t);

	return b1 == b2;
}

static void subtree_inc(void *context, const void *value)
{
	struct dm_btree_info *info = context;
	__le64 root_le;
	uint64_t root;

	memcpy(&root_le, value, sizeof(root_le));
	root = le64_to_cpu(root_le);
	dm_tm_inc(info->tm, root);
}

static void subtree_dec(void *context, const void *value)
{
	struct dm_btree_info *info = context;
	__le64 root_le;
	uint64_t root;

	memcpy(&root_le, value, sizeof(root_le));
	root = le64_to_cpu(root_le);
	if (dm_btree_del(info, root))
		DMERR("btree delete failed");
}

static int subtree_equal(void *context, const void *value1_le, const void *value2_le)
{
	__le64 v1_le, v2_le;

	memcpy(&v1_le, value1_le, sizeof(v1_le));
	memcpy(&v2_le, value2_le, sizeof(v2_le));

	return v1_le == v2_le;
}

/*----------------------------------------------------------------*/

static int superblock_lock_zero(struct dm_pool_metadata *pmd,
				struct dm_block **sblock)
{
	return dm_bm_write_lock_zero(pmd->bm, THIN_SUPERBLOCK_LOCATION,
				     &sb_validator, sblock);
}

static int superblock_lock(struct dm_pool_metadata *pmd,
			   struct dm_block **sblock)
{
	return dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
				&sb_validator, sblock);
}

static int __superblock_all_zeroes(struct dm_block_manager *bm, int *result)
{
	int r;
	unsigned i;
	struct dm_block *b;
	__le64 *data_le, zero = cpu_to_le64(0);
	unsigned block_size = dm_bm_block_size(bm) / sizeof(__le64);

	/*
	 * We can't use a validator here - it may be all zeroes.
	 */
	r = dm_bm_read_lock(bm, THIN_SUPERBLOCK_LOCATION, NULL, &b);
	if (r)
		return r;

	data_le = dm_block_data(b);
	*result = 1;
	for (i = 0; i < block_size; i++) {
		if (data_le[i] != zero) {
			*result = 0;
			break;
		}
	}

	dm_bm_unlock(b);

	return 0;
}

static void __setup_btree_details(struct dm_pool_metadata *pmd)
{
	pmd->info.tm = pmd->tm;
	pmd->info.levels = 2;
	pmd->info.value_type.context = pmd->data_sm;
	pmd->info.value_type.size = sizeof(__le64);
	pmd->info.value_type.inc = data_block_inc;
	pmd->info.value_type.dec = data_block_dec;
	pmd->info.value_type.equal = data_block_equal;

	memcpy(&pmd->nb_info, &pmd->info, sizeof(pmd->nb_info));
	pmd->nb_info.tm = pmd->nb_tm;

	pmd->tl_info.tm = pmd->tm;
	pmd->tl_info.levels = 1;
	pmd->tl_info.value_type.context = &pmd->bl_info;
	pmd->tl_info.value_type.size = sizeof(__le64);
	pmd->tl_info.value_type.inc = subtree_inc;
	pmd->tl_info.value_type.dec = subtree_dec;
	pmd->tl_info.value_type.equal = subtree_equal;

	pmd->bl_info.tm = pmd->tm;
	pmd->bl_info.levels = 1;
	pmd->bl_info.value_type.context = pmd->data_sm;
	pmd->bl_info.value_type.size = sizeof(__le64);
	pmd->bl_info.value_type.inc = data_block_inc;
	pmd->bl_info.value_type.dec = data_block_dec;
	pmd->bl_info.value_type.equal = data_block_equal;

	pmd->details_info.tm = pmd->tm;
	pmd->details_info.levels = 1;
	pmd->details_info.value_type.context = NULL;
	pmd->details_info.value_type.size = sizeof(struct disk_device_details);
	pmd->details_info.value_type.inc = NULL;
	pmd->details_info.value_type.dec = NULL;
	pmd->details_info.value_type.equal = NULL;
}
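/*
 * To recap the five views set up above: 'info' is the full two-level
 * (dev id, virtual block) -> block_time tree; 'nb_info' is the same
 * tree accessed through the non-blocking transaction manager clone;
 * 'tl_info' and 'bl_info' expose just the top (per-device) and bottom
 * (per-block) levels; 'details_info' is the separate dev_id ->
 * disk_device_details tree.
 */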
static int save_sm_roots(struct dm_pool_metadata *pmd)
{
	int r;
	size_t len;

	r = dm_sm_root_size(pmd->metadata_sm, &len);
	if (r < 0)
		return r;

	r = dm_sm_copy_root(pmd->metadata_sm, &pmd->metadata_space_map_root, len);
	if (r < 0)
		return r;

	r = dm_sm_root_size(pmd->data_sm, &len);
	if (r < 0)
		return r;

	return dm_sm_copy_root(pmd->data_sm, &pmd->data_space_map_root, len);
}

static void copy_sm_roots(struct dm_pool_metadata *pmd,
			  struct thin_disk_superblock *disk)
{
	memcpy(&disk->metadata_space_map_root,
	       &pmd->metadata_space_map_root,
	       sizeof(pmd->metadata_space_map_root));

	memcpy(&disk->data_space_map_root,
	       &pmd->data_space_map_root,
	       sizeof(pmd->data_space_map_root));
}

static int __write_initial_superblock(struct dm_pool_metadata *pmd)
{
	int r;
	struct dm_block *sblock;
	struct thin_disk_superblock *disk_super;
	sector_t bdev_size = i_size_read(pmd->bdev->bd_inode) >> SECTOR_SHIFT;

	if (bdev_size > THIN_METADATA_MAX_SECTORS)
		bdev_size = THIN_METADATA_MAX_SECTORS;

	r = dm_sm_commit(pmd->data_sm);
	if (r < 0)
		return r;

	r = dm_tm_pre_commit(pmd->tm);
	if (r < 0)
		return r;

	r = save_sm_roots(pmd);
	if (r < 0)
		return r;

	r = superblock_lock_zero(pmd, &sblock);
	if (r)
		return r;

	disk_super = dm_block_data(sblock);
	disk_super->flags = 0;
	memset(disk_super->uuid, 0, sizeof(disk_super->uuid));
	disk_super->magic = cpu_to_le64(THIN_SUPERBLOCK_MAGIC);
	disk_super->version = cpu_to_le32(THIN_VERSION);
	disk_super->time = 0;
	disk_super->trans_id = 0;
	disk_super->held_root = 0;

	copy_sm_roots(pmd, disk_super);

	disk_super->data_mapping_root = cpu_to_le64(pmd->root);
	disk_super->device_details_root = cpu_to_le64(pmd->details_root);
	disk_super->metadata_block_size = cpu_to_le32(THIN_METADATA_BLOCK_SIZE);
	disk_super->metadata_nr_blocks = cpu_to_le64(bdev_size >> SECTOR_TO_BLOCK_SHIFT);
	disk_super->data_block_size = cpu_to_le32(pmd->data_block_size);

	return dm_tm_commit(pmd->tm, sblock);
}

static int __format_metadata(struct dm_pool_metadata *pmd)
{
	int r;

	r = dm_tm_create_with_sm(pmd->bm, THIN_SUPERBLOCK_LOCATION,
				 &pmd->tm, &pmd->metadata_sm);
	if (r < 0) {
		DMERR("tm_create_with_sm failed");
		return r;
	}

	pmd->data_sm = dm_sm_disk_create(pmd->tm, 0);
	if (IS_ERR(pmd->data_sm)) {
		DMERR("sm_disk_create failed");
		r = PTR_ERR(pmd->data_sm);
		goto bad_cleanup_tm;
	}

	pmd->nb_tm = dm_tm_create_non_blocking_clone(pmd->tm);
	if (!pmd->nb_tm) {
		DMERR("could not create non-blocking clone tm");
		r = -ENOMEM;
		goto bad_cleanup_data_sm;
	}

	__setup_btree_details(pmd);

	r = dm_btree_empty(&pmd->info, &pmd->root);
	if (r < 0)
		goto bad_cleanup_nb_tm;

	r = dm_btree_empty(&pmd->details_info, &pmd->details_root);
	if (r < 0) {
		DMERR("couldn't create devices root");
		goto bad_cleanup_nb_tm;
	}

	r = __write_initial_superblock(pmd);
	if (r)
		goto bad_cleanup_nb_tm;

	return 0;

bad_cleanup_nb_tm:
	dm_tm_destroy(pmd->nb_tm);
bad_cleanup_data_sm:
	dm_sm_destroy(pmd->data_sm);
bad_cleanup_tm:
	dm_tm_destroy(pmd->tm);
	dm_sm_destroy(pmd->metadata_sm);

	return r;
}
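/*
 * Feature flag policy, as implemented below: unknown incompat flags
 * make the metadata unusable in any mode, while unknown compat_ro
 * flags only forbid opening it read-write.  Unknown compat flags are
 * ignored (nothing checks them).
 */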
static int __check_incompat_features(struct thin_disk_superblock *disk_super,
				     struct dm_pool_metadata *pmd)
{
	uint32_t features;

	features = le32_to_cpu(disk_super->incompat_flags) & ~THIN_FEATURE_INCOMPAT_SUPP;
	if (features) {
		DMERR("could not access metadata due to unsupported optional features (%lx).",
		      (unsigned long)features);
		return -EINVAL;
	}

	/*
	 * Check for read-only metadata to skip the following RDWR checks.
	 */
	if (get_disk_ro(pmd->bdev->bd_disk))
		return 0;

	features = le32_to_cpu(disk_super->compat_ro_flags) & ~THIN_FEATURE_COMPAT_RO_SUPP;
	if (features) {
		DMERR("could not access metadata RDWR due to unsupported optional features (%lx).",
		      (unsigned long)features);
		return -EINVAL;
	}

	return 0;
}

static int __open_metadata(struct dm_pool_metadata *pmd)
{
	int r;
	struct dm_block *sblock;
	struct thin_disk_superblock *disk_super;

	r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
			    &sb_validator, &sblock);
	if (r < 0) {
		DMERR("couldn't read superblock");
		return r;
	}

	disk_super = dm_block_data(sblock);

	/* Verify the data block size hasn't changed */
	if (le32_to_cpu(disk_super->data_block_size) != pmd->data_block_size) {
		DMERR("changing the data block size (from %u to %llu) is not supported",
		      le32_to_cpu(disk_super->data_block_size),
		      (unsigned long long)pmd->data_block_size);
		r = -EINVAL;
		goto bad_unlock_sblock;
	}

	r = __check_incompat_features(disk_super, pmd);
	if (r < 0)
		goto bad_unlock_sblock;

	r = dm_tm_open_with_sm(pmd->bm, THIN_SUPERBLOCK_LOCATION,
			       disk_super->metadata_space_map_root,
			       sizeof(disk_super->metadata_space_map_root),
			       &pmd->tm, &pmd->metadata_sm);
	if (r < 0) {
		DMERR("tm_open_with_sm failed");
		goto bad_unlock_sblock;
	}

	pmd->data_sm = dm_sm_disk_open(pmd->tm, disk_super->data_space_map_root,
				       sizeof(disk_super->data_space_map_root));
	if (IS_ERR(pmd->data_sm)) {
		DMERR("sm_disk_open failed");
		r = PTR_ERR(pmd->data_sm);
		goto bad_cleanup_tm;
	}

	pmd->nb_tm = dm_tm_create_non_blocking_clone(pmd->tm);
	if (!pmd->nb_tm) {
		DMERR("could not create non-blocking clone tm");
		r = -ENOMEM;
		goto bad_cleanup_data_sm;
	}

	__setup_btree_details(pmd);
	dm_bm_unlock(sblock);

	return 0;

bad_cleanup_data_sm:
	dm_sm_destroy(pmd->data_sm);
bad_cleanup_tm:
	dm_tm_destroy(pmd->tm);
	dm_sm_destroy(pmd->metadata_sm);
bad_unlock_sblock:
	dm_bm_unlock(sblock);

	return r;
}
static int __open_or_format_metadata(struct dm_pool_metadata *pmd, bool format_device)
{
	int r, unformatted;

	r = __superblock_all_zeroes(pmd->bm, &unformatted);
	if (r)
		return r;

	if (unformatted)
		return format_device ? __format_metadata(pmd) : -EPERM;

	return __open_metadata(pmd);
}

static int __create_persistent_data_objects(struct dm_pool_metadata *pmd, bool format_device)
{
	int r;

	pmd->bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
					  THIN_MAX_CONCURRENT_LOCKS);
	if (IS_ERR(pmd->bm)) {
		DMERR("could not create block manager");
		return PTR_ERR(pmd->bm);
	}

	r = __open_or_format_metadata(pmd, format_device);
	if (r)
		dm_block_manager_destroy(pmd->bm);

	return r;
}

static void __destroy_persistent_data_objects(struct dm_pool_metadata *pmd)
{
	dm_sm_destroy(pmd->data_sm);
	dm_sm_destroy(pmd->metadata_sm);
	dm_tm_destroy(pmd->nb_tm);
	dm_tm_destroy(pmd->tm);
	dm_block_manager_destroy(pmd->bm);
}

static int __begin_transaction(struct dm_pool_metadata *pmd)
{
	int r;
	struct thin_disk_superblock *disk_super;
	struct dm_block *sblock;

	/*
	 * We re-read the superblock every time.  Shouldn't need to do this
	 * really.
	 */
	r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
			    &sb_validator, &sblock);
	if (r)
		return r;

	disk_super = dm_block_data(sblock);
	pmd->time = le32_to_cpu(disk_super->time);
	pmd->root = le64_to_cpu(disk_super->data_mapping_root);
	pmd->details_root = le64_to_cpu(disk_super->device_details_root);
	pmd->trans_id = le64_to_cpu(disk_super->trans_id);
	pmd->flags = le32_to_cpu(disk_super->flags);
	pmd->data_block_size = le32_to_cpu(disk_super->data_block_size);

	dm_bm_unlock(sblock);
	return 0;
}

static int __write_changed_details(struct dm_pool_metadata *pmd)
{
	int r;
	struct dm_thin_device *td, *tmp;
	struct disk_device_details details;
	uint64_t key;

	list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) {
		if (!td->changed)
			continue;

		key = td->id;

		details.mapped_blocks = cpu_to_le64(td->mapped_blocks);
		details.transaction_id = cpu_to_le64(td->transaction_id);
		details.creation_time = cpu_to_le32(td->creation_time);
		details.snapshotted_time = cpu_to_le32(td->snapshotted_time);
		__dm_bless_for_disk(&details);

		r = dm_btree_insert(&pmd->details_info, pmd->details_root,
				    &key, &details, &pmd->details_root);
		if (r)
			return r;

		if (td->open_count)
			td->changed = 0;
		else {
			list_del(&td->list);
			kfree(td);
		}
	}

	return 0;
}
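/*
 * Commit sequence, in order: flush the changed per-device details
 * (above), commit the data space map, pre-commit the transaction
 * manager, snapshot both space map roots into pmd, then rewrite the
 * superblock.  dm_tm_commit() on the superblock is the single atomic
 * commit point - until it completes, the old transaction is still the
 * one on disk.
 */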
static int __commit_transaction(struct dm_pool_metadata *pmd)
{
	int r;
	size_t metadata_len, data_len;
	struct thin_disk_superblock *disk_super;
	struct dm_block *sblock;

	/*
	 * We need to know if the thin_disk_superblock exceeds a 512-byte
	 * sector.
	 */
	BUILD_BUG_ON(sizeof(struct thin_disk_superblock) > 512);

	r = __write_changed_details(pmd);
	if (r < 0)
		return r;

	r = dm_sm_commit(pmd->data_sm);
	if (r < 0)
		return r;

	r = dm_tm_pre_commit(pmd->tm);
	if (r < 0)
		return r;

	r = dm_sm_root_size(pmd->metadata_sm, &metadata_len);
	if (r < 0)
		return r;

	r = dm_sm_root_size(pmd->data_sm, &data_len);
	if (r < 0)
		return r;

	r = save_sm_roots(pmd);
	if (r < 0)
		return r;

	r = superblock_lock(pmd, &sblock);
	if (r)
		return r;

	disk_super = dm_block_data(sblock);
	disk_super->time = cpu_to_le32(pmd->time);
	disk_super->data_mapping_root = cpu_to_le64(pmd->root);
	disk_super->device_details_root = cpu_to_le64(pmd->details_root);
	disk_super->trans_id = cpu_to_le64(pmd->trans_id);
	disk_super->flags = cpu_to_le32(pmd->flags);

	copy_sm_roots(pmd, disk_super);

	return dm_tm_commit(pmd->tm, sblock);
}

struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
					       sector_t data_block_size,
					       bool format_device)
{
	int r;
	struct dm_pool_metadata *pmd;

	pmd = kmalloc(sizeof(*pmd), GFP_KERNEL);
	if (!pmd) {
		DMERR("could not allocate metadata struct");
		return ERR_PTR(-ENOMEM);
	}

	init_rwsem(&pmd->root_lock);
	pmd->time = 0;
	INIT_LIST_HEAD(&pmd->thin_devices);
	pmd->fail_io = false;
	pmd->bdev = bdev;
	pmd->data_block_size = data_block_size;

	r = __create_persistent_data_objects(pmd, format_device);
	if (r) {
		kfree(pmd);
		return ERR_PTR(r);
	}

	r = __begin_transaction(pmd);
	if (r < 0) {
		if (dm_pool_metadata_close(pmd) < 0)
			DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
		return ERR_PTR(r);
	}

	return pmd;
}

int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
{
	int r;
	unsigned open_devices = 0;
	struct dm_thin_device *td, *tmp;

	down_read(&pmd->root_lock);
	list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) {
		if (td->open_count)
			open_devices++;
		else {
			list_del(&td->list);
			kfree(td);
		}
	}
	up_read(&pmd->root_lock);

	if (open_devices) {
		DMERR("attempt to close pmd when %u device(s) are still open",
		      open_devices);
		return -EBUSY;
	}

	if (!dm_bm_is_read_only(pmd->bm) && !pmd->fail_io) {
		r = __commit_transaction(pmd);
		if (r < 0)
			DMWARN("%s: __commit_transaction() failed, error = %d",
			       __func__, r);
	}

	if (!pmd->fail_io)
		__destroy_persistent_data_objects(pmd);

	kfree(pmd);
	return 0;
}
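/*
 * Device handle lifecycle: callers obtain a dm_thin_device via
 * dm_pool_open_thin_device() (which uses __open_device() below) and
 * must balance it with dm_pool_close_thin_device().
 * dm_pool_metadata_close() refuses with -EBUSY while any handle
 * remains open.
 */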
/*
 * __open_device: Returns @td corresponding to device with id @dev,
 * creating it if @create is set and incrementing @td->open_count.
 * On failure, @td is undefined.
 */
static int __open_device(struct dm_pool_metadata *pmd,
			 dm_thin_id dev, int create,
			 struct dm_thin_device **td)
{
	int r, changed = 0;
	struct dm_thin_device *td2;
	uint64_t key = dev;
	struct disk_device_details details_le;

	/*
	 * If the device is already open, return it.
	 */
	list_for_each_entry(td2, &pmd->thin_devices, list)
		if (td2->id == dev) {
			/*
			 * May not create an already-open device.
			 */
			if (create)
				return -EEXIST;

			td2->open_count++;
			*td = td2;
			return 0;
		}

	/*
	 * Check the device exists.
	 */
	r = dm_btree_lookup(&pmd->details_info, pmd->details_root,
			    &key, &details_le);
	if (r) {
		if (r != -ENODATA || !create)
			return r;

		/*
		 * Create new device.
		 */
		changed = 1;
		details_le.mapped_blocks = 0;
		details_le.transaction_id = cpu_to_le64(pmd->trans_id);
		details_le.creation_time = cpu_to_le32(pmd->time);
		details_le.snapshotted_time = cpu_to_le32(pmd->time);
	}

	*td = kmalloc(sizeof(**td), GFP_NOIO);
	if (!*td)
		return -ENOMEM;

	(*td)->pmd = pmd;
	(*td)->id = dev;
	(*td)->open_count = 1;
	(*td)->changed = changed;
	(*td)->aborted_with_changes = false;
	(*td)->mapped_blocks = le64_to_cpu(details_le.mapped_blocks);
	(*td)->transaction_id = le64_to_cpu(details_le.transaction_id);
	(*td)->creation_time = le32_to_cpu(details_le.creation_time);
	(*td)->snapshotted_time = le32_to_cpu(details_le.snapshotted_time);

	list_add(&(*td)->list, &pmd->thin_devices);

	return 0;
}

static void __close_device(struct dm_thin_device *td)
{
	--td->open_count;
}

static int __create_thin(struct dm_pool_metadata *pmd,
			 dm_thin_id dev)
{
	int r;
	dm_block_t dev_root;
	uint64_t key = dev;
	struct disk_device_details details_le;
	struct dm_thin_device *td;
	__le64 value;

	r = dm_btree_lookup(&pmd->details_info, pmd->details_root,
			    &key, &details_le);
	if (!r)
		return -EEXIST;

	/*
	 * Create an empty btree for the mappings.
	 */
	r = dm_btree_empty(&pmd->bl_info, &dev_root);
	if (r)
		return r;

	/*
	 * Insert it into the main mapping tree.
	 */
	value = cpu_to_le64(dev_root);
	__dm_bless_for_disk(&value);
	r = dm_btree_insert(&pmd->tl_info, pmd->root, &key, &value, &pmd->root);
	if (r) {
		dm_btree_del(&pmd->bl_info, dev_root);
		return r;
	}

	r = __open_device(pmd, dev, 1, &td);
	if (r) {
		dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root);
		dm_btree_del(&pmd->bl_info, dev_root);
		return r;
	}
	__close_device(td);

	return r;
}

int dm_pool_create_thin(struct dm_pool_metadata *pmd, dm_thin_id dev)
{
	int r = -EINVAL;

	down_write(&pmd->root_lock);
	if (!pmd->fail_io)
		r = __create_thin(pmd, dev);
	up_write(&pmd->root_lock);

	return r;
}
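/*
 * Snapshot creation below is cheap: the origin's bottom-level mapping
 * tree is "cloned" simply by incrementing the reference count on its
 * root (copy-on-write happens lazily through the shadowing transaction
 * manager), and pmd->time is bumped so that __snapshotted_since() can
 * tell which blocks predate the snapshot and are therefore shared.
 */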
static int __set_snapshot_details(struct dm_pool_metadata *pmd,
				  struct dm_thin_device *snap,
				  dm_thin_id origin, uint32_t time)
{
	int r;
	struct dm_thin_device *td;

	r = __open_device(pmd, origin, 0, &td);
	if (r)
		return r;

	td->changed = 1;
	td->snapshotted_time = time;

	snap->mapped_blocks = td->mapped_blocks;
	snap->snapshotted_time = time;
	__close_device(td);

	return 0;
}

static int __create_snap(struct dm_pool_metadata *pmd,
			 dm_thin_id dev, dm_thin_id origin)
{
	int r;
	dm_block_t origin_root;
	uint64_t key = origin, dev_key = dev;
	struct dm_thin_device *td;
	struct disk_device_details details_le;
	__le64 value;

	/* check this device is unused */
	r = dm_btree_lookup(&pmd->details_info, pmd->details_root,
			    &dev_key, &details_le);
	if (!r)
		return -EEXIST;

	/* find the mapping tree for the origin */
	r = dm_btree_lookup(&pmd->tl_info, pmd->root, &key, &value);
	if (r)
		return r;
	origin_root = le64_to_cpu(value);

	/* clone the origin, an inc will do */
	dm_tm_inc(pmd->tm, origin_root);

	/* insert into the main mapping tree */
	value = cpu_to_le64(origin_root);
	__dm_bless_for_disk(&value);
	key = dev;
	r = dm_btree_insert(&pmd->tl_info, pmd->root, &key, &value, &pmd->root);
	if (r) {
		dm_tm_dec(pmd->tm, origin_root);
		return r;
	}

	pmd->time++;

	r = __open_device(pmd, dev, 1, &td);
	if (r)
		goto bad;

	r = __set_snapshot_details(pmd, td, origin, pmd->time);
	__close_device(td);

	if (r)
		goto bad;

	return 0;

bad:
	dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root);
	dm_btree_remove(&pmd->details_info, pmd->details_root,
			&key, &pmd->details_root);
	return r;
}

int dm_pool_create_snap(struct dm_pool_metadata *pmd,
			dm_thin_id dev,
			dm_thin_id origin)
{
	int r = -EINVAL;

	down_write(&pmd->root_lock);
	if (!pmd->fail_io)
		r = __create_snap(pmd, dev, origin);
	up_write(&pmd->root_lock);

	return r;
}

static int __delete_device(struct dm_pool_metadata *pmd, dm_thin_id dev)
{
	int r;
	uint64_t key = dev;
	struct dm_thin_device *td;

	/* TODO: failure should mark the transaction invalid */
	r = __open_device(pmd, dev, 0, &td);
	if (r)
		return r;

	if (td->open_count > 1) {
		__close_device(td);
		return -EBUSY;
	}

	list_del(&td->list);
	kfree(td);
	r = dm_btree_remove(&pmd->details_info, pmd->details_root,
			    &key, &pmd->details_root);
	if (r)
		return r;

	r = dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root);
	if (r)
		return r;

	return 0;
}

int dm_pool_delete_thin_device(struct dm_pool_metadata *pmd,
			       dm_thin_id dev)
{
	int r = -EINVAL;

	down_write(&pmd->root_lock);
	if (!pmd->fail_io)
		r = __delete_device(pmd, dev);
	up_write(&pmd->root_lock);

	return r;
}

int dm_pool_set_metadata_transaction_id(struct dm_pool_metadata *pmd,
					uint64_t current_id,
					uint64_t new_id)
{
	int r = -EINVAL;

	down_write(&pmd->root_lock);

	if (pmd->fail_io)
		goto out;

	if (pmd->trans_id != current_id) {
		DMERR("mismatched transaction id");
		goto out;
	}

	pmd->trans_id = new_id;
	r = 0;

out:
	up_write(&pmd->root_lock);

	return r;
}

int dm_pool_get_metadata_transaction_id(struct dm_pool_metadata *pmd,
					uint64_t *result)
{
	int r = -EINVAL;

	down_read(&pmd->root_lock);
	if (!pmd->fail_io) {
		*result = pmd->trans_id;
		r = 0;
	}
	up_read(&pmd->root_lock);

	return r;
}
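/*
 * A pool metadata snapshot ("held root") is a shadow copy of the
 * superblock with its space map roots wiped and with extra references
 * taken on the mapping and device-details trees.  Userspace tools can
 * then read a frozen, consistent view of the metadata through the held
 * root while the pool remains live.  Only one such snapshot may exist
 * at a time.
 */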
static int __reserve_metadata_snap(struct dm_pool_metadata *pmd)
{
	int r, inc;
	struct thin_disk_superblock *disk_super;
	struct dm_block *copy, *sblock;
	dm_block_t held_root;

	/*
	 * We commit to ensure the btree roots which we increment in a
	 * moment are up to date.
	 */
	__commit_transaction(pmd);

	/*
	 * Copy the superblock.
	 */
	dm_sm_inc_block(pmd->metadata_sm, THIN_SUPERBLOCK_LOCATION);
	r = dm_tm_shadow_block(pmd->tm, THIN_SUPERBLOCK_LOCATION,
			       &sb_validator, &copy, &inc);
	if (r)
		return r;

	BUG_ON(!inc);

	held_root = dm_block_location(copy);
	disk_super = dm_block_data(copy);

	if (le64_to_cpu(disk_super->held_root)) {
		DMWARN("Pool metadata snapshot already exists: release this before taking another.");

		dm_tm_dec(pmd->tm, held_root);
		dm_tm_unlock(pmd->tm, copy);
		return -EBUSY;
	}

	/*
	 * Wipe the spacemap since we're not publishing this.
	 */
	memset(&disk_super->data_space_map_root, 0,
	       sizeof(disk_super->data_space_map_root));
	memset(&disk_super->metadata_space_map_root, 0,
	       sizeof(disk_super->metadata_space_map_root));

	/*
	 * Increment the data structures that need to be preserved.
	 */
	dm_tm_inc(pmd->tm, le64_to_cpu(disk_super->data_mapping_root));
	dm_tm_inc(pmd->tm, le64_to_cpu(disk_super->device_details_root));
	dm_tm_unlock(pmd->tm, copy);

	/*
	 * Write the held root into the superblock.
	 */
	r = superblock_lock(pmd, &sblock);
	if (r) {
		dm_tm_dec(pmd->tm, held_root);
		return r;
	}

	disk_super = dm_block_data(sblock);
	disk_super->held_root = cpu_to_le64(held_root);
	dm_bm_unlock(sblock);
	return 0;
}

int dm_pool_reserve_metadata_snap(struct dm_pool_metadata *pmd)
{
	int r = -EINVAL;

	down_write(&pmd->root_lock);
	if (!pmd->fail_io)
		r = __reserve_metadata_snap(pmd);
	up_write(&pmd->root_lock);

	return r;
}

static int __release_metadata_snap(struct dm_pool_metadata *pmd)
{
	int r;
	struct thin_disk_superblock *disk_super;
	struct dm_block *sblock, *copy;
	dm_block_t held_root;

	r = superblock_lock(pmd, &sblock);
	if (r)
		return r;

	disk_super = dm_block_data(sblock);
	held_root = le64_to_cpu(disk_super->held_root);
	disk_super->held_root = cpu_to_le64(0);

	dm_bm_unlock(sblock);

	if (!held_root) {
		DMWARN("No pool metadata snapshot found: nothing to release.");
		return -EINVAL;
	}

	r = dm_tm_read_lock(pmd->tm, held_root, &sb_validator, &copy);
	if (r)
		return r;

	disk_super = dm_block_data(copy);
	dm_btree_del(&pmd->info, le64_to_cpu(disk_super->data_mapping_root));
	dm_btree_del(&pmd->details_info, le64_to_cpu(disk_super->device_details_root));
	dm_sm_dec_block(pmd->metadata_sm, held_root);

	dm_tm_unlock(pmd->tm, copy);

	return 0;
}

int dm_pool_release_metadata_snap(struct dm_pool_metadata *pmd)
{
	int r = -EINVAL;

	down_write(&pmd->root_lock);
	if (!pmd->fail_io)
		r = __release_metadata_snap(pmd);
	up_write(&pmd->root_lock);

	return r;
}

static int __get_metadata_snap(struct dm_pool_metadata *pmd,
			       dm_block_t *result)
{
	int r;
	struct thin_disk_superblock *disk_super;
	struct dm_block *sblock;

	r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
			    &sb_validator, &sblock);
	if (r)
		return r;

	disk_super = dm_block_data(sblock);
	*result = le64_to_cpu(disk_super->held_root);

	dm_bm_unlock(sblock);

	return 0;
}
int dm_pool_get_metadata_snap(struct dm_pool_metadata *pmd,
			      dm_block_t *result)
{
	int r = -EINVAL;

	down_read(&pmd->root_lock);
	if (!pmd->fail_io)
		r = __get_metadata_snap(pmd, result);
	up_read(&pmd->root_lock);

	return r;
}

int dm_pool_open_thin_device(struct dm_pool_metadata *pmd, dm_thin_id dev,
			     struct dm_thin_device **td)
{
	int r = -EINVAL;

	down_write(&pmd->root_lock);
	if (!pmd->fail_io)
		r = __open_device(pmd, dev, 0, td);
	up_write(&pmd->root_lock);

	return r;
}

int dm_pool_close_thin_device(struct dm_thin_device *td)
{
	down_write(&td->pmd->root_lock);
	__close_device(td);
	up_write(&td->pmd->root_lock);

	return 0;
}

dm_thin_id dm_thin_dev_id(struct dm_thin_device *td)
{
	return td->id;
}

/*
 * Check whether @time (of block creation) is older than @td's last snapshot.
 * If so then the associated block is shared with the last snapshot device.
 * Any block on a device created *after* the device last got snapshotted is
 * necessarily not shared.
 */
static bool __snapshotted_since(struct dm_thin_device *td, uint32_t time)
{
	return td->snapshotted_time > time;
}

static void unpack_lookup_result(struct dm_thin_device *td, __le64 value,
				 struct dm_thin_lookup_result *result)
{
	uint64_t block_time = 0;
	dm_block_t exception_block;
	uint32_t exception_time;

	block_time = le64_to_cpu(value);
	unpack_block_time(block_time, &exception_block, &exception_time);
	result->block = exception_block;
	result->shared = __snapshotted_since(td, exception_time);
}

static int __find_block(struct dm_thin_device *td, dm_block_t block,
			int can_issue_io, struct dm_thin_lookup_result *result)
{
	int r;
	__le64 value;
	struct dm_pool_metadata *pmd = td->pmd;
	dm_block_t keys[2] = { td->id, block };
	struct dm_btree_info *info;

	if (can_issue_io)
		info = &pmd->info;
	else
		info = &pmd->nb_info;

	r = dm_btree_lookup(info, pmd->root, keys, &value);
	if (!r)
		unpack_lookup_result(td, value, result);

	return r;
}

int dm_thin_find_block(struct dm_thin_device *td, dm_block_t block,
		       int can_issue_io, struct dm_thin_lookup_result *result)
{
	int r;
	struct dm_pool_metadata *pmd = td->pmd;

	down_read(&pmd->root_lock);
	if (pmd->fail_io) {
		up_read(&pmd->root_lock);
		return -EINVAL;
	}

	r = __find_block(td, block, can_issue_io, result);

	up_read(&pmd->root_lock);
	return r;
}

static int __find_next_mapped_block(struct dm_thin_device *td, dm_block_t block,
				    dm_block_t *vblock,
				    struct dm_thin_lookup_result *result)
{
	int r;
	__le64 value;
	struct dm_pool_metadata *pmd = td->pmd;
	dm_block_t keys[2] = { td->id, block };

	r = dm_btree_lookup_next(&pmd->info, pmd->root, keys, vblock, &value);
	if (!r)
		unpack_lookup_result(td, value, result);

	return r;
}
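/*
 * __find_mapped_range() below coalesces a run of consecutive virtual
 * blocks whose data blocks are physically contiguous and share the
 * same 'shared' status, so callers can process one [thin_begin,
 * thin_end) run at a time instead of querying block by block.
 */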
static int __find_mapped_range(struct dm_thin_device *td,
			       dm_block_t begin, dm_block_t end,
			       dm_block_t *thin_begin, dm_block_t *thin_end,
			       dm_block_t *pool_begin, bool *maybe_shared)
{
	int r;
	dm_block_t pool_end;
	struct dm_thin_lookup_result lookup;

	if (end < begin)
		return -ENODATA;

	r = __find_next_mapped_block(td, begin, &begin, &lookup);
	if (r)
		return r;

	if (begin >= end)
		return -ENODATA;

	*thin_begin = begin;
	*pool_begin = lookup.block;
	*maybe_shared = lookup.shared;

	begin++;
	pool_end = *pool_begin + 1;
	while (begin != end) {
		r = __find_block(td, begin, true, &lookup);
		if (r) {
			if (r == -ENODATA)
				break;
			else
				return r;
		}

		if ((lookup.block != pool_end) ||
		    (lookup.shared != *maybe_shared))
			break;

		pool_end++;
		begin++;
	}

	*thin_end = begin;
	return 0;
}

int dm_thin_find_mapped_range(struct dm_thin_device *td,
			      dm_block_t begin, dm_block_t end,
			      dm_block_t *thin_begin, dm_block_t *thin_end,
			      dm_block_t *pool_begin, bool *maybe_shared)
{
	int r = -EINVAL;
	struct dm_pool_metadata *pmd = td->pmd;

	down_read(&pmd->root_lock);
	if (!pmd->fail_io) {
		r = __find_mapped_range(td, begin, end, thin_begin, thin_end,
					pool_begin, maybe_shared);
	}
	up_read(&pmd->root_lock);

	return r;
}

static int __insert(struct dm_thin_device *td, dm_block_t block,
		    dm_block_t data_block)
{
	int r, inserted;
	__le64 value;
	struct dm_pool_metadata *pmd = td->pmd;
	dm_block_t keys[2] = { td->id, block };

	value = cpu_to_le64(pack_block_time(data_block, pmd->time));
	__dm_bless_for_disk(&value);

	r = dm_btree_insert_notify(&pmd->info, pmd->root, keys, &value,
				   &pmd->root, &inserted);
	if (r)
		return r;

	td->changed = 1;
	if (inserted)
		td->mapped_blocks++;

	return 0;
}

int dm_thin_insert_block(struct dm_thin_device *td, dm_block_t block,
			 dm_block_t data_block)
{
	int r = -EINVAL;

	down_write(&td->pmd->root_lock);
	if (!td->pmd->fail_io)
		r = __insert(td, block, data_block);
	up_write(&td->pmd->root_lock);

	return r;
}

static int __remove(struct dm_thin_device *td, dm_block_t block)
{
	int r;
	struct dm_pool_metadata *pmd = td->pmd;
	dm_block_t keys[2] = { td->id, block };

	r = dm_btree_remove(&pmd->info, pmd->root, keys, &pmd->root);
	if (r)
		return r;

	td->mapped_blocks--;
	td->changed = 1;

	return 0;
}
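/*
 * __remove_range() below avoids N single-block removes: it detaches
 * the device's whole bottom-level tree from the top level (taking an
 * extra reference first so the subtree isn't deleted), strips out the
 * mapped leaves run by run with dm_btree_remove_leaves(), then
 * reinserts the pruned subtree root.
 */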
static int __remove_range(struct dm_thin_device *td, dm_block_t begin, dm_block_t end)
{
	int r;
	unsigned count, total_count = 0;
	struct dm_pool_metadata *pmd = td->pmd;
	dm_block_t keys[1] = { td->id };
	__le64 value;
	dm_block_t mapping_root;

	/*
	 * Find the mapping tree
	 */
	r = dm_btree_lookup(&pmd->tl_info, pmd->root, keys, &value);
	if (r)
		return r;

	/*
	 * Remove from the mapping tree, taking care to inc the
	 * ref count so it doesn't get deleted.
	 */
	mapping_root = le64_to_cpu(value);
	dm_tm_inc(pmd->tm, mapping_root);
	r = dm_btree_remove(&pmd->tl_info, pmd->root, keys, &pmd->root);
	if (r)
		return r;

	/*
	 * Remove leaves stops at the first unmapped entry, so we have to
	 * loop round finding mapped ranges.
	 */
	while (begin < end) {
		r = dm_btree_lookup_next(&pmd->bl_info, mapping_root, &begin, &begin, &value);
		if (r == -ENODATA)
			break;

		if (r)
			return r;

		if (begin >= end)
			break;

		r = dm_btree_remove_leaves(&pmd->bl_info, mapping_root, &begin, end, &mapping_root, &count);
		if (r)
			return r;

		total_count += count;
	}

	td->mapped_blocks -= total_count;
	td->changed = 1;

	/*
	 * Reinsert the mapping tree.
	 */
	value = cpu_to_le64(mapping_root);
	__dm_bless_for_disk(&value);
	return dm_btree_insert(&pmd->tl_info, pmd->root, keys, &value, &pmd->root);
}

int dm_thin_remove_block(struct dm_thin_device *td, dm_block_t block)
{
	int r = -EINVAL;

	down_write(&td->pmd->root_lock);
	if (!td->pmd->fail_io)
		r = __remove(td, block);
	up_write(&td->pmd->root_lock);

	return r;
}

int dm_thin_remove_range(struct dm_thin_device *td,
			 dm_block_t begin, dm_block_t end)
{
	int r = -EINVAL;

	down_write(&td->pmd->root_lock);
	if (!td->pmd->fail_io)
		r = __remove_range(td, begin, end);
	up_write(&td->pmd->root_lock);

	return r;
}

int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
{
	int r;
	uint32_t ref_count;

	down_read(&pmd->root_lock);
	r = dm_sm_get_count(pmd->data_sm, b, &ref_count);
	if (!r)
		*result = (ref_count != 0);
	up_read(&pmd->root_lock);

	return r;
}

int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e)
{
	int r = 0;

	down_write(&pmd->root_lock);
	for (; b != e; b++) {
		r = dm_sm_inc_block(pmd->data_sm, b);
		if (r)
			break;
	}
	up_write(&pmd->root_lock);

	return r;
}

int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e)
{
	int r = 0;

	down_write(&pmd->root_lock);
	for (; b != e; b++) {
		r = dm_sm_dec_block(pmd->data_sm, b);
		if (r)
			break;
	}
	up_write(&pmd->root_lock);

	return r;
}

bool dm_thin_changed_this_transaction(struct dm_thin_device *td)
{
	int r;

	down_read(&td->pmd->root_lock);
	r = td->changed;
	up_read(&td->pmd->root_lock);

	return r;
}

bool dm_pool_changed_this_transaction(struct dm_pool_metadata *pmd)
{
	bool r = false;
	struct dm_thin_device *td, *tmp;

	down_read(&pmd->root_lock);
	list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) {
		if (td->changed) {
			r = td->changed;
			break;
		}
	}
	up_read(&pmd->root_lock);

	return r;
}

bool dm_thin_aborted_changes(struct dm_thin_device *td)
{
	bool r;

	down_read(&td->pmd->root_lock);
	r = td->aborted_with_changes;
	up_read(&td->pmd->root_lock);

	return r;
}

int dm_pool_alloc_data_block(struct dm_pool_metadata *pmd, dm_block_t *result)
{
	int r = -EINVAL;

	down_write(&pmd->root_lock);
	if (!pmd->fail_io)
		r = dm_sm_new_block(pmd->data_sm, result);
	up_write(&pmd->root_lock);

	return r;
}
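/*
 * Note on fail_io: every entry point in this file checks pmd->fail_io
 * under root_lock.  The flag is set by dm_pool_abort_metadata() when
 * rolling back to the last good transaction fails; from then on only
 * dm_pool_metadata_close() will do anything.
 */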
int dm_pool_commit_metadata(struct dm_pool_metadata *pmd)
{
	int r = -EINVAL;

	down_write(&pmd->root_lock);
	if (pmd->fail_io)
		goto out;

	r = __commit_transaction(pmd);
	if (r <= 0)
		goto out;

	/*
	 * Open the next transaction.
	 */
	r = __begin_transaction(pmd);
out:
	up_write(&pmd->root_lock);
	return r;
}

static void __set_abort_with_changes_flags(struct dm_pool_metadata *pmd)
{
	struct dm_thin_device *td;

	list_for_each_entry(td, &pmd->thin_devices, list)
		td->aborted_with_changes = td->changed;
}

int dm_pool_abort_metadata(struct dm_pool_metadata *pmd)
{
	int r = -EINVAL;

	down_write(&pmd->root_lock);
	if (pmd->fail_io)
		goto out;

	__set_abort_with_changes_flags(pmd);
	__destroy_persistent_data_objects(pmd);
	r = __create_persistent_data_objects(pmd, false);
	if (r)
		pmd->fail_io = true;

out:
	up_write(&pmd->root_lock);

	return r;
}

int dm_pool_get_free_block_count(struct dm_pool_metadata *pmd, dm_block_t *result)
{
	int r = -EINVAL;

	down_read(&pmd->root_lock);
	if (!pmd->fail_io)
		r = dm_sm_get_nr_free(pmd->data_sm, result);
	up_read(&pmd->root_lock);

	return r;
}

int dm_pool_get_free_metadata_block_count(struct dm_pool_metadata *pmd,
					  dm_block_t *result)
{
	int r = -EINVAL;

	down_read(&pmd->root_lock);
	if (!pmd->fail_io)
		r = dm_sm_get_nr_free(pmd->metadata_sm, result);
	up_read(&pmd->root_lock);

	return r;
}

int dm_pool_get_metadata_dev_size(struct dm_pool_metadata *pmd,
				  dm_block_t *result)
{
	int r = -EINVAL;

	down_read(&pmd->root_lock);
	if (!pmd->fail_io)
		r = dm_sm_get_nr_blocks(pmd->metadata_sm, result);
	up_read(&pmd->root_lock);

	return r;
}

int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result)
{
	int r = -EINVAL;

	down_read(&pmd->root_lock);
	if (!pmd->fail_io)
		r = dm_sm_get_nr_blocks(pmd->data_sm, result);
	up_read(&pmd->root_lock);

	return r;
}

int dm_thin_get_mapped_count(struct dm_thin_device *td, dm_block_t *result)
{
	int r = -EINVAL;
	struct dm_pool_metadata *pmd = td->pmd;

	down_read(&pmd->root_lock);
	if (!pmd->fail_io) {
		*result = td->mapped_blocks;
		r = 0;
	}
	up_read(&pmd->root_lock);

	return r;
}

static int __highest_block(struct dm_thin_device *td, dm_block_t *result)
{
	int r;
	__le64 value_le;
	dm_block_t thin_root;
	struct dm_pool_metadata *pmd = td->pmd;

	r = dm_btree_lookup(&pmd->tl_info, pmd->root, &td->id, &value_le);
	if (r)
		return r;

	thin_root = le64_to_cpu(value_le);

	return dm_btree_find_highest_key(&pmd->bl_info, thin_root, result);
}

int dm_thin_get_highest_mapped_block(struct dm_thin_device *td,
				     dm_block_t *result)
{
	int r = -EINVAL;
	struct dm_pool_metadata *pmd = td->pmd;

	down_read(&pmd->root_lock);
	if (!pmd->fail_io)
		r = __highest_block(td, result);
	up_read(&pmd->root_lock);

	return r;
}
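/*
 * Space maps can only grow.  Shrinking would require relocating
 * whatever is allocated in the blocks being removed, which is not
 * supported, hence the -EINVAL below.
 */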
static int __resize_space_map(struct dm_space_map *sm, dm_block_t new_count)
{
	int r;
	dm_block_t old_count;

	r = dm_sm_get_nr_blocks(sm, &old_count);
	if (r)
		return r;

	if (new_count == old_count)
		return 0;

	if (new_count < old_count) {
		DMERR("cannot reduce size of space map");
		return -EINVAL;
	}

	return dm_sm_extend(sm, new_count - old_count);
}

int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
{
	int r = -EINVAL;

	down_write(&pmd->root_lock);
	if (!pmd->fail_io)
		r = __resize_space_map(pmd->data_sm, new_count);
	up_write(&pmd->root_lock);

	return r;
}

int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
{
	int r = -EINVAL;

	down_write(&pmd->root_lock);
	if (!pmd->fail_io)
		r = __resize_space_map(pmd->metadata_sm, new_count);
	up_write(&pmd->root_lock);

	return r;
}

void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd)
{
	down_write(&pmd->root_lock);
	dm_bm_set_read_only(pmd->bm);
	up_write(&pmd->root_lock);
}

void dm_pool_metadata_read_write(struct dm_pool_metadata *pmd)
{
	down_write(&pmd->root_lock);
	dm_bm_set_read_write(pmd->bm);
	up_write(&pmd->root_lock);
}

int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
					dm_block_t threshold,
					dm_sm_threshold_fn fn,
					void *context)
{
	int r;

	down_write(&pmd->root_lock);
	r = dm_sm_register_threshold_callback(pmd->metadata_sm, threshold, fn, context);
	up_write(&pmd->root_lock);

	return r;
}

int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd)
{
	int r;
	struct dm_block *sblock;
	struct thin_disk_superblock *disk_super;

	down_write(&pmd->root_lock);
	pmd->flags |= THIN_METADATA_NEEDS_CHECK_FLAG;

	r = superblock_lock(pmd, &sblock);
	if (r) {
		DMERR("couldn't lock superblock");
		goto out;
	}

	disk_super = dm_block_data(sblock);
	disk_super->flags = cpu_to_le32(pmd->flags);

	dm_bm_unlock(sblock);
out:
	up_write(&pmd->root_lock);
	return r;
}

bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd)
{
	bool needs_check;

	down_read(&pmd->root_lock);
	needs_check = pmd->flags & THIN_METADATA_NEEDS_CHECK_FLAG;
	up_read(&pmd->root_lock);

	return needs_check;
}

void dm_pool_issue_prefetches(struct dm_pool_metadata *pmd)
{
	down_read(&pmd->root_lock);
	if (!pmd->fail_io)
		dm_tm_issue_prefetches(pmd->tm);
	up_read(&pmd->root_lock);
}