/*
 * Copyright (C) 2011-2012 Red Hat UK.
 *
 * This file is released under the GPL.
 */

#include "dm-thin-metadata.h"
#include "dm-bio-prison.h"
#include "dm.h"

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/jiffies.h>
#include <linux/log2.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sort.h>
#include <linux/rbtree.h>

#define DM_MSG_PREFIX "thin"

/*
 * Tunable constants
 */
#define ENDIO_HOOK_POOL_SIZE 1024
#define MAPPING_POOL_SIZE 1024
#define COMMIT_PERIOD HZ
#define NO_SPACE_TIMEOUT_SECS 60

static unsigned no_space_timeout_secs = NO_SPACE_TIMEOUT_SECS;

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
		"A percentage of time allocated for copy on write");

/*
 * The block size of the device holding pool data must be
 * between 64KB and 1GB.
 */
#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)
#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)

/*
 * Device id is restricted to 24 bits.
 */
#define MAX_DEV_ID ((1 << 24) - 1)

/*
 * How do we handle breaking sharing of data blocks?
 * =================================================
 *
 * We use a standard copy-on-write btree to store the mappings for the
 * devices (note I'm talking about copy-on-write of the metadata here, not
 * the data).  When you take an internal snapshot you clone the root node
 * of the origin btree.  After this there is no concept of an origin or a
 * snapshot.  They are just two device trees that happen to point to the
 * same data blocks.
 *
 * When we get a write in we decide if it's to a shared data block using
 * some timestamp magic.  If it is, we have to break sharing.
 *
 * Let's say we write to a shared block in what was the origin.  The
 * steps are:
 *
 * i) plug io further to this physical block. (see bio_prison code).
 *
 * ii) quiesce any read io to that shared data block.  Obviously
 * including all devices that share this block.  (see dm_deferred_set code)
 *
 * iii) copy the data block to a newly allocated block.  This step can be
 * missed out if the io covers the block. (schedule_copy).
 *
 * iv) insert the new mapping into the origin's btree
 * (process_prepared_mapping).  This act of inserting breaks some
 * sharing of btree nodes between the two devices.  Breaking sharing only
 * affects the btree of that specific device.  Btrees for the other
 * devices that share the block never change.  The btree for the origin
 * device as it was after the last commit is untouched, ie. we're using
 * persistent data structures in the functional programming sense.
 *
 * v) unplug io to this physical block, including the io that triggered
 * the breaking of sharing.
 *
 * Steps (ii) and (iii) occur in parallel.
 *
 * The metadata _doesn't_ need to be committed before the io continues.  We
 * get away with this because the io is always written to a _new_ block.
 * If there's a crash, then:
 *
 * - The origin mapping will point to the old origin block (the shared
 * one).  This will contain the data as it was before the io that triggered
 * the breaking of sharing came in.
 *
 * - The snap mapping still points to the old block.  As it would after
 * the commit.
 *
 * The downside of this scheme is the timestamp magic isn't perfect, and
 * will continue to think that data block in the snapshot device is shared
 * even after the write to the origin has broken sharing.  I suspect data
 * blocks will typically be shared by many different devices, so we're
 * breaking sharing n + 1 times, rather than n, where n is the number of
 * devices that reference this data block.  At the moment I think the
 * benefits far, far outweigh the disadvantages.
 */
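
/*
 * A worked example of the steps above (the numbers are invented for
 * illustration): suppose a thin volume and its snapshot both map virtual
 * block 7 to data block 42, and a write arrives for part of block 7 on
 * the thin volume.  We detain further io to block 42 (i), wait for
 * in-flight reads to it to drain (ii), copy block 42 to a freshly
 * allocated block, say 99 (iii), insert the mapping 7 -> 99 into that
 * volume's btree only (iv), then release the held io, which now remaps
 * to block 99 (v).  The snapshot maps 7 -> 42 throughout.
 */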

/*----------------------------------------------------------------*/

/*
 * Key building.
 */
enum lock_space {
	VIRTUAL,
	PHYSICAL
};

static void build_key(struct dm_thin_device *td, enum lock_space ls,
		      dm_block_t b, dm_block_t e, struct dm_cell_key *key)
{
	key->virtual = (ls == VIRTUAL);
	key->dev = dm_thin_dev_id(td);
	key->block_begin = b;
	key->block_end = e;
}

static void build_data_key(struct dm_thin_device *td, dm_block_t b,
			   struct dm_cell_key *key)
{
	build_key(td, PHYSICAL, b, b + 1llu, key);
}

static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
			      struct dm_cell_key *key)
{
	build_key(td, VIRTUAL, b, b + 1llu, key);
}

/*----------------------------------------------------------------*/

#define THROTTLE_THRESHOLD (1 * HZ)

struct throttle {
	struct rw_semaphore lock;
	unsigned long threshold;
	bool throttle_applied;
};

static void throttle_init(struct throttle *t)
{
	init_rwsem(&t->lock);
	t->throttle_applied = false;
}

static void throttle_work_start(struct throttle *t)
{
	t->threshold = jiffies + THROTTLE_THRESHOLD;
}

static void throttle_work_update(struct throttle *t)
{
	if (!t->throttle_applied && jiffies > t->threshold) {
		down_write(&t->lock);
		t->throttle_applied = true;
	}
}

static void throttle_work_complete(struct throttle *t)
{
	if (t->throttle_applied) {
		t->throttle_applied = false;
		up_write(&t->lock);
	}
}

static void throttle_lock(struct throttle *t)
{
	down_read(&t->lock);
}

static void throttle_unlock(struct throttle *t)
{
	up_read(&t->lock);
}
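
/*
 * A sketch of how the throttle above is used (inferred from the worker
 * code later in this file): the worker brackets each pass with
 * throttle_work_start() / throttle_work_complete() and calls
 * throttle_work_update() periodically as it goes.  If a pass runs longer
 * than THROTTLE_THRESHOLD the worker takes the rw_semaphore for write,
 * which blocks the submission paths that take it for read via
 * throttle_lock() / throttle_unlock(), until the backlog is processed.
 */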

/*----------------------------------------------------------------*/

/*
 * A pool device ties together a metadata device and a data device.  It
 * also provides the interface for creating and destroying internal
 * devices.
 */
struct dm_thin_new_mapping;

/*
 * The pool runs in 4 modes.  Ordered from least to most degraded, so
 * that modes can be compared numerically.
 */
enum pool_mode {
	PM_WRITE,		/* metadata may be changed */
	PM_OUT_OF_DATA_SPACE,	/* metadata may be changed, though data may not be allocated */
	PM_READ_ONLY,		/* metadata may not be changed */
	PM_FAIL,		/* all I/O fails */
};

struct pool_features {
	enum pool_mode mode;

	bool zero_new_blocks:1;
	bool discard_enabled:1;
	bool discard_passdown:1;
	bool error_if_no_space:1;
};

struct thin_c;
typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
typedef void (*process_cell_fn)(struct thin_c *tc, struct dm_bio_prison_cell *cell);
typedef void (*process_mapping_fn)(struct dm_thin_new_mapping *m);

#define CELL_SORT_ARRAY_SIZE 8192

struct pool {
	struct list_head list;
	struct dm_target *ti;	/* Only set if a pool target is bound */

	struct mapped_device *pool_md;
	struct block_device *md_dev;
	struct dm_pool_metadata *pmd;

	dm_block_t low_water_blocks;
	uint32_t sectors_per_block;
	int sectors_per_block_shift;

	struct pool_features pf;
	bool low_water_triggered:1;	/* A dm event has been sent */
	bool suspended:1;
	bool out_of_data_space:1;

	struct dm_bio_prison *prison;
	struct dm_kcopyd_client *copier;

	struct workqueue_struct *wq;
	struct throttle throttle;
	struct work_struct worker;
	struct delayed_work waker;
	struct delayed_work no_space_timeout;

	unsigned long last_commit_jiffies;
	unsigned ref_count;

	spinlock_t lock;
	struct bio_list deferred_flush_bios;
	struct list_head prepared_mappings;
	struct list_head prepared_discards;
	struct list_head active_thins;

	struct dm_deferred_set *shared_read_ds;
	struct dm_deferred_set *all_io_ds;

	struct dm_thin_new_mapping *next_mapping;
	mempool_t *mapping_pool;

	process_bio_fn process_bio;
	process_bio_fn process_discard;

	process_cell_fn process_cell;
	process_cell_fn process_discard_cell;

	process_mapping_fn process_prepared_mapping;
	process_mapping_fn process_prepared_discard;

	struct dm_bio_prison_cell **cell_sort_array;
};

static enum pool_mode get_pool_mode(struct pool *pool);
static void metadata_operation_failed(struct pool *pool, const char *op, int r);

/*
 * Target context for a pool.
 */
struct pool_c {
	struct dm_target *ti;
	struct pool *pool;
	struct dm_dev *data_dev;
	struct dm_dev *metadata_dev;
	struct dm_target_callbacks callbacks;

	dm_block_t low_water_blocks;
	struct pool_features requested_pf; /* Features requested during table load */
	struct pool_features adjusted_pf;  /* Features used after adjusting for constituent devices */
};

/*
 * Target context for a thin.
 */
struct thin_c {
	struct list_head list;
	struct dm_dev *pool_dev;
	struct dm_dev *origin_dev;
	sector_t origin_size;
	dm_thin_id dev_id;

	struct pool *pool;
	struct dm_thin_device *td;
	struct mapped_device *thin_md;

	bool requeue_mode:1;
	spinlock_t lock;
	struct list_head deferred_cells;
	struct bio_list deferred_bio_list;
	struct bio_list retry_on_resume_list;
	struct rb_root sort_bio_list; /* sorted list of deferred bios */

	/*
	 * Ensures the thin is not destroyed until the worker has finished
	 * iterating the active_thins list.
	 */
	atomic_t refcount;
	struct completion can_destroy;
};

/*----------------------------------------------------------------*/

/**
 * __blkdev_issue_discard_async - queue a discard with async completion
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_IFL_* flags to control behaviour
 * @parent_bio: parent discard bio that all sub discards get chained to
 *
 * Description:
 *    Asynchronously issue a discard request for the sectors in question.
 */
static int __blkdev_issue_discard_async(struct block_device *bdev, sector_t sector,
					sector_t nr_sects, gfp_t gfp_mask, unsigned long flags,
					struct bio *parent_bio)
{
	struct request_queue *q = bdev_get_queue(bdev);
	int type = REQ_WRITE | REQ_DISCARD;
	struct bio *bio;

	if (!q || !nr_sects)
		return -ENXIO;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secdiscard(q))
			return -EOPNOTSUPP;
		type |= REQ_SECURE;
	}

	/*
	 * Required bio_put occurs in bio_endio thanks to bio_chain below
	 */
	bio = bio_alloc(gfp_mask, 1);
	if (!bio)
		return -ENOMEM;

	bio_chain(bio, parent_bio);

	bio->bi_iter.bi_sector = sector;
	bio->bi_bdev = bdev;
	bio->bi_iter.bi_size = nr_sects << 9;

	submit_bio(type, bio);

	return 0;
}

static bool block_size_is_power_of_two(struct pool *pool)
{
	return pool->sectors_per_block_shift >= 0;
}

static sector_t block_to_sectors(struct pool *pool, dm_block_t b)
{
	return block_size_is_power_of_two(pool) ?
		(b << pool->sectors_per_block_shift) :
		(b * pool->sectors_per_block);
}

static int issue_discard(struct thin_c *tc, dm_block_t data_b, dm_block_t data_e,
			 struct bio *parent_bio)
{
	sector_t s = block_to_sectors(tc->pool, data_b);
	sector_t len = block_to_sectors(tc->pool, data_e - data_b);

	return __blkdev_issue_discard_async(tc->pool_dev->bdev, s, len,
					    GFP_NOWAIT, 0, parent_bio);
}

/*----------------------------------------------------------------*/

/*
 * wake_worker() is used when new work is queued and when pool_resume is
 * ready to continue deferred IO processing.
 */
static void wake_worker(struct pool *pool)
{
	queue_work(pool->wq, &pool->worker);
}

/*----------------------------------------------------------------*/

static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio,
		      struct dm_bio_prison_cell **cell_result)
{
	int r;
	struct dm_bio_prison_cell *cell_prealloc;

	/*
	 * Allocate a cell from the prison's mempool.
	 * This might block but it can't fail.
	 */
	cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO);

	r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);
	if (r)
		/*
		 * We reused an old cell; we can get rid of
		 * the new one.
		 */
		dm_bio_prison_free_cell(pool->prison, cell_prealloc);

	return r;
}

static void cell_release(struct pool *pool,
			 struct dm_bio_prison_cell *cell,
			 struct bio_list *bios)
{
	dm_cell_release(pool->prison, cell, bios);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_visit_release(struct pool *pool,
			       void (*fn)(void *, struct dm_bio_prison_cell *),
			       void *context,
			       struct dm_bio_prison_cell *cell)
{
	dm_cell_visit_release(pool->prison, fn, context, cell);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_release_no_holder(struct pool *pool,
				   struct dm_bio_prison_cell *cell,
				   struct bio_list *bios)
{
	dm_cell_release_no_holder(pool->prison, cell, bios);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_error_with_code(struct pool *pool,
				 struct dm_bio_prison_cell *cell, int error_code)
{
	dm_cell_error(pool->prison, cell, error_code);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static int get_pool_io_error_code(struct pool *pool)
{
	return pool->out_of_data_space ? -ENOSPC : -EIO;
}

static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	int error = get_pool_io_error_code(pool);

	cell_error_with_code(pool, cell, error);
}

static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	cell_error_with_code(pool, cell, 0);
}

static void cell_requeue(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	cell_error_with_code(pool, cell, DM_ENDIO_REQUEUE);
}
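
/*
 * A sketch of the cell lifecycle implied by the helpers above: a caller
 * builds a key for a block range, detains io against it with
 * bio_detain(), does whatever work needed exclusive access, then hands
 * the cell to exactly one of cell_release(), cell_release_no_holder(),
 * cell_visit_release() or cell_error_with_code() (or its cell_error /
 * cell_success / cell_requeue wrappers), all of which return the cell
 * to the prison's mempool.
 */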

/*----------------------------------------------------------------*/

/*
 * A global list of pools that uses a struct mapped_device as a key.
 */
static struct dm_thin_pool_table {
	struct mutex mutex;
	struct list_head pools;
} dm_thin_pool_table;

static void pool_table_init(void)
{
	mutex_init(&dm_thin_pool_table.mutex);
	INIT_LIST_HEAD(&dm_thin_pool_table.pools);
}

static void __pool_table_insert(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	list_add(&pool->list, &dm_thin_pool_table.pools);
}

static void __pool_table_remove(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	list_del(&pool->list);
}

static struct pool *__pool_table_lookup(struct mapped_device *md)
{
	struct pool *pool = NULL, *tmp;

	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
		if (tmp->pool_md == md) {
			pool = tmp;
			break;
		}
	}

	return pool;
}

static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev)
{
	struct pool *pool = NULL, *tmp;

	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
		if (tmp->md_dev == md_dev) {
			pool = tmp;
			break;
		}
	}

	return pool;
}

/*----------------------------------------------------------------*/

struct dm_thin_endio_hook {
	struct thin_c *tc;
	struct dm_deferred_entry *shared_read_entry;
	struct dm_deferred_entry *all_io_entry;
	struct dm_thin_new_mapping *overwrite_mapping;
	struct rb_node rb_node;
	struct dm_bio_prison_cell *cell;
};

static void __merge_bio_list(struct bio_list *bios, struct bio_list *master)
{
	bio_list_merge(bios, master);
	bio_list_init(master);
}

static void error_bio_list(struct bio_list *bios, int error)
{
	struct bio *bio;

	while ((bio = bio_list_pop(bios))) {
		bio->bi_error = error;
		bio_endio(bio);
	}
}

static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master, int error)
{
	struct bio_list bios;
	unsigned long flags;

	bio_list_init(&bios);

	spin_lock_irqsave(&tc->lock, flags);
	__merge_bio_list(&bios, master);
	spin_unlock_irqrestore(&tc->lock, flags);

	error_bio_list(&bios, error);
}

static void requeue_deferred_cells(struct thin_c *tc)
{
	struct pool *pool = tc->pool;
	unsigned long flags;
	struct list_head cells;
	struct dm_bio_prison_cell *cell, *tmp;

	INIT_LIST_HEAD(&cells);

	spin_lock_irqsave(&tc->lock, flags);
	list_splice_init(&tc->deferred_cells, &cells);
	spin_unlock_irqrestore(&tc->lock, flags);

	list_for_each_entry_safe(cell, tmp, &cells, user_list)
		cell_requeue(pool, cell);
}

static void requeue_io(struct thin_c *tc)
{
	struct bio_list bios;
	unsigned long flags;

	bio_list_init(&bios);

	spin_lock_irqsave(&tc->lock, flags);
	__merge_bio_list(&bios, &tc->deferred_bio_list);
	__merge_bio_list(&bios, &tc->retry_on_resume_list);
	spin_unlock_irqrestore(&tc->lock, flags);

	error_bio_list(&bios, DM_ENDIO_REQUEUE);
	requeue_deferred_cells(tc);
}

static void error_retry_list_with_code(struct pool *pool, int error)
{
	struct thin_c *tc;

	rcu_read_lock();
	list_for_each_entry_rcu(tc, &pool->active_thins, list)
		error_thin_bio_list(tc, &tc->retry_on_resume_list, error);
	rcu_read_unlock();
}
static void error_retry_list(struct pool *pool)
{
	int error = get_pool_io_error_code(pool);

	return error_retry_list_with_code(pool, error);
}

/*
 * This section of code contains the logic for processing a thin device's IO.
 * Much of the code depends on pool object resources (lists, workqueues, etc)
 * but most is exclusively called from the thin target rather than the thin-pool
 * target.
 */

static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	sector_t block_nr = bio->bi_iter.bi_sector;

	if (block_size_is_power_of_two(pool))
		block_nr >>= pool->sectors_per_block_shift;
	else
		(void) sector_div(block_nr, pool->sectors_per_block);

	return block_nr;
}

/*
 * Returns the _complete_ blocks that this bio covers.
 */
static void get_bio_block_range(struct thin_c *tc, struct bio *bio,
				dm_block_t *begin, dm_block_t *end)
{
	struct pool *pool = tc->pool;
	sector_t b = bio->bi_iter.bi_sector;
	sector_t e = b + (bio->bi_iter.bi_size >> SECTOR_SHIFT);

	b += pool->sectors_per_block - 1ull;	/* so we round up */

	if (block_size_is_power_of_two(pool)) {
		b >>= pool->sectors_per_block_shift;
		e >>= pool->sectors_per_block_shift;
	} else {
		(void) sector_div(b, pool->sectors_per_block);
		(void) sector_div(e, pool->sectors_per_block);
	}

	if (e < b)
		/* Can happen if the bio is within a single block. */
		e = b;

	*begin = b;
	*end = e;
}
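
/*
 * Worked example for the two helpers above (numbers are illustrative):
 * with 128-sector blocks (sectors_per_block_shift == 7), a bio starting
 * at sector 300 sits in block 300 >> 7 == 2.  For a bio covering sectors
 * [300, 700), get_bio_block_range() rounds the start up and the end
 * down, yielding the complete blocks [3, 5) -- only blocks the bio
 * covers entirely.  A bio contained within one block yields the empty
 * range, begin == end.
 */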
static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
{
	struct pool *pool = tc->pool;
	sector_t bi_sector = bio->bi_iter.bi_sector;

	bio->bi_bdev = tc->pool_dev->bdev;
	if (block_size_is_power_of_two(pool))
		bio->bi_iter.bi_sector =
			(block << pool->sectors_per_block_shift) |
			(bi_sector & (pool->sectors_per_block - 1));
	else
		bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
				sector_div(bi_sector, pool->sectors_per_block);
}

static void remap_to_origin(struct thin_c *tc, struct bio *bio)
{
	bio->bi_bdev = tc->origin_dev->bdev;
}

static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
{
	return (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
		dm_thin_changed_this_transaction(tc->td);
}

static void inc_all_io_entry(struct pool *pool, struct bio *bio)
{
	struct dm_thin_endio_hook *h;

	if (bio->bi_rw & REQ_DISCARD)
		return;

	h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds);
}

static void issue(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	if (!bio_triggers_commit(tc, bio)) {
		generic_make_request(bio);
		return;
	}

	/*
	 * Complete bio with an error if earlier I/O caused changes to
	 * the metadata that can't be committed, e.g. due to I/O errors
	 * on the metadata device.
	 */
	if (dm_thin_aborted_changes(tc->td)) {
		bio_io_error(bio);
		return;
	}

	/*
	 * Batch together any bios that trigger commits and then issue a
	 * single commit for them in process_deferred_bios().
	 */
	spin_lock_irqsave(&pool->lock, flags);
	bio_list_add(&pool->deferred_flush_bios, bio);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
{
	remap_to_origin(tc, bio);
	issue(tc, bio);
}

static void remap_and_issue(struct thin_c *tc, struct bio *bio,
			    dm_block_t block)
{
	remap(tc, bio, block);
	issue(tc, bio);
}

/*----------------------------------------------------------------*/

/*
 * Bio endio functions.
 */
struct dm_thin_new_mapping {
	struct list_head list;

	bool pass_discard:1;
	bool maybe_shared:1;

	/*
	 * Track quiescing, copying and zeroing preparation actions.  When this
	 * counter hits zero the block is prepared and can be inserted into the
	 * btree.
	 */
	atomic_t prepare_actions;

	int err;
	struct thin_c *tc;
	dm_block_t virt_begin, virt_end;
	dm_block_t data_block;
	struct dm_bio_prison_cell *cell;

	/*
	 * If the bio covers the whole area of a block then we can avoid
	 * zeroing or copying.  Instead this bio is hooked.  The bio will
	 * still be in the cell, so care has to be taken to avoid issuing
	 * the bio twice.
	 */
	struct bio *bio;
	bio_end_io_t *saved_bi_end_io;
};

static void __complete_mapping_preparation(struct dm_thin_new_mapping *m)
{
	struct pool *pool = m->tc->pool;

	if (atomic_dec_and_test(&m->prepare_actions)) {
		list_add_tail(&m->list, &pool->prepared_mappings);
		wake_worker(pool);
	}
}

static void complete_mapping_preparation(struct dm_thin_new_mapping *m)
{
	unsigned long flags;
	struct pool *pool = m->tc->pool;

	spin_lock_irqsave(&pool->lock, flags);
	__complete_mapping_preparation(m);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void copy_complete(int read_err, unsigned long write_err, void *context)
{
	struct dm_thin_new_mapping *m = context;

	m->err = read_err || write_err ? -EIO : 0;
	complete_mapping_preparation(m);
}

static void overwrite_endio(struct bio *bio)
{
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct dm_thin_new_mapping *m = h->overwrite_mapping;

	bio->bi_end_io = m->saved_bi_end_io;

	m->err = bio->bi_error;
	complete_mapping_preparation(m);
}

/*----------------------------------------------------------------*/

/*
 * Workqueue.
 */

/*
 * Prepared mapping jobs.
 */

/*
 * This sends the bios in the cell, except the original holder, back
 * to the deferred_bios list.
 */
static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&tc->lock, flags);
	cell_release_no_holder(pool, cell, &tc->deferred_bio_list);
	spin_unlock_irqrestore(&tc->lock, flags);

	wake_worker(pool);
}

static void thin_defer_bio(struct thin_c *tc, struct bio *bio);

struct remap_info {
	struct thin_c *tc;
	struct bio_list defer_bios;
	struct bio_list issue_bios;
};

static void __inc_remap_and_issue_cell(void *context,
				       struct dm_bio_prison_cell *cell)
{
	struct remap_info *info = context;
	struct bio *bio;

	while ((bio = bio_list_pop(&cell->bios))) {
		if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA))
			bio_list_add(&info->defer_bios, bio);
		else {
			inc_all_io_entry(info->tc->pool, bio);

			/*
			 * We can't issue the bios with the bio prison lock
			 * held, so we add them to a list to issue on
			 * return from this function.
			 */
			bio_list_add(&info->issue_bios, bio);
		}
	}
}

static void inc_remap_and_issue_cell(struct thin_c *tc,
				     struct dm_bio_prison_cell *cell,
				     dm_block_t block)
{
	struct bio *bio;
	struct remap_info info;

	info.tc = tc;
	bio_list_init(&info.defer_bios);
	bio_list_init(&info.issue_bios);

	/*
	 * We have to be careful to inc any bios we're about to issue
	 * before the cell is released, and avoid a race with new bios
	 * being added to the cell.
	 */
	cell_visit_release(tc->pool, __inc_remap_and_issue_cell,
			   &info, cell);

	while ((bio = bio_list_pop(&info.defer_bios)))
		thin_defer_bio(tc, bio);

	while ((bio = bio_list_pop(&info.issue_bios)))
		remap_and_issue(info.tc, bio, block);
}

static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
{
	cell_error(m->tc->pool, m->cell);
	list_del(&m->list);
	mempool_free(m, m->tc->pool->mapping_pool);
}

static void process_prepared_mapping(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;
	struct pool *pool = tc->pool;
	struct bio *bio = m->bio;
	int r;

	if (m->err) {
		cell_error(pool, m->cell);
		goto out;
	}

	/*
	 * Commit the prepared block into the mapping btree.
	 * Any I/O for this block arriving after this point will get
	 * remapped to it directly.
	 */
	r = dm_thin_insert_block(tc->td, m->virt_begin, m->data_block);
	if (r) {
		metadata_operation_failed(pool, "dm_thin_insert_block", r);
		cell_error(pool, m->cell);
		goto out;
	}

	/*
	 * Release any bios held while the block was being provisioned.
	 * If we are processing a write bio that completely covers the block,
	 * we already processed it so can ignore it now when processing
	 * the bios in the cell.
	 */
	if (bio) {
		inc_remap_and_issue_cell(tc, m->cell, m->data_block);
		bio_endio(bio);
	} else {
		inc_all_io_entry(tc->pool, m->cell->holder);
		remap_and_issue(tc, m->cell->holder, m->data_block);
		inc_remap_and_issue_cell(tc, m->cell, m->data_block);
	}

out:
	list_del(&m->list);
	mempool_free(m, pool->mapping_pool);
}

/*----------------------------------------------------------------*/

static void free_discard_mapping(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;

	if (m->cell)
		cell_defer_no_holder(tc, m->cell);
	mempool_free(m, tc->pool->mapping_pool);
}

static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
{
	bio_io_error(m->bio);
	free_discard_mapping(m);
}

static void process_prepared_discard_success(struct dm_thin_new_mapping *m)
{
	bio_endio(m->bio);
	free_discard_mapping(m);
}

static void process_prepared_discard_no_passdown(struct dm_thin_new_mapping *m)
{
	int r;
	struct thin_c *tc = m->tc;

	r = dm_thin_remove_range(tc->td, m->cell->key.block_begin, m->cell->key.block_end);
	if (r) {
		metadata_operation_failed(tc->pool, "dm_thin_remove_range", r);
		bio_io_error(m->bio);
	} else
		bio_endio(m->bio);

	cell_defer_no_holder(tc, m->cell);
	mempool_free(m, tc->pool->mapping_pool);
}

static int passdown_double_checking_shared_status(struct dm_thin_new_mapping *m)
{
	/*
	 * We've already unmapped this range of blocks, but before we
	 * passdown we have to check that these blocks are now unused.
	 */
	int r;
	bool used = true;
	struct thin_c *tc = m->tc;
	struct pool *pool = tc->pool;
	dm_block_t b = m->data_block, e, end = m->data_block + m->virt_end - m->virt_begin;

	while (b != end) {
		/* find start of unmapped run */
		for (; b < end; b++) {
			r = dm_pool_block_is_used(pool->pmd, b, &used);
			if (r)
				return r;

			if (!used)
				break;
		}

		if (b == end)
			break;

		/* find end of run */
		for (e = b + 1; e != end; e++) {
			r = dm_pool_block_is_used(pool->pmd, e, &used);
			if (r)
				return r;

			if (used)
				break;
		}

		r = issue_discard(tc, b, e, m->bio);
		if (r)
			return r;

		b = e;
	}

	return 0;
}
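
/*
 * Illustration of the run detection above (block numbers invented for
 * the example): if the unmapped data range covers blocks [10, 16) and
 * dm_pool_block_is_used() reports the pattern used, free, free, used,
 * free, free, the loop issues two discards, one for [11, 13) and one
 * for [14, 16), skipping the blocks some other thin device still maps.
 */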
static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
{
	int r;
	struct thin_c *tc = m->tc;
	struct pool *pool = tc->pool;

	r = dm_thin_remove_range(tc->td, m->virt_begin, m->virt_end);
	if (r)
		metadata_operation_failed(pool, "dm_thin_remove_range", r);

	else if (m->maybe_shared)
		r = passdown_double_checking_shared_status(m);
	else
		r = issue_discard(tc, m->data_block, m->data_block + (m->virt_end - m->virt_begin), m->bio);

	/*
	 * Even if r is set, there could be sub discards in flight that we
	 * need to wait for.
	 */
	m->bio->bi_error = r;
	bio_endio(m->bio);
	cell_defer_no_holder(tc, m->cell);
	mempool_free(m, pool->mapping_pool);
}

static void process_prepared(struct pool *pool, struct list_head *head,
			     process_mapping_fn *fn)
{
	unsigned long flags;
	struct list_head maps;
	struct dm_thin_new_mapping *m, *tmp;

	INIT_LIST_HEAD(&maps);
	spin_lock_irqsave(&pool->lock, flags);
	list_splice_init(head, &maps);
	spin_unlock_irqrestore(&pool->lock, flags);

	list_for_each_entry_safe(m, tmp, &maps, list)
		(*fn)(m);
}

/*
 * Deferred bio jobs.
 */
static int io_overlaps_block(struct pool *pool, struct bio *bio)
{
	return bio->bi_iter.bi_size ==
		(pool->sectors_per_block << SECTOR_SHIFT);
}

static int io_overwrites_block(struct pool *pool, struct bio *bio)
{
	return (bio_data_dir(bio) == WRITE) &&
		io_overlaps_block(pool, bio);
}

static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
			       bio_end_io_t *fn)
{
	*save = bio->bi_end_io;
	bio->bi_end_io = fn;
}

static int ensure_next_mapping(struct pool *pool)
{
	if (pool->next_mapping)
		return 0;

	pool->next_mapping = mempool_alloc(pool->mapping_pool, GFP_ATOMIC);

	return pool->next_mapping ? 0 : -ENOMEM;
}

static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
{
	struct dm_thin_new_mapping *m = pool->next_mapping;

	BUG_ON(!pool->next_mapping);

	memset(m, 0, sizeof(struct dm_thin_new_mapping));
	INIT_LIST_HEAD(&m->list);
	m->bio = NULL;

	pool->next_mapping = NULL;

	return m;
}

static void ll_zero(struct thin_c *tc, struct dm_thin_new_mapping *m,
		    sector_t begin, sector_t end)
{
	int r;
	struct dm_io_region to;

	to.bdev = tc->pool_dev->bdev;
	to.sector = begin;
	to.count = end - begin;

	r = dm_kcopyd_zero(tc->pool->copier, 1, &to, 0, copy_complete, m);
	if (r < 0) {
		DMERR_LIMIT("dm_kcopyd_zero() failed");
		copy_complete(1, 1, m);
	}
}

static void remap_and_issue_overwrite(struct thin_c *tc, struct bio *bio,
				      dm_block_t data_begin,
				      struct dm_thin_new_mapping *m)
{
	struct pool *pool = tc->pool;
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

	h->overwrite_mapping = m;
	m->bio = bio;
	save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
	inc_all_io_entry(pool, bio);
	remap_and_issue(tc, bio, data_begin);
}

/*
 * A partial copy also needs to zero the uncopied region.
 */
static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
			  struct dm_dev *origin, dm_block_t data_origin,
			  dm_block_t data_dest,
			  struct dm_bio_prison_cell *cell, struct bio *bio,
			  sector_t len)
{
	int r;
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	m->tc = tc;
	m->virt_begin = virt_block;
	m->virt_end = virt_block + 1u;
	m->data_block = data_dest;
	m->cell = cell;

	/*
	 * quiesce action + copy action + an extra reference held for the
	 * duration of this function (we may need to inc later for a
	 * partial zero).
	 */
	atomic_set(&m->prepare_actions, 3);

	if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
		complete_mapping_preparation(m); /* already quiesced */

	/*
	 * IO to pool_dev remaps to the pool target's data_dev.
	 *
	 * If the whole block of data is being overwritten, we can issue the
	 * bio immediately.  Otherwise we use kcopyd to clone the data first.
	 */
	if (io_overwrites_block(pool, bio))
		remap_and_issue_overwrite(tc, bio, data_dest, m);
	else {
		struct dm_io_region from, to;

		from.bdev = origin->bdev;
		from.sector = data_origin * pool->sectors_per_block;
		from.count = len;

		to.bdev = tc->pool_dev->bdev;
		to.sector = data_dest * pool->sectors_per_block;
		to.count = len;

		r = dm_kcopyd_copy(pool->copier, &from, 1, &to,
				   0, copy_complete, m);
		if (r < 0) {
			DMERR_LIMIT("dm_kcopyd_copy() failed");
			copy_complete(1, 1, m);

			/*
			 * We allow the zero to be issued, to simplify the
			 * error path.  Otherwise we'd need to start
			 * worrying about decrementing the prepare_actions
			 * counter.
			 */
		}

		/*
		 * Do we need to zero a tail region?
		 */
		if (len < pool->sectors_per_block && pool->pf.zero_new_blocks) {
			atomic_inc(&m->prepare_actions);
			ll_zero(tc, m,
				data_dest * pool->sectors_per_block + len,
				(data_dest + 1) * pool->sectors_per_block);
		}
	}

	complete_mapping_preparation(m); /* drop our ref */
}
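
/*
 * A sketch of the prepare_actions accounting above: for a partial copy
 * with zero_new_blocks enabled the counter goes 3 (initial) -> 4 (tail
 * zero), then is decremented once by the quiesce, once by the copy's
 * copy_complete(), once by the tail zero's copy_complete() and once by
 * the drop of our own reference; only the final decrement moves the
 * mapping onto prepared_mappings and wakes the worker.
 */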
static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_origin, dm_block_t data_dest,
				   struct dm_bio_prison_cell *cell, struct bio *bio)
{
	schedule_copy(tc, virt_block, tc->pool_dev,
		      data_origin, data_dest, cell, bio,
		      tc->pool->sectors_per_block);
}

static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
			  dm_block_t data_block, struct dm_bio_prison_cell *cell,
			  struct bio *bio)
{
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	atomic_set(&m->prepare_actions, 1); /* no need to quiesce */
	m->tc = tc;
	m->virt_begin = virt_block;
	m->virt_end = virt_block + 1u;
	m->data_block = data_block;
	m->cell = cell;

	/*
	 * If the whole block of data is being overwritten or we are not
	 * zeroing pre-existing data, we can issue the bio immediately.
	 * Otherwise we use kcopyd to zero the data first.
	 */
	if (pool->pf.zero_new_blocks) {
		if (io_overwrites_block(pool, bio))
			remap_and_issue_overwrite(tc, bio, data_block, m);
		else
			ll_zero(tc, m, data_block * pool->sectors_per_block,
				(data_block + 1) * pool->sectors_per_block);
	} else
		process_prepared_mapping(m);
}

static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_dest,
				   struct dm_bio_prison_cell *cell, struct bio *bio)
{
	struct pool *pool = tc->pool;
	sector_t virt_block_begin = virt_block * pool->sectors_per_block;
	sector_t virt_block_end = (virt_block + 1) * pool->sectors_per_block;

	if (virt_block_end <= tc->origin_size)
		schedule_copy(tc, virt_block, tc->origin_dev,
			      virt_block, data_dest, cell, bio,
			      pool->sectors_per_block);

	else if (virt_block_begin < tc->origin_size)
		schedule_copy(tc, virt_block, tc->origin_dev,
			      virt_block, data_dest, cell, bio,
			      tc->origin_size - virt_block_begin);

	else
		schedule_zero(tc, virt_block, data_dest, cell, bio);
}

static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);

static void check_for_space(struct pool *pool)
{
	int r;
	dm_block_t nr_free;

	if (get_pool_mode(pool) != PM_OUT_OF_DATA_SPACE)
		return;

	r = dm_pool_get_free_block_count(pool->pmd, &nr_free);
	if (r)
		return;

	if (nr_free)
		set_pool_mode(pool, PM_WRITE);
}

/*
 * A non-zero return indicates read_only or fail_io mode.
 * Many callers don't care about the return value.
 */
static int commit(struct pool *pool)
{
	int r;

	if (get_pool_mode(pool) >= PM_READ_ONLY)
		return -EINVAL;

	r = dm_pool_commit_metadata(pool->pmd);
	if (r)
		metadata_operation_failed(pool, "dm_pool_commit_metadata", r);
	else
		check_for_space(pool);

	return r;
}
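
/*
 * Note how commit() relies on the ordering of enum pool_mode: the
 * comparison get_pool_mode(pool) >= PM_READ_ONLY is true for both
 * PM_READ_ONLY and PM_FAIL, i.e. for every mode in which the metadata
 * may not be changed.  This is the numeric-comparison idiom the enum's
 * least-to-most-degraded ordering exists to support.
 */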
static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
{
	unsigned long flags;

	if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
		DMWARN("%s: reached low water mark for data device: sending event.",
		       dm_device_name(pool->pool_md));
		spin_lock_irqsave(&pool->lock, flags);
		pool->low_water_triggered = true;
		spin_unlock_irqrestore(&pool->lock, flags);
		dm_table_event(pool->ti->table);
	}
}

static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
{
	int r;
	dm_block_t free_blocks;
	struct pool *pool = tc->pool;

	if (WARN_ON(get_pool_mode(pool) != PM_WRITE))
		return -EINVAL;

	r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
	if (r) {
		metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
		return r;
	}

	check_low_water_mark(pool, free_blocks);

	if (!free_blocks) {
		/*
		 * Try to commit to see if that will free up some
		 * more space.
		 */
		r = commit(pool);
		if (r)
			return r;

		r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
		if (r) {
			metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
			return r;
		}

		if (!free_blocks) {
			set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
			return -ENOSPC;
		}
	}

	r = dm_pool_alloc_data_block(pool->pmd, result);
	if (r) {
		metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
		return r;
	}

	return 0;
}

/*
 * If we have run out of space, queue bios until the device is
 * resumed, presumably after having been reloaded with more space.
 */
static void retry_on_resume(struct bio *bio)
{
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct thin_c *tc = h->tc;
	unsigned long flags;

	spin_lock_irqsave(&tc->lock, flags);
	bio_list_add(&tc->retry_on_resume_list, bio);
	spin_unlock_irqrestore(&tc->lock, flags);
}

static int should_error_unserviceable_bio(struct pool *pool)
{
	enum pool_mode m = get_pool_mode(pool);

	switch (m) {
	case PM_WRITE:
		/* Shouldn't get here */
		DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode");
		return -EIO;

	case PM_OUT_OF_DATA_SPACE:
		return pool->pf.error_if_no_space ? -ENOSPC : 0;

	case PM_READ_ONLY:
	case PM_FAIL:
		return -EIO;
	default:
		/* Shouldn't get here */
		DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode");
		return -EIO;
	}
}

static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
{
	int error = should_error_unserviceable_bio(pool);

	if (error) {
		bio->bi_error = error;
		bio_endio(bio);
	} else
		retry_on_resume(bio);
}

static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	struct bio *bio;
	struct bio_list bios;
	int error;

	error = should_error_unserviceable_bio(pool);
	if (error) {
		cell_error_with_code(pool, cell, error);
		return;
	}

	bio_list_init(&bios);
	cell_release(pool, cell, &bios);

	while ((bio = bio_list_pop(&bios)))
		retry_on_resume(bio);
}

static void process_discard_cell_no_passdown(struct thin_c *tc,
					     struct dm_bio_prison_cell *virt_cell)
{
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	/*
	 * We don't need to lock the data blocks, since there's no
	 * passdown.  We only lock data blocks for allocation and breaking sharing.
	 */
	m->tc = tc;
	m->virt_begin = virt_cell->key.block_begin;
	m->virt_end = virt_cell->key.block_end;
	m->cell = virt_cell;
	m->bio = virt_cell->holder;

	if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list))
		pool->process_prepared_discard(m);
}
/*
 * __bio_inc_remaining() is used to defer a parent bio's end_io until
 * we _know_ all chained sub range discard bios have completed.
 */
static inline void __bio_inc_remaining(struct bio *bio)
{
	bio->bi_flags |= (1 << BIO_CHAIN);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_remaining);
}

static void break_up_discard_bio(struct thin_c *tc, dm_block_t begin, dm_block_t end,
				 struct bio *bio)
{
	struct pool *pool = tc->pool;

	int r;
	bool maybe_shared;
	struct dm_cell_key data_key;
	struct dm_bio_prison_cell *data_cell;
	struct dm_thin_new_mapping *m;
	dm_block_t virt_begin, virt_end, data_begin;

	while (begin != end) {
		r = ensure_next_mapping(pool);
		if (r)
			/* we did our best */
			return;

		r = dm_thin_find_mapped_range(tc->td, begin, end, &virt_begin, &virt_end,
					      &data_begin, &maybe_shared);
		if (r)
			/*
			 * Silently fail, letting any mappings we've
			 * created complete.
			 */
			break;

		build_key(tc->td, PHYSICAL, data_begin, data_begin + (virt_end - virt_begin), &data_key);
		if (bio_detain(tc->pool, &data_key, NULL, &data_cell)) {
			/* contention, we'll give up with this range */
			begin = virt_end;
			continue;
		}

		/*
		 * IO may still be going to the destination block.  We must
		 * quiesce before we can do the removal.
		 */
		m = get_next_mapping(pool);
		m->tc = tc;
		m->maybe_shared = maybe_shared;
		m->virt_begin = virt_begin;
		m->virt_end = virt_end;
		m->data_block = data_begin;
		m->cell = data_cell;
		m->bio = bio;

		/*
		 * The parent bio must not complete before sub discard bios are
		 * chained to it (see __blkdev_issue_discard_async's bio_chain)!
		 *
		 * This per-mapping bi_remaining increment is paired with
		 * the implicit decrement that occurs via bio_endio() in
		 * process_prepared_discard_{passdown,no_passdown}.
		 */
		__bio_inc_remaining(bio);
		if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list))
			pool->process_prepared_discard(m);

		begin = virt_end;
	}
}

static void process_discard_cell_passdown(struct thin_c *tc, struct dm_bio_prison_cell *virt_cell)
{
	struct bio *bio = virt_cell->holder;
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

	/*
	 * The virt_cell will only get freed once the origin bio completes.
	 * This means it will remain locked while all the individual
	 * passdown bios are in flight.
	 */
	h->cell = virt_cell;
	break_up_discard_bio(tc, virt_cell->key.block_begin, virt_cell->key.block_end, bio);

	/*
	 * We complete the bio now, knowing that the bi_remaining field
	 * will prevent completion until the sub range discards have
	 * completed.
	 */
	bio_endio(bio);
}

static void process_discard_bio(struct thin_c *tc, struct bio *bio)
{
	dm_block_t begin, end;
	struct dm_cell_key virt_key;
	struct dm_bio_prison_cell *virt_cell;

	get_bio_block_range(tc, bio, &begin, &end);
	if (begin == end) {
		/*
		 * The discard covers less than a block.
		 */
		bio_endio(bio);
		return;
	}

	build_key(tc->td, VIRTUAL, begin, end, &virt_key);
	if (bio_detain(tc->pool, &virt_key, bio, &virt_cell))
		/*
		 * Potential starvation issue: We're relying on the
		 * fs/application being well behaved, and not trying to
		 * send IO to a region at the same time as discarding it.
		 * If they do this persistently then it's possible this
		 * cell will never be granted.
		 */
		return;

	tc->pool->process_discard_cell(tc, virt_cell);
}

static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
			  struct dm_cell_key *key,
			  struct dm_thin_lookup_result *lookup_result,
			  struct dm_bio_prison_cell *cell)
{
	int r;
	dm_block_t data_block;
	struct pool *pool = tc->pool;

	r = alloc_data_block(tc, &data_block);
	switch (r) {
	case 0:
		schedule_internal_copy(tc, block, lookup_result->block,
				       data_block, cell, bio);
		break;

	case -ENOSPC:
		retry_bios_on_resume(pool, cell);
		break;

	default:
		DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
			    __func__, r);
		cell_error(pool, cell);
		break;
	}
}

static void __remap_and_issue_shared_cell(void *context,
					  struct dm_bio_prison_cell *cell)
{
	struct remap_info *info = context;
	struct bio *bio;

	while ((bio = bio_list_pop(&cell->bios))) {
		if ((bio_data_dir(bio) == WRITE) ||
		    (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)))
			bio_list_add(&info->defer_bios, bio);
		else {
			struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

			h->shared_read_entry = dm_deferred_entry_inc(info->tc->pool->shared_read_ds);
			inc_all_io_entry(info->tc->pool, bio);
			bio_list_add(&info->issue_bios, bio);
		}
	}
}

static void remap_and_issue_shared_cell(struct thin_c *tc,
					struct dm_bio_prison_cell *cell,
					dm_block_t block)
{
	struct bio *bio;
	struct remap_info info;

	info.tc = tc;
	bio_list_init(&info.defer_bios);
	bio_list_init(&info.issue_bios);

	cell_visit_release(tc->pool, __remap_and_issue_shared_cell,
			   &info, cell);

	while ((bio = bio_list_pop(&info.defer_bios)))
		thin_defer_bio(tc, bio);

	while ((bio = bio_list_pop(&info.issue_bios)))
		remap_and_issue(tc, bio, block);
}

static void process_shared_bio(struct thin_c *tc, struct bio *bio,
			       dm_block_t block,
			       struct dm_thin_lookup_result *lookup_result,
			       struct dm_bio_prison_cell *virt_cell)
{
	struct dm_bio_prison_cell *data_cell;
	struct pool *pool = tc->pool;
	struct dm_cell_key key;

	/*
	 * If cell is already occupied, then sharing is already in the process
	 * of being broken so we have nothing further to do here.
	 */
	build_data_key(tc->td, lookup_result->block, &key);
	if (bio_detain(pool, &key, bio, &data_cell)) {
		cell_defer_no_holder(tc, virt_cell);
		return;
	}

	if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size) {
		break_sharing(tc, bio, block, &key, lookup_result, data_cell);
		cell_defer_no_holder(tc, virt_cell);
	} else {
		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

		h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
		inc_all_io_entry(pool, bio);
		remap_and_issue(tc, bio, lookup_result->block);

		remap_and_issue_shared_cell(tc, data_cell, lookup_result->block);
		remap_and_issue_shared_cell(tc, virt_cell, lookup_result->block);
	}
}

static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
			    struct dm_bio_prison_cell *cell)
{
	int r;
	dm_block_t data_block;
	struct pool *pool = tc->pool;

	/*
	 * Remap empty bios (flushes) immediately, without provisioning.
	 */
	if (!bio->bi_iter.bi_size) {
		inc_all_io_entry(pool, bio);
		cell_defer_no_holder(tc, cell);

		remap_and_issue(tc, bio, 0);
		return;
	}

	/*
	 * Fill read bios with zeroes and complete them immediately.
	 */
	if (bio_data_dir(bio) == READ) {
		zero_fill_bio(bio);
		cell_defer_no_holder(tc, cell);
		bio_endio(bio);
		return;
	}

	r = alloc_data_block(tc, &data_block);
	switch (r) {
	case 0:
		if (tc->origin_dev)
			schedule_external_copy(tc, block, data_block, cell, bio);
		else
			schedule_zero(tc, block, data_block, cell, bio);
		break;

	case -ENOSPC:
		retry_bios_on_resume(pool, cell);
		break;

	default:
		DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
			    __func__, r);
		cell_error(pool, cell);
		break;
	}
}

static void process_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	int r;
	struct pool *pool = tc->pool;
	struct bio *bio = cell->holder;
	dm_block_t block = get_bio_block(tc, bio);
	struct dm_thin_lookup_result lookup_result;

	if (tc->requeue_mode) {
		cell_requeue(pool, cell);
		return;
	}

	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
	switch (r) {
	case 0:
		if (lookup_result.shared)
			process_shared_bio(tc, bio, block, &lookup_result, cell);
		else {
			inc_all_io_entry(pool, bio);
			remap_and_issue(tc, bio, lookup_result.block);
			inc_remap_and_issue_cell(tc, cell, lookup_result.block);
		}
		break;

	case -ENODATA:
		if (bio_data_dir(bio) == READ && tc->origin_dev) {
			inc_all_io_entry(pool, bio);
			cell_defer_no_holder(tc, cell);

			if (bio_end_sector(bio) <= tc->origin_size)
				remap_to_origin_and_issue(tc, bio);

			else if (bio->bi_iter.bi_sector < tc->origin_size) {
				zero_fill_bio(bio);
				bio->bi_iter.bi_size = (tc->origin_size - bio->bi_iter.bi_sector) << SECTOR_SHIFT;
				remap_to_origin_and_issue(tc, bio);

			} else {
				zero_fill_bio(bio);
				bio_endio(bio);
			}
		} else
			provision_block(tc, bio, block, cell);
		break;

	default:
		DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
			    __func__, r);
		cell_defer_no_holder(tc, cell);
		bio_io_error(bio);
		break;
	}
}

static void process_bio(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	dm_block_t block = get_bio_block(tc, bio);
	struct dm_bio_prison_cell *cell;
	struct dm_cell_key key;

	/*
	 * If cell is already occupied, then the block is already
	 * being provisioned so we have nothing further to do here.
	 */
	build_virtual_key(tc->td, block, &key);
	if (bio_detain(pool, &key, bio, &cell))
		return;

	process_cell(tc, cell);
}

static void __process_bio_read_only(struct thin_c *tc, struct bio *bio,
				    struct dm_bio_prison_cell *cell)
{
	int r;
	int rw = bio_data_dir(bio);
	dm_block_t block = get_bio_block(tc, bio);
	struct dm_thin_lookup_result lookup_result;

	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
	switch (r) {
	case 0:
		if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size) {
			handle_unserviceable_bio(tc->pool, bio);
			if (cell)
				cell_defer_no_holder(tc, cell);
		} else {
			inc_all_io_entry(tc->pool, bio);
			remap_and_issue(tc, bio, lookup_result.block);
			if (cell)
				inc_remap_and_issue_cell(tc, cell, lookup_result.block);
		}
		break;

	case -ENODATA:
		if (cell)
			cell_defer_no_holder(tc, cell);
		if (rw != READ) {
			handle_unserviceable_bio(tc->pool, bio);
			break;
		}

		if (tc->origin_dev) {
			inc_all_io_entry(tc->pool, bio);
			remap_to_origin_and_issue(tc, bio);
			break;
		}

		zero_fill_bio(bio);
		bio_endio(bio);
		break;

	default:
		DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
			    __func__, r);
		if (cell)
			cell_defer_no_holder(tc, cell);
		bio_io_error(bio);
		break;
	}
}

static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
{
	__process_bio_read_only(tc, bio, NULL);
}

static void process_cell_read_only(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	__process_bio_read_only(tc, cell->holder, cell);
}

static void process_bio_success(struct thin_c *tc, struct bio *bio)
{
	bio_endio(bio);
}

static void process_bio_fail(struct thin_c *tc, struct bio *bio)
{
	bio_io_error(bio);
}

static void process_cell_success(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	cell_success(tc->pool, cell);
}

static void process_cell_fail(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	cell_error(tc->pool, cell);
}

/*
 * FIXME: should we also commit due to size of transaction, measured in
 * metadata blocks?
 */
static int need_commit_due_to_time(struct pool *pool)
{
	return !time_in_range(jiffies, pool->last_commit_jiffies,
			      pool->last_commit_jiffies + COMMIT_PERIOD);
}

#define thin_pbd(node) rb_entry((node), struct dm_thin_endio_hook, rb_node)
#define thin_bio(pbd) dm_bio_from_per_bio_data((pbd), sizeof(struct dm_thin_endio_hook))

static void __thin_bio_rb_add(struct thin_c *tc, struct bio *bio)
{
	struct rb_node **rbp, *parent;
	struct dm_thin_endio_hook *pbd;
	sector_t bi_sector = bio->bi_iter.bi_sector;

	rbp = &tc->sort_bio_list.rb_node;
	parent = NULL;
	while (*rbp) {
		parent = *rbp;
		pbd = thin_pbd(parent);

		if (bi_sector < thin_bio(pbd)->bi_iter.bi_sector)
			rbp = &(*rbp)->rb_left;
		else
			rbp = &(*rbp)->rb_right;
	}

	pbd = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	rb_link_node(&pbd->rb_node, parent, rbp);
	rb_insert_color(&pbd->rb_node, &tc->sort_bio_list);
}

static void __extract_sorted_bios(struct thin_c *tc)
{
	struct rb_node *node;
	struct dm_thin_endio_hook *pbd;
	struct bio *bio;

	for (node = rb_first(&tc->sort_bio_list); node; node = rb_next(node)) {
		pbd = thin_pbd(node);
		bio = thin_bio(pbd);

		bio_list_add(&tc->deferred_bio_list, bio);
		rb_erase(&pbd->rb_node, &tc->sort_bio_list);
	}

	WARN_ON(!RB_EMPTY_ROOT(&tc->sort_bio_list));
}

static void __sort_thin_deferred_bios(struct thin_c *tc)
{
	struct bio *bio;
	struct bio_list bios;

	bio_list_init(&bios);
	bio_list_merge(&bios, &tc->deferred_bio_list);
	bio_list_init(&tc->deferred_bio_list);

	/* Sort deferred_bio_list using rb-tree */
	while ((bio = bio_list_pop(&bios)))
		__thin_bio_rb_add(tc, bio);

	/*
	 * Transfer the sorted bios in sort_bio_list back to
	 * deferred_bio_list to allow lockless submission of
	 * all bios.
	 */
	__extract_sorted_bios(tc);
}

static void process_thin_deferred_bios(struct thin_c *tc)
{
	struct pool *pool = tc->pool;
	unsigned long flags;
	struct bio *bio;
	struct bio_list bios;
	struct blk_plug plug;
	unsigned count = 0;

	if (tc->requeue_mode) {
		error_thin_bio_list(tc, &tc->deferred_bio_list, DM_ENDIO_REQUEUE);
		return;
	}

	bio_list_init(&bios);

	spin_lock_irqsave(&tc->lock, flags);

	if (bio_list_empty(&tc->deferred_bio_list)) {
		spin_unlock_irqrestore(&tc->lock, flags);
		return;
	}

	__sort_thin_deferred_bios(tc);

	bio_list_merge(&bios, &tc->deferred_bio_list);
	bio_list_init(&tc->deferred_bio_list);

	spin_unlock_irqrestore(&tc->lock, flags);

	blk_start_plug(&plug);
	while ((bio = bio_list_pop(&bios))) {
		/*
		 * If we've got no free new_mapping structs, and processing
		 * this bio might require one, we pause until there are some
		 * prepared mappings to process.
		 */
		if (ensure_next_mapping(pool)) {
			spin_lock_irqsave(&tc->lock, flags);
			bio_list_add(&tc->deferred_bio_list, bio);
			bio_list_merge(&tc->deferred_bio_list, &bios);
			spin_unlock_irqrestore(&tc->lock, flags);
			break;
		}

		if (bio->bi_rw & REQ_DISCARD)
			pool->process_discard(tc, bio);
		else
			pool->process_bio(tc, bio);

		if ((count++ & 127) == 0) {
			throttle_work_update(&pool->throttle);
			dm_pool_issue_prefetches(pool->pmd);
		}
	}
	blk_finish_plug(&plug);
}
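
/*
 * Deferred cells are sorted the same way the deferred bios above are:
 * by the holder bio's starting sector.  As with the rb-tree sort above,
 * the intent is presumably to keep the resulting remapped io roughly
 * sequential on the data device.
 */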
static void process_thin_deferred_bios(struct thin_c *tc)
{
	struct pool *pool = tc->pool;
	unsigned long flags;
	struct bio *bio;
	struct bio_list bios;
	struct blk_plug plug;
	unsigned count = 0;

	if (tc->requeue_mode) {
		error_thin_bio_list(tc, &tc->deferred_bio_list, DM_ENDIO_REQUEUE);
		return;
	}

	bio_list_init(&bios);

	spin_lock_irqsave(&tc->lock, flags);

	if (bio_list_empty(&tc->deferred_bio_list)) {
		spin_unlock_irqrestore(&tc->lock, flags);
		return;
	}

	__sort_thin_deferred_bios(tc);

	bio_list_merge(&bios, &tc->deferred_bio_list);
	bio_list_init(&tc->deferred_bio_list);

	spin_unlock_irqrestore(&tc->lock, flags);

	blk_start_plug(&plug);
	while ((bio = bio_list_pop(&bios))) {
		/*
		 * If we've got no free new_mapping structs, and processing
		 * this bio might require one, we pause until there are some
		 * prepared mappings to process.
		 */
		if (ensure_next_mapping(pool)) {
			spin_lock_irqsave(&tc->lock, flags);
			bio_list_add(&tc->deferred_bio_list, bio);
			bio_list_merge(&tc->deferred_bio_list, &bios);
			spin_unlock_irqrestore(&tc->lock, flags);
			break;
		}

		if (bio->bi_rw & REQ_DISCARD)
			pool->process_discard(tc, bio);
		else
			pool->process_bio(tc, bio);

		if ((count++ & 127) == 0) {
			throttle_work_update(&pool->throttle);
			dm_pool_issue_prefetches(pool->pmd);
		}
	}
	blk_finish_plug(&plug);
}
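/*
 * Deferred cells get similar treatment, but in bounded batches: each
 * pass pulls at most CELL_SORT_ARRAY_SIZE cells off the list, sorts
 * them by the holder bio's sector and processes them in that order.
 */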
static int cmp_cells(const void *lhs, const void *rhs)
{
	struct dm_bio_prison_cell *lhs_cell = *((struct dm_bio_prison_cell **) lhs);
	struct dm_bio_prison_cell *rhs_cell = *((struct dm_bio_prison_cell **) rhs);

	BUG_ON(!lhs_cell->holder);
	BUG_ON(!rhs_cell->holder);

	if (lhs_cell->holder->bi_iter.bi_sector < rhs_cell->holder->bi_iter.bi_sector)
		return -1;

	if (lhs_cell->holder->bi_iter.bi_sector > rhs_cell->holder->bi_iter.bi_sector)
		return 1;

	return 0;
}

static unsigned sort_cells(struct pool *pool, struct list_head *cells)
{
	unsigned count = 0;
	struct dm_bio_prison_cell *cell, *tmp;

	list_for_each_entry_safe(cell, tmp, cells, user_list) {
		if (count >= CELL_SORT_ARRAY_SIZE)
			break;

		pool->cell_sort_array[count++] = cell;
		list_del(&cell->user_list);
	}

	sort(pool->cell_sort_array, count, sizeof(cell), cmp_cells, NULL);

	return count;
}

static void process_thin_deferred_cells(struct thin_c *tc)
{
	struct pool *pool = tc->pool;
	unsigned long flags;
	struct list_head cells;
	struct dm_bio_prison_cell *cell;
	unsigned i, j, count;

	INIT_LIST_HEAD(&cells);

	spin_lock_irqsave(&tc->lock, flags);
	list_splice_init(&tc->deferred_cells, &cells);
	spin_unlock_irqrestore(&tc->lock, flags);

	if (list_empty(&cells))
		return;

	do {
		count = sort_cells(tc->pool, &cells);

		for (i = 0; i < count; i++) {
			cell = pool->cell_sort_array[i];
			BUG_ON(!cell->holder);

			/*
			 * If we've got no free new_mapping structs, and processing
			 * this bio might require one, we pause until there are some
			 * prepared mappings to process.
			 */
			if (ensure_next_mapping(pool)) {
				for (j = i; j < count; j++)
					list_add(&pool->cell_sort_array[j]->user_list, &cells);

				spin_lock_irqsave(&tc->lock, flags);
				list_splice(&cells, &tc->deferred_cells);
				spin_unlock_irqrestore(&tc->lock, flags);
				return;
			}

			if (cell->holder->bi_rw & REQ_DISCARD)
				pool->process_discard_cell(tc, cell);
			else
				pool->process_cell(tc, cell);
		}
	} while (!list_empty(&cells));
}

static void thin_get(struct thin_c *tc);
static void thin_put(struct thin_c *tc);

/*
 * We can't hold rcu_read_lock() around code that can block.  So we
 * find a thin with the rcu lock held; bump a refcount; then drop
 * the lock.
 */
static struct thin_c *get_first_thin(struct pool *pool)
{
	struct thin_c *tc = NULL;

	rcu_read_lock();
	if (!list_empty(&pool->active_thins)) {
		tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list);
		thin_get(tc);
	}
	rcu_read_unlock();

	return tc;
}

static struct thin_c *get_next_thin(struct pool *pool, struct thin_c *tc)
{
	struct thin_c *old_tc = tc;

	rcu_read_lock();
	list_for_each_entry_continue_rcu(tc, &pool->active_thins, list) {
		thin_get(tc);
		thin_put(old_tc);
		rcu_read_unlock();
		return tc;
	}
	thin_put(old_tc);
	rcu_read_unlock();

	return NULL;
}

static void process_deferred_bios(struct pool *pool)
{
	unsigned long flags;
	struct bio *bio;
	struct bio_list bios;
	struct thin_c *tc;

	tc = get_first_thin(pool);
	while (tc) {
		process_thin_deferred_cells(tc);
		process_thin_deferred_bios(tc);
		tc = get_next_thin(pool, tc);
	}

	/*
	 * If there are any deferred flush bios, we must commit
	 * the metadata before issuing them.
	 */
	bio_list_init(&bios);
	spin_lock_irqsave(&pool->lock, flags);
	bio_list_merge(&bios, &pool->deferred_flush_bios);
	bio_list_init(&pool->deferred_flush_bios);
	spin_unlock_irqrestore(&pool->lock, flags);

	if (bio_list_empty(&bios) &&
	    !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool)))
		return;

	if (commit(pool)) {
		while ((bio = bio_list_pop(&bios)))
			bio_io_error(bio);
		return;
	}
	pool->last_commit_jiffies = jiffies;

	while ((bio = bio_list_pop(&bios)))
		generic_make_request(bio);
}

static void do_worker(struct work_struct *ws)
{
	struct pool *pool = container_of(ws, struct pool, worker);

	throttle_work_start(&pool->throttle);
	dm_pool_issue_prefetches(pool->pmd);
	throttle_work_update(&pool->throttle);
	process_prepared(pool, &pool->prepared_mappings, &pool->process_prepared_mapping);
	throttle_work_update(&pool->throttle);
	process_prepared(pool, &pool->prepared_discards, &pool->process_prepared_discard);
	throttle_work_update(&pool->throttle);
	process_deferred_bios(pool);
	throttle_work_complete(&pool->throttle);
}

/*
 * We want to commit periodically so that not too much
 * unwritten data builds up.
 */
static void do_waker(struct work_struct *ws)
{
	struct pool *pool = container_of(to_delayed_work(ws), struct pool, waker);
	wake_worker(pool);
	queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
}

static void notify_of_pool_mode_change_to_oods(struct pool *pool);

/*
 * We're holding onto IO to allow userland time to react.  After the
 * timeout either the pool will have been resized (and thus back in
 * PM_WRITE mode), or we degrade to PM_OUT_OF_DATA_SPACE w/ error_if_no_space.
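 *
 * (The timeout length comes from no_space_timeout_secs; assuming the
 * usual module_param_named() hookup at the bottom of this file, it can
 * typically be tuned at runtime via
 * /sys/module/dm_thin_pool/parameters/no_space_timeout.)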
 */
static void do_no_space_timeout(struct work_struct *ws)
{
	struct pool *pool = container_of(to_delayed_work(ws), struct pool,
					 no_space_timeout);

	if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) {
		pool->pf.error_if_no_space = true;
		notify_of_pool_mode_change_to_oods(pool);
		error_retry_list_with_code(pool, -ENOSPC);
	}
}

/*----------------------------------------------------------------*/

struct pool_work {
	struct work_struct worker;
	struct completion complete;
};

static struct pool_work *to_pool_work(struct work_struct *ws)
{
	return container_of(ws, struct pool_work, worker);
}

static void pool_work_complete(struct pool_work *pw)
{
	complete(&pw->complete);
}

static void pool_work_wait(struct pool_work *pw, struct pool *pool,
			   void (*fn)(struct work_struct *))
{
	INIT_WORK_ONSTACK(&pw->worker, fn);
	init_completion(&pw->complete);
	queue_work(pool->wq, &pw->worker);
	wait_for_completion(&pw->complete);
}

/*----------------------------------------------------------------*/

struct noflush_work {
	struct pool_work pw;
	struct thin_c *tc;
};

static struct noflush_work *to_noflush(struct work_struct *ws)
{
	return container_of(to_pool_work(ws), struct noflush_work, pw);
}

static void do_noflush_start(struct work_struct *ws)
{
	struct noflush_work *w = to_noflush(ws);
	w->tc->requeue_mode = true;
	requeue_io(w->tc);
	pool_work_complete(&w->pw);
}

static void do_noflush_stop(struct work_struct *ws)
{
	struct noflush_work *w = to_noflush(ws);
	w->tc->requeue_mode = false;
	pool_work_complete(&w->pw);
}

static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
{
	struct noflush_work w;

	w.tc = tc;
	pool_work_wait(&w.pw, tc->pool, fn);
}

/*----------------------------------------------------------------*/

static enum pool_mode get_pool_mode(struct pool *pool)
{
	return pool->pf.mode;
}

static void notify_of_pool_mode_change(struct pool *pool, const char *new_mode)
{
	dm_table_event(pool->ti->table);
	DMINFO("%s: switching pool to %s mode",
	       dm_device_name(pool->pool_md), new_mode);
}

static void notify_of_pool_mode_change_to_oods(struct pool *pool)
{
	if (!pool->pf.error_if_no_space)
		notify_of_pool_mode_change(pool, "out-of-data-space (queue IO)");
	else
		notify_of_pool_mode_change(pool, "out-of-data-space (error IO)");
}

static bool passdown_enabled(struct pool_c *pt)
{
	return pt->adjusted_pf.discard_passdown;
}

static void set_discard_callbacks(struct pool *pool)
{
	struct pool_c *pt = pool->ti->private;

	if (passdown_enabled(pt)) {
		pool->process_discard_cell = process_discard_cell_passdown;
		pool->process_prepared_discard = process_prepared_discard_passdown;
	} else {
		pool->process_discard_cell = process_discard_cell_no_passdown;
		pool->process_prepared_discard = process_prepared_discard_no_passdown;
	}
}
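/*
 * Note that degradation is one way: a pool never leaves PM_FAIL, and it
 * is only allowed back into PM_WRITE once the metadata's needs_check
 * flag has been cleared (typically by the userspace thin_check/thin_repair
 * tools).  set_pool_mode() enforces both rules below.
 */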
static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
{
	struct pool_c *pt = pool->ti->private;
	bool needs_check = dm_pool_metadata_needs_check(pool->pmd);
	enum pool_mode old_mode = get_pool_mode(pool);
	unsigned long no_space_timeout = ACCESS_ONCE(no_space_timeout_secs) * HZ;

	/*
	 * Never allow the pool to transition to PM_WRITE mode if user
	 * intervention is required to verify metadata and data consistency.
	 */
	if (new_mode == PM_WRITE && needs_check) {
		DMERR("%s: unable to switch pool to write mode until repaired.",
		      dm_device_name(pool->pool_md));
		if (old_mode != new_mode)
			new_mode = old_mode;
		else
			new_mode = PM_READ_ONLY;
	}
	/*
	 * If we were in PM_FAIL mode, rollback of metadata failed.  We're
	 * not going to recover without a thin_repair.  So we never let the
	 * pool move out of the old mode.
	 */
	if (old_mode == PM_FAIL)
		new_mode = old_mode;

	switch (new_mode) {
	case PM_FAIL:
		if (old_mode != new_mode)
			notify_of_pool_mode_change(pool, "failure");
		dm_pool_metadata_read_only(pool->pmd);
		pool->process_bio = process_bio_fail;
		pool->process_discard = process_bio_fail;
		pool->process_cell = process_cell_fail;
		pool->process_discard_cell = process_cell_fail;
		pool->process_prepared_mapping = process_prepared_mapping_fail;
		pool->process_prepared_discard = process_prepared_discard_fail;

		error_retry_list(pool);
		break;

	case PM_READ_ONLY:
		if (old_mode != new_mode)
			notify_of_pool_mode_change(pool, "read-only");
		dm_pool_metadata_read_only(pool->pmd);
		pool->process_bio = process_bio_read_only;
		pool->process_discard = process_bio_success;
		pool->process_cell = process_cell_read_only;
		pool->process_discard_cell = process_cell_success;
		pool->process_prepared_mapping = process_prepared_mapping_fail;
		pool->process_prepared_discard = process_prepared_discard_success;

		error_retry_list(pool);
		break;

	case PM_OUT_OF_DATA_SPACE:
		/*
		 * Ideally we'd never hit this state; the low water mark
		 * would trigger userland to extend the pool before we
		 * completely run out of data space.  However, many small
		 * IOs to unprovisioned space can consume data space at an
		 * alarming rate.  Adjust your low water mark if you're
		 * frequently seeing this mode.
		 */
		if (old_mode != new_mode)
			notify_of_pool_mode_change_to_oods(pool);
		pool->out_of_data_space = true;
		pool->process_bio = process_bio_read_only;
		pool->process_discard = process_discard_bio;
		pool->process_cell = process_cell_read_only;
		pool->process_prepared_mapping = process_prepared_mapping;
		set_discard_callbacks(pool);

		if (!pool->pf.error_if_no_space && no_space_timeout)
			queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout);
		break;

	case PM_WRITE:
		if (old_mode != new_mode)
			notify_of_pool_mode_change(pool, "write");
		pool->out_of_data_space = false;
		pool->pf.error_if_no_space = pt->requested_pf.error_if_no_space;
		dm_pool_metadata_read_write(pool->pmd);
		pool->process_bio = process_bio;
		pool->process_discard = process_discard_bio;
		pool->process_cell = process_cell;
		pool->process_prepared_mapping = process_prepared_mapping;
		set_discard_callbacks(pool);
		break;
	}

	pool->pf.mode = new_mode;
	/*
	 * The pool mode may have changed, sync it so bind_control_target()
	 * doesn't cause an unexpected mode transition on resume.
	 */
	pt->adjusted_pf.mode = new_mode;
}

static void abort_transaction(struct pool *pool)
{
	const char *dev_name = dm_device_name(pool->pool_md);

	DMERR_LIMIT("%s: aborting current metadata transaction", dev_name);
	if (dm_pool_abort_metadata(pool->pmd)) {
		DMERR("%s: failed to abort metadata transaction", dev_name);
		set_pool_mode(pool, PM_FAIL);
	}

	if (dm_pool_metadata_set_needs_check(pool->pmd)) {
		DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name);
		set_pool_mode(pool, PM_FAIL);
	}
}

static void metadata_operation_failed(struct pool *pool, const char *op, int r)
{
	DMERR_LIMIT("%s: metadata operation '%s' failed: error = %d",
		    dm_device_name(pool->pool_md), op, r);

	abort_transaction(pool);
	set_pool_mode(pool, PM_READ_ONLY);
}

/*----------------------------------------------------------------*/

/*
 * Mapping functions.
 */

/*
 * Called only while mapping a thin bio to hand it over to the workqueue.
 */
static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
{
	unsigned long flags;
	struct pool *pool = tc->pool;

	spin_lock_irqsave(&tc->lock, flags);
	bio_list_add(&tc->deferred_bio_list, bio);
	spin_unlock_irqrestore(&tc->lock, flags);

	wake_worker(pool);
}

static void thin_defer_bio_with_throttle(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;

	throttle_lock(&pool->throttle);
	thin_defer_bio(tc, bio);
	throttle_unlock(&pool->throttle);
}

static void thin_defer_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	unsigned long flags;
	struct pool *pool = tc->pool;

	throttle_lock(&pool->throttle);
	spin_lock_irqsave(&tc->lock, flags);
	list_add_tail(&cell->user_list, &tc->deferred_cells);
	spin_unlock_irqrestore(&tc->lock, flags);
	throttle_unlock(&pool->throttle);

	wake_worker(pool);
}

static void thin_hook_bio(struct thin_c *tc, struct bio *bio)
{
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

	h->tc = tc;
	h->shared_read_entry = NULL;
	h->all_io_entry = NULL;
	h->overwrite_mapping = NULL;
	h->cell = NULL;
}

/*
 * Non-blocking function called from the thin target's map function.
 */
static int thin_bio_map(struct dm_target *ti, struct bio *bio)
{
	int r;
	struct thin_c *tc = ti->private;
	dm_block_t block = get_bio_block(tc, bio);
	struct dm_thin_device *td = tc->td;
	struct dm_thin_lookup_result result;
	struct dm_bio_prison_cell *virt_cell, *data_cell;
	struct dm_cell_key key;

	thin_hook_bio(tc, bio);

	if (tc->requeue_mode) {
		bio->bi_error = DM_ENDIO_REQUEUE;
		bio_endio(bio);
		return DM_MAPIO_SUBMITTED;
	}

	if (get_pool_mode(tc->pool) == PM_FAIL) {
		bio_io_error(bio);
		return DM_MAPIO_SUBMITTED;
	}

	if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)) {
		thin_defer_bio_with_throttle(tc, bio);
		return DM_MAPIO_SUBMITTED;
	}

	/*
	 * We must hold the virtual cell before doing the lookup, otherwise
	 * there's a race with discard.
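	 *
	 * (A discard completing between the lookup and the remap below could
	 * otherwise unmap the very block this bio is about to be issued
	 * against.)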
	 */
	build_virtual_key(tc->td, block, &key);
	if (bio_detain(tc->pool, &key, bio, &virt_cell))
		return DM_MAPIO_SUBMITTED;

	r = dm_thin_find_block(td, block, 0, &result);

	/*
	 * Note that we defer readahead too.
	 */
	switch (r) {
	case 0:
		if (unlikely(result.shared)) {
			/*
			 * We have a race condition here between the
			 * result.shared value returned by the lookup and
			 * snapshot creation, which may cause new
			 * sharing.
			 *
			 * To avoid this always quiesce the origin before
			 * taking the snap.  You want to do this anyway to
			 * ensure a consistent application view
			 * (i.e. lockfs).
			 *
			 * More distant ancestors are irrelevant.  The
			 * shared flag will be set in their case.
			 */
			thin_defer_cell(tc, virt_cell);
			return DM_MAPIO_SUBMITTED;
		}

		build_data_key(tc->td, result.block, &key);
		if (bio_detain(tc->pool, &key, bio, &data_cell)) {
			cell_defer_no_holder(tc, virt_cell);
			return DM_MAPIO_SUBMITTED;
		}

		inc_all_io_entry(tc->pool, bio);
		cell_defer_no_holder(tc, data_cell);
		cell_defer_no_holder(tc, virt_cell);

		remap(tc, bio, result.block);
		return DM_MAPIO_REMAPPED;

	case -ENODATA:
	case -EWOULDBLOCK:
		thin_defer_cell(tc, virt_cell);
		return DM_MAPIO_SUBMITTED;

	default:
		/*
		 * Must always call bio_io_error on failure.
		 * dm_thin_find_block can fail with -EINVAL if the
		 * pool is switched to fail-io mode.
		 */
		bio_io_error(bio);
		cell_defer_no_holder(tc, virt_cell);
		return DM_MAPIO_SUBMITTED;
	}
}

static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
{
	struct pool_c *pt = container_of(cb, struct pool_c, callbacks);
	struct request_queue *q;

	if (get_pool_mode(pt->pool) == PM_OUT_OF_DATA_SPACE)
		return 1;

	q = bdev_get_queue(pt->data_dev->bdev);
	return bdi_congested(&q->backing_dev_info, bdi_bits);
}

static void requeue_bios(struct pool *pool)
{
	unsigned long flags;
	struct thin_c *tc;

	rcu_read_lock();
	list_for_each_entry_rcu(tc, &pool->active_thins, list) {
		spin_lock_irqsave(&tc->lock, flags);
		bio_list_merge(&tc->deferred_bio_list, &tc->retry_on_resume_list);
		bio_list_init(&tc->retry_on_resume_list);
		spin_unlock_irqrestore(&tc->lock, flags);
	}
	rcu_read_unlock();
}

/*----------------------------------------------------------------
 * Binding of control targets to a pool object
 *--------------------------------------------------------------*/
static bool data_dev_supports_discard(struct pool_c *pt)
{
	struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);

	return q && blk_queue_discard(q);
}

static bool is_factor(sector_t block_size, uint32_t n)
{
	return !sector_div(block_size, n);
}

/*
 * If discard_passdown was enabled verify that the data device
 * supports discards.  Disable discard_passdown if not.
 */
static void disable_passdown_if_not_supported(struct pool_c *pt)
{
	struct pool *pool = pt->pool;
	struct block_device *data_bdev = pt->data_dev->bdev;
	struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits;
	const char *reason = NULL;
	char buf[BDEVNAME_SIZE];

	if (!pt->adjusted_pf.discard_passdown)
		return;

	if (!data_dev_supports_discard(pt))
		reason = "discard unsupported";

	else if (data_limits->max_discard_sectors < pool->sectors_per_block)
		reason = "max discard sectors smaller than a block";

	if (reason) {
		DMWARN("Data device (%s) %s: Disabling discard passdown.", bdevname(data_bdev, buf), reason);
		pt->adjusted_pf.discard_passdown = false;
	}
}

static int bind_control_target(struct pool *pool, struct dm_target *ti)
{
	struct pool_c *pt = ti->private;

	/*
	 * We want to make sure that a pool in PM_FAIL mode is never upgraded.
	 */
	enum pool_mode old_mode = get_pool_mode(pool);
	enum pool_mode new_mode = pt->adjusted_pf.mode;

	/*
	 * Don't change the pool's mode until set_pool_mode() below.
	 * Otherwise the pool's process_* function pointers may
	 * not match the desired pool mode.
	 */
	pt->adjusted_pf.mode = old_mode;

	pool->ti = ti;
	pool->pf = pt->adjusted_pf;
	pool->low_water_blocks = pt->low_water_blocks;

	set_pool_mode(pool, new_mode);

	return 0;
}

static void unbind_control_target(struct pool *pool, struct dm_target *ti)
{
	if (pool->ti == ti)
		pool->ti = NULL;
}

/*----------------------------------------------------------------
 * Pool creation
 *--------------------------------------------------------------*/
/* Initialize pool features. */
static void pool_features_init(struct pool_features *pf)
{
	pf->mode = PM_WRITE;
	pf->zero_new_blocks = true;
	pf->discard_enabled = true;
	pf->discard_passdown = true;
	pf->error_if_no_space = false;
}

static void __pool_destroy(struct pool *pool)
{
	__pool_table_remove(pool);

	vfree(pool->cell_sort_array);
	if (dm_pool_metadata_close(pool->pmd) < 0)
		DMWARN("%s: dm_pool_metadata_close() failed.", __func__);

	dm_bio_prison_destroy(pool->prison);
	dm_kcopyd_client_destroy(pool->copier);

	if (pool->wq)
		destroy_workqueue(pool->wq);

	if (pool->next_mapping)
		mempool_free(pool->next_mapping, pool->mapping_pool);
	mempool_destroy(pool->mapping_pool);
	dm_deferred_set_destroy(pool->shared_read_ds);
	dm_deferred_set_destroy(pool->all_io_ds);
	kfree(pool);
}

static struct kmem_cache *_new_mapping_cache;

static struct pool *pool_create(struct mapped_device *pool_md,
				struct block_device *metadata_dev,
				unsigned long block_size,
				int read_only, char **error)
{
	int r;
	void *err_p;
	struct pool *pool;
	struct dm_pool_metadata *pmd;
	bool format_device = read_only ? false : true;

	pmd = dm_pool_metadata_open(metadata_dev, block_size, format_device);
	if (IS_ERR(pmd)) {
		*error = "Error creating metadata object";
		return (struct pool *)pmd;
	}

	pool = kmalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool) {
		*error = "Error allocating memory for pool";
		err_p = ERR_PTR(-ENOMEM);
		goto bad_pool;
	}

	pool->pmd = pmd;
	pool->sectors_per_block = block_size;
	if (block_size & (block_size - 1))
		pool->sectors_per_block_shift = -1;
	else
		pool->sectors_per_block_shift = __ffs(block_size);
	pool->low_water_blocks = 0;
	pool_features_init(&pool->pf);
	pool->prison = dm_bio_prison_create();
	if (!pool->prison) {
		*error = "Error creating pool's bio prison";
		err_p = ERR_PTR(-ENOMEM);
		goto bad_prison;
	}

	pool->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
	if (IS_ERR(pool->copier)) {
		r = PTR_ERR(pool->copier);
		*error = "Error creating pool's kcopyd client";
		err_p = ERR_PTR(r);
		goto bad_kcopyd_client;
	}

	/*
	 * Create singlethreaded workqueue that will service all devices
	 * that use this metadata.
	 */
	pool->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
	if (!pool->wq) {
		*error = "Error creating pool's workqueue";
		err_p = ERR_PTR(-ENOMEM);
		goto bad_wq;
	}

	throttle_init(&pool->throttle);
	INIT_WORK(&pool->worker, do_worker);
	INIT_DELAYED_WORK(&pool->waker, do_waker);
	INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout);
	spin_lock_init(&pool->lock);
	bio_list_init(&pool->deferred_flush_bios);
	INIT_LIST_HEAD(&pool->prepared_mappings);
	INIT_LIST_HEAD(&pool->prepared_discards);
	INIT_LIST_HEAD(&pool->active_thins);
	pool->low_water_triggered = false;
	pool->suspended = true;
	pool->out_of_data_space = false;

	pool->shared_read_ds = dm_deferred_set_create();
	if (!pool->shared_read_ds) {
		*error = "Error creating pool's shared read deferred set";
		err_p = ERR_PTR(-ENOMEM);
		goto bad_shared_read_ds;
	}

	pool->all_io_ds = dm_deferred_set_create();
	if (!pool->all_io_ds) {
		*error = "Error creating pool's all io deferred set";
		err_p = ERR_PTR(-ENOMEM);
		goto bad_all_io_ds;
	}

	pool->next_mapping = NULL;
	pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE,
						      _new_mapping_cache);
	if (!pool->mapping_pool) {
		*error = "Error creating pool's mapping mempool";
		err_p = ERR_PTR(-ENOMEM);
		goto bad_mapping_pool;
	}

	pool->cell_sort_array = vmalloc(sizeof(*pool->cell_sort_array) * CELL_SORT_ARRAY_SIZE);
	if (!pool->cell_sort_array) {
		*error = "Error allocating cell sort array";
		err_p = ERR_PTR(-ENOMEM);
		goto bad_sort_array;
	}

	pool->ref_count = 1;
	pool->last_commit_jiffies = jiffies;
	pool->pool_md = pool_md;
	pool->md_dev = metadata_dev;
	__pool_table_insert(pool);

	return pool;

bad_sort_array:
	mempool_destroy(pool->mapping_pool);
bad_mapping_pool:
	dm_deferred_set_destroy(pool->all_io_ds);
bad_all_io_ds:
	dm_deferred_set_destroy(pool->shared_read_ds);
bad_shared_read_ds:
	destroy_workqueue(pool->wq);
bad_wq:
	dm_kcopyd_client_destroy(pool->copier);
bad_kcopyd_client:
	dm_bio_prison_destroy(pool->prison);
bad_prison:
	kfree(pool);
bad_pool:
	if (dm_pool_metadata_close(pmd))
		DMWARN("%s: dm_pool_metadata_close() failed.", __func__);

	return err_p;
}

static void __pool_inc(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	pool->ref_count++;
}

static void __pool_dec(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	BUG_ON(!pool->ref_count);
	if (!--pool->ref_count)
		__pool_destroy(pool);
}

static struct pool *__pool_find(struct mapped_device *pool_md,
				struct block_device *metadata_dev,
				unsigned long block_size, int read_only,
				char **error, int *created)
{
	struct pool *pool = __pool_table_lookup_metadata_dev(metadata_dev);

	if (pool) {
		if (pool->pool_md != pool_md) {
			*error = "metadata device already in use by a pool";
			return ERR_PTR(-EBUSY);
		}
		__pool_inc(pool);

	} else {
		pool = __pool_table_lookup(pool_md);
		if (pool) {
			if (pool->md_dev != metadata_dev) {
				*error = "different pool cannot replace a pool";
				return ERR_PTR(-EINVAL);
			}
			__pool_inc(pool);

		} else {
			pool = pool_create(pool_md, metadata_dev, block_size, read_only, error);
			*created = 1;
		}
	}

	return pool;
}

/*----------------------------------------------------------------
 * Pool target methods
 *--------------------------------------------------------------*/
static void pool_dtr(struct dm_target *ti)
{
	struct pool_c *pt = ti->private;

	mutex_lock(&dm_thin_pool_table.mutex);

	unbind_control_target(pt->pool, ti);
	__pool_dec(pt->pool);
	dm_put_device(ti, pt->metadata_dev);
	dm_put_device(ti, pt->data_dev);
	kfree(pt);

	mutex_unlock(&dm_thin_pool_table.mutex);
}

static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
			       struct dm_target *ti)
{
	int r;
	unsigned argc;
	const char *arg_name;

	static struct dm_arg _args[] = {
		{0, 4, "Invalid number of pool feature arguments"},
	};

	/*
	 * No feature arguments supplied.
	 */
	if (!as->argc)
		return 0;

	r = dm_read_arg_group(_args, as, &argc, &ti->error);
	if (r)
		return -EINVAL;

	while (argc && !r) {
		arg_name = dm_shift_arg(as);
		argc--;

		if (!strcasecmp(arg_name, "skip_block_zeroing"))
			pf->zero_new_blocks = false;

		else if (!strcasecmp(arg_name, "ignore_discard"))
			pf->discard_enabled = false;

		else if (!strcasecmp(arg_name, "no_discard_passdown"))
			pf->discard_passdown = false;

		else if (!strcasecmp(arg_name, "read_only"))
			pf->mode = PM_READ_ONLY;

		else if (!strcasecmp(arg_name, "error_if_no_space"))
			pf->error_if_no_space = true;

		else {
			ti->error = "Unrecognised pool feature requested";
			r = -EINVAL;
			break;
		}
	}

	return r;
}

static void metadata_low_callback(void *context)
{
	struct pool *pool = context;

	DMWARN("%s: reached low water mark for metadata device: sending event.",
	       dm_device_name(pool->pool_md));

	dm_table_event(pool->ti->table);
}

static sector_t get_dev_size(struct block_device *bdev)
{
	return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
}

static void warn_if_metadata_device_too_big(struct block_device *bdev)
{
	sector_t metadata_dev_size = get_dev_size(bdev);
	char buffer[BDEVNAME_SIZE];

	if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING)
		DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
		       bdevname(bdev, buffer), THIN_METADATA_MAX_SECTORS);
}

static sector_t get_metadata_dev_size(struct block_device *bdev)
{
	sector_t metadata_dev_size = get_dev_size(bdev);

	if (metadata_dev_size > THIN_METADATA_MAX_SECTORS)
		metadata_dev_size = THIN_METADATA_MAX_SECTORS;

	return metadata_dev_size;
}

static dm_block_t get_metadata_dev_size_in_blocks(struct block_device *bdev)
{
	sector_t metadata_dev_size = get_metadata_dev_size(bdev);

	sector_div(metadata_dev_size, THIN_METADATA_BLOCK_SIZE);

	return metadata_dev_size;
}

/*
 * When a metadata threshold is crossed a dm event is triggered, and
 * userland should respond by growing the metadata device.  We could let
 * userland set the threshold, like we do with the data threshold, but I'm
 * not sure they know enough to do this well.
 */
static dm_block_t calc_metadata_threshold(struct pool_c *pt)
{
	/*
	 * 4M is ample for all ops with the possible exception of thin
	 * device deletion which is harmless if it fails (just retry the
	 * delete after you've grown the device).
	 */
	dm_block_t quarter = get_metadata_dev_size_in_blocks(pt->metadata_dev->bdev) / 4;
	return min((dm_block_t)1024ULL /* 4M */, quarter);
}

/*
 * thin-pool <metadata dev> <data dev>
 *	     <data block size (sectors)>
 *	     <low water mark (blocks)>
 *	     [<#feature args> [<arg>]*]
 *
 * Optional feature arguments are:
 *	     skip_block_zeroing: skips the zeroing of newly-provisioned blocks.
 *	     ignore_discard: disable discard
 *	     no_discard_passdown: don't pass discards down to the data device
 *	     read_only: Don't allow any changes to be made to the pool metadata.
 *	     error_if_no_space: error IOs, instead of queueing, if no space.
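 *
 * Example (hypothetical devices; a 64KiB block size, i.e. 128 sectors,
 * and a 32768-block low water mark), written as one dmsetup table line:
 *
 *	0 41943040 thin-pool /dev/sdb /dev/sdc 128 32768 0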
 */
static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	int r, pool_created = 0;
	struct pool_c *pt;
	struct pool *pool;
	struct pool_features pf;
	struct dm_arg_set as;
	struct dm_dev *data_dev;
	unsigned long block_size;
	dm_block_t low_water_blocks;
	struct dm_dev *metadata_dev;
	fmode_t metadata_mode;

	/*
	 * FIXME Remove validation from scope of lock.
	 */
	mutex_lock(&dm_thin_pool_table.mutex);

	if (argc < 4) {
		ti->error = "Invalid argument count";
		r = -EINVAL;
		goto out_unlock;
	}

	as.argc = argc;
	as.argv = argv;

	/*
	 * Set default pool features.
	 */
	pool_features_init(&pf);

	dm_consume_args(&as, 4);
	r = parse_pool_features(&as, &pf, ti);
	if (r)
		goto out_unlock;

	metadata_mode = FMODE_READ | ((pf.mode == PM_READ_ONLY) ? 0 : FMODE_WRITE);
	r = dm_get_device(ti, argv[0], metadata_mode, &metadata_dev);
	if (r) {
		ti->error = "Error opening metadata block device";
		goto out_unlock;
	}
	warn_if_metadata_device_too_big(metadata_dev->bdev);

	r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev);
	if (r) {
		ti->error = "Error getting data device";
		goto out_metadata;
	}

	if (kstrtoul(argv[2], 10, &block_size) || !block_size ||
	    block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
	    block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
	    block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
		ti->error = "Invalid block size";
		r = -EINVAL;
		goto out;
	}

	if (kstrtoull(argv[3], 10, (unsigned long long *)&low_water_blocks)) {
		ti->error = "Invalid low water mark";
		r = -EINVAL;
		goto out;
	}

	pt = kzalloc(sizeof(*pt), GFP_KERNEL);
	if (!pt) {
		r = -ENOMEM;
		goto out;
	}

	pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev,
			   block_size, pf.mode == PM_READ_ONLY, &ti->error, &pool_created);
	if (IS_ERR(pool)) {
		r = PTR_ERR(pool);
		goto out_free_pt;
	}

	/*
	 * 'pool_created' reflects whether this is the first table load.
	 * Top level discard support is not allowed to be changed after
	 * initial load.  This would require a pool reload to trigger thin
	 * device changes.
	 */
	if (!pool_created && pf.discard_enabled != pool->pf.discard_enabled) {
		ti->error = "Discard support cannot be disabled once enabled";
		r = -EINVAL;
		goto out_flags_changed;
	}

	pt->pool = pool;
	pt->ti = ti;
	pt->metadata_dev = metadata_dev;
	pt->data_dev = data_dev;
	pt->low_water_blocks = low_water_blocks;
	pt->adjusted_pf = pt->requested_pf = pf;
	ti->num_flush_bios = 1;

	/*
	 * Only need to enable discards if the pool should pass
	 * them down to the data device.  The thin device's discard
	 * processing will cause mappings to be removed from the btree.
	 */
	ti->discard_zeroes_data_unsupported = true;
	if (pf.discard_enabled && pf.discard_passdown) {
		ti->num_discard_bios = 1;

		/*
		 * Setting 'discards_supported' circumvents the normal
		 * stacking of discard limits (this keeps the pool and
		 * thin devices' discard limits consistent).
		 */
		ti->discards_supported = true;
	}
	ti->private = pt;

	r = dm_pool_register_metadata_threshold(pt->pool->pmd,
						calc_metadata_threshold(pt),
						metadata_low_callback,
						pool);
	if (r)
		goto out_flags_changed;

	pt->callbacks.congested_fn = pool_is_congested;
	dm_table_add_target_callbacks(ti->table, &pt->callbacks);

	mutex_unlock(&dm_thin_pool_table.mutex);

	return 0;

out_flags_changed:
	__pool_dec(pool);
out_free_pt:
	kfree(pt);
out:
	dm_put_device(ti, data_dev);
out_metadata:
	dm_put_device(ti, metadata_dev);
out_unlock:
	mutex_unlock(&dm_thin_pool_table.mutex);

	return r;
}

static int pool_map(struct dm_target *ti, struct bio *bio)
{
	int r;
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;
	unsigned long flags;

	/*
	 * As this is a singleton target, ti->begin is always zero.
	 */
	spin_lock_irqsave(&pool->lock, flags);
	bio->bi_bdev = pt->data_dev->bdev;
	r = DM_MAPIO_REMAPPED;
	spin_unlock_irqrestore(&pool->lock, flags);

	return r;
}

static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit)
{
	int r;
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;
	sector_t data_size = ti->len;
	dm_block_t sb_data_size;

	*need_commit = false;

	(void) sector_div(data_size, pool->sectors_per_block);

	r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size);
	if (r) {
		DMERR("%s: failed to retrieve data device size",
		      dm_device_name(pool->pool_md));
		return r;
	}

	if (data_size < sb_data_size) {
		DMERR("%s: pool target (%llu blocks) too small: expected %llu",
		      dm_device_name(pool->pool_md),
		      (unsigned long long)data_size, sb_data_size);
		return -EINVAL;

	} else if (data_size > sb_data_size) {
		if (dm_pool_metadata_needs_check(pool->pmd)) {
			DMERR("%s: unable to grow the data device until repaired.",
			      dm_device_name(pool->pool_md));
			return 0;
		}

		if (sb_data_size)
			DMINFO("%s: growing the data device from %llu to %llu blocks",
			       dm_device_name(pool->pool_md),
			       sb_data_size, (unsigned long long)data_size);
		r = dm_pool_resize_data_dev(pool->pmd, data_size);
		if (r) {
			metadata_operation_failed(pool, "dm_pool_resize_data_dev", r);
			return r;
		}

		*need_commit = true;
	}

	return 0;
}

static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
{
	int r;
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;
	dm_block_t metadata_dev_size, sb_metadata_dev_size;

	*need_commit = false;

	metadata_dev_size = get_metadata_dev_size_in_blocks(pool->md_dev);

	r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size);
	if (r) {
		DMERR("%s: failed to retrieve metadata device size",
		      dm_device_name(pool->pool_md));
		return r;
	}

	if (metadata_dev_size < sb_metadata_dev_size) {
		DMERR("%s: metadata device (%llu blocks) too small: expected %llu",
		      dm_device_name(pool->pool_md),
		      metadata_dev_size, sb_metadata_dev_size);
		return -EINVAL;

	} else if (metadata_dev_size > sb_metadata_dev_size) {
		if (dm_pool_metadata_needs_check(pool->pmd)) {
			DMERR("%s: unable to grow the metadata device until repaired.",
			      dm_device_name(pool->pool_md));
			return 0;
		}

		warn_if_metadata_device_too_big(pool->md_dev);
		DMINFO("%s: growing the metadata device from %llu to %llu blocks",
		       dm_device_name(pool->pool_md),
		       sb_metadata_dev_size, metadata_dev_size);
		r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
		if (r) {
			metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r);
			return r;
		}

		*need_commit = true;
	}

	return 0;
}

/*
 * Retrieves the number of blocks of the data device from
 * the superblock and compares it to the actual device size,
 * thus resizing the data device in case it has grown.
 *
 * This both copes with opening preallocated data devices in the ctr
 * being followed by a resume
 * -and-
 * calling the resume method individually after userspace has
 * grown the data device in reaction to a table event.
 */
static int pool_preresume(struct dm_target *ti)
{
	int r;
	bool need_commit1, need_commit2;
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;

	/*
	 * Take control of the pool object.
	 */
	r = bind_control_target(pool, ti);
	if (r)
		return r;

	r = maybe_resize_data_dev(ti, &need_commit1);
	if (r)
		return r;

	r = maybe_resize_metadata_dev(ti, &need_commit2);
	if (r)
		return r;

	if (need_commit1 || need_commit2)
		(void) commit(pool);

	return 0;
}

static void pool_suspend_active_thins(struct pool *pool)
{
	struct thin_c *tc;

	/* Suspend all active thin devices */
	tc = get_first_thin(pool);
	while (tc) {
		dm_internal_suspend_noflush(tc->thin_md);
		tc = get_next_thin(pool, tc);
	}
}

static void pool_resume_active_thins(struct pool *pool)
{
	struct thin_c *tc;

	/* Resume all active thin devices */
	tc = get_first_thin(pool);
	while (tc) {
		dm_internal_resume(tc->thin_md);
		tc = get_next_thin(pool, tc);
	}
}

static void pool_resume(struct dm_target *ti)
{
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;
	unsigned long flags;

	/*
	 * Must requeue active_thins' bios and then resume
	 * active_thins _before_ clearing 'suspend' flag.
	 */
	requeue_bios(pool);
	pool_resume_active_thins(pool);

	spin_lock_irqsave(&pool->lock, flags);
	pool->low_water_triggered = false;
	pool->suspended = false;
	spin_unlock_irqrestore(&pool->lock, flags);

	do_waker(&pool->waker.work);
}

static void pool_presuspend(struct dm_target *ti)
{
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	pool->suspended = true;
	spin_unlock_irqrestore(&pool->lock, flags);

	pool_suspend_active_thins(pool);
}

static void pool_presuspend_undo(struct dm_target *ti)
{
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;
	unsigned long flags;

	pool_resume_active_thins(pool);

	spin_lock_irqsave(&pool->lock, flags);
	pool->suspended = false;
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void pool_postsuspend(struct dm_target *ti)
{
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;

	cancel_delayed_work_sync(&pool->waker);
	cancel_delayed_work_sync(&pool->no_space_timeout);
	flush_workqueue(pool->wq);
	(void) commit(pool);
}

static int check_arg_count(unsigned argc, unsigned args_required)
{
	if (argc != args_required) {
		DMWARN("Message received with %u arguments instead of %u.",
		       argc, args_required);
		return -EINVAL;
	}

	return 0;
}

static int read_dev_id(char *arg, dm_thin_id *dev_id, int warning)
{
	if (!kstrtoull(arg, 10, (unsigned long long *)dev_id) &&
	    *dev_id <= MAX_DEV_ID)
		return 0;

	if (warning)
		DMWARN("Message received with invalid device id: %s", arg);

	return -EINVAL;
}

static int process_create_thin_mesg(unsigned argc, char **argv, struct pool *pool)
{
	dm_thin_id dev_id;
	int r;

	r = check_arg_count(argc, 2);
	if (r)
		return r;

	r = read_dev_id(argv[1], &dev_id, 1);
	if (r)
		return r;

	r = dm_pool_create_thin(pool->pmd, dev_id);
	if (r) {
		DMWARN("Creation of new thinly-provisioned device with id %s failed.",
		       argv[1]);
		return r;
	}

	return 0;
}

static int process_create_snap_mesg(unsigned argc, char **argv, struct pool *pool)
{
	dm_thin_id dev_id;
	dm_thin_id origin_dev_id;
	int r;

	r = check_arg_count(argc, 3);
	if (r)
		return r;

	r = read_dev_id(argv[1], &dev_id, 1);
	if (r)
		return r;

	r = read_dev_id(argv[2], &origin_dev_id, 1);
	if (r)
		return r;

	r = dm_pool_create_snap(pool->pmd, dev_id, origin_dev_id);
	if (r) {
		DMWARN("Creation of new snapshot %s of device %s failed.",
		       argv[1], argv[2]);
		return r;
	}

	return 0;
}

static int process_delete_mesg(unsigned argc, char **argv, struct pool *pool)
{
	dm_thin_id dev_id;
	int r;

	r = check_arg_count(argc, 2);
	if (r)
		return r;

	r = read_dev_id(argv[1], &dev_id, 1);
	if (r)
		return r;

	r = dm_pool_delete_thin_device(pool->pmd, dev_id);
	if (r)
		DMWARN("Deletion of thin device %s failed.", argv[1]);

	return r;
}
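/*
 * A typical create/snapshot/delete sequence, driven from userspace via
 * the message interface serviced by pool_message() below (hypothetical
 * pool device name):
 *
 *	dmsetup message /dev/mapper/pool 0 create_thin 0
 *	dmsetup message /dev/mapper/pool 0 create_snap 1 0
 *	dmsetup message /dev/mapper/pool 0 delete 1
 */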
static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct pool *pool)
{
	dm_thin_id old_id, new_id;
	int r;

	r = check_arg_count(argc, 3);
	if (r)
		return r;

	if (kstrtoull(argv[1], 10, (unsigned long long *)&old_id)) {
		DMWARN("set_transaction_id message: Unrecognised id %s.", argv[1]);
		return -EINVAL;
	}

	if (kstrtoull(argv[2], 10, (unsigned long long *)&new_id)) {
		DMWARN("set_transaction_id message: Unrecognised new id %s.", argv[2]);
		return -EINVAL;
	}

	r = dm_pool_set_metadata_transaction_id(pool->pmd, old_id, new_id);
	if (r) {
		DMWARN("Failed to change transaction id from %s to %s.",
		       argv[1], argv[2]);
		return r;
	}

	return 0;
}

static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
{
	int r;

	r = check_arg_count(argc, 1);
	if (r)
		return r;

	(void) commit(pool);

	r = dm_pool_reserve_metadata_snap(pool->pmd);
	if (r)
		DMWARN("reserve_metadata_snap message failed.");

	return r;
}

static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
{
	int r;

	r = check_arg_count(argc, 1);
	if (r)
		return r;

	r = dm_pool_release_metadata_snap(pool->pmd);
	if (r)
		DMWARN("release_metadata_snap message failed.");

	return r;
}

/*
 * Messages supported:
 *   create_thin	<dev_id>
 *   create_snap	<dev_id> <origin_id>
 *   delete		<dev_id>
 *   set_transaction_id <current_trans_id> <new_trans_id>
 *   reserve_metadata_snap
 *   release_metadata_snap
 */
static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
{
	int r = -EINVAL;
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;

	if (get_pool_mode(pool) >= PM_READ_ONLY) {
		DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode",
		      dm_device_name(pool->pool_md));
		return -EOPNOTSUPP;
	}

	if (!strcasecmp(argv[0], "create_thin"))
		r = process_create_thin_mesg(argc, argv, pool);

	else if (!strcasecmp(argv[0], "create_snap"))
		r = process_create_snap_mesg(argc, argv, pool);

	else if (!strcasecmp(argv[0], "delete"))
		r = process_delete_mesg(argc, argv, pool);

	else if (!strcasecmp(argv[0], "set_transaction_id"))
		r = process_set_transaction_id_mesg(argc, argv, pool);

	else if (!strcasecmp(argv[0], "reserve_metadata_snap"))
		r = process_reserve_metadata_snap_mesg(argc, argv, pool);

	else if (!strcasecmp(argv[0], "release_metadata_snap"))
		r = process_release_metadata_snap_mesg(argc, argv, pool);

	else
		DMWARN("Unrecognised thin pool target message received: %s", argv[0]);

	if (!r)
		(void) commit(pool);

	return r;
}

static void emit_flags(struct pool_features *pf, char *result,
		       unsigned sz, unsigned maxlen)
{
	unsigned count = !pf->zero_new_blocks + !pf->discard_enabled +
			 !pf->discard_passdown + (pf->mode == PM_READ_ONLY) +
			 pf->error_if_no_space;
	DMEMIT("%u ", count);

	if (!pf->zero_new_blocks)
		DMEMIT("skip_block_zeroing ");

	if (!pf->discard_enabled)
		DMEMIT("ignore_discard ");

	if (!pf->discard_passdown)
		DMEMIT("no_discard_passdown ");

	if (pf->mode == PM_READ_ONLY)
		DMEMIT("read_only ");

	if (pf->error_if_no_space)
		DMEMIT("error_if_no_space ");
}

/*
 * Status line is:
 *    <transaction id> <used metadata sectors>/<total metadata sectors>
 *    <used data sectors>/<total data sectors> <held metadata root>
 *    <pool mode> <discard config> <no space config> <needs_check>
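 *
 * Example STATUSTYPE_INFO output (illustrative numbers only):
 *    1 406/4096 1024/262144 - rw discard_passdown queue_if_no_space -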
 */
static void pool_status(struct dm_target *ti, status_type_t type,
			unsigned status_flags, char *result, unsigned maxlen)
{
	int r;
	unsigned sz = 0;
	uint64_t transaction_id;
	dm_block_t nr_free_blocks_data;
	dm_block_t nr_free_blocks_metadata;
	dm_block_t nr_blocks_data;
	dm_block_t nr_blocks_metadata;
	dm_block_t held_root;
	char buf[BDEVNAME_SIZE];
	char buf2[BDEVNAME_SIZE];
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;

	switch (type) {
	case STATUSTYPE_INFO:
		if (get_pool_mode(pool) == PM_FAIL) {
			DMEMIT("Fail");
			break;
		}

		/* Commit to ensure statistics aren't out-of-date */
		if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
			(void) commit(pool);

		r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id);
		if (r) {
			DMERR("%s: dm_pool_get_metadata_transaction_id returned %d",
			      dm_device_name(pool->pool_md), r);
			goto err;
		}

		r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free_blocks_metadata);
		if (r) {
			DMERR("%s: dm_pool_get_free_metadata_block_count returned %d",
			      dm_device_name(pool->pool_md), r);
			goto err;
		}

		r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata);
		if (r) {
			DMERR("%s: dm_pool_get_metadata_dev_size returned %d",
			      dm_device_name(pool->pool_md), r);
			goto err;
		}

		r = dm_pool_get_free_block_count(pool->pmd, &nr_free_blocks_data);
		if (r) {
			DMERR("%s: dm_pool_get_free_block_count returned %d",
			      dm_device_name(pool->pool_md), r);
			goto err;
		}

		r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data);
		if (r) {
			DMERR("%s: dm_pool_get_data_dev_size returned %d",
			      dm_device_name(pool->pool_md), r);
			goto err;
		}

		r = dm_pool_get_metadata_snap(pool->pmd, &held_root);
		if (r) {
			DMERR("%s: dm_pool_get_metadata_snap returned %d",
			      dm_device_name(pool->pool_md), r);
			goto err;
		}

		DMEMIT("%llu %llu/%llu %llu/%llu ",
		       (unsigned long long)transaction_id,
		       (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
		       (unsigned long long)nr_blocks_metadata,
		       (unsigned long long)(nr_blocks_data - nr_free_blocks_data),
		       (unsigned long long)nr_blocks_data);

		if (held_root)
			DMEMIT("%llu ", held_root);
		else
			DMEMIT("- ");

		if (pool->pf.mode == PM_OUT_OF_DATA_SPACE)
			DMEMIT("out_of_data_space ");
		else if (pool->pf.mode == PM_READ_ONLY)
			DMEMIT("ro ");
		else
			DMEMIT("rw ");

		if (!pool->pf.discard_enabled)
			DMEMIT("ignore_discard ");
		else if (pool->pf.discard_passdown)
			DMEMIT("discard_passdown ");
		else
			DMEMIT("no_discard_passdown ");

		if (pool->pf.error_if_no_space)
			DMEMIT("error_if_no_space ");
		else
			DMEMIT("queue_if_no_space ");

		if (dm_pool_metadata_needs_check(pool->pmd))
			DMEMIT("needs_check ");
		else
			DMEMIT("- ");

		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s %s %lu %llu ",
		       format_dev_t(buf, pt->metadata_dev->bdev->bd_dev),
		       format_dev_t(buf2, pt->data_dev->bdev->bd_dev),
		       (unsigned long)pool->sectors_per_block,
		       (unsigned long long)pt->low_water_blocks);
		emit_flags(&pt->requested_pf, result, sz, maxlen);
		break;
	}
	return;

err:
	DMEMIT("Error");
}

static int pool_iterate_devices(struct dm_target *ti,
				iterate_devices_callout_fn fn, void *data)
{
	struct pool_c *pt = ti->private;

	return fn(ti, pt->data_dev, 0, ti->len, data);
}

static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;
	sector_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;

	/*
	 * If max_sectors is smaller than pool->sectors_per_block adjust it
	 * to the highest possible power-of-2 factor of pool->sectors_per_block.
	 * This is especially beneficial when the pool's data device is a RAID
	 * device that has a full stripe width that matches pool->sectors_per_block
	 * -- because even though partial RAID stripe-sized IOs will be issued to a
	 * single RAID stripe, when aggregated they will end on a full RAID stripe
	 * boundary, which avoids additional partial RAID stripe writes cascading.
	 */
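	/*
	 * Worked example (illustrative numbers): with sectors_per_block =
	 * 384 (e.g. a three-chunk stripe of 128 sectors) and max_sectors =
	 * 256, 256 is a power of two but not a factor of 384, so the loop
	 * below decrements to 255, rounds down to 128, which does divide
	 * 384, and stops there.
	 */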
	if (limits->max_sectors < pool->sectors_per_block) {
		while (!is_factor(pool->sectors_per_block, limits->max_sectors)) {
			if ((limits->max_sectors & (limits->max_sectors - 1)) == 0)
				limits->max_sectors--;
			limits->max_sectors = rounddown_pow_of_two(limits->max_sectors);
		}
	}

	/*
	 * If the system-determined stacked limits are compatible with the
	 * pool's blocksize (io_opt is a factor) do not override them.
	 */
	if (io_opt_sectors < pool->sectors_per_block ||
	    !is_factor(io_opt_sectors, pool->sectors_per_block)) {
		if (is_factor(pool->sectors_per_block, limits->max_sectors))
			blk_limits_io_min(limits, limits->max_sectors << SECTOR_SHIFT);
		else
			blk_limits_io_min(limits, pool->sectors_per_block << SECTOR_SHIFT);
		blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
	}

	/*
	 * pt->adjusted_pf is a staging area for the actual features to use.
	 * They get transferred to the live pool in bind_control_target()
	 * called from pool_preresume().
	 */
	if (!pt->adjusted_pf.discard_enabled) {
		/*
		 * Must explicitly disallow stacking discard limits otherwise the
		 * block layer will stack them if pool's data device has support.
		 * QUEUE_FLAG_DISCARD wouldn't be set but there is no way for the
		 * user to see that, so make sure to set all discard limits to 0.
		 */
		limits->discard_granularity = 0;
		return;
	}

	disable_passdown_if_not_supported(pt);

	/*
	 * The pool uses the same discard limits as the underlying data
	 * device.  DM core has already set this up.
	 */
}

static struct target_type pool_target = {
	.name = "thin-pool",
	.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
		    DM_TARGET_IMMUTABLE,
	.version = {1, 18, 0},
	.module = THIS_MODULE,
	.ctr = pool_ctr,
	.dtr = pool_dtr,
	.map = pool_map,
	.presuspend = pool_presuspend,
	.presuspend_undo = pool_presuspend_undo,
	.postsuspend = pool_postsuspend,
	.preresume = pool_preresume,
	.resume = pool_resume,
	.message = pool_message,
	.status = pool_status,
	.iterate_devices = pool_iterate_devices,
	.io_hints = pool_io_hints,
};

/*----------------------------------------------------------------
 * Thin target methods
 *--------------------------------------------------------------*/
static void thin_get(struct thin_c *tc)
{
	atomic_inc(&tc->refcount);
}

static void thin_put(struct thin_c *tc)
{
	if (atomic_dec_and_test(&tc->refcount))
		complete(&tc->can_destroy);
}

static void thin_dtr(struct dm_target *ti)
{
	struct thin_c *tc = ti->private;
	unsigned long flags;

	spin_lock_irqsave(&tc->pool->lock, flags);
	list_del_rcu(&tc->list);
	spin_unlock_irqrestore(&tc->pool->lock, flags);
	synchronize_rcu();

	thin_put(tc);
	wait_for_completion(&tc->can_destroy);

	mutex_lock(&dm_thin_pool_table.mutex);

	__pool_dec(tc->pool);
	dm_pool_close_thin_device(tc->td);
	dm_put_device(ti, tc->pool_dev);
	if (tc->origin_dev)
		dm_put_device(ti, tc->origin_dev);
	kfree(tc);

	mutex_unlock(&dm_thin_pool_table.mutex);
}

/*
 * Thin target parameters:
 *
 * <pool_dev> <dev_id> [origin_dev]
 *
 * pool_dev: the path to the pool (eg, /dev/mapper/my_pool)
 * dev_id: the internal device identifier
 * origin_dev: a device external to the pool that should act as the origin
 *
 * If the pool device has discards disabled, they get disabled for the thin
 * device as well.
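 *
 * Example (hypothetical), as a dmsetup table line for a 1GiB thin volume
 * backed by internal device id 1 (created beforehand via the pool's
 * create_thin message):
 *
 *	0 2097152 thin /dev/mapper/pool 1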

/*----------------------------------------------------------------
 * Thin target methods
 *--------------------------------------------------------------*/
static void thin_get(struct thin_c *tc)
{
	atomic_inc(&tc->refcount);
}

static void thin_put(struct thin_c *tc)
{
	if (atomic_dec_and_test(&tc->refcount))
		complete(&tc->can_destroy);
}

static void thin_dtr(struct dm_target *ti)
{
	struct thin_c *tc = ti->private;
	unsigned long flags;

	spin_lock_irqsave(&tc->pool->lock, flags);
	list_del_rcu(&tc->list);
	spin_unlock_irqrestore(&tc->pool->lock, flags);
	synchronize_rcu();

	thin_put(tc);
	wait_for_completion(&tc->can_destroy);

	mutex_lock(&dm_thin_pool_table.mutex);

	__pool_dec(tc->pool);
	dm_pool_close_thin_device(tc->td);
	dm_put_device(ti, tc->pool_dev);
	if (tc->origin_dev)
		dm_put_device(ti, tc->origin_dev);
	kfree(tc);

	mutex_unlock(&dm_thin_pool_table.mutex);
}

/*
 * Thin target parameters:
 *
 * <pool_dev> <dev_id> [origin_dev]
 *
 * pool_dev: the path to the pool (e.g. /dev/mapper/my_pool)
 * dev_id: the internal device identifier
 * origin_dev: a device external to the pool that should act as the origin
 *
 * If the pool device has discards disabled, they get disabled for the thin
 * device as well.
 */
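/*
 * e.g. activating thin device 0 as a 1GiB (2097152-sector) volume, with
 * illustrative names and sizes (the device must previously have been
 * created via a "create_thin 0" message to the pool):
 *
 *	dmsetup create thin --table "0 2097152 thin /dev/mapper/pool 0"
 */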
static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	int r;
	struct thin_c *tc;
	struct dm_dev *pool_dev, *origin_dev;
	struct mapped_device *pool_md;
	unsigned long flags;

	mutex_lock(&dm_thin_pool_table.mutex);

	if (argc != 2 && argc != 3) {
		ti->error = "Invalid argument count";
		r = -EINVAL;
		goto out_unlock;
	}

	tc = ti->private = kzalloc(sizeof(*tc), GFP_KERNEL);
	if (!tc) {
		ti->error = "Out of memory";
		r = -ENOMEM;
		goto out_unlock;
	}
	tc->thin_md = dm_table_get_md(ti->table);
	spin_lock_init(&tc->lock);
	INIT_LIST_HEAD(&tc->deferred_cells);
	bio_list_init(&tc->deferred_bio_list);
	bio_list_init(&tc->retry_on_resume_list);
	tc->sort_bio_list = RB_ROOT;

	if (argc == 3) {
		r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev);
		if (r) {
			ti->error = "Error opening origin device";
			goto bad_origin_dev;
		}
		tc->origin_dev = origin_dev;
	}

	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &pool_dev);
	if (r) {
		ti->error = "Error opening pool device";
		goto bad_pool_dev;
	}
	tc->pool_dev = pool_dev;

	if (read_dev_id(argv[1], (unsigned long long *)&tc->dev_id, 0)) {
		ti->error = "Invalid device id";
		r = -EINVAL;
		goto bad_common;
	}

	pool_md = dm_get_md(tc->pool_dev->bdev->bd_dev);
	if (!pool_md) {
		ti->error = "Couldn't get pool mapped device";
		r = -EINVAL;
		goto bad_common;
	}

	tc->pool = __pool_table_lookup(pool_md);
	if (!tc->pool) {
		ti->error = "Couldn't find pool object";
		r = -EINVAL;
		goto bad_pool_lookup;
	}
	__pool_inc(tc->pool);

	if (get_pool_mode(tc->pool) == PM_FAIL) {
		ti->error = "Couldn't open thin device, pool is in fail mode";
		r = -EINVAL;
		goto bad_pool;
	}

	r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td);
	if (r) {
		ti->error = "Couldn't open thin internal device";
		goto bad_pool;
	}

	r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block);
	if (r)
		goto bad;

	ti->num_flush_bios = 1;
	ti->flush_supported = true;
	ti->per_io_data_size = sizeof(struct dm_thin_endio_hook);

	/* In case the pool supports discards, pass them on. */
	ti->discard_zeroes_data_unsupported = true;
	if (tc->pool->pf.discard_enabled) {
		ti->discards_supported = true;
		ti->num_discard_bios = 1;
		ti->split_discard_bios = false;
	}

	mutex_unlock(&dm_thin_pool_table.mutex);

	spin_lock_irqsave(&tc->pool->lock, flags);
	if (tc->pool->suspended) {
		spin_unlock_irqrestore(&tc->pool->lock, flags);
		mutex_lock(&dm_thin_pool_table.mutex); /* reacquire for __pool_dec */
		ti->error = "Unable to activate thin device while pool is suspended";
		r = -EINVAL;
		goto bad;
	}
	atomic_set(&tc->refcount, 1);
	init_completion(&tc->can_destroy);
	list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
	spin_unlock_irqrestore(&tc->pool->lock, flags);
	/*
	 * This synchronize_rcu() call is needed here otherwise we risk a
	 * wake_worker() call finding no bios to process (because the newly
	 * added tc isn't yet visible).  Waiting here means we aren't
	 * dependent on the periodic commit to wake_worker(), which reduces
	 * latency.
	 */
	synchronize_rcu();

	dm_put(pool_md);

	return 0;

bad:
	dm_pool_close_thin_device(tc->td);
bad_pool:
	__pool_dec(tc->pool);
bad_pool_lookup:
	dm_put(pool_md);
bad_common:
	dm_put_device(ti, tc->pool_dev);
bad_pool_dev:
	if (tc->origin_dev)
		dm_put_device(ti, tc->origin_dev);
bad_origin_dev:
	kfree(tc);
out_unlock:
	mutex_unlock(&dm_thin_pool_table.mutex);

	return r;
}

static int thin_map(struct dm_target *ti, struct bio *bio)
{
	bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);

	return thin_bio_map(ti, bio);
}

static int thin_endio(struct dm_target *ti, struct bio *bio, int err)
{
	unsigned long flags;
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct list_head work;
	struct dm_thin_new_mapping *m, *tmp;
	struct pool *pool = h->tc->pool;

	if (h->shared_read_entry) {
		INIT_LIST_HEAD(&work);
		dm_deferred_entry_dec(h->shared_read_entry, &work);

		spin_lock_irqsave(&pool->lock, flags);
		list_for_each_entry_safe(m, tmp, &work, list) {
			list_del(&m->list);
			__complete_mapping_preparation(m);
		}
		spin_unlock_irqrestore(&pool->lock, flags);
	}

	if (h->all_io_entry) {
		INIT_LIST_HEAD(&work);
		dm_deferred_entry_dec(h->all_io_entry, &work);
		if (!list_empty(&work)) {
			spin_lock_irqsave(&pool->lock, flags);
			list_for_each_entry_safe(m, tmp, &work, list)
				list_add_tail(&m->list, &pool->prepared_discards);
			spin_unlock_irqrestore(&pool->lock, flags);
			wake_worker(pool);
		}
	}

	if (h->cell)
		cell_defer_no_holder(h->tc, h->cell);

	return 0;
}

static void thin_presuspend(struct dm_target *ti)
{
	struct thin_c *tc = ti->private;

	if (dm_noflush_suspending(ti))
		noflush_work(tc, do_noflush_start);
}

static void thin_postsuspend(struct dm_target *ti)
{
	struct thin_c *tc = ti->private;

	/*
	 * The dm_noflush_suspending flag has been cleared by now, so
	 * unfortunately we must always run this.
	 */
	noflush_work(tc, do_noflush_stop);
}

static int thin_preresume(struct dm_target *ti)
{
	struct thin_c *tc = ti->private;

	if (tc->origin_dev)
		tc->origin_size = get_dev_size(tc->origin_dev->bdev);

	return 0;
}

/*
 * <nr mapped sectors> <highest mapped sector>
 */
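/*
 * e.g. for a fully-mapped 1GiB thin volume on a pool with 128-sector
 * (64KiB) blocks, "dmsetup status" would show (illustrative numbers):
 *
 *	0 2097152 thin 2097152 2097151
 *
 * i.e. 2097152 mapped sectors, with the highest mapped sector being
 * (highest mapped block + 1) * sectors_per_block - 1 = 2097151.
 */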
static void thin_status(struct dm_target *ti, status_type_t type,
			unsigned status_flags, char *result, unsigned maxlen)
{
	int r;
	ssize_t sz = 0;
	dm_block_t mapped, highest;
	char buf[BDEVNAME_SIZE];
	struct thin_c *tc = ti->private;

	if (get_pool_mode(tc->pool) == PM_FAIL) {
		DMEMIT("Fail");
		return;
	}

	if (!tc->td)
		DMEMIT("-");
	else {
		switch (type) {
		case STATUSTYPE_INFO:
			r = dm_thin_get_mapped_count(tc->td, &mapped);
			if (r) {
				DMERR("dm_thin_get_mapped_count returned %d", r);
				goto err;
			}

			r = dm_thin_get_highest_mapped_block(tc->td, &highest);
			if (r < 0) {
				DMERR("dm_thin_get_highest_mapped_block returned %d", r);
				goto err;
			}

			DMEMIT("%llu ", mapped * tc->pool->sectors_per_block);
			if (r)
				DMEMIT("%llu", ((highest + 1) *
						tc->pool->sectors_per_block) - 1);
			else
				DMEMIT("-");
			break;

		case STATUSTYPE_TABLE:
			DMEMIT("%s %lu",
			       format_dev_t(buf, tc->pool_dev->bdev->bd_dev),
			       (unsigned long) tc->dev_id);
			if (tc->origin_dev)
				DMEMIT(" %s", format_dev_t(buf, tc->origin_dev->bdev->bd_dev));
			break;
		}
	}

	return;

err:
	DMEMIT("Error");
}

static int thin_iterate_devices(struct dm_target *ti,
				iterate_devices_callout_fn fn, void *data)
{
	sector_t blocks;
	struct thin_c *tc = ti->private;
	struct pool *pool = tc->pool;

	/*
	 * We can't call dm_pool_get_data_dev_size() since that blocks.  So
	 * we follow a more convoluted path through to the pool's target.
	 */
	if (!pool->ti)
		return 0;	/* nothing is bound */

	blocks = pool->ti->len;
	(void) sector_div(blocks, pool->sectors_per_block);
	if (blocks)
		return fn(ti, tc->pool_dev, 0, pool->sectors_per_block * blocks, data);

	return 0;
}

static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct thin_c *tc = ti->private;
	struct pool *pool = tc->pool;

	if (!pool->pf.discard_enabled)
		return;

	limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
	limits->max_discard_sectors = 2048 * 1024 * 16;	/* 16G */
}
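
/*
 * Note on the limits above: discard_granularity equals the pool block
 * size, so discards only take effect on whole pool blocks.  Checking the
 * arithmetic on the 16G figure: 2048 * 1024 * 16 = 33554432 sectors,
 * which at 512 bytes per sector is exactly 16GiB.
 */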

static struct target_type thin_target = {
	.name = "thin",
	.version = {1, 18, 0},
	.module = THIS_MODULE,
	.ctr = thin_ctr,
	.dtr = thin_dtr,
	.map = thin_map,
	.end_io = thin_endio,
	.preresume = thin_preresume,
	.presuspend = thin_presuspend,
	.postsuspend = thin_postsuspend,
	.status = thin_status,
	.iterate_devices = thin_iterate_devices,
	.io_hints = thin_io_hints,
};

/*----------------------------------------------------------------*/

static int __init dm_thin_init(void)
{
	int r;

	pool_table_init();

	r = dm_register_target(&thin_target);
	if (r)
		return r;

	r = dm_register_target(&pool_target);
	if (r)
		goto bad_pool_target;

	r = -ENOMEM;

	_new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
	if (!_new_mapping_cache)
		goto bad_new_mapping_cache;

	return 0;

bad_new_mapping_cache:
	dm_unregister_target(&pool_target);
bad_pool_target:
	dm_unregister_target(&thin_target);

	return r;
}

static void dm_thin_exit(void)
{
	dm_unregister_target(&thin_target);
	dm_unregister_target(&pool_target);

	kmem_cache_destroy(_new_mapping_cache);
}

module_init(dm_thin_init);
module_exit(dm_thin_exit);

module_param_named(no_space_timeout, no_space_timeout_secs, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(no_space_timeout, "Out of data space queue IO timeout in seconds");

MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
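
/*
 * Since no_space_timeout is declared with S_IWUSR it can be tuned at
 * runtime without reloading the module, e.g. (assuming the target is
 * built as the dm_thin_pool module):
 *
 *	echo 120 > /sys/module/dm_thin_pool/parameters/no_space_timeout
 */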