/*
 * Copyright (C) 2011-2012 Red Hat UK.
 *
 * This file is released under the GPL.
 */

#include "dm-thin-metadata.h"
#include "dm-bio-prison-v1.h"
#include "dm.h"

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/jiffies.h>
#include <linux/log2.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sort.h>
#include <linux/rbtree.h>

#define DM_MSG_PREFIX "thin"

/*
 * Tunable constants
 */
#define ENDIO_HOOK_POOL_SIZE 1024
#define MAPPING_POOL_SIZE 1024
#define COMMIT_PERIOD HZ
#define NO_SPACE_TIMEOUT_SECS 60

static unsigned no_space_timeout_secs = NO_SPACE_TIMEOUT_SECS;

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
		"A percentage of time allocated for copy on write");

/*
 * The block size of the device holding pool data must be
 * between 64KB and 1GB.
 */
#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)
#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)

/*
 * Device id is restricted to 24 bits.
 */
#define MAX_DEV_ID ((1 << 24) - 1)

/*
 * How do we handle breaking sharing of data blocks?
 * =================================================
 *
 * We use a standard copy-on-write btree to store the mappings for the
 * devices (note I'm talking about copy-on-write of the metadata here, not
 * the data).  When you take an internal snapshot you clone the root node
 * of the origin btree.  After this there is no concept of an origin or a
 * snapshot.  They are just two device trees that happen to point to the
 * same data blocks.
 *
 * When we get a write in we decide if it's to a shared data block using
 * some timestamp magic.  If it is, we have to break sharing.
 *
 * Let's say we write to a shared block in what was the origin.  The
 * steps are:
 *
 * i) plug io further to this physical block. (see bio_prison code).
 *
 * ii) quiesce any read io to that shared data block.  Obviously
 * including all devices that share this block. (see dm_deferred_set code)
 *
 * iii) copy the data block to a newly allocated block.  This step can be
 * missed out if the io covers the block. (schedule_copy).
 *
 * iv) insert the new mapping into the origin's btree
 * (process_prepared_mapping).  This act of inserting breaks some
 * sharing of btree nodes between the two devices.  Breaking sharing only
 * affects the btree of that specific device.  Btrees for the other
 * devices that share the block never change.  The btree for the origin
 * device as it was after the last commit is untouched, i.e. we're using
 * persistent data structures in the functional programming sense.
 *
 * v) unplug io to this physical block, including the io that triggered
 * the breaking of sharing.
 *
 * Steps (ii) and (iii) occur in parallel.
 *
 * The metadata _doesn't_ need to be committed before the io continues.  We
 * get away with this because the io is always written to a _new_ block.
 * If there's a crash, then:
 *
 * - The origin mapping will point to the old origin block (the shared
 * one).  This will contain the data as it was before the io that triggered
 * the breaking of sharing came in.
 *
 * - The snap mapping still points to the old block.  As it would after
 * the commit.
 *
 * The downside of this scheme is the timestamp magic isn't perfect, and
 * will continue to think that the data block in the snapshot device is
 * shared even after the write to the origin has broken sharing.  I suspect
 * data blocks will typically be shared by many different devices, so we're
 * breaking sharing n + 1 times, rather than n, where n is the number of
 * devices that reference this data block.  At the moment I think the
 * benefits far, far outweigh the disadvantages.
 */
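
/*
 * To make the cost above concrete: a block referenced by n == 3
 * devices may see sharing broken four times rather than three,
 * because the snapshot's mapping can still look shared after the
 * write to the origin has already broken sharing.
 */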

/*----------------------------------------------------------------*/

/*
 * Key building.
 */
enum lock_space {
	VIRTUAL,
	PHYSICAL
};

static void build_key(struct dm_thin_device *td, enum lock_space ls,
		      dm_block_t b, dm_block_t e, struct dm_cell_key *key)
{
	key->virtual = (ls == VIRTUAL);
	key->dev = dm_thin_dev_id(td);
	key->block_begin = b;
	key->block_end = e;
}

static void build_data_key(struct dm_thin_device *td, dm_block_t b,
			   struct dm_cell_key *key)
{
	build_key(td, PHYSICAL, b, b + 1llu, key);
}

static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
			      struct dm_cell_key *key)
{
	build_key(td, VIRTUAL, b, b + 1llu, key);
}

/*----------------------------------------------------------------*/

#define THROTTLE_THRESHOLD (1 * HZ)

struct throttle {
	struct rw_semaphore lock;
	unsigned long threshold;
	bool throttle_applied;
};

static void throttle_init(struct throttle *t)
{
	init_rwsem(&t->lock);
	t->throttle_applied = false;
}

static void throttle_work_start(struct throttle *t)
{
	t->threshold = jiffies + THROTTLE_THRESHOLD;
}

static void throttle_work_update(struct throttle *t)
{
	if (!t->throttle_applied && jiffies > t->threshold) {
		down_write(&t->lock);
		t->throttle_applied = true;
	}
}

static void throttle_work_complete(struct throttle *t)
{
	if (t->throttle_applied) {
		t->throttle_applied = false;
		up_write(&t->lock);
	}
}

static void throttle_lock(struct throttle *t)
{
	down_read(&t->lock);
}

static void throttle_unlock(struct throttle *t)
{
	up_read(&t->lock);
}

/*----------------------------------------------------------------*/

/*
 * A pool device ties together a metadata device and a data device.  It
 * also provides the interface for creating and destroying internal
 * devices.
 */
struct dm_thin_new_mapping;

/*
 * The pool runs in four modes, ordered from least to most degraded so
 * that modes can be compared.
 */
enum pool_mode {
	PM_WRITE,		/* metadata may be changed */
	PM_OUT_OF_DATA_SPACE,	/* metadata may be changed, though data may not be allocated */
	PM_READ_ONLY,		/* metadata may not be changed */
	PM_FAIL,		/* all I/O fails */
};
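
/*
 * Keeping the modes ordered means degradation checks can use simple
 * comparisons; commit() below, for instance, refuses to commit once
 * get_pool_mode(pool) >= PM_READ_ONLY.
 */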

struct pool_features {
	enum pool_mode mode;

	bool zero_new_blocks:1;
	bool discard_enabled:1;
	bool discard_passdown:1;
	bool error_if_no_space:1;
};

struct thin_c;
typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
typedef void (*process_cell_fn)(struct thin_c *tc, struct dm_bio_prison_cell *cell);
typedef void (*process_mapping_fn)(struct dm_thin_new_mapping *m);

#define CELL_SORT_ARRAY_SIZE 8192

struct pool {
	struct list_head list;
	struct dm_target *ti;	/* Only set if a pool target is bound */

	struct mapped_device *pool_md;
	struct block_device *md_dev;
	struct dm_pool_metadata *pmd;

	dm_block_t low_water_blocks;
	uint32_t sectors_per_block;
	int sectors_per_block_shift;

	struct pool_features pf;
	bool low_water_triggered:1;	/* A dm event has been sent */
	bool suspended:1;
	bool out_of_data_space:1;

	struct dm_bio_prison *prison;
	struct dm_kcopyd_client *copier;

	struct work_struct worker;
	struct workqueue_struct *wq;
	struct throttle throttle;
	struct delayed_work waker;
	struct delayed_work no_space_timeout;

	unsigned long last_commit_jiffies;
	unsigned ref_count;

	spinlock_t lock;
	struct bio_list deferred_flush_bios;
	struct list_head prepared_mappings;
	struct list_head prepared_discards;
	struct list_head prepared_discards_pt2;
	struct list_head active_thins;

	struct dm_deferred_set *shared_read_ds;
	struct dm_deferred_set *all_io_ds;

	struct dm_thin_new_mapping *next_mapping;

	process_bio_fn process_bio;
	process_bio_fn process_discard;

	process_cell_fn process_cell;
	process_cell_fn process_discard_cell;

	process_mapping_fn process_prepared_mapping;
	process_mapping_fn process_prepared_discard;
	process_mapping_fn process_prepared_discard_pt2;

	struct dm_bio_prison_cell **cell_sort_array;

	mempool_t mapping_pool;
};

static enum pool_mode get_pool_mode(struct pool *pool);
static void metadata_operation_failed(struct pool *pool, const char *op, int r);

/*
 * Target context for a pool.
 */
struct pool_c {
	struct dm_target *ti;
	struct pool *pool;
	struct dm_dev *data_dev;
	struct dm_dev *metadata_dev;
	struct dm_target_callbacks callbacks;

	dm_block_t low_water_blocks;
	struct pool_features requested_pf; /* Features requested during table load */
	struct pool_features adjusted_pf;  /* Features used after adjusting for constituent devices */
};

/*
 * Target context for a thin.
 */
struct thin_c {
	struct list_head list;
	struct dm_dev *pool_dev;
	struct dm_dev *origin_dev;
	sector_t origin_size;
	dm_thin_id dev_id;

	struct pool *pool;
	struct dm_thin_device *td;
	struct mapped_device *thin_md;

	bool requeue_mode:1;
	spinlock_t lock;
	struct list_head deferred_cells;
	struct bio_list deferred_bio_list;
	struct bio_list retry_on_resume_list;
	struct rb_root sort_bio_list; /* sorted list of deferred bios */

	/*
	 * Ensures the thin is not destroyed until the worker has finished
	 * iterating the active_thins list.
	 */
	atomic_t refcount;
	struct completion can_destroy;
};

/*----------------------------------------------------------------*/

static bool block_size_is_power_of_two(struct pool *pool)
{
	return pool->sectors_per_block_shift >= 0;
}

static sector_t block_to_sectors(struct pool *pool, dm_block_t b)
{
	return block_size_is_power_of_two(pool) ?
		(b << pool->sectors_per_block_shift) :
		(b * pool->sectors_per_block);
}
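
/*
 * For example, a 1MiB block size gives sectors_per_block == 2048 and
 * sectors_per_block_shift == 11, so block 5 starts at sector
 * 5 << 11 == 10240.  For block sizes that are not a power of two the
 * shift is negative and the multiply path is taken instead.
 */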

/*----------------------------------------------------------------*/

struct discard_op {
	struct thin_c *tc;
	struct blk_plug plug;
	struct bio *parent_bio;
	struct bio *bio;
};

static void begin_discard(struct discard_op *op, struct thin_c *tc, struct bio *parent)
{
	BUG_ON(!parent);

	op->tc = tc;
	blk_start_plug(&op->plug);
	op->parent_bio = parent;
	op->bio = NULL;
}

static int issue_discard(struct discard_op *op, dm_block_t data_b, dm_block_t data_e)
{
	struct thin_c *tc = op->tc;
	sector_t s = block_to_sectors(tc->pool, data_b);
	sector_t len = block_to_sectors(tc->pool, data_e - data_b);

	return __blkdev_issue_discard(tc->pool_dev->bdev, s, len,
				      GFP_NOWAIT, 0, &op->bio);
}

static void end_discard(struct discard_op *op, int r)
{
	if (op->bio) {
		/*
		 * Even if one of the calls to issue_discard failed, we
		 * need to wait for the chain to complete.
		 */
		bio_chain(op->bio, op->parent_bio);
		bio_set_op_attrs(op->bio, REQ_OP_DISCARD, 0);
		submit_bio(op->bio);
	}

	blk_finish_plug(&op->plug);

	/*
	 * Even if r is set, there could be sub discards in flight that we
	 * need to wait for.
	 */
	if (r && !op->parent_bio->bi_status)
		op->parent_bio->bi_status = errno_to_blk_status(r);
	bio_endio(op->parent_bio);
}

/*----------------------------------------------------------------*/

/*
 * wake_worker() is used when new work is queued and when pool_resume is
 * ready to continue deferred IO processing.
 */
static void wake_worker(struct pool *pool)
{
	queue_work(pool->wq, &pool->worker);
}

/*----------------------------------------------------------------*/

static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio,
		      struct dm_bio_prison_cell **cell_result)
{
	int r;
	struct dm_bio_prison_cell *cell_prealloc;

	/*
	 * Allocate a cell from the prison's mempool.
	 * This might block but it can't fail.
	 */
	cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO);

	r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);
	if (r)
		/*
		 * We reused an old cell; we can get rid of
		 * the new one.
		 */
		dm_bio_prison_free_cell(pool->prison, cell_prealloc);

	return r;
}
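
/*
 * A non-zero return from bio_detain() means the key was already
 * locked: the bio has been queued on the existing cell and the caller
 * must not process it any further (it will be dealt with when the
 * cell is released).
 */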

static void cell_release(struct pool *pool,
			 struct dm_bio_prison_cell *cell,
			 struct bio_list *bios)
{
	dm_cell_release(pool->prison, cell, bios);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_visit_release(struct pool *pool,
			       void (*fn)(void *, struct dm_bio_prison_cell *),
			       void *context,
			       struct dm_bio_prison_cell *cell)
{
	dm_cell_visit_release(pool->prison, fn, context, cell);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_release_no_holder(struct pool *pool,
				   struct dm_bio_prison_cell *cell,
				   struct bio_list *bios)
{
	dm_cell_release_no_holder(pool->prison, cell, bios);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_error_with_code(struct pool *pool,
		struct dm_bio_prison_cell *cell, blk_status_t error_code)
{
	dm_cell_error(pool->prison, cell, error_code);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static blk_status_t get_pool_io_error_code(struct pool *pool)
{
	return pool->out_of_data_space ? BLK_STS_NOSPC : BLK_STS_IOERR;
}

static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	cell_error_with_code(pool, cell, get_pool_io_error_code(pool));
}

static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	cell_error_with_code(pool, cell, 0);
}

static void cell_requeue(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	cell_error_with_code(pool, cell, BLK_STS_DM_REQUEUE);
}

/*----------------------------------------------------------------*/

/*
 * A global list of pools that uses a struct mapped_device as a key.
 */
static struct dm_thin_pool_table {
	struct mutex mutex;
	struct list_head pools;
} dm_thin_pool_table;

static void pool_table_init(void)
{
	mutex_init(&dm_thin_pool_table.mutex);
	INIT_LIST_HEAD(&dm_thin_pool_table.pools);
}

static void pool_table_exit(void)
{
	mutex_destroy(&dm_thin_pool_table.mutex);
}

static void __pool_table_insert(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	list_add(&pool->list, &dm_thin_pool_table.pools);
}

static void __pool_table_remove(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	list_del(&pool->list);
}

static struct pool *__pool_table_lookup(struct mapped_device *md)
{
	struct pool *pool = NULL, *tmp;

	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
		if (tmp->pool_md == md) {
			pool = tmp;
			break;
		}
	}

	return pool;
}

static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev)
{
	struct pool *pool = NULL, *tmp;

	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
		if (tmp->md_dev == md_dev) {
			pool = tmp;
			break;
		}
	}

	return pool;
}

/*----------------------------------------------------------------*/

struct dm_thin_endio_hook {
	struct thin_c *tc;
	struct dm_deferred_entry *shared_read_entry;
	struct dm_deferred_entry *all_io_entry;
	struct dm_thin_new_mapping *overwrite_mapping;
	struct rb_node rb_node;
	struct dm_bio_prison_cell *cell;
};

static void __merge_bio_list(struct bio_list *bios, struct bio_list *master)
{
	bio_list_merge(bios, master);
	bio_list_init(master);
}

static void error_bio_list(struct bio_list *bios, blk_status_t error)
{
	struct bio *bio;

	while ((bio = bio_list_pop(bios))) {
		bio->bi_status = error;
		bio_endio(bio);
	}
}

static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master,
		blk_status_t error)
{
	struct bio_list bios;
	unsigned long flags;

	bio_list_init(&bios);

	spin_lock_irqsave(&tc->lock, flags);
	__merge_bio_list(&bios, master);
	spin_unlock_irqrestore(&tc->lock, flags);

	error_bio_list(&bios, error);
}

static void requeue_deferred_cells(struct thin_c *tc)
{
	struct pool *pool = tc->pool;
	unsigned long flags;
	struct list_head cells;
	struct dm_bio_prison_cell *cell, *tmp;

	INIT_LIST_HEAD(&cells);

	spin_lock_irqsave(&tc->lock, flags);
	list_splice_init(&tc->deferred_cells, &cells);
	spin_unlock_irqrestore(&tc->lock, flags);

	list_for_each_entry_safe(cell, tmp, &cells, user_list)
		cell_requeue(pool, cell);
}

static void requeue_io(struct thin_c *tc)
{
	struct bio_list bios;
	unsigned long flags;

	bio_list_init(&bios);

	spin_lock_irqsave(&tc->lock, flags);
	__merge_bio_list(&bios, &tc->deferred_bio_list);
	__merge_bio_list(&bios, &tc->retry_on_resume_list);
	spin_unlock_irqrestore(&tc->lock, flags);

	error_bio_list(&bios, BLK_STS_DM_REQUEUE);
	requeue_deferred_cells(tc);
}

static void error_retry_list_with_code(struct pool *pool, blk_status_t error)
{
	struct thin_c *tc;

	rcu_read_lock();
	list_for_each_entry_rcu(tc, &pool->active_thins, list)
		error_thin_bio_list(tc, &tc->retry_on_resume_list, error);
	rcu_read_unlock();
}

static void error_retry_list(struct pool *pool)
{
	error_retry_list_with_code(pool, get_pool_io_error_code(pool));
}

/*
 * This section of code contains the logic for processing a thin device's IO.
 * Much of the code depends on pool object resources (lists, workqueues, etc)
 * but most is exclusively called from the thin target rather than the thin-pool
 * target.
 */

static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	sector_t block_nr = bio->bi_iter.bi_sector;

	if (block_size_is_power_of_two(pool))
		block_nr >>= pool->sectors_per_block_shift;
	else
		(void) sector_div(block_nr, pool->sectors_per_block);

	return block_nr;
}

/*
 * Returns the _complete_ blocks that this bio covers.
 */
static void get_bio_block_range(struct thin_c *tc, struct bio *bio,
				dm_block_t *begin, dm_block_t *end)
{
	struct pool *pool = tc->pool;
	sector_t b = bio->bi_iter.bi_sector;
	sector_t e = b + (bio->bi_iter.bi_size >> SECTOR_SHIFT);

	b += pool->sectors_per_block - 1ull; /* so we round up */

	if (block_size_is_power_of_two(pool)) {
		b >>= pool->sectors_per_block_shift;
		e >>= pool->sectors_per_block_shift;
	} else {
		(void) sector_div(b, pool->sectors_per_block);
		(void) sector_div(e, pool->sectors_per_block);
	}

	if (e < b)
		/* Can happen if the bio is within a single block. */
		e = b;

	*begin = b;
	*end = e;
}
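
/*
 * Example with 128KiB blocks (256 sectors): a bio covering sectors
 * [100, 700) yields begin == (100 + 255) >> 8 == 1 and
 * end == 700 >> 8 == 2, i.e. only block 1 is completely covered;
 * the partial blocks at either end are excluded.
 */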

static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
{
	struct pool *pool = tc->pool;
	sector_t bi_sector = bio->bi_iter.bi_sector;

	bio_set_dev(bio, tc->pool_dev->bdev);
	if (block_size_is_power_of_two(pool))
		bio->bi_iter.bi_sector =
			(block << pool->sectors_per_block_shift) |
			(bi_sector & (pool->sectors_per_block - 1));
	else
		bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
				 sector_div(bi_sector, pool->sectors_per_block);
}
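
/*
 * remap() keeps the offset within the block: continuing the
 * 256-sector example, a bio at sector 1000 remapped to data block 9
 * lands at sector (9 << 8) | (1000 & 255) == 2304 + 232 == 2536 on
 * the data device.
 */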

static void remap_to_origin(struct thin_c *tc, struct bio *bio)
{
	bio_set_dev(bio, tc->origin_dev->bdev);
}

static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
{
	return op_is_flush(bio->bi_opf) &&
		dm_thin_changed_this_transaction(tc->td);
}

static void inc_all_io_entry(struct pool *pool, struct bio *bio)
{
	struct dm_thin_endio_hook *h;

	if (bio_op(bio) == REQ_OP_DISCARD)
		return;

	h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds);
}

static void issue(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	if (!bio_triggers_commit(tc, bio)) {
		generic_make_request(bio);
		return;
	}

	/*
	 * Complete bio with an error if earlier I/O caused changes to
	 * the metadata that can't be committed, e.g. due to I/O errors
	 * on the metadata device.
	 */
	if (dm_thin_aborted_changes(tc->td)) {
		bio_io_error(bio);
		return;
	}

	/*
	 * Batch together any bios that trigger commits and then issue a
	 * single commit for them in process_deferred_bios().
	 */
	spin_lock_irqsave(&pool->lock, flags);
	bio_list_add(&pool->deferred_flush_bios, bio);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
{
	remap_to_origin(tc, bio);
	issue(tc, bio);
}

static void remap_and_issue(struct thin_c *tc, struct bio *bio,
			    dm_block_t block)
{
	remap(tc, bio, block);
	issue(tc, bio);
}

/*----------------------------------------------------------------*/

/*
 * Bio endio functions.
 */
struct dm_thin_new_mapping {
	struct list_head list;

	bool pass_discard:1;
	bool maybe_shared:1;

	/*
	 * Track quiescing, copying and zeroing preparation actions.  When this
	 * counter hits zero the block is prepared and can be inserted into the
	 * btree.
	 */
	atomic_t prepare_actions;

	blk_status_t status;
	struct thin_c *tc;
	dm_block_t virt_begin, virt_end;
	dm_block_t data_block;
	struct dm_bio_prison_cell *cell;

	/*
	 * If the bio covers the whole area of a block then we can avoid
	 * zeroing or copying.  Instead this bio is hooked.  The bio will
	 * still be in the cell, so care has to be taken to avoid issuing
	 * the bio twice.
	 */
	struct bio *bio;
	bio_end_io_t *saved_bi_end_io;
};
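
/*
 * For example, schedule_copy() below starts prepare_actions at 3: one
 * for quiescing, one for the copy itself, and one held by the
 * function for its own duration (a partial-block zero adds a fourth).
 * The mapping only moves to prepared_mappings once all of them have
 * completed.
 */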

static void __complete_mapping_preparation(struct dm_thin_new_mapping *m)
{
	struct pool *pool = m->tc->pool;

	if (atomic_dec_and_test(&m->prepare_actions)) {
		list_add_tail(&m->list, &pool->prepared_mappings);
		wake_worker(pool);
	}
}

static void complete_mapping_preparation(struct dm_thin_new_mapping *m)
{
	unsigned long flags;
	struct pool *pool = m->tc->pool;

	spin_lock_irqsave(&pool->lock, flags);
	__complete_mapping_preparation(m);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void copy_complete(int read_err, unsigned long write_err, void *context)
{
	struct dm_thin_new_mapping *m = context;

	m->status = read_err || write_err ? BLK_STS_IOERR : 0;
	complete_mapping_preparation(m);
}

static void overwrite_endio(struct bio *bio)
{
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct dm_thin_new_mapping *m = h->overwrite_mapping;

	bio->bi_end_io = m->saved_bi_end_io;

	m->status = bio->bi_status;
	complete_mapping_preparation(m);
}

/*----------------------------------------------------------------*/

/*
 * Workqueue.
 */

/*
 * Prepared mapping jobs.
 */

/*
 * This sends the bios in the cell, except the original holder, back
 * to the deferred_bios list.
 */
static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&tc->lock, flags);
	cell_release_no_holder(pool, cell, &tc->deferred_bio_list);
	spin_unlock_irqrestore(&tc->lock, flags);

	wake_worker(pool);
}

static void thin_defer_bio(struct thin_c *tc, struct bio *bio);

struct remap_info {
	struct thin_c *tc;
	struct bio_list defer_bios;
	struct bio_list issue_bios;
};

static void __inc_remap_and_issue_cell(void *context,
				       struct dm_bio_prison_cell *cell)
{
	struct remap_info *info = context;
	struct bio *bio;

	while ((bio = bio_list_pop(&cell->bios))) {
		if (op_is_flush(bio->bi_opf) || bio_op(bio) == REQ_OP_DISCARD)
			bio_list_add(&info->defer_bios, bio);
		else {
			inc_all_io_entry(info->tc->pool, bio);

			/*
			 * We can't issue the bios with the bio prison lock
			 * held, so we add them to a list to issue on
			 * return from this function.
			 */
			bio_list_add(&info->issue_bios, bio);
		}
	}
}

static void inc_remap_and_issue_cell(struct thin_c *tc,
				     struct dm_bio_prison_cell *cell,
				     dm_block_t block)
{
	struct bio *bio;
	struct remap_info info;

	info.tc = tc;
	bio_list_init(&info.defer_bios);
	bio_list_init(&info.issue_bios);

	/*
	 * We have to be careful to inc any bios we're about to issue
	 * before the cell is released, and avoid a race with new bios
	 * being added to the cell.
	 */
	cell_visit_release(tc->pool, __inc_remap_and_issue_cell,
			   &info, cell);

	while ((bio = bio_list_pop(&info.defer_bios)))
		thin_defer_bio(tc, bio);

	while ((bio = bio_list_pop(&info.issue_bios)))
		remap_and_issue(info.tc, bio, block);
}

static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
{
	cell_error(m->tc->pool, m->cell);
	list_del(&m->list);
	mempool_free(m, &m->tc->pool->mapping_pool);
}

static void process_prepared_mapping(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;
	struct pool *pool = tc->pool;
	struct bio *bio = m->bio;
	int r;

	if (m->status) {
		cell_error(pool, m->cell);
		goto out;
	}

	/*
	 * Commit the prepared block into the mapping btree.
	 * Any I/O for this block arriving after this point will get
	 * remapped to it directly.
	 */
	r = dm_thin_insert_block(tc->td, m->virt_begin, m->data_block);
	if (r) {
		metadata_operation_failed(pool, "dm_thin_insert_block", r);
		cell_error(pool, m->cell);
		goto out;
	}

	/*
	 * Release any bios held while the block was being provisioned.
	 * If we are processing a write bio that completely covers the block,
	 * we already processed it so can ignore it now when processing
	 * the bios in the cell.
	 */
	if (bio) {
		inc_remap_and_issue_cell(tc, m->cell, m->data_block);
		bio_endio(bio);
	} else {
		inc_all_io_entry(tc->pool, m->cell->holder);
		remap_and_issue(tc, m->cell->holder, m->data_block);
		inc_remap_and_issue_cell(tc, m->cell, m->data_block);
	}

out:
	list_del(&m->list);
	mempool_free(m, &pool->mapping_pool);
}

/*----------------------------------------------------------------*/

static void free_discard_mapping(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;
	if (m->cell)
		cell_defer_no_holder(tc, m->cell);
	mempool_free(m, &tc->pool->mapping_pool);
}

static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
{
	bio_io_error(m->bio);
	free_discard_mapping(m);
}

static void process_prepared_discard_success(struct dm_thin_new_mapping *m)
{
	bio_endio(m->bio);
	free_discard_mapping(m);
}

static void process_prepared_discard_no_passdown(struct dm_thin_new_mapping *m)
{
	int r;
	struct thin_c *tc = m->tc;

	r = dm_thin_remove_range(tc->td, m->cell->key.block_begin, m->cell->key.block_end);
	if (r) {
		metadata_operation_failed(tc->pool, "dm_thin_remove_range", r);
		bio_io_error(m->bio);
	} else
		bio_endio(m->bio);

	cell_defer_no_holder(tc, m->cell);
	mempool_free(m, &tc->pool->mapping_pool);
}

/*----------------------------------------------------------------*/

static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m,
						   struct bio *discard_parent)
{
	/*
	 * We've already unmapped this range of blocks, but before we
	 * passdown we have to check that these blocks are now unused.
	 */
	int r = 0;
	bool used = true;
	struct thin_c *tc = m->tc;
	struct pool *pool = tc->pool;
	dm_block_t b = m->data_block, e, end = m->data_block + m->virt_end - m->virt_begin;
	struct discard_op op;

	begin_discard(&op, tc, discard_parent);
	while (b != end) {
		/* find start of unmapped run */
		for (; b < end; b++) {
			r = dm_pool_block_is_used(pool->pmd, b, &used);
			if (r)
				goto out;

			if (!used)
				break;
		}

		if (b == end)
			break;

		/* find end of run */
		for (e = b + 1; e != end; e++) {
			r = dm_pool_block_is_used(pool->pmd, e, &used);
			if (r)
				goto out;

			if (used)
				break;
		}

		r = issue_discard(&op, b, e);
		if (r)
			goto out;

		b = e;
	}
out:
	end_discard(&op, r);
}

static void queue_passdown_pt2(struct dm_thin_new_mapping *m)
{
	unsigned long flags;
	struct pool *pool = m->tc->pool;

	spin_lock_irqsave(&pool->lock, flags);
	list_add_tail(&m->list, &pool->prepared_discards_pt2);
	spin_unlock_irqrestore(&pool->lock, flags);
	wake_worker(pool);
}

static void passdown_endio(struct bio *bio)
{
	/*
	 * It doesn't matter if the passdown discard failed, we still want
	 * to unmap (we ignore err).
	 */
	queue_passdown_pt2(bio->bi_private);
	bio_put(bio);
}
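
/*
 * Passdown is split into two phases.  pt1 removes the mapping from
 * the btree and takes an extra reference on the data blocks
 * (dm_pool_inc_data_range) before issuing the discard to the
 * underlying device; pt2, queued from passdown_endio(), drops that
 * reference again so the blocks can only be reallocated after the
 * discard has completed.
 */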

static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m)
{
	int r;
	struct thin_c *tc = m->tc;
	struct pool *pool = tc->pool;
	struct bio *discard_parent;
	dm_block_t data_end = m->data_block + (m->virt_end - m->virt_begin);

	/*
	 * Only this thread allocates blocks, so we can be sure that the
	 * newly unmapped blocks will not be allocated before the end of
	 * the function.
	 */
	r = dm_thin_remove_range(tc->td, m->virt_begin, m->virt_end);
	if (r) {
		metadata_operation_failed(pool, "dm_thin_remove_range", r);
		bio_io_error(m->bio);
		cell_defer_no_holder(tc, m->cell);
		mempool_free(m, &pool->mapping_pool);
		return;
	}

	/*
	 * Increment the unmapped blocks.  This prevents a race between the
	 * passdown io and reallocation of freed blocks.
	 */
	r = dm_pool_inc_data_range(pool->pmd, m->data_block, data_end);
	if (r) {
		metadata_operation_failed(pool, "dm_pool_inc_data_range", r);
		bio_io_error(m->bio);
		cell_defer_no_holder(tc, m->cell);
		mempool_free(m, &pool->mapping_pool);
		return;
	}

	discard_parent = bio_alloc(GFP_NOIO, 1);
	if (!discard_parent) {
		DMWARN("%s: unable to allocate top level discard bio for passdown. Skipping passdown.",
		       dm_device_name(tc->pool->pool_md));
		queue_passdown_pt2(m);

	} else {
		discard_parent->bi_end_io = passdown_endio;
		discard_parent->bi_private = m;

		if (m->maybe_shared)
			passdown_double_checking_shared_status(m, discard_parent);
		else {
			struct discard_op op;

			begin_discard(&op, tc, discard_parent);
			r = issue_discard(&op, m->data_block, data_end);
			end_discard(&op, r);
		}
	}
}

static void process_prepared_discard_passdown_pt2(struct dm_thin_new_mapping *m)
{
	int r;
	struct thin_c *tc = m->tc;
	struct pool *pool = tc->pool;

	/*
	 * The passdown has completed, so now we can decrement all those
	 * unmapped blocks.
	 */
	r = dm_pool_dec_data_range(pool->pmd, m->data_block,
				   m->data_block + (m->virt_end - m->virt_begin));
	if (r) {
		metadata_operation_failed(pool, "dm_pool_dec_data_range", r);
		bio_io_error(m->bio);
	} else
		bio_endio(m->bio);

	cell_defer_no_holder(tc, m->cell);
	mempool_free(m, &pool->mapping_pool);
}

static void process_prepared(struct pool *pool, struct list_head *head,
			     process_mapping_fn *fn)
{
	unsigned long flags;
	struct list_head maps;
	struct dm_thin_new_mapping *m, *tmp;

	INIT_LIST_HEAD(&maps);
	spin_lock_irqsave(&pool->lock, flags);
	list_splice_init(head, &maps);
	spin_unlock_irqrestore(&pool->lock, flags);

	list_for_each_entry_safe(m, tmp, &maps, list)
		(*fn)(m);
}

/*
 * Deferred bio jobs.
 */
static int io_overlaps_block(struct pool *pool, struct bio *bio)
{
	return bio->bi_iter.bi_size ==
		(pool->sectors_per_block << SECTOR_SHIFT);
}

static int io_overwrites_block(struct pool *pool, struct bio *bio)
{
	return (bio_data_dir(bio) == WRITE) &&
		io_overlaps_block(pool, bio);
}

static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
			       bio_end_io_t *fn)
{
	*save = bio->bi_end_io;
	bio->bi_end_io = fn;
}

static int ensure_next_mapping(struct pool *pool)
{
	if (pool->next_mapping)
		return 0;

	pool->next_mapping = mempool_alloc(&pool->mapping_pool, GFP_ATOMIC);

	return pool->next_mapping ? 0 : -ENOMEM;
}

static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
{
	struct dm_thin_new_mapping *m = pool->next_mapping;

	BUG_ON(!pool->next_mapping);

	memset(m, 0, sizeof(struct dm_thin_new_mapping));
	INIT_LIST_HEAD(&m->list);
	m->bio = NULL;

	pool->next_mapping = NULL;

	return m;
}

static void ll_zero(struct thin_c *tc, struct dm_thin_new_mapping *m,
		    sector_t begin, sector_t end)
{
	struct dm_io_region to;

	to.bdev = tc->pool_dev->bdev;
	to.sector = begin;
	to.count = end - begin;

	dm_kcopyd_zero(tc->pool->copier, 1, &to, 0, copy_complete, m);
}

static void remap_and_issue_overwrite(struct thin_c *tc, struct bio *bio,
				      dm_block_t data_begin,
				      struct dm_thin_new_mapping *m)
{
	struct pool *pool = tc->pool;
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

	h->overwrite_mapping = m;
	m->bio = bio;
	save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
	inc_all_io_entry(pool, bio);
	remap_and_issue(tc, bio, data_begin);
}

/*
 * A partial copy also needs to zero the uncopied region.
 */
static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
			  struct dm_dev *origin, dm_block_t data_origin,
			  dm_block_t data_dest,
			  struct dm_bio_prison_cell *cell, struct bio *bio,
			  sector_t len)
{
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	m->tc = tc;
	m->virt_begin = virt_block;
	m->virt_end = virt_block + 1u;
	m->data_block = data_dest;
	m->cell = cell;

	/*
	 * quiesce action + copy action + an extra reference held for the
	 * duration of this function (we may need to inc later for a
	 * partial zero).
	 */
	atomic_set(&m->prepare_actions, 3);

	if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
		complete_mapping_preparation(m); /* already quiesced */

	/*
	 * IO to pool_dev remaps to the pool target's data_dev.
	 *
	 * If the whole block of data is being overwritten, we can issue the
	 * bio immediately. Otherwise we use kcopyd to clone the data first.
	 */
	if (io_overwrites_block(pool, bio))
		remap_and_issue_overwrite(tc, bio, data_dest, m);
	else {
		struct dm_io_region from, to;

		from.bdev = origin->bdev;
		from.sector = data_origin * pool->sectors_per_block;
		from.count = len;

		to.bdev = tc->pool_dev->bdev;
		to.sector = data_dest * pool->sectors_per_block;
		to.count = len;

		dm_kcopyd_copy(pool->copier, &from, 1, &to,
			       0, copy_complete, m);

		/*
		 * Do we need to zero a tail region?
		 */
		if (len < pool->sectors_per_block && pool->pf.zero_new_blocks) {
			atomic_inc(&m->prepare_actions);
			ll_zero(tc, m,
				data_dest * pool->sectors_per_block + len,
				(data_dest + 1) * pool->sectors_per_block);
		}
	}

	complete_mapping_preparation(m); /* drop our ref */
}

static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_origin, dm_block_t data_dest,
				   struct dm_bio_prison_cell *cell, struct bio *bio)
{
	schedule_copy(tc, virt_block, tc->pool_dev,
		      data_origin, data_dest, cell, bio,
		      tc->pool->sectors_per_block);
}

static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
			  dm_block_t data_block, struct dm_bio_prison_cell *cell,
			  struct bio *bio)
{
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	atomic_set(&m->prepare_actions, 1); /* no need to quiesce */
	m->tc = tc;
	m->virt_begin = virt_block;
	m->virt_end = virt_block + 1u;
	m->data_block = data_block;
	m->cell = cell;

	/*
	 * If the whole block of data is being overwritten or we are not
	 * zeroing pre-existing data, we can issue the bio immediately.
	 * Otherwise we use kcopyd to zero the data first.
	 */
	if (pool->pf.zero_new_blocks) {
		if (io_overwrites_block(pool, bio))
			remap_and_issue_overwrite(tc, bio, data_block, m);
		else
			ll_zero(tc, m, data_block * pool->sectors_per_block,
				(data_block + 1) * pool->sectors_per_block);
	} else
		process_prepared_mapping(m);
}

static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_dest,
				   struct dm_bio_prison_cell *cell, struct bio *bio)
{
	struct pool *pool = tc->pool;
	sector_t virt_block_begin = virt_block * pool->sectors_per_block;
	sector_t virt_block_end = (virt_block + 1) * pool->sectors_per_block;

	if (virt_block_end <= tc->origin_size)
		schedule_copy(tc, virt_block, tc->origin_dev,
			      virt_block, data_dest, cell, bio,
			      pool->sectors_per_block);

	else if (virt_block_begin < tc->origin_size)
		schedule_copy(tc, virt_block, tc->origin_dev,
			      virt_block, data_dest, cell, bio,
			      tc->origin_size - virt_block_begin);

	else
		schedule_zero(tc, virt_block, data_dest, cell, bio);
}

static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);

static void requeue_bios(struct pool *pool);

static void check_for_space(struct pool *pool)
{
	int r;
	dm_block_t nr_free;

	if (get_pool_mode(pool) != PM_OUT_OF_DATA_SPACE)
		return;

	r = dm_pool_get_free_block_count(pool->pmd, &nr_free);
	if (r)
		return;

	if (nr_free) {
		set_pool_mode(pool, PM_WRITE);
		requeue_bios(pool);
	}
}

/*
 * A non-zero return indicates read_only or fail_io mode.
 * Many callers don't care about the return value.
 */
static int commit(struct pool *pool)
{
	int r;

	if (get_pool_mode(pool) >= PM_READ_ONLY)
		return -EINVAL;

	r = dm_pool_commit_metadata(pool->pmd);
	if (r)
		metadata_operation_failed(pool, "dm_pool_commit_metadata", r);
	else
		check_for_space(pool);

	return r;
}

static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
{
	unsigned long flags;

	if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
		DMWARN("%s: reached low water mark for data device: sending event.",
		       dm_device_name(pool->pool_md));
		spin_lock_irqsave(&pool->lock, flags);
		pool->low_water_triggered = true;
		spin_unlock_irqrestore(&pool->lock, flags);
		dm_table_event(pool->ti->table);
	}
}

static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
{
	int r;
	dm_block_t free_blocks;
	struct pool *pool = tc->pool;

	if (WARN_ON(get_pool_mode(pool) != PM_WRITE))
		return -EINVAL;

	r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
	if (r) {
		metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
		return r;
	}

	check_low_water_mark(pool, free_blocks);

	if (!free_blocks) {
		/*
		 * Try to commit to see if that will free up some
		 * more space.
		 */
		r = commit(pool);
		if (r)
			return r;

		r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
		if (r) {
			metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
			return r;
		}

		if (!free_blocks) {
			set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
			return -ENOSPC;
		}
	}

	r = dm_pool_alloc_data_block(pool->pmd, result);
	if (r) {
		if (r == -ENOSPC)
			set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
		else
			metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
		return r;
	}

	return 0;
}
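
/*
 * The explicit commit above matters because blocks freed in the
 * current transaction only become allocatable once that transaction
 * has been committed; a pool that looks full may therefore gain free
 * blocks after the commit.
 */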

/*
 * If we have run out of space, queue bios until the device is
 * resumed, presumably after having been reloaded with more space.
 */
static void retry_on_resume(struct bio *bio)
{
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct thin_c *tc = h->tc;
	unsigned long flags;

	spin_lock_irqsave(&tc->lock, flags);
	bio_list_add(&tc->retry_on_resume_list, bio);
	spin_unlock_irqrestore(&tc->lock, flags);
}

static blk_status_t should_error_unserviceable_bio(struct pool *pool)
{
	enum pool_mode m = get_pool_mode(pool);

	switch (m) {
	case PM_WRITE:
		/* Shouldn't get here */
		DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode");
		return BLK_STS_IOERR;

	case PM_OUT_OF_DATA_SPACE:
		return pool->pf.error_if_no_space ? BLK_STS_NOSPC : 0;

	case PM_READ_ONLY:
	case PM_FAIL:
		return BLK_STS_IOERR;
	default:
		/* Shouldn't get here */
		DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode");
		return BLK_STS_IOERR;
	}
}
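
/*
 * A return of 0 here means "don't error, queue instead": in
 * PM_OUT_OF_DATA_SPACE with error_if_no_space disabled,
 * handle_unserviceable_bio() parks the bio on the thin's
 * retry_on_resume_list rather than failing it.
 */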

static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
{
	blk_status_t error = should_error_unserviceable_bio(pool);

	if (error) {
		bio->bi_status = error;
		bio_endio(bio);
	} else
		retry_on_resume(bio);
}

static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	struct bio *bio;
	struct bio_list bios;
	blk_status_t error;

	error = should_error_unserviceable_bio(pool);
	if (error) {
		cell_error_with_code(pool, cell, error);
		return;
	}

	bio_list_init(&bios);
	cell_release(pool, cell, &bios);

	while ((bio = bio_list_pop(&bios)))
		retry_on_resume(bio);
}

static void process_discard_cell_no_passdown(struct thin_c *tc,
					     struct dm_bio_prison_cell *virt_cell)
{
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	/*
	 * We don't need to lock the data blocks, since there's no
	 * passdown.  We only lock data blocks for allocation and breaking sharing.
	 */
	m->tc = tc;
	m->virt_begin = virt_cell->key.block_begin;
	m->virt_end = virt_cell->key.block_end;
	m->cell = virt_cell;
	m->bio = virt_cell->holder;

	if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list))
		pool->process_prepared_discard(m);
}

static void break_up_discard_bio(struct thin_c *tc, dm_block_t begin, dm_block_t end,
				 struct bio *bio)
{
	struct pool *pool = tc->pool;

	int r;
	bool maybe_shared;
	struct dm_cell_key data_key;
	struct dm_bio_prison_cell *data_cell;
	struct dm_thin_new_mapping *m;
	dm_block_t virt_begin, virt_end, data_begin;

	while (begin != end) {
		r = ensure_next_mapping(pool);
		if (r)
			/* we did our best */
			return;

		r = dm_thin_find_mapped_range(tc->td, begin, end, &virt_begin, &virt_end,
					      &data_begin, &maybe_shared);
		if (r)
			/*
			 * Silently fail, letting any mappings we've
			 * created complete.
			 */
			break;

		build_key(tc->td, PHYSICAL, data_begin, data_begin + (virt_end - virt_begin), &data_key);
		if (bio_detain(tc->pool, &data_key, NULL, &data_cell)) {
			/* contention, we'll give up with this range */
			begin = virt_end;
			continue;
		}

		/*
		 * IO may still be going to the destination block.  We must
		 * quiesce before we can do the removal.
		 */
		m = get_next_mapping(pool);
		m->tc = tc;
		m->maybe_shared = maybe_shared;
		m->virt_begin = virt_begin;
		m->virt_end = virt_end;
		m->data_block = data_begin;
		m->cell = data_cell;
		m->bio = bio;

		/*
		 * The parent bio must not complete before sub discard bios are
		 * chained to it (see end_discard's bio_chain)!
		 *
		 * This per-mapping bi_remaining increment is paired with
		 * the implicit decrement that occurs via bio_endio() in
		 * end_discard().
		 */
		bio_inc_remaining(bio);
		if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list))
			pool->process_prepared_discard(m);

		begin = virt_end;
	}
}

static void process_discard_cell_passdown(struct thin_c *tc, struct dm_bio_prison_cell *virt_cell)
{
	struct bio *bio = virt_cell->holder;
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

	/*
	 * The virt_cell will only get freed once the origin bio completes.
	 * This means it will remain locked while all the individual
	 * passdown bios are in flight.
	 */
	h->cell = virt_cell;
	break_up_discard_bio(tc, virt_cell->key.block_begin, virt_cell->key.block_end, bio);

	/*
	 * We complete the bio now, knowing that the bi_remaining field
	 * will prevent completion until the sub range discards have
	 * completed.
	 */
	bio_endio(bio);
}

static void process_discard_bio(struct thin_c *tc, struct bio *bio)
{
	dm_block_t begin, end;
	struct dm_cell_key virt_key;
	struct dm_bio_prison_cell *virt_cell;

	get_bio_block_range(tc, bio, &begin, &end);
	if (begin == end) {
		/*
		 * The discard covers less than a block.
		 */
		bio_endio(bio);
		return;
	}

	build_key(tc->td, VIRTUAL, begin, end, &virt_key);
	if (bio_detain(tc->pool, &virt_key, bio, &virt_cell))
		/*
		 * Potential starvation issue: We're relying on the
		 * fs/application being well behaved, and not trying to
		 * send IO to a region at the same time as discarding it.
		 * If they do this persistently then it's possible this
		 * cell will never be granted.
		 */
		return;

	tc->pool->process_discard_cell(tc, virt_cell);
}

static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
			  struct dm_cell_key *key,
			  struct dm_thin_lookup_result *lookup_result,
			  struct dm_bio_prison_cell *cell)
{
	int r;
	dm_block_t data_block;
	struct pool *pool = tc->pool;

	r = alloc_data_block(tc, &data_block);
	switch (r) {
	case 0:
		schedule_internal_copy(tc, block, lookup_result->block,
				       data_block, cell, bio);
		break;

	case -ENOSPC:
		retry_bios_on_resume(pool, cell);
		break;

	default:
		DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
			    __func__, r);
		cell_error(pool, cell);
		break;
	}
}

static void __remap_and_issue_shared_cell(void *context,
					  struct dm_bio_prison_cell *cell)
{
	struct remap_info *info = context;
	struct bio *bio;

	while ((bio = bio_list_pop(&cell->bios))) {
		if (bio_data_dir(bio) == WRITE || op_is_flush(bio->bi_opf) ||
		    bio_op(bio) == REQ_OP_DISCARD)
			bio_list_add(&info->defer_bios, bio);
		else {
			struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

			h->shared_read_entry = dm_deferred_entry_inc(info->tc->pool->shared_read_ds);
			inc_all_io_entry(info->tc->pool, bio);
			bio_list_add(&info->issue_bios, bio);
		}
	}
}

static void remap_and_issue_shared_cell(struct thin_c *tc,
					struct dm_bio_prison_cell *cell,
					dm_block_t block)
{
	struct bio *bio;
	struct remap_info info;

	info.tc = tc;
	bio_list_init(&info.defer_bios);
	bio_list_init(&info.issue_bios);

	cell_visit_release(tc->pool, __remap_and_issue_shared_cell,
			   &info, cell);

	while ((bio = bio_list_pop(&info.defer_bios)))
		thin_defer_bio(tc, bio);

	while ((bio = bio_list_pop(&info.issue_bios)))
		remap_and_issue(tc, bio, block);
}
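
/*
 * Note the asymmetry above: reads of a shared block can be issued
 * directly (after registering with shared_read_ds so breaking sharing
 * can quiesce them), whereas writes, flushes and discards are
 * deferred because they must first break sharing.
 */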

static void process_shared_bio(struct thin_c *tc, struct bio *bio,
			       dm_block_t block,
			       struct dm_thin_lookup_result *lookup_result,
			       struct dm_bio_prison_cell *virt_cell)
{
	struct dm_bio_prison_cell *data_cell;
	struct pool *pool = tc->pool;
	struct dm_cell_key key;

	/*
	 * If cell is already occupied, then sharing is already in the process
	 * of being broken so we have nothing further to do here.
	 */
	build_data_key(tc->td, lookup_result->block, &key);
	if (bio_detain(pool, &key, bio, &data_cell)) {
		cell_defer_no_holder(tc, virt_cell);
		return;
	}

	if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size) {
		break_sharing(tc, bio, block, &key, lookup_result, data_cell);
		cell_defer_no_holder(tc, virt_cell);
	} else {
		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

		h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
		inc_all_io_entry(pool, bio);
		remap_and_issue(tc, bio, lookup_result->block);

		remap_and_issue_shared_cell(tc, data_cell, lookup_result->block);
		remap_and_issue_shared_cell(tc, virt_cell, lookup_result->block);
	}
}

static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
			    struct dm_bio_prison_cell *cell)
{
	int r;
	dm_block_t data_block;
	struct pool *pool = tc->pool;

	/*
	 * Remap empty bios (flushes) immediately, without provisioning.
	 */
	if (!bio->bi_iter.bi_size) {
		inc_all_io_entry(pool, bio);
		cell_defer_no_holder(tc, cell);

		remap_and_issue(tc, bio, 0);
		return;
	}

	/*
	 * Fill read bios with zeroes and complete them immediately.
	 */
	if (bio_data_dir(bio) == READ) {
		zero_fill_bio(bio);
		cell_defer_no_holder(tc, cell);
		bio_endio(bio);
		return;
	}

	r = alloc_data_block(tc, &data_block);
	switch (r) {
	case 0:
		if (tc->origin_dev)
			schedule_external_copy(tc, block, data_block, cell, bio);
		else
			schedule_zero(tc, block, data_block, cell, bio);
		break;

	case -ENOSPC:
		retry_bios_on_resume(pool, cell);
		break;

	default:
		DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
			    __func__, r);
		cell_error(pool, cell);
		break;
	}
}

static void process_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	int r;
	struct pool *pool = tc->pool;
	struct bio *bio = cell->holder;
	dm_block_t block = get_bio_block(tc, bio);
	struct dm_thin_lookup_result lookup_result;

	if (tc->requeue_mode) {
		cell_requeue(pool, cell);
		return;
	}

	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
	switch (r) {
	case 0:
		if (lookup_result.shared)
			process_shared_bio(tc, bio, block, &lookup_result, cell);
		else {
			inc_all_io_entry(pool, bio);
			remap_and_issue(tc, bio, lookup_result.block);
			inc_remap_and_issue_cell(tc, cell, lookup_result.block);
		}
		break;

	case -ENODATA:
		if (bio_data_dir(bio) == READ && tc->origin_dev) {
			inc_all_io_entry(pool, bio);
			cell_defer_no_holder(tc, cell);

			if (bio_end_sector(bio) <= tc->origin_size)
				remap_to_origin_and_issue(tc, bio);

			else if (bio->bi_iter.bi_sector < tc->origin_size) {
				zero_fill_bio(bio);
				bio->bi_iter.bi_size = (tc->origin_size - bio->bi_iter.bi_sector) << SECTOR_SHIFT;
				remap_to_origin_and_issue(tc, bio);

			} else {
				zero_fill_bio(bio);
				bio_endio(bio);
			}
		} else
			provision_block(tc, bio, block, cell);
		break;

	default:
		DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
			    __func__, r);
		cell_defer_no_holder(tc, cell);
		bio_io_error(bio);
		break;
	}
}
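
/*
 * In the -ENODATA read path above, a bio that straddles the end of an
 * external origin is first zero filled in its entirety and then
 * trimmed, so the tail beyond origin_size reads as zeroes while the
 * head is still fetched from the origin device.
 */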

static void process_bio(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	dm_block_t block = get_bio_block(tc, bio);
	struct dm_bio_prison_cell *cell;
	struct dm_cell_key key;

	/*
	 * If cell is already occupied, then the block is already
	 * being provisioned so we have nothing further to do here.
	 */
	build_virtual_key(tc->td, block, &key);
	if (bio_detain(pool, &key, bio, &cell))
		return;

	process_cell(tc, cell);
}

static void __process_bio_read_only(struct thin_c *tc, struct bio *bio,
				    struct dm_bio_prison_cell *cell)
{
	int r;
	int rw = bio_data_dir(bio);
	dm_block_t block = get_bio_block(tc, bio);
	struct dm_thin_lookup_result lookup_result;

	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
	switch (r) {
	case 0:
		if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size) {
			handle_unserviceable_bio(tc->pool, bio);
			if (cell)
				cell_defer_no_holder(tc, cell);
		} else {
			inc_all_io_entry(tc->pool, bio);
			remap_and_issue(tc, bio, lookup_result.block);
			if (cell)
				inc_remap_and_issue_cell(tc, cell, lookup_result.block);
		}
		break;

	case -ENODATA:
		if (cell)
			cell_defer_no_holder(tc, cell);
		if (rw != READ) {
			handle_unserviceable_bio(tc->pool, bio);
			break;
		}

		if (tc->origin_dev) {
			inc_all_io_entry(tc->pool, bio);
			remap_to_origin_and_issue(tc, bio);
			break;
		}

		zero_fill_bio(bio);
		bio_endio(bio);
		break;

	default:
		DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
			    __func__, r);
		if (cell)
			cell_defer_no_holder(tc, cell);
		bio_io_error(bio);
		break;
	}
}

static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
{
	__process_bio_read_only(tc, bio, NULL);
}

static void process_cell_read_only(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	__process_bio_read_only(tc, cell->holder, cell);
}

static void process_bio_success(struct thin_c *tc, struct bio *bio)
{
	bio_endio(bio);
}

static void process_bio_fail(struct thin_c *tc, struct bio *bio)
{
	bio_io_error(bio);
}

static void process_cell_success(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	cell_success(tc->pool, cell);
}

static void process_cell_fail(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	cell_error(tc->pool, cell);
}

/*
 * FIXME: should we also commit due to size of transaction, measured in
 * metadata blocks?
 */
static int need_commit_due_to_time(struct pool *pool)
{
	return !time_in_range(jiffies, pool->last_commit_jiffies,
			      pool->last_commit_jiffies + COMMIT_PERIOD);
}
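
/*
 * With COMMIT_PERIOD == HZ this bounds the window of uncommitted
 * metadata at roughly one second: process_deferred_bios() commits
 * whenever the transaction has changed and this much time has passed.
 */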

#define thin_pbd(node) rb_entry((node), struct dm_thin_endio_hook, rb_node)
#define thin_bio(pbd) dm_bio_from_per_bio_data((pbd), sizeof(struct dm_thin_endio_hook))

static void __thin_bio_rb_add(struct thin_c *tc, struct bio *bio)
{
	struct rb_node **rbp, *parent;
	struct dm_thin_endio_hook *pbd;
	sector_t bi_sector = bio->bi_iter.bi_sector;

	rbp = &tc->sort_bio_list.rb_node;
	parent = NULL;
	while (*rbp) {
		parent = *rbp;
		pbd = thin_pbd(parent);

		if (bi_sector < thin_bio(pbd)->bi_iter.bi_sector)
			rbp = &(*rbp)->rb_left;
		else
			rbp = &(*rbp)->rb_right;
	}

	pbd = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	rb_link_node(&pbd->rb_node, parent, rbp);
	rb_insert_color(&pbd->rb_node, &tc->sort_bio_list);
}

static void __extract_sorted_bios(struct thin_c *tc)
{
	struct rb_node *node;
	struct dm_thin_endio_hook *pbd;
	struct bio *bio;

	for (node = rb_first(&tc->sort_bio_list); node; node = rb_next(node)) {
		pbd = thin_pbd(node);
		bio = thin_bio(pbd);

		bio_list_add(&tc->deferred_bio_list, bio);
		rb_erase(&pbd->rb_node, &tc->sort_bio_list);
	}

	WARN_ON(!RB_EMPTY_ROOT(&tc->sort_bio_list));
}

static void __sort_thin_deferred_bios(struct thin_c *tc)
{
	struct bio *bio;
	struct bio_list bios;

	bio_list_init(&bios);
	bio_list_merge(&bios, &tc->deferred_bio_list);
	bio_list_init(&tc->deferred_bio_list);

	/* Sort deferred_bio_list using rb-tree */
	while ((bio = bio_list_pop(&bios)))
		__thin_bio_rb_add(tc, bio);

	/*
	 * Transfer the sorted bios in sort_bio_list back to
	 * deferred_bio_list to allow lockless submission of
	 * all bios.
	 */
	__extract_sorted_bios(tc);
}

static void process_thin_deferred_bios(struct thin_c *tc)
{
	struct pool *pool = tc->pool;
	unsigned long flags;
	struct bio *bio;
	struct bio_list bios;
	struct blk_plug plug;
	unsigned count = 0;

	if (tc->requeue_mode) {
		error_thin_bio_list(tc, &tc->deferred_bio_list,
				    BLK_STS_DM_REQUEUE);
		return;
	}

	bio_list_init(&bios);

	spin_lock_irqsave(&tc->lock, flags);

	if (bio_list_empty(&tc->deferred_bio_list)) {
		spin_unlock_irqrestore(&tc->lock, flags);
		return;
	}

	__sort_thin_deferred_bios(tc);

	bio_list_merge(&bios, &tc->deferred_bio_list);
	bio_list_init(&tc->deferred_bio_list);

	spin_unlock_irqrestore(&tc->lock, flags);

	blk_start_plug(&plug);
	while ((bio = bio_list_pop(&bios))) {
		/*
		 * If we've got no free new_mapping structs, and processing
		 * this bio might require one, we pause until there are some
		 * prepared mappings to process.
		 */
		if (ensure_next_mapping(pool)) {
			spin_lock_irqsave(&tc->lock, flags);
			bio_list_add(&tc->deferred_bio_list, bio);
			bio_list_merge(&tc->deferred_bio_list, &bios);
			spin_unlock_irqrestore(&tc->lock, flags);
			break;
		}

		if (bio_op(bio) == REQ_OP_DISCARD)
			pool->process_discard(tc, bio);
		else
			pool->process_bio(tc, bio);

		if ((count++ & 127) == 0) {
			throttle_work_update(&pool->throttle);
			dm_pool_issue_prefetches(pool->pmd);
		}
	}
	blk_finish_plug(&plug);
}
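
/*
 * Deferred bios are issued in sector order under a blk plug so that
 * adjacent bios can be merged by the block layer, and metadata
 * prefetches are reissued every 128 bios to keep btree lookups ahead
 * of the stream.
 */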
2094 */ 2095 if (ensure_next_mapping(pool)) { 2096 spin_lock_irqsave(&tc->lock, flags); 2097 bio_list_add(&tc->deferred_bio_list, bio); 2098 bio_list_merge(&tc->deferred_bio_list, &bios); 2099 spin_unlock_irqrestore(&tc->lock, flags); 2100 break; 2101 } 2102 2103 if (bio_op(bio) == REQ_OP_DISCARD) 2104 pool->process_discard(tc, bio); 2105 else 2106 pool->process_bio(tc, bio); 2107 2108 if ((count++ & 127) == 0) { 2109 throttle_work_update(&pool->throttle); 2110 dm_pool_issue_prefetches(pool->pmd); 2111 } 2112 } 2113 blk_finish_plug(&plug); 2114 } 2115 2116 static int cmp_cells(const void *lhs, const void *rhs) 2117 { 2118 struct dm_bio_prison_cell *lhs_cell = *((struct dm_bio_prison_cell **) lhs); 2119 struct dm_bio_prison_cell *rhs_cell = *((struct dm_bio_prison_cell **) rhs); 2120 2121 BUG_ON(!lhs_cell->holder); 2122 BUG_ON(!rhs_cell->holder); 2123 2124 if (lhs_cell->holder->bi_iter.bi_sector < rhs_cell->holder->bi_iter.bi_sector) 2125 return -1; 2126 2127 if (lhs_cell->holder->bi_iter.bi_sector > rhs_cell->holder->bi_iter.bi_sector) 2128 return 1; 2129 2130 return 0; 2131 } 2132 2133 static unsigned sort_cells(struct pool *pool, struct list_head *cells) 2134 { 2135 unsigned count = 0; 2136 struct dm_bio_prison_cell *cell, *tmp; 2137 2138 list_for_each_entry_safe(cell, tmp, cells, user_list) { 2139 if (count >= CELL_SORT_ARRAY_SIZE) 2140 break; 2141 2142 pool->cell_sort_array[count++] = cell; 2143 list_del(&cell->user_list); 2144 } 2145 2146 sort(pool->cell_sort_array, count, sizeof(cell), cmp_cells, NULL); 2147 2148 return count; 2149 } 2150 2151 static void process_thin_deferred_cells(struct thin_c *tc) 2152 { 2153 struct pool *pool = tc->pool; 2154 unsigned long flags; 2155 struct list_head cells; 2156 struct dm_bio_prison_cell *cell; 2157 unsigned i, j, count; 2158 2159 INIT_LIST_HEAD(&cells); 2160 2161 spin_lock_irqsave(&tc->lock, flags); 2162 list_splice_init(&tc->deferred_cells, &cells); 2163 spin_unlock_irqrestore(&tc->lock, flags); 2164 2165 if (list_empty(&cells)) 2166 return; 2167 2168 do { 2169 count = sort_cells(tc->pool, &cells); 2170 2171 for (i = 0; i < count; i++) { 2172 cell = pool->cell_sort_array[i]; 2173 BUG_ON(!cell->holder); 2174 2175 /* 2176 * If we've got no free new_mapping structs, and processing 2177 * this bio might require one, we pause until there are some 2178 * prepared mappings to process. 2179 */ 2180 if (ensure_next_mapping(pool)) { 2181 for (j = i; j < count; j++) 2182 list_add(&pool->cell_sort_array[j]->user_list, &cells); 2183 2184 spin_lock_irqsave(&tc->lock, flags); 2185 list_splice(&cells, &tc->deferred_cells); 2186 spin_unlock_irqrestore(&tc->lock, flags); 2187 return; 2188 } 2189 2190 if (bio_op(cell->holder) == REQ_OP_DISCARD) 2191 pool->process_discard_cell(tc, cell); 2192 else 2193 pool->process_cell(tc, cell); 2194 } 2195 } while (!list_empty(&cells)); 2196 } 2197 2198 static void thin_get(struct thin_c *tc); 2199 static void thin_put(struct thin_c *tc); 2200 2201 /* 2202 * We can't hold rcu_read_lock() around code that can block. So we 2203 * find a thin with the rcu lock held; bump a refcount; then drop 2204 * the lock. 
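 *
 * The resulting iteration pattern, as used by process_deferred_bios()
 * and the pool suspend/resume helpers, is:
 *
 *	tc = get_first_thin(pool);
 *	while (tc) {
 *		(blocking work on tc)
 *		tc = get_next_thin(pool, tc);	(drops the ref on tc)
 *	}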
2205 */ 2206 static struct thin_c *get_first_thin(struct pool *pool) 2207 { 2208 struct thin_c *tc = NULL; 2209 2210 rcu_read_lock(); 2211 if (!list_empty(&pool->active_thins)) { 2212 tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list); 2213 thin_get(tc); 2214 } 2215 rcu_read_unlock(); 2216 2217 return tc; 2218 } 2219 2220 static struct thin_c *get_next_thin(struct pool *pool, struct thin_c *tc) 2221 { 2222 struct thin_c *old_tc = tc; 2223 2224 rcu_read_lock(); 2225 list_for_each_entry_continue_rcu(tc, &pool->active_thins, list) { 2226 thin_get(tc); 2227 thin_put(old_tc); 2228 rcu_read_unlock(); 2229 return tc; 2230 } 2231 thin_put(old_tc); 2232 rcu_read_unlock(); 2233 2234 return NULL; 2235 } 2236 2237 static void process_deferred_bios(struct pool *pool) 2238 { 2239 unsigned long flags; 2240 struct bio *bio; 2241 struct bio_list bios; 2242 struct thin_c *tc; 2243 2244 tc = get_first_thin(pool); 2245 while (tc) { 2246 process_thin_deferred_cells(tc); 2247 process_thin_deferred_bios(tc); 2248 tc = get_next_thin(pool, tc); 2249 } 2250 2251 /* 2252 * If there are any deferred flush bios, we must commit 2253 * the metadata before issuing them. 2254 */ 2255 bio_list_init(&bios); 2256 spin_lock_irqsave(&pool->lock, flags); 2257 bio_list_merge(&bios, &pool->deferred_flush_bios); 2258 bio_list_init(&pool->deferred_flush_bios); 2259 spin_unlock_irqrestore(&pool->lock, flags); 2260 2261 if (bio_list_empty(&bios) && 2262 !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool))) 2263 return; 2264 2265 if (commit(pool)) { 2266 while ((bio = bio_list_pop(&bios))) 2267 bio_io_error(bio); 2268 return; 2269 } 2270 pool->last_commit_jiffies = jiffies; 2271 2272 while ((bio = bio_list_pop(&bios))) 2273 generic_make_request(bio); 2274 } 2275 2276 static void do_worker(struct work_struct *ws) 2277 { 2278 struct pool *pool = container_of(ws, struct pool, worker); 2279 2280 throttle_work_start(&pool->throttle); 2281 dm_pool_issue_prefetches(pool->pmd); 2282 throttle_work_update(&pool->throttle); 2283 process_prepared(pool, &pool->prepared_mappings, &pool->process_prepared_mapping); 2284 throttle_work_update(&pool->throttle); 2285 process_prepared(pool, &pool->prepared_discards, &pool->process_prepared_discard); 2286 throttle_work_update(&pool->throttle); 2287 process_prepared(pool, &pool->prepared_discards_pt2, &pool->process_prepared_discard_pt2); 2288 throttle_work_update(&pool->throttle); 2289 process_deferred_bios(pool); 2290 throttle_work_complete(&pool->throttle); 2291 } 2292 2293 /* 2294 * We want to commit periodically so that not too much 2295 * unwritten data builds up. 2296 */ 2297 static void do_waker(struct work_struct *ws) 2298 { 2299 struct pool *pool = container_of(to_delayed_work(ws), struct pool, waker); 2300 wake_worker(pool); 2301 queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD); 2302 } 2303 2304 static void notify_of_pool_mode_change_to_oods(struct pool *pool); 2305 2306 /* 2307 * We're holding onto IO to allow userland time to react. After the 2308 * timeout either the pool will have been resized (and thus back in 2309 * PM_WRITE mode), or we degrade to PM_OUT_OF_DATA_SPACE w/ error_if_no_space. 
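 *
 * The hold time is no_space_timeout_secs (60s by default, adjustable
 * via the module's no_space_timeout parameter); a value of 0 disables
 * the timeout, in which case IO is queued until the pool is resized
 * (see set_pool_mode()).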
2310 */ 2311 static void do_no_space_timeout(struct work_struct *ws) 2312 { 2313 struct pool *pool = container_of(to_delayed_work(ws), struct pool, 2314 no_space_timeout); 2315 2316 if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) { 2317 pool->pf.error_if_no_space = true; 2318 notify_of_pool_mode_change_to_oods(pool); 2319 error_retry_list_with_code(pool, BLK_STS_NOSPC); 2320 } 2321 } 2322 2323 /*----------------------------------------------------------------*/ 2324 2325 struct pool_work { 2326 struct work_struct worker; 2327 struct completion complete; 2328 }; 2329 2330 static struct pool_work *to_pool_work(struct work_struct *ws) 2331 { 2332 return container_of(ws, struct pool_work, worker); 2333 } 2334 2335 static void pool_work_complete(struct pool_work *pw) 2336 { 2337 complete(&pw->complete); 2338 } 2339 2340 static void pool_work_wait(struct pool_work *pw, struct pool *pool, 2341 void (*fn)(struct work_struct *)) 2342 { 2343 INIT_WORK_ONSTACK(&pw->worker, fn); 2344 init_completion(&pw->complete); 2345 queue_work(pool->wq, &pw->worker); 2346 wait_for_completion(&pw->complete); 2347 } 2348 2349 /*----------------------------------------------------------------*/ 2350 2351 struct noflush_work { 2352 struct pool_work pw; 2353 struct thin_c *tc; 2354 }; 2355 2356 static struct noflush_work *to_noflush(struct work_struct *ws) 2357 { 2358 return container_of(to_pool_work(ws), struct noflush_work, pw); 2359 } 2360 2361 static void do_noflush_start(struct work_struct *ws) 2362 { 2363 struct noflush_work *w = to_noflush(ws); 2364 w->tc->requeue_mode = true; 2365 requeue_io(w->tc); 2366 pool_work_complete(&w->pw); 2367 } 2368 2369 static void do_noflush_stop(struct work_struct *ws) 2370 { 2371 struct noflush_work *w = to_noflush(ws); 2372 w->tc->requeue_mode = false; 2373 pool_work_complete(&w->pw); 2374 } 2375 2376 static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *)) 2377 { 2378 struct noflush_work w; 2379 2380 w.tc = tc; 2381 pool_work_wait(&w.pw, tc->pool, fn); 2382 } 2383 2384 /*----------------------------------------------------------------*/ 2385 2386 static enum pool_mode get_pool_mode(struct pool *pool) 2387 { 2388 return pool->pf.mode; 2389 } 2390 2391 static void notify_of_pool_mode_change(struct pool *pool, const char *new_mode) 2392 { 2393 dm_table_event(pool->ti->table); 2394 DMINFO("%s: switching pool to %s mode", 2395 dm_device_name(pool->pool_md), new_mode); 2396 } 2397 2398 static void notify_of_pool_mode_change_to_oods(struct pool *pool) 2399 { 2400 if (!pool->pf.error_if_no_space) 2401 notify_of_pool_mode_change(pool, "out-of-data-space (queue IO)"); 2402 else 2403 notify_of_pool_mode_change(pool, "out-of-data-space (error IO)"); 2404 } 2405 2406 static bool passdown_enabled(struct pool_c *pt) 2407 { 2408 return pt->adjusted_pf.discard_passdown; 2409 } 2410 2411 static void set_discard_callbacks(struct pool *pool) 2412 { 2413 struct pool_c *pt = pool->ti->private; 2414 2415 if (passdown_enabled(pt)) { 2416 pool->process_discard_cell = process_discard_cell_passdown; 2417 pool->process_prepared_discard = process_prepared_discard_passdown_pt1; 2418 pool->process_prepared_discard_pt2 = process_prepared_discard_passdown_pt2; 2419 } else { 2420 pool->process_discard_cell = process_discard_cell_no_passdown; 2421 pool->process_prepared_discard = process_prepared_discard_no_passdown; 2422 } 2423 } 2424 2425 static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) 2426 { 2427 struct pool_c *pt = pool->ti->private; 
2428 bool needs_check = dm_pool_metadata_needs_check(pool->pmd); 2429 enum pool_mode old_mode = get_pool_mode(pool); 2430 unsigned long no_space_timeout = READ_ONCE(no_space_timeout_secs) * HZ; 2431 2432 /* 2433 * Never allow the pool to transition to PM_WRITE mode if user 2434 * intervention is required to verify metadata and data consistency. 2435 */ 2436 if (new_mode == PM_WRITE && needs_check) { 2437 DMERR("%s: unable to switch pool to write mode until repaired.", 2438 dm_device_name(pool->pool_md)); 2439 if (old_mode != new_mode) 2440 new_mode = old_mode; 2441 else 2442 new_mode = PM_READ_ONLY; 2443 } 2444 /* 2445 * If we were in PM_FAIL mode, rollback of metadata failed. We're 2446 * not going to recover without a thin_repair. So we never let the 2447 * pool move out of the old mode. 2448 */ 2449 if (old_mode == PM_FAIL) 2450 new_mode = old_mode; 2451 2452 switch (new_mode) { 2453 case PM_FAIL: 2454 if (old_mode != new_mode) 2455 notify_of_pool_mode_change(pool, "failure"); 2456 dm_pool_metadata_read_only(pool->pmd); 2457 pool->process_bio = process_bio_fail; 2458 pool->process_discard = process_bio_fail; 2459 pool->process_cell = process_cell_fail; 2460 pool->process_discard_cell = process_cell_fail; 2461 pool->process_prepared_mapping = process_prepared_mapping_fail; 2462 pool->process_prepared_discard = process_prepared_discard_fail; 2463 2464 error_retry_list(pool); 2465 break; 2466 2467 case PM_READ_ONLY: 2468 if (old_mode != new_mode) 2469 notify_of_pool_mode_change(pool, "read-only"); 2470 dm_pool_metadata_read_only(pool->pmd); 2471 pool->process_bio = process_bio_read_only; 2472 pool->process_discard = process_bio_success; 2473 pool->process_cell = process_cell_read_only; 2474 pool->process_discard_cell = process_cell_success; 2475 pool->process_prepared_mapping = process_prepared_mapping_fail; 2476 pool->process_prepared_discard = process_prepared_discard_success; 2477 2478 error_retry_list(pool); 2479 break; 2480 2481 case PM_OUT_OF_DATA_SPACE: 2482 /* 2483 * Ideally we'd never hit this state; the low water mark 2484 * would trigger userland to extend the pool before we 2485 * completely run out of data space. However, many small 2486 * IOs to unprovisioned space can consume data space at an 2487 * alarming rate. Adjust your low water mark if you're 2488 * frequently seeing this mode. 
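 * (The low water mark is the free-data-block count below which a dm
 * event is raised; it is given as the fourth pool target argument,
 * see pool_ctr() below.)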
2489 */ 2490 if (old_mode != new_mode) 2491 notify_of_pool_mode_change_to_oods(pool); 2492 pool->out_of_data_space = true; 2493 pool->process_bio = process_bio_read_only; 2494 pool->process_discard = process_discard_bio; 2495 pool->process_cell = process_cell_read_only; 2496 pool->process_prepared_mapping = process_prepared_mapping; 2497 set_discard_callbacks(pool); 2498 2499 if (!pool->pf.error_if_no_space && no_space_timeout) 2500 queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout); 2501 break; 2502 2503 case PM_WRITE: 2504 if (old_mode != new_mode) 2505 notify_of_pool_mode_change(pool, "write"); 2506 if (old_mode == PM_OUT_OF_DATA_SPACE) 2507 cancel_delayed_work_sync(&pool->no_space_timeout); 2508 pool->out_of_data_space = false; 2509 pool->pf.error_if_no_space = pt->requested_pf.error_if_no_space; 2510 dm_pool_metadata_read_write(pool->pmd); 2511 pool->process_bio = process_bio; 2512 pool->process_discard = process_discard_bio; 2513 pool->process_cell = process_cell; 2514 pool->process_prepared_mapping = process_prepared_mapping; 2515 set_discard_callbacks(pool); 2516 break; 2517 } 2518 2519 pool->pf.mode = new_mode; 2520 /* 2521 * The pool mode may have changed, sync it so bind_control_target() 2522 * doesn't cause an unexpected mode transition on resume. 2523 */ 2524 pt->adjusted_pf.mode = new_mode; 2525 } 2526 2527 static void abort_transaction(struct pool *pool) 2528 { 2529 const char *dev_name = dm_device_name(pool->pool_md); 2530 2531 DMERR_LIMIT("%s: aborting current metadata transaction", dev_name); 2532 if (dm_pool_abort_metadata(pool->pmd)) { 2533 DMERR("%s: failed to abort metadata transaction", dev_name); 2534 set_pool_mode(pool, PM_FAIL); 2535 } 2536 2537 if (dm_pool_metadata_set_needs_check(pool->pmd)) { 2538 DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name); 2539 set_pool_mode(pool, PM_FAIL); 2540 } 2541 } 2542 2543 static void metadata_operation_failed(struct pool *pool, const char *op, int r) 2544 { 2545 DMERR_LIMIT("%s: metadata operation '%s' failed: error = %d", 2546 dm_device_name(pool->pool_md), op, r); 2547 2548 abort_transaction(pool); 2549 set_pool_mode(pool, PM_READ_ONLY); 2550 } 2551 2552 /*----------------------------------------------------------------*/ 2553 2554 /* 2555 * Mapping functions. 2556 */ 2557 2558 /* 2559 * Called only while mapping a thin bio to hand it over to the workqueue. 
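 * The _with_throttle variant below takes the pool's throttle read
 * lock around the deferral, allowing an overloaded worker to apply
 * back-pressure to submitters (see throttle_lock()).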
2560 */ 2561 static void thin_defer_bio(struct thin_c *tc, struct bio *bio) 2562 { 2563 unsigned long flags; 2564 struct pool *pool = tc->pool; 2565 2566 spin_lock_irqsave(&tc->lock, flags); 2567 bio_list_add(&tc->deferred_bio_list, bio); 2568 spin_unlock_irqrestore(&tc->lock, flags); 2569 2570 wake_worker(pool); 2571 } 2572 2573 static void thin_defer_bio_with_throttle(struct thin_c *tc, struct bio *bio) 2574 { 2575 struct pool *pool = tc->pool; 2576 2577 throttle_lock(&pool->throttle); 2578 thin_defer_bio(tc, bio); 2579 throttle_unlock(&pool->throttle); 2580 } 2581 2582 static void thin_defer_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell) 2583 { 2584 unsigned long flags; 2585 struct pool *pool = tc->pool; 2586 2587 throttle_lock(&pool->throttle); 2588 spin_lock_irqsave(&tc->lock, flags); 2589 list_add_tail(&cell->user_list, &tc->deferred_cells); 2590 spin_unlock_irqrestore(&tc->lock, flags); 2591 throttle_unlock(&pool->throttle); 2592 2593 wake_worker(pool); 2594 } 2595 2596 static void thin_hook_bio(struct thin_c *tc, struct bio *bio) 2597 { 2598 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); 2599 2600 h->tc = tc; 2601 h->shared_read_entry = NULL; 2602 h->all_io_entry = NULL; 2603 h->overwrite_mapping = NULL; 2604 h->cell = NULL; 2605 } 2606 2607 /* 2608 * Non-blocking function called from the thin target's map function. 2609 */ 2610 static int thin_bio_map(struct dm_target *ti, struct bio *bio) 2611 { 2612 int r; 2613 struct thin_c *tc = ti->private; 2614 dm_block_t block = get_bio_block(tc, bio); 2615 struct dm_thin_device *td = tc->td; 2616 struct dm_thin_lookup_result result; 2617 struct dm_bio_prison_cell *virt_cell, *data_cell; 2618 struct dm_cell_key key; 2619 2620 thin_hook_bio(tc, bio); 2621 2622 if (tc->requeue_mode) { 2623 bio->bi_status = BLK_STS_DM_REQUEUE; 2624 bio_endio(bio); 2625 return DM_MAPIO_SUBMITTED; 2626 } 2627 2628 if (get_pool_mode(tc->pool) == PM_FAIL) { 2629 bio_io_error(bio); 2630 return DM_MAPIO_SUBMITTED; 2631 } 2632 2633 if (op_is_flush(bio->bi_opf) || bio_op(bio) == REQ_OP_DISCARD) { 2634 thin_defer_bio_with_throttle(tc, bio); 2635 return DM_MAPIO_SUBMITTED; 2636 } 2637 2638 /* 2639 * We must hold the virtual cell before doing the lookup, otherwise 2640 * there's a race with discard. 2641 */ 2642 build_virtual_key(tc->td, block, &key); 2643 if (bio_detain(tc->pool, &key, bio, &virt_cell)) 2644 return DM_MAPIO_SUBMITTED; 2645 2646 r = dm_thin_find_block(td, block, 0, &result); 2647 2648 /* 2649 * Note that we defer readahead too. 2650 */ 2651 switch (r) { 2652 case 0: 2653 if (unlikely(result.shared)) { 2654 /* 2655 * We have a race condition here between the 2656 * result.shared value returned by the lookup and 2657 * snapshot creation, which may cause new 2658 * sharing. 2659 * 2660 * To avoid this always quiesce the origin before 2661 * taking the snap. You want to do this anyway to 2662 * ensure a consistent application view 2663 * (i.e. lockfs). 2664 * 2665 * More distant ancestors are irrelevant. The 2666 * shared flag will be set in their case. 
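 *
 * For example: freeze the filesystem on the origin (fsfreeze) before
 * sending the create_snap pool message, and unfreeze once the
 * snapshot has been taken.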
2667 */ 2668 thin_defer_cell(tc, virt_cell); 2669 return DM_MAPIO_SUBMITTED; 2670 } 2671 2672 build_data_key(tc->td, result.block, &key); 2673 if (bio_detain(tc->pool, &key, bio, &data_cell)) { 2674 cell_defer_no_holder(tc, virt_cell); 2675 return DM_MAPIO_SUBMITTED; 2676 } 2677 2678 inc_all_io_entry(tc->pool, bio); 2679 cell_defer_no_holder(tc, data_cell); 2680 cell_defer_no_holder(tc, virt_cell); 2681 2682 remap(tc, bio, result.block); 2683 return DM_MAPIO_REMAPPED; 2684 2685 case -ENODATA: 2686 case -EWOULDBLOCK: 2687 thin_defer_cell(tc, virt_cell); 2688 return DM_MAPIO_SUBMITTED; 2689 2690 default: 2691 /* 2692 * Must always call bio_io_error on failure. 2693 * dm_thin_find_block can fail with -EINVAL if the 2694 * pool is switched to fail-io mode. 2695 */ 2696 bio_io_error(bio); 2697 cell_defer_no_holder(tc, virt_cell); 2698 return DM_MAPIO_SUBMITTED; 2699 } 2700 } 2701 2702 static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits) 2703 { 2704 struct pool_c *pt = container_of(cb, struct pool_c, callbacks); 2705 struct request_queue *q; 2706 2707 if (get_pool_mode(pt->pool) == PM_OUT_OF_DATA_SPACE) 2708 return 1; 2709 2710 q = bdev_get_queue(pt->data_dev->bdev); 2711 return bdi_congested(q->backing_dev_info, bdi_bits); 2712 } 2713 2714 static void requeue_bios(struct pool *pool) 2715 { 2716 unsigned long flags; 2717 struct thin_c *tc; 2718 2719 rcu_read_lock(); 2720 list_for_each_entry_rcu(tc, &pool->active_thins, list) { 2721 spin_lock_irqsave(&tc->lock, flags); 2722 bio_list_merge(&tc->deferred_bio_list, &tc->retry_on_resume_list); 2723 bio_list_init(&tc->retry_on_resume_list); 2724 spin_unlock_irqrestore(&tc->lock, flags); 2725 } 2726 rcu_read_unlock(); 2727 } 2728 2729 /*---------------------------------------------------------------- 2730 * Binding of control targets to a pool object 2731 *--------------------------------------------------------------*/ 2732 static bool data_dev_supports_discard(struct pool_c *pt) 2733 { 2734 struct request_queue *q = bdev_get_queue(pt->data_dev->bdev); 2735 2736 return q && blk_queue_discard(q); 2737 } 2738 2739 static bool is_factor(sector_t block_size, uint32_t n) 2740 { 2741 return !sector_div(block_size, n); 2742 } 2743 2744 /* 2745 * If discard_passdown was enabled verify that the data device 2746 * supports discards. Disable discard_passdown if not. 2747 */ 2748 static void disable_passdown_if_not_supported(struct pool_c *pt) 2749 { 2750 struct pool *pool = pt->pool; 2751 struct block_device *data_bdev = pt->data_dev->bdev; 2752 struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits; 2753 const char *reason = NULL; 2754 char buf[BDEVNAME_SIZE]; 2755 2756 if (!pt->adjusted_pf.discard_passdown) 2757 return; 2758 2759 if (!data_dev_supports_discard(pt)) 2760 reason = "discard unsupported"; 2761 2762 else if (data_limits->max_discard_sectors < pool->sectors_per_block) 2763 reason = "max discard sectors smaller than a block"; 2764 2765 if (reason) { 2766 DMWARN("Data device (%s) %s: Disabling discard passdown.", bdevname(data_bdev, buf), reason); 2767 pt->adjusted_pf.discard_passdown = false; 2768 } 2769 } 2770 2771 static int bind_control_target(struct pool *pool, struct dm_target *ti) 2772 { 2773 struct pool_c *pt = ti->private; 2774 2775 /* 2776 * We want to make sure that a pool in PM_FAIL mode is never upgraded. 2777 */ 2778 enum pool_mode old_mode = get_pool_mode(pool); 2779 enum pool_mode new_mode = pt->adjusted_pf.mode; 2780 2781 /* 2782 * Don't change the pool's mode until set_pool_mode() below. 
2783 * Otherwise the pool's process_* function pointers may 2784 * not match the desired pool mode. 2785 */ 2786 pt->adjusted_pf.mode = old_mode; 2787 2788 pool->ti = ti; 2789 pool->pf = pt->adjusted_pf; 2790 pool->low_water_blocks = pt->low_water_blocks; 2791 2792 set_pool_mode(pool, new_mode); 2793 2794 return 0; 2795 } 2796 2797 static void unbind_control_target(struct pool *pool, struct dm_target *ti) 2798 { 2799 if (pool->ti == ti) 2800 pool->ti = NULL; 2801 } 2802 2803 /*---------------------------------------------------------------- 2804 * Pool creation 2805 *--------------------------------------------------------------*/ 2806 /* Initialize pool features. */ 2807 static void pool_features_init(struct pool_features *pf) 2808 { 2809 pf->mode = PM_WRITE; 2810 pf->zero_new_blocks = true; 2811 pf->discard_enabled = true; 2812 pf->discard_passdown = true; 2813 pf->error_if_no_space = false; 2814 } 2815 2816 static void __pool_destroy(struct pool *pool) 2817 { 2818 __pool_table_remove(pool); 2819 2820 vfree(pool->cell_sort_array); 2821 if (dm_pool_metadata_close(pool->pmd) < 0) 2822 DMWARN("%s: dm_pool_metadata_close() failed.", __func__); 2823 2824 dm_bio_prison_destroy(pool->prison); 2825 dm_kcopyd_client_destroy(pool->copier); 2826 2827 if (pool->wq) 2828 destroy_workqueue(pool->wq); 2829 2830 if (pool->next_mapping) 2831 mempool_free(pool->next_mapping, &pool->mapping_pool); 2832 mempool_exit(&pool->mapping_pool); 2833 dm_deferred_set_destroy(pool->shared_read_ds); 2834 dm_deferred_set_destroy(pool->all_io_ds); 2835 kfree(pool); 2836 } 2837 2838 static struct kmem_cache *_new_mapping_cache; 2839 2840 static struct pool *pool_create(struct mapped_device *pool_md, 2841 struct block_device *metadata_dev, 2842 unsigned long block_size, 2843 int read_only, char **error) 2844 { 2845 int r; 2846 void *err_p; 2847 struct pool *pool; 2848 struct dm_pool_metadata *pmd; 2849 bool format_device = read_only ? false : true; 2850 2851 pmd = dm_pool_metadata_open(metadata_dev, block_size, format_device); 2852 if (IS_ERR(pmd)) { 2853 *error = "Error creating metadata object"; 2854 return (struct pool *)pmd; 2855 } 2856 2857 pool = kzalloc(sizeof(*pool), GFP_KERNEL); 2858 if (!pool) { 2859 *error = "Error allocating memory for pool"; 2860 err_p = ERR_PTR(-ENOMEM); 2861 goto bad_pool; 2862 } 2863 2864 pool->pmd = pmd; 2865 pool->sectors_per_block = block_size; 2866 if (block_size & (block_size - 1)) 2867 pool->sectors_per_block_shift = -1; 2868 else 2869 pool->sectors_per_block_shift = __ffs(block_size); 2870 pool->low_water_blocks = 0; 2871 pool_features_init(&pool->pf); 2872 pool->prison = dm_bio_prison_create(); 2873 if (!pool->prison) { 2874 *error = "Error creating pool's bio prison"; 2875 err_p = ERR_PTR(-ENOMEM); 2876 goto bad_prison; 2877 } 2878 2879 pool->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle); 2880 if (IS_ERR(pool->copier)) { 2881 r = PTR_ERR(pool->copier); 2882 *error = "Error creating pool's kcopyd client"; 2883 err_p = ERR_PTR(r); 2884 goto bad_kcopyd_client; 2885 } 2886 2887 /* 2888 * Create singlethreaded workqueue that will service all devices 2889 * that use this metadata. 
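 * An ordered workqueue guarantees that work items never run
 * concurrently, and WQ_MEM_RECLAIM is needed because this worker sits
 * in the IO path and must make progress under memory pressure.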
2890 */ 2891 pool->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM); 2892 if (!pool->wq) { 2893 *error = "Error creating pool's workqueue"; 2894 err_p = ERR_PTR(-ENOMEM); 2895 goto bad_wq; 2896 } 2897 2898 throttle_init(&pool->throttle); 2899 INIT_WORK(&pool->worker, do_worker); 2900 INIT_DELAYED_WORK(&pool->waker, do_waker); 2901 INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout); 2902 spin_lock_init(&pool->lock); 2903 bio_list_init(&pool->deferred_flush_bios); 2904 INIT_LIST_HEAD(&pool->prepared_mappings); 2905 INIT_LIST_HEAD(&pool->prepared_discards); 2906 INIT_LIST_HEAD(&pool->prepared_discards_pt2); 2907 INIT_LIST_HEAD(&pool->active_thins); 2908 pool->low_water_triggered = false; 2909 pool->suspended = true; 2910 pool->out_of_data_space = false; 2911 2912 pool->shared_read_ds = dm_deferred_set_create(); 2913 if (!pool->shared_read_ds) { 2914 *error = "Error creating pool's shared read deferred set"; 2915 err_p = ERR_PTR(-ENOMEM); 2916 goto bad_shared_read_ds; 2917 } 2918 2919 pool->all_io_ds = dm_deferred_set_create(); 2920 if (!pool->all_io_ds) { 2921 *error = "Error creating pool's all io deferred set"; 2922 err_p = ERR_PTR(-ENOMEM); 2923 goto bad_all_io_ds; 2924 } 2925 2926 pool->next_mapping = NULL; 2927 r = mempool_init_slab_pool(&pool->mapping_pool, MAPPING_POOL_SIZE, 2928 _new_mapping_cache); 2929 if (r) { 2930 *error = "Error creating pool's mapping mempool"; 2931 err_p = ERR_PTR(r); 2932 goto bad_mapping_pool; 2933 } 2934 2935 pool->cell_sort_array = 2936 vmalloc(array_size(CELL_SORT_ARRAY_SIZE, 2937 sizeof(*pool->cell_sort_array))); 2938 if (!pool->cell_sort_array) { 2939 *error = "Error allocating cell sort array"; 2940 err_p = ERR_PTR(-ENOMEM); 2941 goto bad_sort_array; 2942 } 2943 2944 pool->ref_count = 1; 2945 pool->last_commit_jiffies = jiffies; 2946 pool->pool_md = pool_md; 2947 pool->md_dev = metadata_dev; 2948 __pool_table_insert(pool); 2949 2950 return pool; 2951 2952 bad_sort_array: 2953 mempool_exit(&pool->mapping_pool); 2954 bad_mapping_pool: 2955 dm_deferred_set_destroy(pool->all_io_ds); 2956 bad_all_io_ds: 2957 dm_deferred_set_destroy(pool->shared_read_ds); 2958 bad_shared_read_ds: 2959 destroy_workqueue(pool->wq); 2960 bad_wq: 2961 dm_kcopyd_client_destroy(pool->copier); 2962 bad_kcopyd_client: 2963 dm_bio_prison_destroy(pool->prison); 2964 bad_prison: 2965 kfree(pool); 2966 bad_pool: 2967 if (dm_pool_metadata_close(pmd)) 2968 DMWARN("%s: dm_pool_metadata_close() failed.", __func__); 2969 2970 return err_p; 2971 } 2972 2973 static void __pool_inc(struct pool *pool) 2974 { 2975 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex)); 2976 pool->ref_count++; 2977 } 2978 2979 static void __pool_dec(struct pool *pool) 2980 { 2981 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex)); 2982 BUG_ON(!pool->ref_count); 2983 if (!--pool->ref_count) 2984 __pool_destroy(pool); 2985 } 2986 2987 static struct pool *__pool_find(struct mapped_device *pool_md, 2988 struct block_device *metadata_dev, 2989 unsigned long block_size, int read_only, 2990 char **error, int *created) 2991 { 2992 struct pool *pool = __pool_table_lookup_metadata_dev(metadata_dev); 2993 2994 if (pool) { 2995 if (pool->pool_md != pool_md) { 2996 *error = "metadata device already in use by a pool"; 2997 return ERR_PTR(-EBUSY); 2998 } 2999 __pool_inc(pool); 3000 3001 } else { 3002 pool = __pool_table_lookup(pool_md); 3003 if (pool) { 3004 if (pool->md_dev != metadata_dev) { 3005 *error = "different pool cannot replace a pool"; 3006 return ERR_PTR(-EINVAL); 3007 } 3008 
__pool_inc(pool); 3009 3010 } else { 3011 pool = pool_create(pool_md, metadata_dev, block_size, read_only, error); 3012 *created = 1; 3013 } 3014 } 3015 3016 return pool; 3017 } 3018 3019 /*---------------------------------------------------------------- 3020 * Pool target methods 3021 *--------------------------------------------------------------*/ 3022 static void pool_dtr(struct dm_target *ti) 3023 { 3024 struct pool_c *pt = ti->private; 3025 3026 mutex_lock(&dm_thin_pool_table.mutex); 3027 3028 unbind_control_target(pt->pool, ti); 3029 __pool_dec(pt->pool); 3030 dm_put_device(ti, pt->metadata_dev); 3031 dm_put_device(ti, pt->data_dev); 3032 kfree(pt); 3033 3034 mutex_unlock(&dm_thin_pool_table.mutex); 3035 } 3036 3037 static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf, 3038 struct dm_target *ti) 3039 { 3040 int r; 3041 unsigned argc; 3042 const char *arg_name; 3043 3044 static const struct dm_arg _args[] = { 3045 {0, 4, "Invalid number of pool feature arguments"}, 3046 }; 3047 3048 /* 3049 * No feature arguments supplied. 3050 */ 3051 if (!as->argc) 3052 return 0; 3053 3054 r = dm_read_arg_group(_args, as, &argc, &ti->error); 3055 if (r) 3056 return -EINVAL; 3057 3058 while (argc && !r) { 3059 arg_name = dm_shift_arg(as); 3060 argc--; 3061 3062 if (!strcasecmp(arg_name, "skip_block_zeroing")) 3063 pf->zero_new_blocks = false; 3064 3065 else if (!strcasecmp(arg_name, "ignore_discard")) 3066 pf->discard_enabled = false; 3067 3068 else if (!strcasecmp(arg_name, "no_discard_passdown")) 3069 pf->discard_passdown = false; 3070 3071 else if (!strcasecmp(arg_name, "read_only")) 3072 pf->mode = PM_READ_ONLY; 3073 3074 else if (!strcasecmp(arg_name, "error_if_no_space")) 3075 pf->error_if_no_space = true; 3076 3077 else { 3078 ti->error = "Unrecognised pool feature requested"; 3079 r = -EINVAL; 3080 break; 3081 } 3082 } 3083 3084 return r; 3085 } 3086 3087 static void metadata_low_callback(void *context) 3088 { 3089 struct pool *pool = context; 3090 3091 DMWARN("%s: reached low water mark for metadata device: sending event.", 3092 dm_device_name(pool->pool_md)); 3093 3094 dm_table_event(pool->ti->table); 3095 } 3096 3097 static sector_t get_dev_size(struct block_device *bdev) 3098 { 3099 return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT; 3100 } 3101 3102 static void warn_if_metadata_device_too_big(struct block_device *bdev) 3103 { 3104 sector_t metadata_dev_size = get_dev_size(bdev); 3105 char buffer[BDEVNAME_SIZE]; 3106 3107 if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING) 3108 DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.", 3109 bdevname(bdev, buffer), THIN_METADATA_MAX_SECTORS); 3110 } 3111 3112 static sector_t get_metadata_dev_size(struct block_device *bdev) 3113 { 3114 sector_t metadata_dev_size = get_dev_size(bdev); 3115 3116 if (metadata_dev_size > THIN_METADATA_MAX_SECTORS) 3117 metadata_dev_size = THIN_METADATA_MAX_SECTORS; 3118 3119 return metadata_dev_size; 3120 } 3121 3122 static dm_block_t get_metadata_dev_size_in_blocks(struct block_device *bdev) 3123 { 3124 sector_t metadata_dev_size = get_metadata_dev_size(bdev); 3125 3126 sector_div(metadata_dev_size, THIN_METADATA_BLOCK_SIZE); 3127 3128 return metadata_dev_size; 3129 } 3130 3131 /* 3132 * When a metadata threshold is crossed a dm event is triggered, and 3133 * userland should respond by growing the metadata device. 
We could let 3134 * userland set the threshold, like we do with the data threshold, but I'm 3135 * not sure they know enough to do this well. 3136 */ 3137 static dm_block_t calc_metadata_threshold(struct pool_c *pt) 3138 { 3139 /* 3140 * 4M is ample for all ops with the possible exception of thin 3141 * device deletion which is harmless if it fails (just retry the 3142 * delete after you've grown the device). 3143 */ 3144 dm_block_t quarter = get_metadata_dev_size_in_blocks(pt->metadata_dev->bdev) / 4; 3145 return min((dm_block_t)1024ULL /* 4M */, quarter); 3146 } 3147 3148 /* 3149 * thin-pool <metadata dev> <data dev> 3150 * <data block size (sectors)> 3151 * <low water mark (blocks)> 3152 * [<#feature args> [<arg>]*] 3153 * 3154 * Optional feature arguments are: 3155 * skip_block_zeroing: skips the zeroing of newly-provisioned blocks. 3156 * ignore_discard: disable discard 3157 * no_discard_passdown: don't pass discards down to the data device 3158 * read_only: Don't allow any changes to be made to the pool metadata. 3159 * error_if_no_space: error IOs, instead of queueing, if no space. 3160 */ 3161 static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv) 3162 { 3163 int r, pool_created = 0; 3164 struct pool_c *pt; 3165 struct pool *pool; 3166 struct pool_features pf; 3167 struct dm_arg_set as; 3168 struct dm_dev *data_dev; 3169 unsigned long block_size; 3170 dm_block_t low_water_blocks; 3171 struct dm_dev *metadata_dev; 3172 fmode_t metadata_mode; 3173 3174 /* 3175 * FIXME Remove validation from scope of lock. 3176 */ 3177 mutex_lock(&dm_thin_pool_table.mutex); 3178 3179 if (argc < 4) { 3180 ti->error = "Invalid argument count"; 3181 r = -EINVAL; 3182 goto out_unlock; 3183 } 3184 3185 as.argc = argc; 3186 as.argv = argv; 3187 3188 /* 3189 * Set default pool features. 3190 */ 3191 pool_features_init(&pf); 3192 3193 dm_consume_args(&as, 4); 3194 r = parse_pool_features(&as, &pf, ti); 3195 if (r) 3196 goto out_unlock; 3197 3198 metadata_mode = FMODE_READ | ((pf.mode == PM_READ_ONLY) ? 0 : FMODE_WRITE); 3199 r = dm_get_device(ti, argv[0], metadata_mode, &metadata_dev); 3200 if (r) { 3201 ti->error = "Error opening metadata block device"; 3202 goto out_unlock; 3203 } 3204 warn_if_metadata_device_too_big(metadata_dev->bdev); 3205 3206 r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev); 3207 if (r) { 3208 ti->error = "Error getting data device"; 3209 goto out_metadata; 3210 } 3211 3212 if (kstrtoul(argv[2], 10, &block_size) || !block_size || 3213 block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS || 3214 block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS || 3215 block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) { 3216 ti->error = "Invalid block size"; 3217 r = -EINVAL; 3218 goto out; 3219 } 3220 3221 if (kstrtoull(argv[3], 10, (unsigned long long *)&low_water_blocks)) { 3222 ti->error = "Invalid low water mark"; 3223 r = -EINVAL; 3224 goto out; 3225 } 3226 3227 pt = kzalloc(sizeof(*pt), GFP_KERNEL); 3228 if (!pt) { 3229 r = -ENOMEM; 3230 goto out; 3231 } 3232 3233 pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev, 3234 block_size, pf.mode == PM_READ_ONLY, &ti->error, &pool_created); 3235 if (IS_ERR(pool)) { 3236 r = PTR_ERR(pool); 3237 goto out_free_pt; 3238 } 3239 3240 /* 3241 * 'pool_created' reflects whether this is the first table load. 3242 * Top level discard support is not allowed to be changed after 3243 * initial load. This would require a pool reload to trigger thin 3244 * device changes. 
3245 */ 3246 if (!pool_created && pf.discard_enabled != pool->pf.discard_enabled) { 3247 ti->error = "Discard support cannot be disabled once enabled"; 3248 r = -EINVAL; 3249 goto out_flags_changed; 3250 } 3251 3252 pt->pool = pool; 3253 pt->ti = ti; 3254 pt->metadata_dev = metadata_dev; 3255 pt->data_dev = data_dev; 3256 pt->low_water_blocks = low_water_blocks; 3257 pt->adjusted_pf = pt->requested_pf = pf; 3258 ti->num_flush_bios = 1; 3259 3260 /* 3261 * Only need to enable discards if the pool should pass 3262 * them down to the data device. The thin device's discard 3263 * processing will cause mappings to be removed from the btree. 3264 */ 3265 if (pf.discard_enabled && pf.discard_passdown) { 3266 ti->num_discard_bios = 1; 3267 3268 /* 3269 * Setting 'discards_supported' circumvents the normal 3270 * stacking of discard limits (this keeps the pool and 3271 * thin devices' discard limits consistent). 3272 */ 3273 ti->discards_supported = true; 3274 } 3275 ti->private = pt; 3276 3277 r = dm_pool_register_metadata_threshold(pt->pool->pmd, 3278 calc_metadata_threshold(pt), 3279 metadata_low_callback, 3280 pool); 3281 if (r) 3282 goto out_flags_changed; 3283 3284 pt->callbacks.congested_fn = pool_is_congested; 3285 dm_table_add_target_callbacks(ti->table, &pt->callbacks); 3286 3287 mutex_unlock(&dm_thin_pool_table.mutex); 3288 3289 return 0; 3290 3291 out_flags_changed: 3292 __pool_dec(pool); 3293 out_free_pt: 3294 kfree(pt); 3295 out: 3296 dm_put_device(ti, data_dev); 3297 out_metadata: 3298 dm_put_device(ti, metadata_dev); 3299 out_unlock: 3300 mutex_unlock(&dm_thin_pool_table.mutex); 3301 3302 return r; 3303 } 3304 3305 static int pool_map(struct dm_target *ti, struct bio *bio) 3306 { 3307 int r; 3308 struct pool_c *pt = ti->private; 3309 struct pool *pool = pt->pool; 3310 unsigned long flags; 3311 3312 /* 3313 * As this is a singleton target, ti->begin is always zero. 
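 * All bios submitted to the pool device itself are simply remapped to
 * the data device; the thin-provisioning logic lives in the thin
 * target's thin_bio_map() above.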
3314 */ 3315 spin_lock_irqsave(&pool->lock, flags); 3316 bio_set_dev(bio, pt->data_dev->bdev); 3317 r = DM_MAPIO_REMAPPED; 3318 spin_unlock_irqrestore(&pool->lock, flags); 3319 3320 return r; 3321 } 3322 3323 static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit) 3324 { 3325 int r; 3326 struct pool_c *pt = ti->private; 3327 struct pool *pool = pt->pool; 3328 sector_t data_size = ti->len; 3329 dm_block_t sb_data_size; 3330 3331 *need_commit = false; 3332 3333 (void) sector_div(data_size, pool->sectors_per_block); 3334 3335 r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size); 3336 if (r) { 3337 DMERR("%s: failed to retrieve data device size", 3338 dm_device_name(pool->pool_md)); 3339 return r; 3340 } 3341 3342 if (data_size < sb_data_size) { 3343 DMERR("%s: pool target (%llu blocks) too small: expected %llu", 3344 dm_device_name(pool->pool_md), 3345 (unsigned long long)data_size, sb_data_size); 3346 return -EINVAL; 3347 3348 } else if (data_size > sb_data_size) { 3349 if (dm_pool_metadata_needs_check(pool->pmd)) { 3350 DMERR("%s: unable to grow the data device until repaired.", 3351 dm_device_name(pool->pool_md)); 3352 return 0; 3353 } 3354 3355 if (sb_data_size) 3356 DMINFO("%s: growing the data device from %llu to %llu blocks", 3357 dm_device_name(pool->pool_md), 3358 sb_data_size, (unsigned long long)data_size); 3359 r = dm_pool_resize_data_dev(pool->pmd, data_size); 3360 if (r) { 3361 metadata_operation_failed(pool, "dm_pool_resize_data_dev", r); 3362 return r; 3363 } 3364 3365 *need_commit = true; 3366 } 3367 3368 return 0; 3369 } 3370 3371 static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit) 3372 { 3373 int r; 3374 struct pool_c *pt = ti->private; 3375 struct pool *pool = pt->pool; 3376 dm_block_t metadata_dev_size, sb_metadata_dev_size; 3377 3378 *need_commit = false; 3379 3380 metadata_dev_size = get_metadata_dev_size_in_blocks(pool->md_dev); 3381 3382 r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size); 3383 if (r) { 3384 DMERR("%s: failed to retrieve metadata device size", 3385 dm_device_name(pool->pool_md)); 3386 return r; 3387 } 3388 3389 if (metadata_dev_size < sb_metadata_dev_size) { 3390 DMERR("%s: metadata device (%llu blocks) too small: expected %llu", 3391 dm_device_name(pool->pool_md), 3392 metadata_dev_size, sb_metadata_dev_size); 3393 return -EINVAL; 3394 3395 } else if (metadata_dev_size > sb_metadata_dev_size) { 3396 if (dm_pool_metadata_needs_check(pool->pmd)) { 3397 DMERR("%s: unable to grow the metadata device until repaired.", 3398 dm_device_name(pool->pool_md)); 3399 return 0; 3400 } 3401 3402 warn_if_metadata_device_too_big(pool->md_dev); 3403 DMINFO("%s: growing the metadata device from %llu to %llu blocks", 3404 dm_device_name(pool->pool_md), 3405 sb_metadata_dev_size, metadata_dev_size); 3406 r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size); 3407 if (r) { 3408 metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r); 3409 return r; 3410 } 3411 3412 *need_commit = true; 3413 } 3414 3415 return 0; 3416 } 3417 3418 /* 3419 * Retrieves the number of blocks of the data device from 3420 * the superblock and compares it to the actual device size, 3421 * thus resizing the data device in case it has grown. 3422 * 3423 * This both copes with opening preallocated data devices in the ctr 3424 * being followed by a resume 3425 * -and- 3426 * calling the resume method individually after userspace has 3427 * grown the data device in reaction to a table event. 
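 *
 * A typical userspace reaction to the event is: grow the underlying
 * data device, then reload the pool table with the new length
 * (dmsetup suspend/reload/resume), at which point pool_preresume()
 * picks up the larger size via maybe_resize_data_dev().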
3428 */ 3429 static int pool_preresume(struct dm_target *ti) 3430 { 3431 int r; 3432 bool need_commit1, need_commit2; 3433 struct pool_c *pt = ti->private; 3434 struct pool *pool = pt->pool; 3435 3436 /* 3437 * Take control of the pool object. 3438 */ 3439 r = bind_control_target(pool, ti); 3440 if (r) 3441 return r; 3442 3443 r = maybe_resize_data_dev(ti, &need_commit1); 3444 if (r) 3445 return r; 3446 3447 r = maybe_resize_metadata_dev(ti, &need_commit2); 3448 if (r) 3449 return r; 3450 3451 if (need_commit1 || need_commit2) 3452 (void) commit(pool); 3453 3454 return 0; 3455 } 3456 3457 static void pool_suspend_active_thins(struct pool *pool) 3458 { 3459 struct thin_c *tc; 3460 3461 /* Suspend all active thin devices */ 3462 tc = get_first_thin(pool); 3463 while (tc) { 3464 dm_internal_suspend_noflush(tc->thin_md); 3465 tc = get_next_thin(pool, tc); 3466 } 3467 } 3468 3469 static void pool_resume_active_thins(struct pool *pool) 3470 { 3471 struct thin_c *tc; 3472 3473 /* Resume all active thin devices */ 3474 tc = get_first_thin(pool); 3475 while (tc) { 3476 dm_internal_resume(tc->thin_md); 3477 tc = get_next_thin(pool, tc); 3478 } 3479 } 3480 3481 static void pool_resume(struct dm_target *ti) 3482 { 3483 struct pool_c *pt = ti->private; 3484 struct pool *pool = pt->pool; 3485 unsigned long flags; 3486 3487 /* 3488 * Must requeue active_thins' bios and then resume 3489 * active_thins _before_ clearing 'suspend' flag. 3490 */ 3491 requeue_bios(pool); 3492 pool_resume_active_thins(pool); 3493 3494 spin_lock_irqsave(&pool->lock, flags); 3495 pool->low_water_triggered = false; 3496 pool->suspended = false; 3497 spin_unlock_irqrestore(&pool->lock, flags); 3498 3499 do_waker(&pool->waker.work); 3500 } 3501 3502 static void pool_presuspend(struct dm_target *ti) 3503 { 3504 struct pool_c *pt = ti->private; 3505 struct pool *pool = pt->pool; 3506 unsigned long flags; 3507 3508 spin_lock_irqsave(&pool->lock, flags); 3509 pool->suspended = true; 3510 spin_unlock_irqrestore(&pool->lock, flags); 3511 3512 pool_suspend_active_thins(pool); 3513 } 3514 3515 static void pool_presuspend_undo(struct dm_target *ti) 3516 { 3517 struct pool_c *pt = ti->private; 3518 struct pool *pool = pt->pool; 3519 unsigned long flags; 3520 3521 pool_resume_active_thins(pool); 3522 3523 spin_lock_irqsave(&pool->lock, flags); 3524 pool->suspended = false; 3525 spin_unlock_irqrestore(&pool->lock, flags); 3526 } 3527 3528 static void pool_postsuspend(struct dm_target *ti) 3529 { 3530 struct pool_c *pt = ti->private; 3531 struct pool *pool = pt->pool; 3532 3533 cancel_delayed_work_sync(&pool->waker); 3534 cancel_delayed_work_sync(&pool->no_space_timeout); 3535 flush_workqueue(pool->wq); 3536 (void) commit(pool); 3537 } 3538 3539 static int check_arg_count(unsigned argc, unsigned args_required) 3540 { 3541 if (argc != args_required) { 3542 DMWARN("Message received with %u arguments instead of %u.", 3543 argc, args_required); 3544 return -EINVAL; 3545 } 3546 3547 return 0; 3548 } 3549 3550 static int read_dev_id(char *arg, dm_thin_id *dev_id, int warning) 3551 { 3552 if (!kstrtoull(arg, 10, (unsigned long long *)dev_id) && 3553 *dev_id <= MAX_DEV_ID) 3554 return 0; 3555 3556 if (warning) 3557 DMWARN("Message received with invalid device id: %s", arg); 3558 3559 return -EINVAL; 3560 } 3561 3562 static int process_create_thin_mesg(unsigned argc, char **argv, struct pool *pool) 3563 { 3564 dm_thin_id dev_id; 3565 int r; 3566 3567 r = check_arg_count(argc, 2); 3568 if (r) 3569 return r; 3570 3571 r = read_dev_id(argv[1], &dev_id, 1); 
3572 if (r) 3573 return r; 3574 3575 r = dm_pool_create_thin(pool->pmd, dev_id); 3576 if (r) { 3577 DMWARN("Creation of new thinly-provisioned device with id %s failed.", 3578 argv[1]); 3579 return r; 3580 } 3581 3582 return 0; 3583 } 3584 3585 static int process_create_snap_mesg(unsigned argc, char **argv, struct pool *pool) 3586 { 3587 dm_thin_id dev_id; 3588 dm_thin_id origin_dev_id; 3589 int r; 3590 3591 r = check_arg_count(argc, 3); 3592 if (r) 3593 return r; 3594 3595 r = read_dev_id(argv[1], &dev_id, 1); 3596 if (r) 3597 return r; 3598 3599 r = read_dev_id(argv[2], &origin_dev_id, 1); 3600 if (r) 3601 return r; 3602 3603 r = dm_pool_create_snap(pool->pmd, dev_id, origin_dev_id); 3604 if (r) { 3605 DMWARN("Creation of new snapshot %s of device %s failed.", 3606 argv[1], argv[2]); 3607 return r; 3608 } 3609 3610 return 0; 3611 } 3612 3613 static int process_delete_mesg(unsigned argc, char **argv, struct pool *pool) 3614 { 3615 dm_thin_id dev_id; 3616 int r; 3617 3618 r = check_arg_count(argc, 2); 3619 if (r) 3620 return r; 3621 3622 r = read_dev_id(argv[1], &dev_id, 1); 3623 if (r) 3624 return r; 3625 3626 r = dm_pool_delete_thin_device(pool->pmd, dev_id); 3627 if (r) 3628 DMWARN("Deletion of thin device %s failed.", argv[1]); 3629 3630 return r; 3631 } 3632 3633 static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct pool *pool) 3634 { 3635 dm_thin_id old_id, new_id; 3636 int r; 3637 3638 r = check_arg_count(argc, 3); 3639 if (r) 3640 return r; 3641 3642 if (kstrtoull(argv[1], 10, (unsigned long long *)&old_id)) { 3643 DMWARN("set_transaction_id message: Unrecognised id %s.", argv[1]); 3644 return -EINVAL; 3645 } 3646 3647 if (kstrtoull(argv[2], 10, (unsigned long long *)&new_id)) { 3648 DMWARN("set_transaction_id message: Unrecognised new id %s.", argv[2]); 3649 return -EINVAL; 3650 } 3651 3652 r = dm_pool_set_metadata_transaction_id(pool->pmd, old_id, new_id); 3653 if (r) { 3654 DMWARN("Failed to change transaction id from %s to %s.", 3655 argv[1], argv[2]); 3656 return r; 3657 } 3658 3659 return 0; 3660 } 3661 3662 static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool) 3663 { 3664 int r; 3665 3666 r = check_arg_count(argc, 1); 3667 if (r) 3668 return r; 3669 3670 (void) commit(pool); 3671 3672 r = dm_pool_reserve_metadata_snap(pool->pmd); 3673 if (r) 3674 DMWARN("reserve_metadata_snap message failed."); 3675 3676 return r; 3677 } 3678 3679 static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool) 3680 { 3681 int r; 3682 3683 r = check_arg_count(argc, 1); 3684 if (r) 3685 return r; 3686 3687 r = dm_pool_release_metadata_snap(pool->pmd); 3688 if (r) 3689 DMWARN("release_metadata_snap message failed."); 3690 3691 return r; 3692 } 3693 3694 /* 3695 * Messages supported: 3696 * create_thin <dev_id> 3697 * create_snap <dev_id> <origin_id> 3698 * delete <dev_id> 3699 * set_transaction_id <current_trans_id> <new_trans_id> 3700 * reserve_metadata_snap 3701 * release_metadata_snap 3702 */ 3703 static int pool_message(struct dm_target *ti, unsigned argc, char **argv, 3704 char *result, unsigned maxlen) 3705 { 3706 int r = -EINVAL; 3707 struct pool_c *pt = ti->private; 3708 struct pool *pool = pt->pool; 3709 3710 if (get_pool_mode(pool) >= PM_READ_ONLY) { 3711 DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode", 3712 dm_device_name(pool->pool_md)); 3713 return -EOPNOTSUPP; 3714 } 3715 3716 if (!strcasecmp(argv[0], "create_thin")) 3717 r = process_create_thin_mesg(argc, argv, 
pool); 3718 3719 else if (!strcasecmp(argv[0], "create_snap")) 3720 r = process_create_snap_mesg(argc, argv, pool); 3721 3722 else if (!strcasecmp(argv[0], "delete")) 3723 r = process_delete_mesg(argc, argv, pool); 3724 3725 else if (!strcasecmp(argv[0], "set_transaction_id")) 3726 r = process_set_transaction_id_mesg(argc, argv, pool); 3727 3728 else if (!strcasecmp(argv[0], "reserve_metadata_snap")) 3729 r = process_reserve_metadata_snap_mesg(argc, argv, pool); 3730 3731 else if (!strcasecmp(argv[0], "release_metadata_snap")) 3732 r = process_release_metadata_snap_mesg(argc, argv, pool); 3733 3734 else 3735 DMWARN("Unrecognised thin pool target message received: %s", argv[0]); 3736 3737 if (!r) 3738 (void) commit(pool); 3739 3740 return r; 3741 } 3742 3743 static void emit_flags(struct pool_features *pf, char *result, 3744 unsigned sz, unsigned maxlen) 3745 { 3746 unsigned count = !pf->zero_new_blocks + !pf->discard_enabled + 3747 !pf->discard_passdown + (pf->mode == PM_READ_ONLY) + 3748 pf->error_if_no_space; 3749 DMEMIT("%u ", count); 3750 3751 if (!pf->zero_new_blocks) 3752 DMEMIT("skip_block_zeroing "); 3753 3754 if (!pf->discard_enabled) 3755 DMEMIT("ignore_discard "); 3756 3757 if (!pf->discard_passdown) 3758 DMEMIT("no_discard_passdown "); 3759 3760 if (pf->mode == PM_READ_ONLY) 3761 DMEMIT("read_only "); 3762 3763 if (pf->error_if_no_space) 3764 DMEMIT("error_if_no_space "); 3765 } 3766 3767 /* 3768 * Status line is: 3769 * <transaction id> <used metadata sectors>/<total metadata sectors> 3770 * <used data sectors>/<total data sectors> <held metadata root> 3771 * <pool mode> <discard config> <no space config> <needs_check> 3772 */ 3773 static void pool_status(struct dm_target *ti, status_type_t type, 3774 unsigned status_flags, char *result, unsigned maxlen) 3775 { 3776 int r; 3777 unsigned sz = 0; 3778 uint64_t transaction_id; 3779 dm_block_t nr_free_blocks_data; 3780 dm_block_t nr_free_blocks_metadata; 3781 dm_block_t nr_blocks_data; 3782 dm_block_t nr_blocks_metadata; 3783 dm_block_t held_root; 3784 char buf[BDEVNAME_SIZE]; 3785 char buf2[BDEVNAME_SIZE]; 3786 struct pool_c *pt = ti->private; 3787 struct pool *pool = pt->pool; 3788 3789 switch (type) { 3790 case STATUSTYPE_INFO: 3791 if (get_pool_mode(pool) == PM_FAIL) { 3792 DMEMIT("Fail"); 3793 break; 3794 } 3795 3796 /* Commit to ensure statistics aren't out-of-date */ 3797 if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti)) 3798 (void) commit(pool); 3799 3800 r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id); 3801 if (r) { 3802 DMERR("%s: dm_pool_get_metadata_transaction_id returned %d", 3803 dm_device_name(pool->pool_md), r); 3804 goto err; 3805 } 3806 3807 r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free_blocks_metadata); 3808 if (r) { 3809 DMERR("%s: dm_pool_get_free_metadata_block_count returned %d", 3810 dm_device_name(pool->pool_md), r); 3811 goto err; 3812 } 3813 3814 r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata); 3815 if (r) { 3816 DMERR("%s: dm_pool_get_metadata_dev_size returned %d", 3817 dm_device_name(pool->pool_md), r); 3818 goto err; 3819 } 3820 3821 r = dm_pool_get_free_block_count(pool->pmd, &nr_free_blocks_data); 3822 if (r) { 3823 DMERR("%s: dm_pool_get_free_block_count returned %d", 3824 dm_device_name(pool->pool_md), r); 3825 goto err; 3826 } 3827 3828 r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data); 3829 if (r) { 3830 DMERR("%s: dm_pool_get_data_dev_size returned %d", 3831 dm_device_name(pool->pool_md), r); 3832 goto err; 3833 } 

	r = dm_pool_get_metadata_snap(pool->pmd, &held_root);
	if (r) {
		DMERR("%s: dm_pool_get_metadata_snap returned %d",
		      dm_device_name(pool->pool_md), r);
		goto err;
	}

	DMEMIT("%llu %llu/%llu %llu/%llu ",
	       (unsigned long long)transaction_id,
	       (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
	       (unsigned long long)nr_blocks_metadata,
	       (unsigned long long)(nr_blocks_data - nr_free_blocks_data),
	       (unsigned long long)nr_blocks_data);

	if (held_root)
		DMEMIT("%llu ", held_root);
	else
		DMEMIT("- ");

	if (pool->pf.mode == PM_OUT_OF_DATA_SPACE)
		DMEMIT("out_of_data_space ");
	else if (pool->pf.mode == PM_READ_ONLY)
		DMEMIT("ro ");
	else
		DMEMIT("rw ");

	if (!pool->pf.discard_enabled)
		DMEMIT("ignore_discard ");
	else if (pool->pf.discard_passdown)
		DMEMIT("discard_passdown ");
	else
		DMEMIT("no_discard_passdown ");

	if (pool->pf.error_if_no_space)
		DMEMIT("error_if_no_space ");
	else
		DMEMIT("queue_if_no_space ");

	if (dm_pool_metadata_needs_check(pool->pmd))
		DMEMIT("needs_check ");
	else
		DMEMIT("- ");

	DMEMIT("%llu ", (unsigned long long)calc_metadata_threshold(pt));

	break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s %s %lu %llu ",
		       format_dev_t(buf, pt->metadata_dev->bdev->bd_dev),
		       format_dev_t(buf2, pt->data_dev->bdev->bd_dev),
		       (unsigned long)pool->sectors_per_block,
		       (unsigned long long)pt->low_water_blocks);
		emit_flags(&pt->requested_pf, result, sz, maxlen);
		break;
	}
	return;

err:
	DMEMIT("Error");
}

static int pool_iterate_devices(struct dm_target *ti,
				iterate_devices_callout_fn fn, void *data)
{
	struct pool_c *pt = ti->private;

	return fn(ti, pt->data_dev, 0, ti->len, data);
}

static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;
	sector_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;

	/*
	 * If max_sectors is smaller than pool->sectors_per_block adjust it
	 * to the highest possible power-of-2 factor of pool->sectors_per_block.
	 * This is especially beneficial when the pool's data device is a RAID
	 * device with a full stripe width that matches pool->sectors_per_block
	 * -- because even though partial RAID stripe-sized IOs will be issued
	 * to a single RAID stripe, when aggregated they will end on a full
	 * RAID stripe boundary, which avoids additional partial RAID stripe
	 * writes cascading.
	 */
	if (limits->max_sectors < pool->sectors_per_block) {
		while (!is_factor(pool->sectors_per_block, limits->max_sectors)) {
			if ((limits->max_sectors & (limits->max_sectors - 1)) == 0)
				limits->max_sectors--;
			limits->max_sectors = rounddown_pow_of_two(limits->max_sectors);
		}
	}

	/*
	 * If the system-determined stacked limits are compatible with the
	 * pool's blocksize (io_opt is a factor) do not override them.
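 *
 * e.g. with a 1MiB pool block (sectors_per_block == 2048), a stacked
 * io_opt of 2MiB (4096 sectors) is compatible and left alone, whereas
 * 512KiB (smaller than a block) or 1.5MiB (not a multiple of the
 * block size) would be overridden below.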
3931 */ 3932 if (io_opt_sectors < pool->sectors_per_block || 3933 !is_factor(io_opt_sectors, pool->sectors_per_block)) { 3934 if (is_factor(pool->sectors_per_block, limits->max_sectors)) 3935 blk_limits_io_min(limits, limits->max_sectors << SECTOR_SHIFT); 3936 else 3937 blk_limits_io_min(limits, pool->sectors_per_block << SECTOR_SHIFT); 3938 blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT); 3939 } 3940 3941 /* 3942 * pt->adjusted_pf is a staging area for the actual features to use. 3943 * They get transferred to the live pool in bind_control_target() 3944 * called from pool_preresume(). 3945 */ 3946 if (!pt->adjusted_pf.discard_enabled) { 3947 /* 3948 * Must explicitly disallow stacking discard limits otherwise the 3949 * block layer will stack them if pool's data device has support. 3950 * QUEUE_FLAG_DISCARD wouldn't be set but there is no way for the 3951 * user to see that, so make sure to set all discard limits to 0. 3952 */ 3953 limits->discard_granularity = 0; 3954 return; 3955 } 3956 3957 disable_passdown_if_not_supported(pt); 3958 3959 /* 3960 * The pool uses the same discard limits as the underlying data 3961 * device. DM core has already set this up. 3962 */ 3963 } 3964 3965 static struct target_type pool_target = { 3966 .name = "thin-pool", 3967 .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE | 3968 DM_TARGET_IMMUTABLE, 3969 .version = {1, 20, 0}, 3970 .module = THIS_MODULE, 3971 .ctr = pool_ctr, 3972 .dtr = pool_dtr, 3973 .map = pool_map, 3974 .presuspend = pool_presuspend, 3975 .presuspend_undo = pool_presuspend_undo, 3976 .postsuspend = pool_postsuspend, 3977 .preresume = pool_preresume, 3978 .resume = pool_resume, 3979 .message = pool_message, 3980 .status = pool_status, 3981 .iterate_devices = pool_iterate_devices, 3982 .io_hints = pool_io_hints, 3983 }; 3984 3985 /*---------------------------------------------------------------- 3986 * Thin target methods 3987 *--------------------------------------------------------------*/ 3988 static void thin_get(struct thin_c *tc) 3989 { 3990 atomic_inc(&tc->refcount); 3991 } 3992 3993 static void thin_put(struct thin_c *tc) 3994 { 3995 if (atomic_dec_and_test(&tc->refcount)) 3996 complete(&tc->can_destroy); 3997 } 3998 3999 static void thin_dtr(struct dm_target *ti) 4000 { 4001 struct thin_c *tc = ti->private; 4002 unsigned long flags; 4003 4004 spin_lock_irqsave(&tc->pool->lock, flags); 4005 list_del_rcu(&tc->list); 4006 spin_unlock_irqrestore(&tc->pool->lock, flags); 4007 synchronize_rcu(); 4008 4009 thin_put(tc); 4010 wait_for_completion(&tc->can_destroy); 4011 4012 mutex_lock(&dm_thin_pool_table.mutex); 4013 4014 __pool_dec(tc->pool); 4015 dm_pool_close_thin_device(tc->td); 4016 dm_put_device(ti, tc->pool_dev); 4017 if (tc->origin_dev) 4018 dm_put_device(ti, tc->origin_dev); 4019 kfree(tc); 4020 4021 mutex_unlock(&dm_thin_pool_table.mutex); 4022 } 4023 4024 /* 4025 * Thin target parameters: 4026 * 4027 * <pool_dev> <dev_id> [origin_dev] 4028 * 4029 * pool_dev: the path to the pool (eg, /dev/mapper/my_pool) 4030 * dev_id: the internal device identifier 4031 * origin_dev: a device external to the pool that should act as the origin 4032 * 4033 * If the pool device has discards disabled, they get disabled for the thin 4034 * device as well. 
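 *
 * An illustrative table line, activating a 1GiB thin device with
 * device id 0 (device names and sizes are examples only):
 *
 *	dmsetup create thin --table "0 2097152 thin /dev/mapper/pool 0"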

static struct target_type pool_target = {
	.name = "thin-pool",
	.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
		    DM_TARGET_IMMUTABLE,
	.version = {1, 20, 0},
	.module = THIS_MODULE,
	.ctr = pool_ctr,
	.dtr = pool_dtr,
	.map = pool_map,
	.presuspend = pool_presuspend,
	.presuspend_undo = pool_presuspend_undo,
	.postsuspend = pool_postsuspend,
	.preresume = pool_preresume,
	.resume = pool_resume,
	.message = pool_message,
	.status = pool_status,
	.iterate_devices = pool_iterate_devices,
	.io_hints = pool_io_hints,
};

/*----------------------------------------------------------------
 * Thin target methods
 *--------------------------------------------------------------*/
static void thin_get(struct thin_c *tc)
{
	atomic_inc(&tc->refcount);
}

static void thin_put(struct thin_c *tc)
{
	if (atomic_dec_and_test(&tc->refcount))
		complete(&tc->can_destroy);
}

static void thin_dtr(struct dm_target *ti)
{
	struct thin_c *tc = ti->private;
	unsigned long flags;

	spin_lock_irqsave(&tc->pool->lock, flags);
	list_del_rcu(&tc->list);
	spin_unlock_irqrestore(&tc->pool->lock, flags);
	synchronize_rcu();

	thin_put(tc);
	wait_for_completion(&tc->can_destroy);

	mutex_lock(&dm_thin_pool_table.mutex);

	__pool_dec(tc->pool);
	dm_pool_close_thin_device(tc->td);
	dm_put_device(ti, tc->pool_dev);
	if (tc->origin_dev)
		dm_put_device(ti, tc->origin_dev);
	kfree(tc);

	mutex_unlock(&dm_thin_pool_table.mutex);
}

/*
 * Thin target parameters:
 *
 * <pool_dev> <dev_id> [origin_dev]
 *
 * pool_dev: the path to the pool (e.g. /dev/mapper/my_pool)
 * dev_id: the internal device identifier
 * origin_dev: a device external to the pool that should act as the origin
 *
 * If the pool device has discards disabled, they get disabled for the thin
 * device as well.
 */
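
/*
 * Illustrative usage (device names and sizes made up): a thin device is
 * first created inside the pool with a message, then activated with a thin
 * target table that references the pool and the device id:
 *
 *   dmsetup message /dev/mapper/my_pool 0 "create_thin 0"
 *   dmsetup create my_thin --table "0 2097152 thin /dev/mapper/my_pool 0"
 *
 * An external-origin snapshot passes the origin as the optional third
 * argument.
 */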

static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	int r;
	struct thin_c *tc;
	struct dm_dev *pool_dev, *origin_dev;
	struct mapped_device *pool_md;
	unsigned long flags;

	mutex_lock(&dm_thin_pool_table.mutex);

	if (argc != 2 && argc != 3) {
		ti->error = "Invalid argument count";
		r = -EINVAL;
		goto out_unlock;
	}

	tc = ti->private = kzalloc(sizeof(*tc), GFP_KERNEL);
	if (!tc) {
		ti->error = "Out of memory";
		r = -ENOMEM;
		goto out_unlock;
	}
	tc->thin_md = dm_table_get_md(ti->table);
	spin_lock_init(&tc->lock);
	INIT_LIST_HEAD(&tc->deferred_cells);
	bio_list_init(&tc->deferred_bio_list);
	bio_list_init(&tc->retry_on_resume_list);
	tc->sort_bio_list = RB_ROOT;

	if (argc == 3) {
		r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev);
		if (r) {
			ti->error = "Error opening origin device";
			goto bad_origin_dev;
		}
		tc->origin_dev = origin_dev;
	}

	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &pool_dev);
	if (r) {
		ti->error = "Error opening pool device";
		goto bad_pool_dev;
	}
	tc->pool_dev = pool_dev;

	if (read_dev_id(argv[1], (unsigned long long *)&tc->dev_id, 0)) {
		ti->error = "Invalid device id";
		r = -EINVAL;
		goto bad_common;
	}

	pool_md = dm_get_md(tc->pool_dev->bdev->bd_dev);
	if (!pool_md) {
		ti->error = "Couldn't get pool mapped device";
		r = -EINVAL;
		goto bad_common;
	}

	tc->pool = __pool_table_lookup(pool_md);
	if (!tc->pool) {
		ti->error = "Couldn't find pool object";
		r = -EINVAL;
		goto bad_pool_lookup;
	}
	__pool_inc(tc->pool);

	if (get_pool_mode(tc->pool) == PM_FAIL) {
		ti->error = "Couldn't open thin device, Pool is in fail mode";
		r = -EINVAL;
		goto bad_pool;
	}

	r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td);
	if (r) {
		ti->error = "Couldn't open thin internal device";
		goto bad_pool;
	}

	r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block);
	if (r)
		goto bad;

	ti->num_flush_bios = 1;
	ti->flush_supported = true;
	ti->per_io_data_size = sizeof(struct dm_thin_endio_hook);

	/* In case the pool supports discards, pass them on. */
	if (tc->pool->pf.discard_enabled) {
		ti->discards_supported = true;
		ti->num_discard_bios = 1;
		ti->split_discard_bios = false;
	}

	mutex_unlock(&dm_thin_pool_table.mutex);

	spin_lock_irqsave(&tc->pool->lock, flags);
	if (tc->pool->suspended) {
		spin_unlock_irqrestore(&tc->pool->lock, flags);
		mutex_lock(&dm_thin_pool_table.mutex); /* reacquire for __pool_dec */
		ti->error = "Unable to activate thin device while pool is suspended";
		r = -EINVAL;
		goto bad;
	}
	atomic_set(&tc->refcount, 1);
	init_completion(&tc->can_destroy);
	list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
	spin_unlock_irqrestore(&tc->pool->lock, flags);
	/*
	 * This synchronize_rcu() call is needed here, otherwise we risk a
	 * wake_worker() call finding no bios to process (because the newly
	 * added tc isn't yet visible).  So this reduces latency since we
	 * aren't then dependent on the periodic commit to wake_worker().
	 */
	synchronize_rcu();

	dm_put(pool_md);

	return 0;

bad:
	dm_pool_close_thin_device(tc->td);
bad_pool:
	__pool_dec(tc->pool);
bad_pool_lookup:
	dm_put(pool_md);
bad_common:
	dm_put_device(ti, tc->pool_dev);
bad_pool_dev:
	if (tc->origin_dev)
		dm_put_device(ti, tc->origin_dev);
bad_origin_dev:
	kfree(tc);
out_unlock:
	mutex_unlock(&dm_thin_pool_table.mutex);

	return r;
}

static int thin_map(struct dm_target *ti, struct bio *bio)
{
	bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);

	return thin_bio_map(ti, bio);
}
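
/*
 * Note on thin_map(): dm_target_offset() rebases bi_sector against the
 * start of this target within the table (bi_sector - ti->begin), so
 * thin_bio_map() always sees sectors relative to the thin device itself.
 * Illustrative numbers: a bio at sector 2048 on a target beginning at
 * sector 1024 is remapped to sector 1024 before the block lookup.
 */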

static int thin_endio(struct dm_target *ti, struct bio *bio,
		      blk_status_t *err)
{
	unsigned long flags;
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct list_head work;
	struct dm_thin_new_mapping *m, *tmp;
	struct pool *pool = h->tc->pool;

	if (h->shared_read_entry) {
		INIT_LIST_HEAD(&work);
		dm_deferred_entry_dec(h->shared_read_entry, &work);

		spin_lock_irqsave(&pool->lock, flags);
		list_for_each_entry_safe(m, tmp, &work, list) {
			list_del(&m->list);
			__complete_mapping_preparation(m);
		}
		spin_unlock_irqrestore(&pool->lock, flags);
	}

	if (h->all_io_entry) {
		INIT_LIST_HEAD(&work);
		dm_deferred_entry_dec(h->all_io_entry, &work);
		if (!list_empty(&work)) {
			spin_lock_irqsave(&pool->lock, flags);
			list_for_each_entry_safe(m, tmp, &work, list)
				list_add_tail(&m->list, &pool->prepared_discards);
			spin_unlock_irqrestore(&pool->lock, flags);
			wake_worker(pool);
		}
	}

	if (h->cell)
		cell_defer_no_holder(h->tc, h->cell);

	return DM_ENDIO_DONE;
}

static void thin_presuspend(struct dm_target *ti)
{
	struct thin_c *tc = ti->private;

	if (dm_noflush_suspending(ti))
		noflush_work(tc, do_noflush_start);
}

static void thin_postsuspend(struct dm_target *ti)
{
	struct thin_c *tc = ti->private;

	/*
	 * The dm_noflush_suspending flag has been cleared by now, so
	 * unfortunately we must always run this.
	 */
	noflush_work(tc, do_noflush_stop);
}

static int thin_preresume(struct dm_target *ti)
{
	struct thin_c *tc = ti->private;

	if (tc->origin_dev)
		tc->origin_size = get_dev_size(tc->origin_dev->bdev);

	return 0;
}

/*
 * <nr mapped sectors> <highest mapped sector>
 */
static void thin_status(struct dm_target *ti, status_type_t type,
			unsigned status_flags, char *result, unsigned maxlen)
{
	int r;
	ssize_t sz = 0;
	dm_block_t mapped, highest;
	char buf[BDEVNAME_SIZE];
	struct thin_c *tc = ti->private;

	if (get_pool_mode(tc->pool) == PM_FAIL) {
		DMEMIT("Fail");
		return;
	}

	if (!tc->td)
		DMEMIT("-");
	else {
		switch (type) {
		case STATUSTYPE_INFO:
			r = dm_thin_get_mapped_count(tc->td, &mapped);
			if (r) {
				DMERR("dm_thin_get_mapped_count returned %d", r);
				goto err;
			}

			r = dm_thin_get_highest_mapped_block(tc->td, &highest);
			if (r < 0) {
				DMERR("dm_thin_get_highest_mapped_block returned %d", r);
				goto err;
			}

			DMEMIT("%llu ", mapped * tc->pool->sectors_per_block);
			if (r)
				DMEMIT("%llu", ((highest + 1) *
						tc->pool->sectors_per_block) - 1);
			else
				DMEMIT("-");
			break;

		case STATUSTYPE_TABLE:
			DMEMIT("%s %lu",
			       format_dev_t(buf, tc->pool_dev->bdev->bd_dev),
			       (unsigned long)tc->dev_id);
			if (tc->origin_dev)
				DMEMIT(" %s", format_dev_t(buf, tc->origin_dev->bdev->bd_dev));
			break;
		}
	}

	return;

err:
	DMEMIT("Error");
}
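
/*
 * Illustrative STATUSTYPE_INFO output (numbers made up): with 128-sector
 * (64KiB) blocks, 2048 mapped blocks and a highest mapped block of 4095,
 * the line reads "262144 524287", i.e. <nr mapped sectors> <highest mapped
 * sector>.  A "-" is emitted for the second field while nothing is mapped.
 */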

static int thin_iterate_devices(struct dm_target *ti,
				iterate_devices_callout_fn fn, void *data)
{
	sector_t blocks;
	struct thin_c *tc = ti->private;
	struct pool *pool = tc->pool;

	/*
	 * We can't call dm_pool_get_data_dev_size() since that blocks.  So
	 * we follow a more convoluted path through to the pool's target.
	 */
	if (!pool->ti)
		return 0;	/* nothing is bound */

	blocks = pool->ti->len;
	(void) sector_div(blocks, pool->sectors_per_block);
	if (blocks)
		return fn(ti, tc->pool_dev, 0, pool->sectors_per_block * blocks, data);

	return 0;
}

static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct thin_c *tc = ti->private;
	struct pool *pool = tc->pool;

	if (!pool->pf.discard_enabled)
		return;

	limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
	limits->max_discard_sectors = 2048 * 1024 * 16;	/* 16G */
}

static struct target_type thin_target = {
	.name = "thin",
	.version = {1, 20, 0},
	.module = THIS_MODULE,
	.ctr = thin_ctr,
	.dtr = thin_dtr,
	.map = thin_map,
	.end_io = thin_endio,
	.preresume = thin_preresume,
	.presuspend = thin_presuspend,
	.postsuspend = thin_postsuspend,
	.status = thin_status,
	.iterate_devices = thin_iterate_devices,
	.io_hints = thin_io_hints,
};

/*----------------------------------------------------------------*/

static int __init dm_thin_init(void)
{
	int r = -ENOMEM;

	pool_table_init();

	_new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
	if (!_new_mapping_cache)
		return r;

	r = dm_register_target(&thin_target);
	if (r)
		goto bad_new_mapping_cache;

	r = dm_register_target(&pool_target);
	if (r)
		goto bad_thin_target;

	return 0;

bad_thin_target:
	dm_unregister_target(&thin_target);
bad_new_mapping_cache:
	kmem_cache_destroy(_new_mapping_cache);

	return r;
}

static void dm_thin_exit(void)
{
	dm_unregister_target(&thin_target);
	dm_unregister_target(&pool_target);

	kmem_cache_destroy(_new_mapping_cache);

	pool_table_exit();
}

module_init(dm_thin_init);
module_exit(dm_thin_exit);

module_param_named(no_space_timeout, no_space_timeout_secs, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(no_space_timeout, "Out of data space queue IO timeout in seconds");

MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
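
/*
 * Runtime tuning note: no_space_timeout bounds how long bios are queued
 * once the pool runs out of data space before the pool stops queueing and
 * starts erroring them.  Assuming this file is built into the dm-thin-pool
 * module (the usual configuration), the parameter can be inspected and
 * changed at runtime via sysfs, e.g.:
 *
 *   echo 120 > /sys/module/dm_thin_pool/parameters/no_space_timeout
 */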