/*
 * Copyright (C) 2012 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-prison.h"
#include "dm-bio-record.h"
#include "dm-cache-metadata.h"

#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#define DM_MSG_PREFIX "cache"

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(cache_copy_throttle,
	"A percentage of time allocated for copying to and/or from cache");

/*----------------------------------------------------------------*/

/*
 * Glossary:
 *
 * oblock: index of an origin block
 * cblock: index of a cache block
 * promotion: movement of a block from origin to cache
 * demotion: movement of a block from cache to origin
 * migration: movement of a block between the origin and cache device,
 *	      either direction
 */

/*----------------------------------------------------------------*/

static size_t bitset_size_in_bytes(unsigned nr_entries)
{
	return sizeof(unsigned long) * dm_div_up(nr_entries, BITS_PER_LONG);
}

static unsigned long *alloc_bitset(unsigned nr_entries)
{
	size_t s = bitset_size_in_bytes(nr_entries);
	return vzalloc(s);
}

static void clear_bitset(void *bitset, unsigned nr_entries)
{
	size_t s = bitset_size_in_bytes(nr_entries);
	memset(bitset, 0, s);
}

static void free_bitset(unsigned long *bits)
{
	vfree(bits);
}

/*----------------------------------------------------------------*/

/*
 * There are a couple of places where we let a bio run, but want to do some
 * work before calling its endio function. We do this by temporarily
 * changing the endio fn.
 */
struct dm_hook_info {
	bio_end_io_t *bi_end_io;
	void *bi_private;
};

static void dm_hook_bio(struct dm_hook_info *h, struct bio *bio,
			bio_end_io_t *bi_end_io, void *bi_private)
{
	h->bi_end_io = bio->bi_end_io;
	h->bi_private = bio->bi_private;

	bio->bi_end_io = bi_end_io;
	bio->bi_private = bi_private;
}

static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio)
{
	bio->bi_end_io = h->bi_end_io;
	bio->bi_private = h->bi_private;
}

/*----------------------------------------------------------------*/

#define PRISON_CELLS 1024
#define MIGRATION_POOL_SIZE 128
#define COMMIT_PERIOD HZ
#define MIGRATION_COUNT_WINDOW 10

/*
 * The block size of the device holding cache data must be
 * between 32KB and 1GB.
 */
#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (32 * 1024 >> SECTOR_SHIFT)
#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)

/*
 * FIXME: the cache is read/write for the time being.
 */
enum cache_metadata_mode {
	CM_WRITE,		/* metadata may be changed */
	CM_READ_ONLY,		/* metadata may not be changed */
};

enum cache_io_mode {
	/*
	 * Data is written to cached blocks only. These blocks are marked
	 * dirty. If you lose the cache device you will lose data.
	 * Potential performance increase for both reads and writes.
	 */
	CM_IO_WRITEBACK,

	/*
	 * Data is written to both cache and origin. Blocks are never
	 * dirty. Potential performance benefit for reads only.
123 */ 124 CM_IO_WRITETHROUGH, 125 126 /* 127 * A degraded mode useful for various cache coherency situations 128 * (eg, rolling back snapshots). Reads and writes always go to the 129 * origin. If a write goes to a cached oblock, then the cache 130 * block is invalidated. 131 */ 132 CM_IO_PASSTHROUGH 133 }; 134 135 struct cache_features { 136 enum cache_metadata_mode mode; 137 enum cache_io_mode io_mode; 138 }; 139 140 struct cache_stats { 141 atomic_t read_hit; 142 atomic_t read_miss; 143 atomic_t write_hit; 144 atomic_t write_miss; 145 atomic_t demotion; 146 atomic_t promotion; 147 atomic_t copies_avoided; 148 atomic_t cache_cell_clash; 149 atomic_t commit_count; 150 atomic_t discard_count; 151 }; 152 153 /* 154 * Defines a range of cblocks, begin to (end - 1) are in the range. end is 155 * the one-past-the-end value. 156 */ 157 struct cblock_range { 158 dm_cblock_t begin; 159 dm_cblock_t end; 160 }; 161 162 struct invalidation_request { 163 struct list_head list; 164 struct cblock_range *cblocks; 165 166 atomic_t complete; 167 int err; 168 169 wait_queue_head_t result_wait; 170 }; 171 172 struct cache { 173 struct dm_target *ti; 174 struct dm_target_callbacks callbacks; 175 176 struct dm_cache_metadata *cmd; 177 178 /* 179 * Metadata is written to this device. 180 */ 181 struct dm_dev *metadata_dev; 182 183 /* 184 * The slower of the two data devices. Typically a spindle. 185 */ 186 struct dm_dev *origin_dev; 187 188 /* 189 * The faster of the two data devices. Typically an SSD. 190 */ 191 struct dm_dev *cache_dev; 192 193 /* 194 * Size of the origin device in _complete_ blocks and native sectors. 195 */ 196 dm_oblock_t origin_blocks; 197 sector_t origin_sectors; 198 199 /* 200 * Size of the cache device in blocks. 201 */ 202 dm_cblock_t cache_size; 203 204 /* 205 * Fields for converting from sectors to blocks. 206 */ 207 uint32_t sectors_per_block; 208 int sectors_per_block_shift; 209 210 spinlock_t lock; 211 struct bio_list deferred_bios; 212 struct bio_list deferred_flush_bios; 213 struct bio_list deferred_writethrough_bios; 214 struct list_head quiesced_migrations; 215 struct list_head completed_migrations; 216 struct list_head need_commit_migrations; 217 sector_t migration_threshold; 218 wait_queue_head_t migration_wait; 219 atomic_t nr_migrations; 220 221 wait_queue_head_t quiescing_wait; 222 atomic_t quiescing; 223 atomic_t quiescing_ack; 224 225 /* 226 * cache_size entries, dirty if set 227 */ 228 dm_cblock_t nr_dirty; 229 unsigned long *dirty_bitset; 230 231 /* 232 * origin_blocks entries, discarded if set. 233 */ 234 dm_dblock_t discard_nr_blocks; 235 unsigned long *discard_bitset; 236 uint32_t discard_block_size; /* a power of 2 times sectors per block */ 237 238 /* 239 * Rather than reconstructing the table line for the status we just 240 * save it and regurgitate. 241 */ 242 unsigned nr_ctr_args; 243 const char **ctr_args; 244 245 struct dm_kcopyd_client *copier; 246 struct workqueue_struct *wq; 247 struct work_struct worker; 248 249 struct delayed_work waker; 250 unsigned long last_commit_jiffies; 251 252 struct dm_bio_prison *prison; 253 struct dm_deferred_set *all_io_ds; 254 255 mempool_t *migration_pool; 256 struct dm_cache_migration *next_migration; 257 258 struct dm_cache_policy *policy; 259 unsigned policy_nr_args; 260 261 bool need_tick_bio:1; 262 bool sized:1; 263 bool invalidate:1; 264 bool commit_requested:1; 265 bool loaded_mappings:1; 266 bool loaded_discards:1; 267 268 /* 269 * Cache features such as write-through. 
270 */ 271 struct cache_features features; 272 273 struct cache_stats stats; 274 275 /* 276 * Invalidation fields. 277 */ 278 spinlock_t invalidation_lock; 279 struct list_head invalidation_requests; 280 }; 281 282 struct per_bio_data { 283 bool tick:1; 284 unsigned req_nr:2; 285 struct dm_deferred_entry *all_io_entry; 286 287 /* 288 * writethrough fields. These MUST remain at the end of this 289 * structure and the 'cache' member must be the first as it 290 * is used to determine the offset of the writethrough fields. 291 */ 292 struct cache *cache; 293 dm_cblock_t cblock; 294 struct dm_hook_info hook_info; 295 struct dm_bio_details bio_details; 296 }; 297 298 struct dm_cache_migration { 299 struct list_head list; 300 struct cache *cache; 301 302 unsigned long start_jiffies; 303 dm_oblock_t old_oblock; 304 dm_oblock_t new_oblock; 305 dm_cblock_t cblock; 306 307 bool err:1; 308 bool writeback:1; 309 bool demote:1; 310 bool promote:1; 311 bool requeue_holder:1; 312 bool invalidate:1; 313 314 struct dm_bio_prison_cell *old_ocell; 315 struct dm_bio_prison_cell *new_ocell; 316 }; 317 318 /* 319 * Processing a bio in the worker thread may require these memory 320 * allocations. We prealloc to avoid deadlocks (the same worker thread 321 * frees them back to the mempool). 322 */ 323 struct prealloc { 324 struct dm_cache_migration *mg; 325 struct dm_bio_prison_cell *cell1; 326 struct dm_bio_prison_cell *cell2; 327 }; 328 329 static void wake_worker(struct cache *cache) 330 { 331 queue_work(cache->wq, &cache->worker); 332 } 333 334 /*----------------------------------------------------------------*/ 335 336 static struct dm_bio_prison_cell *alloc_prison_cell(struct cache *cache) 337 { 338 /* FIXME: change to use a local slab. */ 339 return dm_bio_prison_alloc_cell(cache->prison, GFP_NOWAIT); 340 } 341 342 static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell *cell) 343 { 344 dm_bio_prison_free_cell(cache->prison, cell); 345 } 346 347 static int prealloc_data_structs(struct cache *cache, struct prealloc *p) 348 { 349 if (!p->mg) { 350 p->mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT); 351 if (!p->mg) 352 return -ENOMEM; 353 } 354 355 if (!p->cell1) { 356 p->cell1 = alloc_prison_cell(cache); 357 if (!p->cell1) 358 return -ENOMEM; 359 } 360 361 if (!p->cell2) { 362 p->cell2 = alloc_prison_cell(cache); 363 if (!p->cell2) 364 return -ENOMEM; 365 } 366 367 return 0; 368 } 369 370 static void prealloc_free_structs(struct cache *cache, struct prealloc *p) 371 { 372 if (p->cell2) 373 free_prison_cell(cache, p->cell2); 374 375 if (p->cell1) 376 free_prison_cell(cache, p->cell1); 377 378 if (p->mg) 379 mempool_free(p->mg, cache->migration_pool); 380 } 381 382 static struct dm_cache_migration *prealloc_get_migration(struct prealloc *p) 383 { 384 struct dm_cache_migration *mg = p->mg; 385 386 BUG_ON(!mg); 387 p->mg = NULL; 388 389 return mg; 390 } 391 392 /* 393 * You must have a cell within the prealloc struct to return. If not this 394 * function will BUG() rather than returning NULL. 395 */ 396 static struct dm_bio_prison_cell *prealloc_get_cell(struct prealloc *p) 397 { 398 struct dm_bio_prison_cell *r = NULL; 399 400 if (p->cell1) { 401 r = p->cell1; 402 p->cell1 = NULL; 403 404 } else if (p->cell2) { 405 r = p->cell2; 406 p->cell2 = NULL; 407 } else 408 BUG(); 409 410 return r; 411 } 412 413 /* 414 * You can't have more than two cells in a prealloc struct. BUG() will be 415 * called if you try and overfill. 
416 */ 417 static void prealloc_put_cell(struct prealloc *p, struct dm_bio_prison_cell *cell) 418 { 419 if (!p->cell2) 420 p->cell2 = cell; 421 422 else if (!p->cell1) 423 p->cell1 = cell; 424 425 else 426 BUG(); 427 } 428 429 /*----------------------------------------------------------------*/ 430 431 static void build_key(dm_oblock_t oblock, struct dm_cell_key *key) 432 { 433 key->virtual = 0; 434 key->dev = 0; 435 key->block = from_oblock(oblock); 436 } 437 438 /* 439 * The caller hands in a preallocated cell, and a free function for it. 440 * The cell will be freed if there's an error, or if it wasn't used because 441 * a cell with that key already exists. 442 */ 443 typedef void (*cell_free_fn)(void *context, struct dm_bio_prison_cell *cell); 444 445 static int bio_detain(struct cache *cache, dm_oblock_t oblock, 446 struct bio *bio, struct dm_bio_prison_cell *cell_prealloc, 447 cell_free_fn free_fn, void *free_context, 448 struct dm_bio_prison_cell **cell_result) 449 { 450 int r; 451 struct dm_cell_key key; 452 453 build_key(oblock, &key); 454 r = dm_bio_detain(cache->prison, &key, bio, cell_prealloc, cell_result); 455 if (r) 456 free_fn(free_context, cell_prealloc); 457 458 return r; 459 } 460 461 static int get_cell(struct cache *cache, 462 dm_oblock_t oblock, 463 struct prealloc *structs, 464 struct dm_bio_prison_cell **cell_result) 465 { 466 int r; 467 struct dm_cell_key key; 468 struct dm_bio_prison_cell *cell_prealloc; 469 470 cell_prealloc = prealloc_get_cell(structs); 471 472 build_key(oblock, &key); 473 r = dm_get_cell(cache->prison, &key, cell_prealloc, cell_result); 474 if (r) 475 prealloc_put_cell(structs, cell_prealloc); 476 477 return r; 478 } 479 480 /*----------------------------------------------------------------*/ 481 482 static bool is_dirty(struct cache *cache, dm_cblock_t b) 483 { 484 return test_bit(from_cblock(b), cache->dirty_bitset); 485 } 486 487 static void set_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock) 488 { 489 if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) { 490 cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) + 1); 491 policy_set_dirty(cache->policy, oblock); 492 } 493 } 494 495 static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock) 496 { 497 if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) { 498 policy_clear_dirty(cache->policy, oblock); 499 cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) - 1); 500 if (!from_cblock(cache->nr_dirty)) 501 dm_table_event(cache->ti->table); 502 } 503 } 504 505 /*----------------------------------------------------------------*/ 506 507 static bool block_size_is_power_of_two(struct cache *cache) 508 { 509 return cache->sectors_per_block_shift >= 0; 510 } 511 512 /* gcc on ARM generates spurious references to __udivdi3 and __umoddi3 */ 513 #if defined(CONFIG_ARM) && __GNUC__ == 4 && __GNUC_MINOR__ <= 6 514 __always_inline 515 #endif 516 static dm_block_t block_div(dm_block_t b, uint32_t n) 517 { 518 do_div(b, n); 519 520 return b; 521 } 522 523 static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock) 524 { 525 uint32_t discard_blocks = cache->discard_block_size; 526 dm_block_t b = from_oblock(oblock); 527 528 if (!block_size_is_power_of_two(cache)) 529 discard_blocks = discard_blocks / cache->sectors_per_block; 530 else 531 discard_blocks >>= cache->sectors_per_block_shift; 532 533 b = block_div(b, discard_blocks); 534 535 return to_dblock(b); 536 } 537 538 static void set_discard(struct cache *cache, 
dm_dblock_t b) 539 { 540 unsigned long flags; 541 542 atomic_inc(&cache->stats.discard_count); 543 544 spin_lock_irqsave(&cache->lock, flags); 545 set_bit(from_dblock(b), cache->discard_bitset); 546 spin_unlock_irqrestore(&cache->lock, flags); 547 } 548 549 static void clear_discard(struct cache *cache, dm_dblock_t b) 550 { 551 unsigned long flags; 552 553 spin_lock_irqsave(&cache->lock, flags); 554 clear_bit(from_dblock(b), cache->discard_bitset); 555 spin_unlock_irqrestore(&cache->lock, flags); 556 } 557 558 static bool is_discarded(struct cache *cache, dm_dblock_t b) 559 { 560 int r; 561 unsigned long flags; 562 563 spin_lock_irqsave(&cache->lock, flags); 564 r = test_bit(from_dblock(b), cache->discard_bitset); 565 spin_unlock_irqrestore(&cache->lock, flags); 566 567 return r; 568 } 569 570 static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b) 571 { 572 int r; 573 unsigned long flags; 574 575 spin_lock_irqsave(&cache->lock, flags); 576 r = test_bit(from_dblock(oblock_to_dblock(cache, b)), 577 cache->discard_bitset); 578 spin_unlock_irqrestore(&cache->lock, flags); 579 580 return r; 581 } 582 583 /*----------------------------------------------------------------*/ 584 585 static void load_stats(struct cache *cache) 586 { 587 struct dm_cache_statistics stats; 588 589 dm_cache_metadata_get_stats(cache->cmd, &stats); 590 atomic_set(&cache->stats.read_hit, stats.read_hits); 591 atomic_set(&cache->stats.read_miss, stats.read_misses); 592 atomic_set(&cache->stats.write_hit, stats.write_hits); 593 atomic_set(&cache->stats.write_miss, stats.write_misses); 594 } 595 596 static void save_stats(struct cache *cache) 597 { 598 struct dm_cache_statistics stats; 599 600 stats.read_hits = atomic_read(&cache->stats.read_hit); 601 stats.read_misses = atomic_read(&cache->stats.read_miss); 602 stats.write_hits = atomic_read(&cache->stats.write_hit); 603 stats.write_misses = atomic_read(&cache->stats.write_miss); 604 605 dm_cache_metadata_set_stats(cache->cmd, &stats); 606 } 607 608 /*---------------------------------------------------------------- 609 * Per bio data 610 *--------------------------------------------------------------*/ 611 612 /* 613 * If using writeback, leave out struct per_bio_data's writethrough fields. 614 */ 615 #define PB_DATA_SIZE_WB (offsetof(struct per_bio_data, cache)) 616 #define PB_DATA_SIZE_WT (sizeof(struct per_bio_data)) 617 618 static bool writethrough_mode(struct cache_features *f) 619 { 620 return f->io_mode == CM_IO_WRITETHROUGH; 621 } 622 623 static bool writeback_mode(struct cache_features *f) 624 { 625 return f->io_mode == CM_IO_WRITEBACK; 626 } 627 628 static bool passthrough_mode(struct cache_features *f) 629 { 630 return f->io_mode == CM_IO_PASSTHROUGH; 631 } 632 633 static size_t get_per_bio_data_size(struct cache *cache) 634 { 635 return writethrough_mode(&cache->features) ? 
PB_DATA_SIZE_WT : PB_DATA_SIZE_WB; 636 } 637 638 static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size) 639 { 640 struct per_bio_data *pb = dm_per_bio_data(bio, data_size); 641 BUG_ON(!pb); 642 return pb; 643 } 644 645 static struct per_bio_data *init_per_bio_data(struct bio *bio, size_t data_size) 646 { 647 struct per_bio_data *pb = get_per_bio_data(bio, data_size); 648 649 pb->tick = false; 650 pb->req_nr = dm_bio_get_target_bio_nr(bio); 651 pb->all_io_entry = NULL; 652 653 return pb; 654 } 655 656 /*---------------------------------------------------------------- 657 * Remapping 658 *--------------------------------------------------------------*/ 659 static void remap_to_origin(struct cache *cache, struct bio *bio) 660 { 661 bio->bi_bdev = cache->origin_dev->bdev; 662 } 663 664 static void remap_to_cache(struct cache *cache, struct bio *bio, 665 dm_cblock_t cblock) 666 { 667 sector_t bi_sector = bio->bi_sector; 668 669 bio->bi_bdev = cache->cache_dev->bdev; 670 if (!block_size_is_power_of_two(cache)) 671 bio->bi_sector = (from_cblock(cblock) * cache->sectors_per_block) + 672 sector_div(bi_sector, cache->sectors_per_block); 673 else 674 bio->bi_sector = (from_cblock(cblock) << cache->sectors_per_block_shift) | 675 (bi_sector & (cache->sectors_per_block - 1)); 676 } 677 678 static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio) 679 { 680 unsigned long flags; 681 size_t pb_data_size = get_per_bio_data_size(cache); 682 struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); 683 684 spin_lock_irqsave(&cache->lock, flags); 685 if (cache->need_tick_bio && 686 !(bio->bi_rw & (REQ_FUA | REQ_FLUSH | REQ_DISCARD))) { 687 pb->tick = true; 688 cache->need_tick_bio = false; 689 } 690 spin_unlock_irqrestore(&cache->lock, flags); 691 } 692 693 static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio, 694 dm_oblock_t oblock) 695 { 696 check_if_tick_bio_needed(cache, bio); 697 remap_to_origin(cache, bio); 698 if (bio_data_dir(bio) == WRITE) 699 clear_discard(cache, oblock_to_dblock(cache, oblock)); 700 } 701 702 static void remap_to_cache_dirty(struct cache *cache, struct bio *bio, 703 dm_oblock_t oblock, dm_cblock_t cblock) 704 { 705 check_if_tick_bio_needed(cache, bio); 706 remap_to_cache(cache, bio, cblock); 707 if (bio_data_dir(bio) == WRITE) { 708 set_dirty(cache, oblock, cblock); 709 clear_discard(cache, oblock_to_dblock(cache, oblock)); 710 } 711 } 712 713 static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio) 714 { 715 sector_t block_nr = bio->bi_sector; 716 717 if (!block_size_is_power_of_two(cache)) 718 (void) sector_div(block_nr, cache->sectors_per_block); 719 else 720 block_nr >>= cache->sectors_per_block_shift; 721 722 return to_oblock(block_nr); 723 } 724 725 static int bio_triggers_commit(struct cache *cache, struct bio *bio) 726 { 727 return bio->bi_rw & (REQ_FLUSH | REQ_FUA); 728 } 729 730 static void issue(struct cache *cache, struct bio *bio) 731 { 732 unsigned long flags; 733 734 if (!bio_triggers_commit(cache, bio)) { 735 generic_make_request(bio); 736 return; 737 } 738 739 /* 740 * Batch together any bios that trigger commits and then issue a 741 * single commit for them in do_worker(). 
742 */ 743 spin_lock_irqsave(&cache->lock, flags); 744 cache->commit_requested = true; 745 bio_list_add(&cache->deferred_flush_bios, bio); 746 spin_unlock_irqrestore(&cache->lock, flags); 747 } 748 749 static void defer_writethrough_bio(struct cache *cache, struct bio *bio) 750 { 751 unsigned long flags; 752 753 spin_lock_irqsave(&cache->lock, flags); 754 bio_list_add(&cache->deferred_writethrough_bios, bio); 755 spin_unlock_irqrestore(&cache->lock, flags); 756 757 wake_worker(cache); 758 } 759 760 static void writethrough_endio(struct bio *bio, int err) 761 { 762 struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT); 763 764 dm_unhook_bio(&pb->hook_info, bio); 765 766 if (err) { 767 bio_endio(bio, err); 768 return; 769 } 770 771 dm_bio_restore(&pb->bio_details, bio); 772 remap_to_cache(pb->cache, bio, pb->cblock); 773 774 /* 775 * We can't issue this bio directly, since we're in interrupt 776 * context. So it gets put on a bio list for processing by the 777 * worker thread. 778 */ 779 defer_writethrough_bio(pb->cache, bio); 780 } 781 782 /* 783 * When running in writethrough mode we need to send writes to clean blocks 784 * to both the cache and origin devices. In future we'd like to clone the 785 * bio and send them in parallel, but for now we're doing them in 786 * series as this is easier. 787 */ 788 static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio, 789 dm_oblock_t oblock, dm_cblock_t cblock) 790 { 791 struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT); 792 793 pb->cache = cache; 794 pb->cblock = cblock; 795 dm_hook_bio(&pb->hook_info, bio, writethrough_endio, NULL); 796 dm_bio_record(&pb->bio_details, bio); 797 798 remap_to_origin_clear_discard(pb->cache, bio, oblock); 799 } 800 801 /*---------------------------------------------------------------- 802 * Migration processing 803 * 804 * Migration covers moving data from the origin device to the cache, or 805 * vice versa. 806 *--------------------------------------------------------------*/ 807 static void free_migration(struct dm_cache_migration *mg) 808 { 809 mempool_free(mg, mg->cache->migration_pool); 810 } 811 812 static void inc_nr_migrations(struct cache *cache) 813 { 814 atomic_inc(&cache->nr_migrations); 815 } 816 817 static void dec_nr_migrations(struct cache *cache) 818 { 819 atomic_dec(&cache->nr_migrations); 820 821 /* 822 * Wake the worker in case we're suspending the target. 823 */ 824 wake_up(&cache->migration_wait); 825 } 826 827 static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell, 828 bool holder) 829 { 830 (holder ? 
dm_cell_release : dm_cell_release_no_holder) 831 (cache->prison, cell, &cache->deferred_bios); 832 free_prison_cell(cache, cell); 833 } 834 835 static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell, 836 bool holder) 837 { 838 unsigned long flags; 839 840 spin_lock_irqsave(&cache->lock, flags); 841 __cell_defer(cache, cell, holder); 842 spin_unlock_irqrestore(&cache->lock, flags); 843 844 wake_worker(cache); 845 } 846 847 static void cleanup_migration(struct dm_cache_migration *mg) 848 { 849 struct cache *cache = mg->cache; 850 free_migration(mg); 851 dec_nr_migrations(cache); 852 } 853 854 static void migration_failure(struct dm_cache_migration *mg) 855 { 856 struct cache *cache = mg->cache; 857 858 if (mg->writeback) { 859 DMWARN_LIMIT("writeback failed; couldn't copy block"); 860 set_dirty(cache, mg->old_oblock, mg->cblock); 861 cell_defer(cache, mg->old_ocell, false); 862 863 } else if (mg->demote) { 864 DMWARN_LIMIT("demotion failed; couldn't copy block"); 865 policy_force_mapping(cache->policy, mg->new_oblock, mg->old_oblock); 866 867 cell_defer(cache, mg->old_ocell, mg->promote ? false : true); 868 if (mg->promote) 869 cell_defer(cache, mg->new_ocell, true); 870 } else { 871 DMWARN_LIMIT("promotion failed; couldn't copy block"); 872 policy_remove_mapping(cache->policy, mg->new_oblock); 873 cell_defer(cache, mg->new_ocell, true); 874 } 875 876 cleanup_migration(mg); 877 } 878 879 static void migration_success_pre_commit(struct dm_cache_migration *mg) 880 { 881 unsigned long flags; 882 struct cache *cache = mg->cache; 883 884 if (mg->writeback) { 885 cell_defer(cache, mg->old_ocell, false); 886 clear_dirty(cache, mg->old_oblock, mg->cblock); 887 cleanup_migration(mg); 888 return; 889 890 } else if (mg->demote) { 891 if (dm_cache_remove_mapping(cache->cmd, mg->cblock)) { 892 DMWARN_LIMIT("demotion failed; couldn't update on disk metadata"); 893 policy_force_mapping(cache->policy, mg->new_oblock, 894 mg->old_oblock); 895 if (mg->promote) 896 cell_defer(cache, mg->new_ocell, true); 897 cleanup_migration(mg); 898 return; 899 } 900 } else { 901 if (dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock)) { 902 DMWARN_LIMIT("promotion failed; couldn't update on disk metadata"); 903 policy_remove_mapping(cache->policy, mg->new_oblock); 904 cleanup_migration(mg); 905 return; 906 } 907 } 908 909 spin_lock_irqsave(&cache->lock, flags); 910 list_add_tail(&mg->list, &cache->need_commit_migrations); 911 cache->commit_requested = true; 912 spin_unlock_irqrestore(&cache->lock, flags); 913 } 914 915 static void migration_success_post_commit(struct dm_cache_migration *mg) 916 { 917 unsigned long flags; 918 struct cache *cache = mg->cache; 919 920 if (mg->writeback) { 921 DMWARN("writeback unexpectedly triggered commit"); 922 return; 923 924 } else if (mg->demote) { 925 cell_defer(cache, mg->old_ocell, mg->promote ? 
false : true); 926 927 if (mg->promote) { 928 mg->demote = false; 929 930 spin_lock_irqsave(&cache->lock, flags); 931 list_add_tail(&mg->list, &cache->quiesced_migrations); 932 spin_unlock_irqrestore(&cache->lock, flags); 933 934 } else { 935 if (mg->invalidate) 936 policy_remove_mapping(cache->policy, mg->old_oblock); 937 cleanup_migration(mg); 938 } 939 940 } else { 941 if (mg->requeue_holder) 942 cell_defer(cache, mg->new_ocell, true); 943 else { 944 bio_endio(mg->new_ocell->holder, 0); 945 cell_defer(cache, mg->new_ocell, false); 946 } 947 clear_dirty(cache, mg->new_oblock, mg->cblock); 948 cleanup_migration(mg); 949 } 950 } 951 952 static void copy_complete(int read_err, unsigned long write_err, void *context) 953 { 954 unsigned long flags; 955 struct dm_cache_migration *mg = (struct dm_cache_migration *) context; 956 struct cache *cache = mg->cache; 957 958 if (read_err || write_err) 959 mg->err = true; 960 961 spin_lock_irqsave(&cache->lock, flags); 962 list_add_tail(&mg->list, &cache->completed_migrations); 963 spin_unlock_irqrestore(&cache->lock, flags); 964 965 wake_worker(cache); 966 } 967 968 static void issue_copy_real(struct dm_cache_migration *mg) 969 { 970 int r; 971 struct dm_io_region o_region, c_region; 972 struct cache *cache = mg->cache; 973 974 o_region.bdev = cache->origin_dev->bdev; 975 o_region.count = cache->sectors_per_block; 976 977 c_region.bdev = cache->cache_dev->bdev; 978 c_region.sector = from_cblock(mg->cblock) * cache->sectors_per_block; 979 c_region.count = cache->sectors_per_block; 980 981 if (mg->writeback || mg->demote) { 982 /* demote */ 983 o_region.sector = from_oblock(mg->old_oblock) * cache->sectors_per_block; 984 r = dm_kcopyd_copy(cache->copier, &c_region, 1, &o_region, 0, copy_complete, mg); 985 } else { 986 /* promote */ 987 o_region.sector = from_oblock(mg->new_oblock) * cache->sectors_per_block; 988 r = dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, mg); 989 } 990 991 if (r < 0) { 992 DMERR_LIMIT("issuing migration failed"); 993 migration_failure(mg); 994 } 995 } 996 997 static void overwrite_endio(struct bio *bio, int err) 998 { 999 struct dm_cache_migration *mg = bio->bi_private; 1000 struct cache *cache = mg->cache; 1001 size_t pb_data_size = get_per_bio_data_size(cache); 1002 struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); 1003 unsigned long flags; 1004 1005 if (err) 1006 mg->err = true; 1007 1008 spin_lock_irqsave(&cache->lock, flags); 1009 list_add_tail(&mg->list, &cache->completed_migrations); 1010 dm_unhook_bio(&pb->hook_info, bio); 1011 mg->requeue_holder = false; 1012 spin_unlock_irqrestore(&cache->lock, flags); 1013 1014 wake_worker(cache); 1015 } 1016 1017 static void issue_overwrite(struct dm_cache_migration *mg, struct bio *bio) 1018 { 1019 size_t pb_data_size = get_per_bio_data_size(mg->cache); 1020 struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); 1021 1022 dm_hook_bio(&pb->hook_info, bio, overwrite_endio, mg); 1023 remap_to_cache_dirty(mg->cache, bio, mg->new_oblock, mg->cblock); 1024 generic_make_request(bio); 1025 } 1026 1027 static bool bio_writes_complete_block(struct cache *cache, struct bio *bio) 1028 { 1029 return (bio_data_dir(bio) == WRITE) && 1030 (bio->bi_size == (cache->sectors_per_block << SECTOR_SHIFT)); 1031 } 1032 1033 static void avoid_copy(struct dm_cache_migration *mg) 1034 { 1035 atomic_inc(&mg->cache->stats.copies_avoided); 1036 migration_success_pre_commit(mg); 1037 } 1038 1039 static void issue_copy(struct dm_cache_migration *mg) 1040 { 1041 bool 
avoid; 1042 struct cache *cache = mg->cache; 1043 1044 if (mg->writeback || mg->demote) 1045 avoid = !is_dirty(cache, mg->cblock) || 1046 is_discarded_oblock(cache, mg->old_oblock); 1047 else { 1048 struct bio *bio = mg->new_ocell->holder; 1049 1050 avoid = is_discarded_oblock(cache, mg->new_oblock); 1051 1052 if (!avoid && bio_writes_complete_block(cache, bio)) { 1053 issue_overwrite(mg, bio); 1054 return; 1055 } 1056 } 1057 1058 avoid ? avoid_copy(mg) : issue_copy_real(mg); 1059 } 1060 1061 static void complete_migration(struct dm_cache_migration *mg) 1062 { 1063 if (mg->err) 1064 migration_failure(mg); 1065 else 1066 migration_success_pre_commit(mg); 1067 } 1068 1069 static void process_migrations(struct cache *cache, struct list_head *head, 1070 void (*fn)(struct dm_cache_migration *)) 1071 { 1072 unsigned long flags; 1073 struct list_head list; 1074 struct dm_cache_migration *mg, *tmp; 1075 1076 INIT_LIST_HEAD(&list); 1077 spin_lock_irqsave(&cache->lock, flags); 1078 list_splice_init(head, &list); 1079 spin_unlock_irqrestore(&cache->lock, flags); 1080 1081 list_for_each_entry_safe(mg, tmp, &list, list) 1082 fn(mg); 1083 } 1084 1085 static void __queue_quiesced_migration(struct dm_cache_migration *mg) 1086 { 1087 list_add_tail(&mg->list, &mg->cache->quiesced_migrations); 1088 } 1089 1090 static void queue_quiesced_migration(struct dm_cache_migration *mg) 1091 { 1092 unsigned long flags; 1093 struct cache *cache = mg->cache; 1094 1095 spin_lock_irqsave(&cache->lock, flags); 1096 __queue_quiesced_migration(mg); 1097 spin_unlock_irqrestore(&cache->lock, flags); 1098 1099 wake_worker(cache); 1100 } 1101 1102 static void queue_quiesced_migrations(struct cache *cache, struct list_head *work) 1103 { 1104 unsigned long flags; 1105 struct dm_cache_migration *mg, *tmp; 1106 1107 spin_lock_irqsave(&cache->lock, flags); 1108 list_for_each_entry_safe(mg, tmp, work, list) 1109 __queue_quiesced_migration(mg); 1110 spin_unlock_irqrestore(&cache->lock, flags); 1111 1112 wake_worker(cache); 1113 } 1114 1115 static void check_for_quiesced_migrations(struct cache *cache, 1116 struct per_bio_data *pb) 1117 { 1118 struct list_head work; 1119 1120 if (!pb->all_io_entry) 1121 return; 1122 1123 INIT_LIST_HEAD(&work); 1124 if (pb->all_io_entry) 1125 dm_deferred_entry_dec(pb->all_io_entry, &work); 1126 1127 if (!list_empty(&work)) 1128 queue_quiesced_migrations(cache, &work); 1129 } 1130 1131 static void quiesce_migration(struct dm_cache_migration *mg) 1132 { 1133 if (!dm_deferred_set_add_work(mg->cache->all_io_ds, &mg->list)) 1134 queue_quiesced_migration(mg); 1135 } 1136 1137 static void promote(struct cache *cache, struct prealloc *structs, 1138 dm_oblock_t oblock, dm_cblock_t cblock, 1139 struct dm_bio_prison_cell *cell) 1140 { 1141 struct dm_cache_migration *mg = prealloc_get_migration(structs); 1142 1143 mg->err = false; 1144 mg->writeback = false; 1145 mg->demote = false; 1146 mg->promote = true; 1147 mg->requeue_holder = true; 1148 mg->invalidate = false; 1149 mg->cache = cache; 1150 mg->new_oblock = oblock; 1151 mg->cblock = cblock; 1152 mg->old_ocell = NULL; 1153 mg->new_ocell = cell; 1154 mg->start_jiffies = jiffies; 1155 1156 inc_nr_migrations(cache); 1157 quiesce_migration(mg); 1158 } 1159 1160 static void writeback(struct cache *cache, struct prealloc *structs, 1161 dm_oblock_t oblock, dm_cblock_t cblock, 1162 struct dm_bio_prison_cell *cell) 1163 { 1164 struct dm_cache_migration *mg = prealloc_get_migration(structs); 1165 1166 mg->err = false; 1167 mg->writeback = true; 1168 mg->demote = false; 
	mg->promote = false;
	mg->requeue_holder = true;
	mg->invalidate = false;
	mg->cache = cache;
	mg->old_oblock = oblock;
	mg->cblock = cblock;
	mg->old_ocell = cell;
	mg->new_ocell = NULL;
	mg->start_jiffies = jiffies;

	inc_nr_migrations(cache);
	quiesce_migration(mg);
}

static void demote_then_promote(struct cache *cache, struct prealloc *structs,
				dm_oblock_t old_oblock, dm_oblock_t new_oblock,
				dm_cblock_t cblock,
				struct dm_bio_prison_cell *old_ocell,
				struct dm_bio_prison_cell *new_ocell)
{
	struct dm_cache_migration *mg = prealloc_get_migration(structs);

	mg->err = false;
	mg->writeback = false;
	mg->demote = true;
	mg->promote = true;
	mg->requeue_holder = true;
	mg->invalidate = false;
	mg->cache = cache;
	mg->old_oblock = old_oblock;
	mg->new_oblock = new_oblock;
	mg->cblock = cblock;
	mg->old_ocell = old_ocell;
	mg->new_ocell = new_ocell;
	mg->start_jiffies = jiffies;

	inc_nr_migrations(cache);
	quiesce_migration(mg);
}

/*
 * Invalidate a cache entry. No writeback occurs; any changes in the cache
 * block are thrown away.
 */
static void invalidate(struct cache *cache, struct prealloc *structs,
		       dm_oblock_t oblock, dm_cblock_t cblock,
		       struct dm_bio_prison_cell *cell)
{
	struct dm_cache_migration *mg = prealloc_get_migration(structs);

	mg->err = false;
	mg->writeback = false;
	mg->demote = true;
	mg->promote = false;
	mg->requeue_holder = true;
	mg->invalidate = true;
	mg->cache = cache;
	mg->old_oblock = oblock;
	mg->cblock = cblock;
	mg->old_ocell = cell;
	mg->new_ocell = NULL;
	mg->start_jiffies = jiffies;

	inc_nr_migrations(cache);
	quiesce_migration(mg);
}

/*----------------------------------------------------------------
 * bio processing
 *--------------------------------------------------------------*/
static void defer_bio(struct cache *cache, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	bio_list_add(&cache->deferred_bios, bio);
	spin_unlock_irqrestore(&cache->lock, flags);

	wake_worker(cache);
}

static void process_flush_bio(struct cache *cache, struct bio *bio)
{
	size_t pb_data_size = get_per_bio_data_size(cache);
	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);

	BUG_ON(bio->bi_size);
	if (!pb->req_nr)
		remap_to_origin(cache, bio);
	else
		remap_to_cache(cache, bio, 0);

	issue(cache, bio);
}

/*
 * People generally discard large parts of a device, eg, the whole device
 * when formatting. Splitting these large discards up into cache block
 * sized ios and then quiescing (always necessary for discard) takes too
 * long.
 *
 * We keep it simple, and allow any size of discard to come in, and just
 * mark off blocks on the discard bitset. No passdown occurs!
 *
 * To implement passdown we need to change the bio_prison such that a cell
 * can have a key that spans many blocks.
1275 */ 1276 static void process_discard_bio(struct cache *cache, struct bio *bio) 1277 { 1278 dm_block_t start_block = dm_sector_div_up(bio->bi_sector, 1279 cache->discard_block_size); 1280 dm_block_t end_block = bio->bi_sector + bio_sectors(bio); 1281 dm_block_t b; 1282 1283 end_block = block_div(end_block, cache->discard_block_size); 1284 1285 for (b = start_block; b < end_block; b++) 1286 set_discard(cache, to_dblock(b)); 1287 1288 bio_endio(bio, 0); 1289 } 1290 1291 static bool spare_migration_bandwidth(struct cache *cache) 1292 { 1293 sector_t current_volume = (atomic_read(&cache->nr_migrations) + 1) * 1294 cache->sectors_per_block; 1295 return current_volume < cache->migration_threshold; 1296 } 1297 1298 static void inc_hit_counter(struct cache *cache, struct bio *bio) 1299 { 1300 atomic_inc(bio_data_dir(bio) == READ ? 1301 &cache->stats.read_hit : &cache->stats.write_hit); 1302 } 1303 1304 static void inc_miss_counter(struct cache *cache, struct bio *bio) 1305 { 1306 atomic_inc(bio_data_dir(bio) == READ ? 1307 &cache->stats.read_miss : &cache->stats.write_miss); 1308 } 1309 1310 static void issue_cache_bio(struct cache *cache, struct bio *bio, 1311 struct per_bio_data *pb, 1312 dm_oblock_t oblock, dm_cblock_t cblock) 1313 { 1314 pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); 1315 remap_to_cache_dirty(cache, bio, oblock, cblock); 1316 issue(cache, bio); 1317 } 1318 1319 static void process_bio(struct cache *cache, struct prealloc *structs, 1320 struct bio *bio) 1321 { 1322 int r; 1323 bool release_cell = true; 1324 dm_oblock_t block = get_bio_block(cache, bio); 1325 struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell; 1326 struct policy_result lookup_result; 1327 size_t pb_data_size = get_per_bio_data_size(cache); 1328 struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); 1329 bool discarded_block = is_discarded_oblock(cache, block); 1330 bool passthrough = passthrough_mode(&cache->features); 1331 bool can_migrate = !passthrough && (discarded_block || spare_migration_bandwidth(cache)); 1332 1333 /* 1334 * Check to see if that block is currently migrating. 1335 */ 1336 cell_prealloc = prealloc_get_cell(structs); 1337 r = bio_detain(cache, block, bio, cell_prealloc, 1338 (cell_free_fn) prealloc_put_cell, 1339 structs, &new_ocell); 1340 if (r > 0) 1341 return; 1342 1343 r = policy_map(cache->policy, block, true, can_migrate, discarded_block, 1344 bio, &lookup_result); 1345 1346 if (r == -EWOULDBLOCK) 1347 /* migration has been denied */ 1348 lookup_result.op = POLICY_MISS; 1349 1350 switch (lookup_result.op) { 1351 case POLICY_HIT: 1352 if (passthrough) { 1353 inc_miss_counter(cache, bio); 1354 1355 /* 1356 * Passthrough always maps to the origin, 1357 * invalidating any cache blocks that are written 1358 * to. 
1359 */ 1360 1361 if (bio_data_dir(bio) == WRITE) { 1362 atomic_inc(&cache->stats.demotion); 1363 invalidate(cache, structs, block, lookup_result.cblock, new_ocell); 1364 release_cell = false; 1365 1366 } else { 1367 /* FIXME: factor out issue_origin() */ 1368 pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); 1369 remap_to_origin_clear_discard(cache, bio, block); 1370 issue(cache, bio); 1371 } 1372 } else { 1373 inc_hit_counter(cache, bio); 1374 1375 if (bio_data_dir(bio) == WRITE && 1376 writethrough_mode(&cache->features) && 1377 !is_dirty(cache, lookup_result.cblock)) { 1378 pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); 1379 remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock); 1380 issue(cache, bio); 1381 } else 1382 issue_cache_bio(cache, bio, pb, block, lookup_result.cblock); 1383 } 1384 1385 break; 1386 1387 case POLICY_MISS: 1388 inc_miss_counter(cache, bio); 1389 pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); 1390 remap_to_origin_clear_discard(cache, bio, block); 1391 issue(cache, bio); 1392 break; 1393 1394 case POLICY_NEW: 1395 atomic_inc(&cache->stats.promotion); 1396 promote(cache, structs, block, lookup_result.cblock, new_ocell); 1397 release_cell = false; 1398 break; 1399 1400 case POLICY_REPLACE: 1401 cell_prealloc = prealloc_get_cell(structs); 1402 r = bio_detain(cache, lookup_result.old_oblock, bio, cell_prealloc, 1403 (cell_free_fn) prealloc_put_cell, 1404 structs, &old_ocell); 1405 if (r > 0) { 1406 /* 1407 * We have to be careful to avoid lock inversion of 1408 * the cells. So we back off, and wait for the 1409 * old_ocell to become free. 1410 */ 1411 policy_force_mapping(cache->policy, block, 1412 lookup_result.old_oblock); 1413 atomic_inc(&cache->stats.cache_cell_clash); 1414 break; 1415 } 1416 atomic_inc(&cache->stats.demotion); 1417 atomic_inc(&cache->stats.promotion); 1418 1419 demote_then_promote(cache, structs, lookup_result.old_oblock, 1420 block, lookup_result.cblock, 1421 old_ocell, new_ocell); 1422 release_cell = false; 1423 break; 1424 1425 default: 1426 DMERR_LIMIT("%s: erroring bio, unknown policy op: %u", __func__, 1427 (unsigned) lookup_result.op); 1428 bio_io_error(bio); 1429 } 1430 1431 if (release_cell) 1432 cell_defer(cache, new_ocell, false); 1433 } 1434 1435 static int need_commit_due_to_time(struct cache *cache) 1436 { 1437 return jiffies < cache->last_commit_jiffies || 1438 jiffies > cache->last_commit_jiffies + COMMIT_PERIOD; 1439 } 1440 1441 static int commit_if_needed(struct cache *cache) 1442 { 1443 int r = 0; 1444 1445 if ((cache->commit_requested || need_commit_due_to_time(cache)) && 1446 dm_cache_changed_this_transaction(cache->cmd)) { 1447 atomic_inc(&cache->stats.commit_count); 1448 cache->commit_requested = false; 1449 r = dm_cache_commit(cache->cmd, false); 1450 cache->last_commit_jiffies = jiffies; 1451 } 1452 1453 return r; 1454 } 1455 1456 static void process_deferred_bios(struct cache *cache) 1457 { 1458 unsigned long flags; 1459 struct bio_list bios; 1460 struct bio *bio; 1461 struct prealloc structs; 1462 1463 memset(&structs, 0, sizeof(structs)); 1464 bio_list_init(&bios); 1465 1466 spin_lock_irqsave(&cache->lock, flags); 1467 bio_list_merge(&bios, &cache->deferred_bios); 1468 bio_list_init(&cache->deferred_bios); 1469 spin_unlock_irqrestore(&cache->lock, flags); 1470 1471 while (!bio_list_empty(&bios)) { 1472 /* 1473 * If we've got no free migration structs, and processing 1474 * this bio might require one, we pause until there are some 1475 * prepared mappings to process. 
1476 */ 1477 if (prealloc_data_structs(cache, &structs)) { 1478 spin_lock_irqsave(&cache->lock, flags); 1479 bio_list_merge(&cache->deferred_bios, &bios); 1480 spin_unlock_irqrestore(&cache->lock, flags); 1481 break; 1482 } 1483 1484 bio = bio_list_pop(&bios); 1485 1486 if (bio->bi_rw & REQ_FLUSH) 1487 process_flush_bio(cache, bio); 1488 else if (bio->bi_rw & REQ_DISCARD) 1489 process_discard_bio(cache, bio); 1490 else 1491 process_bio(cache, &structs, bio); 1492 } 1493 1494 prealloc_free_structs(cache, &structs); 1495 } 1496 1497 static void process_deferred_flush_bios(struct cache *cache, bool submit_bios) 1498 { 1499 unsigned long flags; 1500 struct bio_list bios; 1501 struct bio *bio; 1502 1503 bio_list_init(&bios); 1504 1505 spin_lock_irqsave(&cache->lock, flags); 1506 bio_list_merge(&bios, &cache->deferred_flush_bios); 1507 bio_list_init(&cache->deferred_flush_bios); 1508 spin_unlock_irqrestore(&cache->lock, flags); 1509 1510 while ((bio = bio_list_pop(&bios))) 1511 submit_bios ? generic_make_request(bio) : bio_io_error(bio); 1512 } 1513 1514 static void process_deferred_writethrough_bios(struct cache *cache) 1515 { 1516 unsigned long flags; 1517 struct bio_list bios; 1518 struct bio *bio; 1519 1520 bio_list_init(&bios); 1521 1522 spin_lock_irqsave(&cache->lock, flags); 1523 bio_list_merge(&bios, &cache->deferred_writethrough_bios); 1524 bio_list_init(&cache->deferred_writethrough_bios); 1525 spin_unlock_irqrestore(&cache->lock, flags); 1526 1527 while ((bio = bio_list_pop(&bios))) 1528 generic_make_request(bio); 1529 } 1530 1531 static void writeback_some_dirty_blocks(struct cache *cache) 1532 { 1533 int r = 0; 1534 dm_oblock_t oblock; 1535 dm_cblock_t cblock; 1536 struct prealloc structs; 1537 struct dm_bio_prison_cell *old_ocell; 1538 1539 memset(&structs, 0, sizeof(structs)); 1540 1541 while (spare_migration_bandwidth(cache)) { 1542 if (prealloc_data_structs(cache, &structs)) 1543 break; 1544 1545 r = policy_writeback_work(cache->policy, &oblock, &cblock); 1546 if (r) 1547 break; 1548 1549 r = get_cell(cache, oblock, &structs, &old_ocell); 1550 if (r) { 1551 policy_set_dirty(cache->policy, oblock); 1552 break; 1553 } 1554 1555 writeback(cache, &structs, oblock, cblock, old_ocell); 1556 } 1557 1558 prealloc_free_structs(cache, &structs); 1559 } 1560 1561 /*---------------------------------------------------------------- 1562 * Invalidations. 1563 * Dropping something from the cache *without* writing back. 
1564 *--------------------------------------------------------------*/ 1565 1566 static void process_invalidation_request(struct cache *cache, struct invalidation_request *req) 1567 { 1568 int r = 0; 1569 uint64_t begin = from_cblock(req->cblocks->begin); 1570 uint64_t end = from_cblock(req->cblocks->end); 1571 1572 while (begin != end) { 1573 r = policy_remove_cblock(cache->policy, to_cblock(begin)); 1574 if (!r) { 1575 r = dm_cache_remove_mapping(cache->cmd, to_cblock(begin)); 1576 if (r) 1577 break; 1578 1579 } else if (r == -ENODATA) { 1580 /* harmless, already unmapped */ 1581 r = 0; 1582 1583 } else { 1584 DMERR("policy_remove_cblock failed"); 1585 break; 1586 } 1587 1588 begin++; 1589 } 1590 1591 cache->commit_requested = true; 1592 1593 req->err = r; 1594 atomic_set(&req->complete, 1); 1595 1596 wake_up(&req->result_wait); 1597 } 1598 1599 static void process_invalidation_requests(struct cache *cache) 1600 { 1601 struct list_head list; 1602 struct invalidation_request *req, *tmp; 1603 1604 INIT_LIST_HEAD(&list); 1605 spin_lock(&cache->invalidation_lock); 1606 list_splice_init(&cache->invalidation_requests, &list); 1607 spin_unlock(&cache->invalidation_lock); 1608 1609 list_for_each_entry_safe (req, tmp, &list, list) 1610 process_invalidation_request(cache, req); 1611 } 1612 1613 /*---------------------------------------------------------------- 1614 * Main worker loop 1615 *--------------------------------------------------------------*/ 1616 static bool is_quiescing(struct cache *cache) 1617 { 1618 return atomic_read(&cache->quiescing); 1619 } 1620 1621 static void ack_quiescing(struct cache *cache) 1622 { 1623 if (is_quiescing(cache)) { 1624 atomic_inc(&cache->quiescing_ack); 1625 wake_up(&cache->quiescing_wait); 1626 } 1627 } 1628 1629 static void wait_for_quiescing_ack(struct cache *cache) 1630 { 1631 wait_event(cache->quiescing_wait, atomic_read(&cache->quiescing_ack)); 1632 } 1633 1634 static void start_quiescing(struct cache *cache) 1635 { 1636 atomic_inc(&cache->quiescing); 1637 wait_for_quiescing_ack(cache); 1638 } 1639 1640 static void stop_quiescing(struct cache *cache) 1641 { 1642 atomic_set(&cache->quiescing, 0); 1643 atomic_set(&cache->quiescing_ack, 0); 1644 } 1645 1646 static void wait_for_migrations(struct cache *cache) 1647 { 1648 wait_event(cache->migration_wait, !atomic_read(&cache->nr_migrations)); 1649 } 1650 1651 static void stop_worker(struct cache *cache) 1652 { 1653 cancel_delayed_work(&cache->waker); 1654 flush_workqueue(cache->wq); 1655 } 1656 1657 static void requeue_deferred_io(struct cache *cache) 1658 { 1659 struct bio *bio; 1660 struct bio_list bios; 1661 1662 bio_list_init(&bios); 1663 bio_list_merge(&bios, &cache->deferred_bios); 1664 bio_list_init(&cache->deferred_bios); 1665 1666 while ((bio = bio_list_pop(&bios))) 1667 bio_endio(bio, DM_ENDIO_REQUEUE); 1668 } 1669 1670 static int more_work(struct cache *cache) 1671 { 1672 if (is_quiescing(cache)) 1673 return !list_empty(&cache->quiesced_migrations) || 1674 !list_empty(&cache->completed_migrations) || 1675 !list_empty(&cache->need_commit_migrations); 1676 else 1677 return !bio_list_empty(&cache->deferred_bios) || 1678 !bio_list_empty(&cache->deferred_flush_bios) || 1679 !bio_list_empty(&cache->deferred_writethrough_bios) || 1680 !list_empty(&cache->quiesced_migrations) || 1681 !list_empty(&cache->completed_migrations) || 1682 !list_empty(&cache->need_commit_migrations) || 1683 cache->invalidate; 1684 } 1685 1686 static void do_worker(struct work_struct *ws) 1687 { 1688 struct cache *cache = 
container_of(ws, struct cache, worker); 1689 1690 do { 1691 if (!is_quiescing(cache)) { 1692 writeback_some_dirty_blocks(cache); 1693 process_deferred_writethrough_bios(cache); 1694 process_deferred_bios(cache); 1695 process_invalidation_requests(cache); 1696 } 1697 1698 process_migrations(cache, &cache->quiesced_migrations, issue_copy); 1699 process_migrations(cache, &cache->completed_migrations, complete_migration); 1700 1701 if (commit_if_needed(cache)) { 1702 process_deferred_flush_bios(cache, false); 1703 1704 /* 1705 * FIXME: rollback metadata or just go into a 1706 * failure mode and error everything 1707 */ 1708 } else { 1709 process_deferred_flush_bios(cache, true); 1710 process_migrations(cache, &cache->need_commit_migrations, 1711 migration_success_post_commit); 1712 } 1713 1714 ack_quiescing(cache); 1715 1716 } while (more_work(cache)); 1717 } 1718 1719 /* 1720 * We want to commit periodically so that not too much 1721 * unwritten metadata builds up. 1722 */ 1723 static void do_waker(struct work_struct *ws) 1724 { 1725 struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker); 1726 policy_tick(cache->policy); 1727 wake_worker(cache); 1728 queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD); 1729 } 1730 1731 /*----------------------------------------------------------------*/ 1732 1733 static int is_congested(struct dm_dev *dev, int bdi_bits) 1734 { 1735 struct request_queue *q = bdev_get_queue(dev->bdev); 1736 return bdi_congested(&q->backing_dev_info, bdi_bits); 1737 } 1738 1739 static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits) 1740 { 1741 struct cache *cache = container_of(cb, struct cache, callbacks); 1742 1743 return is_congested(cache->origin_dev, bdi_bits) || 1744 is_congested(cache->cache_dev, bdi_bits); 1745 } 1746 1747 /*---------------------------------------------------------------- 1748 * Target methods 1749 *--------------------------------------------------------------*/ 1750 1751 /* 1752 * This function gets called on the error paths of the constructor, so we 1753 * have to cope with a partially initialised struct. 
 */
static void destroy(struct cache *cache)
{
	unsigned i;

	if (cache->next_migration)
		mempool_free(cache->next_migration, cache->migration_pool);

	if (cache->migration_pool)
		mempool_destroy(cache->migration_pool);

	if (cache->all_io_ds)
		dm_deferred_set_destroy(cache->all_io_ds);

	if (cache->prison)
		dm_bio_prison_destroy(cache->prison);

	if (cache->wq)
		destroy_workqueue(cache->wq);

	if (cache->dirty_bitset)
		free_bitset(cache->dirty_bitset);

	if (cache->discard_bitset)
		free_bitset(cache->discard_bitset);

	if (cache->copier)
		dm_kcopyd_client_destroy(cache->copier);

	if (cache->cmd)
		dm_cache_metadata_close(cache->cmd);

	if (cache->metadata_dev)
		dm_put_device(cache->ti, cache->metadata_dev);

	if (cache->origin_dev)
		dm_put_device(cache->ti, cache->origin_dev);

	if (cache->cache_dev)
		dm_put_device(cache->ti, cache->cache_dev);

	if (cache->policy)
		dm_cache_policy_destroy(cache->policy);

	for (i = 0; i < cache->nr_ctr_args; i++)
		kfree(cache->ctr_args[i]);
	kfree(cache->ctr_args);

	kfree(cache);
}

static void cache_dtr(struct dm_target *ti)
{
	struct cache *cache = ti->private;

	destroy(cache);
}

static sector_t get_dev_size(struct dm_dev *dev)
{
	return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
}

/*----------------------------------------------------------------*/

/*
 * Construct a cache device mapping.
 *
 * cache <metadata dev> <cache dev> <origin dev> <block size>
 *       <#feature args> [<feature arg>]*
 *       <policy> <#policy args> [<policy arg>]*
 *
 * metadata dev   : fast device holding the persistent metadata
 * cache dev      : fast device holding cached data blocks
 * origin dev     : slow device holding original data blocks
 * block size     : cache unit size in sectors
 *
 * #feature args  : number of feature arguments passed
 * feature args   : writethrough. (The default is writeback.)
 *
 * policy         : the replacement policy to use
 * #policy args   : an even number of policy arguments corresponding
 *                  to key/value pairs passed to the policy
 * policy args    : key/value pairs passed to the policy
 *                  E.g. 'sequential_threshold 1024'
 *                  See cache-policies.txt for details.
 *
 * Optional feature arguments are:
 *   writethrough : write through caching that prohibits cache block
 *                  content from being different from origin block content.
 *                  Without this argument, the default behaviour is to write
 *                  back cache block contents later for performance reasons,
 *                  so they may differ from the corresponding origin blocks.
 */
struct cache_args {
	struct dm_target *ti;

	struct dm_dev *metadata_dev;

	struct dm_dev *cache_dev;
	sector_t cache_sectors;

	struct dm_dev *origin_dev;
	sector_t origin_sectors;

	uint32_t block_size;

	const char *policy_name;
	int policy_argc;
	const char **policy_argv;

	struct cache_features features;
};

static void destroy_cache_args(struct cache_args *ca)
{
	if (ca->metadata_dev)
		dm_put_device(ca->ti, ca->metadata_dev);

	if (ca->cache_dev)
		dm_put_device(ca->ti, ca->cache_dev);

	if (ca->origin_dev)
		dm_put_device(ca->ti, ca->origin_dev);

	kfree(ca);
}

static bool at_least_one_arg(struct dm_arg_set *as, char **error)
{
	if (!as->argc) {
		*error = "Insufficient args";
		return false;
	}

	return true;
}

static int parse_metadata_dev(struct cache_args *ca, struct dm_arg_set *as,
			      char **error)
{
	int r;
	sector_t metadata_dev_size;
	char b[BDEVNAME_SIZE];

	if (!at_least_one_arg(as, error))
		return -EINVAL;

	r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
			  &ca->metadata_dev);
	if (r) {
		*error = "Error opening metadata device";
		return r;
	}

	metadata_dev_size = get_dev_size(ca->metadata_dev);
	if (metadata_dev_size > DM_CACHE_METADATA_MAX_SECTORS_WARNING)
		DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
		       bdevname(ca->metadata_dev->bdev, b),
		       DM_CACHE_METADATA_MAX_SECTORS_WARNING);

	return 0;
}

static int parse_cache_dev(struct cache_args *ca, struct dm_arg_set *as,
			   char **error)
{
	int r;

	if (!at_least_one_arg(as, error))
		return -EINVAL;

	r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
			  &ca->cache_dev);
	if (r) {
		*error = "Error opening cache device";
		return r;
	}
	ca->cache_sectors = get_dev_size(ca->cache_dev);

	return 0;
}

static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as,
			    char **error)
{
	int r;

	if (!at_least_one_arg(as, error))
		return -EINVAL;

	r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
			  &ca->origin_dev);
	if (r) {
		*error = "Error opening origin device";
		return r;
	}

	ca->origin_sectors = get_dev_size(ca->origin_dev);
	if (ca->ti->len > ca->origin_sectors) {
		*error = "Device size larger than cached device";
		return -EINVAL;
	}

	return 0;
}

static int parse_block_size(struct cache_args *ca, struct dm_arg_set *as,
			    char **error)
{
	unsigned long block_size;

	if (!at_least_one_arg(as, error))
		return -EINVAL;

	if (kstrtoul(dm_shift_arg(as), 10, &block_size) || !block_size ||
	    block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
	    block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
	    block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
		*error = "Invalid data block size";
		return -EINVAL;
	}

	if (block_size > ca->cache_sectors) {
		*error = "Data block size is larger than the cache device";
		return -EINVAL;
	}

	ca->block_size = block_size;

	return 0;
}

static void init_features(struct cache_features *cf)
{
	cf->mode = CM_WRITE;
	cf->io_mode =
CM_IO_WRITEBACK; 1990 } 1991 1992 static int parse_features(struct cache_args *ca, struct dm_arg_set *as, 1993 char **error) 1994 { 1995 static struct dm_arg _args[] = { 1996 {0, 1, "Invalid number of cache feature arguments"}, 1997 }; 1998 1999 int r; 2000 unsigned argc; 2001 const char *arg; 2002 struct cache_features *cf = &ca->features; 2003 2004 init_features(cf); 2005 2006 r = dm_read_arg_group(_args, as, &argc, error); 2007 if (r) 2008 return -EINVAL; 2009 2010 while (argc--) { 2011 arg = dm_shift_arg(as); 2012 2013 if (!strcasecmp(arg, "writeback")) 2014 cf->io_mode = CM_IO_WRITEBACK; 2015 2016 else if (!strcasecmp(arg, "writethrough")) 2017 cf->io_mode = CM_IO_WRITETHROUGH; 2018 2019 else if (!strcasecmp(arg, "passthrough")) 2020 cf->io_mode = CM_IO_PASSTHROUGH; 2021 2022 else { 2023 *error = "Unrecognised cache feature requested"; 2024 return -EINVAL; 2025 } 2026 } 2027 2028 return 0; 2029 } 2030 2031 static int parse_policy(struct cache_args *ca, struct dm_arg_set *as, 2032 char **error) 2033 { 2034 static struct dm_arg _args[] = { 2035 {0, 1024, "Invalid number of policy arguments"}, 2036 }; 2037 2038 int r; 2039 2040 if (!at_least_one_arg(as, error)) 2041 return -EINVAL; 2042 2043 ca->policy_name = dm_shift_arg(as); 2044 2045 r = dm_read_arg_group(_args, as, &ca->policy_argc, error); 2046 if (r) 2047 return -EINVAL; 2048 2049 ca->policy_argv = (const char **)as->argv; 2050 dm_consume_args(as, ca->policy_argc); 2051 2052 return 0; 2053 } 2054 2055 static int parse_cache_args(struct cache_args *ca, int argc, char **argv, 2056 char **error) 2057 { 2058 int r; 2059 struct dm_arg_set as; 2060 2061 as.argc = argc; 2062 as.argv = argv; 2063 2064 r = parse_metadata_dev(ca, &as, error); 2065 if (r) 2066 return r; 2067 2068 r = parse_cache_dev(ca, &as, error); 2069 if (r) 2070 return r; 2071 2072 r = parse_origin_dev(ca, &as, error); 2073 if (r) 2074 return r; 2075 2076 r = parse_block_size(ca, &as, error); 2077 if (r) 2078 return r; 2079 2080 r = parse_features(ca, &as, error); 2081 if (r) 2082 return r; 2083 2084 r = parse_policy(ca, &as, error); 2085 if (r) 2086 return r; 2087 2088 return 0; 2089 } 2090 2091 /*----------------------------------------------------------------*/ 2092 2093 static struct kmem_cache *migration_cache; 2094 2095 #define NOT_CORE_OPTION 1 2096 2097 static int process_config_option(struct cache *cache, const char *key, const char *value) 2098 { 2099 unsigned long tmp; 2100 2101 if (!strcasecmp(key, "migration_threshold")) { 2102 if (kstrtoul(value, 10, &tmp)) 2103 return -EINVAL; 2104 2105 cache->migration_threshold = tmp; 2106 return 0; 2107 } 2108 2109 return NOT_CORE_OPTION; 2110 } 2111 2112 static int set_config_value(struct cache *cache, const char *key, const char *value) 2113 { 2114 int r = process_config_option(cache, key, value); 2115 2116 if (r == NOT_CORE_OPTION) 2117 r = policy_set_config_value(cache->policy, key, value); 2118 2119 if (r) 2120 DMWARN("bad config value for %s: %s", key, value); 2121 2122 return r; 2123 } 2124 2125 static int set_config_values(struct cache *cache, int argc, const char **argv) 2126 { 2127 int r = 0; 2128 2129 if (argc & 1) { 2130 DMWARN("Odd number of policy arguments given but they should be <key> <value> pairs."); 2131 return -EINVAL; 2132 } 2133 2134 while (argc) { 2135 r = set_config_value(cache, argv[0], argv[1]); 2136 if (r) 2137 break; 2138 2139 argc -= 2; 2140 argv += 2; 2141 } 2142 2143 return r; 2144 } 2145 2146 static int create_cache_policy(struct cache *cache, struct cache_args *ca, 2147 char **error) 
2148 { 2149 struct dm_cache_policy *p = dm_cache_policy_create(ca->policy_name, 2150 cache->cache_size, 2151 cache->origin_sectors, 2152 cache->sectors_per_block); 2153 if (IS_ERR(p)) { 2154 *error = "Error creating cache's policy"; 2155 return PTR_ERR(p); 2156 } 2157 cache->policy = p; 2158 2159 return 0; 2160 } 2161 2162 /* 2163 * We want the discard block size to be a power of two, at least the size 2164 * of the cache block size, and have no more than 2^14 discard blocks 2165 * across the origin. 2166 */ 2167 #define MAX_DISCARD_BLOCKS (1 << 14) 2168 2169 static bool too_many_discard_blocks(sector_t discard_block_size, 2170 sector_t origin_size) 2171 { 2172 (void) sector_div(origin_size, discard_block_size); 2173 2174 return origin_size > MAX_DISCARD_BLOCKS; 2175 } 2176 2177 static sector_t calculate_discard_block_size(sector_t cache_block_size, 2178 sector_t origin_size) 2179 { 2180 sector_t discard_block_size; 2181 2182 discard_block_size = roundup_pow_of_two(cache_block_size); 2183 2184 if (origin_size) 2185 while (too_many_discard_blocks(discard_block_size, origin_size)) 2186 discard_block_size *= 2; 2187 2188 return discard_block_size; 2189 } 2190 2191 #define DEFAULT_MIGRATION_THRESHOLD 2048 2192 2193 static int cache_create(struct cache_args *ca, struct cache **result) 2194 { 2195 int r = 0; 2196 char **error = &ca->ti->error; 2197 struct cache *cache; 2198 struct dm_target *ti = ca->ti; 2199 dm_block_t origin_blocks; 2200 struct dm_cache_metadata *cmd; 2201 bool may_format = ca->features.mode == CM_WRITE; 2202 2203 cache = kzalloc(sizeof(*cache), GFP_KERNEL); 2204 if (!cache) 2205 return -ENOMEM; 2206 2207 cache->ti = ca->ti; 2208 ti->private = cache; 2209 ti->num_flush_bios = 2; 2210 ti->flush_supported = true; 2211 2212 ti->num_discard_bios = 1; 2213 ti->discards_supported = true; 2214 ti->discard_zeroes_data_unsupported = true; 2215 2216 cache->features = ca->features; 2217 ti->per_bio_data_size = get_per_bio_data_size(cache); 2218 2219 cache->callbacks.congested_fn = cache_is_congested; 2220 dm_table_add_target_callbacks(ti->table, &cache->callbacks); 2221 2222 cache->metadata_dev = ca->metadata_dev; 2223 cache->origin_dev = ca->origin_dev; 2224 cache->cache_dev = ca->cache_dev; 2225 2226 ca->metadata_dev = ca->origin_dev = ca->cache_dev = NULL; 2227 2228 /* FIXME: factor out this whole section */ 2229 origin_blocks = cache->origin_sectors = ca->origin_sectors; 2230 origin_blocks = block_div(origin_blocks, ca->block_size); 2231 cache->origin_blocks = to_oblock(origin_blocks); 2232 2233 cache->sectors_per_block = ca->block_size; 2234 if (dm_set_target_max_io_len(ti, cache->sectors_per_block)) { 2235 r = -EINVAL; 2236 goto bad; 2237 } 2238 2239 if (ca->block_size & (ca->block_size - 1)) { 2240 dm_block_t cache_size = ca->cache_sectors; 2241 2242 cache->sectors_per_block_shift = -1; 2243 cache_size = block_div(cache_size, ca->block_size); 2244 cache->cache_size = to_cblock(cache_size); 2245 } else { 2246 cache->sectors_per_block_shift = __ffs(ca->block_size); 2247 cache->cache_size = to_cblock(ca->cache_sectors >> cache->sectors_per_block_shift); 2248 } 2249 2250 r = create_cache_policy(cache, ca, error); 2251 if (r) 2252 goto bad; 2253 2254 cache->policy_nr_args = ca->policy_argc; 2255 cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD; 2256 2257 r = set_config_values(cache, ca->policy_argc, ca->policy_argv); 2258 if (r) { 2259 *error = "Error setting cache policy's config values"; 2260 goto bad; 2261 } 2262 2263 cmd = dm_cache_metadata_open(cache->metadata_dev->bdev, 
2264 ca->block_size, may_format, 2265 dm_cache_policy_get_hint_size(cache->policy)); 2266 if (IS_ERR(cmd)) { 2267 *error = "Error creating metadata object"; 2268 r = PTR_ERR(cmd); 2269 goto bad; 2270 } 2271 cache->cmd = cmd; 2272 2273 if (passthrough_mode(&cache->features)) { 2274 bool all_clean; 2275 2276 r = dm_cache_metadata_all_clean(cache->cmd, &all_clean); 2277 if (r) { 2278 *error = "dm_cache_metadata_all_clean() failed"; 2279 goto bad; 2280 } 2281 2282 if (!all_clean) { 2283 *error = "Cannot enter passthrough mode unless all blocks are clean"; 2284 r = -EINVAL; 2285 goto bad; 2286 } 2287 } 2288 2289 spin_lock_init(&cache->lock); 2290 bio_list_init(&cache->deferred_bios); 2291 bio_list_init(&cache->deferred_flush_bios); 2292 bio_list_init(&cache->deferred_writethrough_bios); 2293 INIT_LIST_HEAD(&cache->quiesced_migrations); 2294 INIT_LIST_HEAD(&cache->completed_migrations); 2295 INIT_LIST_HEAD(&cache->need_commit_migrations); 2296 atomic_set(&cache->nr_migrations, 0); 2297 init_waitqueue_head(&cache->migration_wait); 2298 2299 init_waitqueue_head(&cache->quiescing_wait); 2300 atomic_set(&cache->quiescing, 0); 2301 atomic_set(&cache->quiescing_ack, 0); 2302 2303 r = -ENOMEM; 2304 cache->nr_dirty = 0; 2305 cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size)); 2306 if (!cache->dirty_bitset) { 2307 *error = "could not allocate dirty bitset"; 2308 goto bad; 2309 } 2310 clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size)); 2311 2312 cache->discard_block_size = 2313 calculate_discard_block_size(cache->sectors_per_block, 2314 cache->origin_sectors); 2315 cache->discard_nr_blocks = oblock_to_dblock(cache, cache->origin_blocks); 2316 cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks)); 2317 if (!cache->discard_bitset) { 2318 *error = "could not allocate discard bitset"; 2319 goto bad; 2320 } 2321 clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks)); 2322 2323 cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle); 2324 if (IS_ERR(cache->copier)) { 2325 *error = "could not create kcopyd client"; 2326 r = PTR_ERR(cache->copier); 2327 goto bad; 2328 } 2329 2330 cache->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM); 2331 if (!cache->wq) { 2332 *error = "could not create workqueue for metadata object"; 2333 goto bad; 2334 } 2335 INIT_WORK(&cache->worker, do_worker); 2336 INIT_DELAYED_WORK(&cache->waker, do_waker); 2337 cache->last_commit_jiffies = jiffies; 2338 2339 cache->prison = dm_bio_prison_create(PRISON_CELLS); 2340 if (!cache->prison) { 2341 *error = "could not create bio prison"; 2342 goto bad; 2343 } 2344 2345 cache->all_io_ds = dm_deferred_set_create(); 2346 if (!cache->all_io_ds) { 2347 *error = "could not create all_io deferred set"; 2348 goto bad; 2349 } 2350 2351 cache->migration_pool = mempool_create_slab_pool(MIGRATION_POOL_SIZE, 2352 migration_cache); 2353 if (!cache->migration_pool) { 2354 *error = "Error creating cache's migration mempool"; 2355 goto bad; 2356 } 2357 2358 cache->next_migration = NULL; 2359 2360 cache->need_tick_bio = true; 2361 cache->sized = false; 2362 cache->invalidate = false; 2363 cache->commit_requested = false; 2364 cache->loaded_mappings = false; 2365 cache->loaded_discards = false; 2366 2367 load_stats(cache); 2368 2369 atomic_set(&cache->stats.demotion, 0); 2370 atomic_set(&cache->stats.promotion, 0); 2371 atomic_set(&cache->stats.copies_avoided, 0); 2372 atomic_set(&cache->stats.cache_cell_clash, 0); 2373 atomic_set(&cache->stats.commit_count, 0); 2374 
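	/*
	 * load_stats() above takes care of the hit/miss counters; the
	 * counters reset here simply start from zero on every activation.
	 */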
	atomic_set(&cache->stats.discard_count, 0);

	spin_lock_init(&cache->invalidation_lock);
	INIT_LIST_HEAD(&cache->invalidation_requests);

	*result = cache;
	return 0;

bad:
	destroy(cache);
	return r;
}

static int copy_ctr_args(struct cache *cache, int argc, const char **argv)
{
	unsigned i;
	const char **copy;

	copy = kcalloc(argc, sizeof(*copy), GFP_KERNEL);
	if (!copy)
		return -ENOMEM;
	for (i = 0; i < argc; i++) {
		copy[i] = kstrdup(argv[i], GFP_KERNEL);
		if (!copy[i]) {
			while (i--)
				kfree(copy[i]);
			kfree(copy);
			return -ENOMEM;
		}
	}

	cache->nr_ctr_args = argc;
	cache->ctr_args = copy;

	return 0;
}

static int cache_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	int r = -EINVAL;
	struct cache_args *ca;
	struct cache *cache = NULL;

	ca = kzalloc(sizeof(*ca), GFP_KERNEL);
	if (!ca) {
		ti->error = "Error allocating memory for cache";
		return -ENOMEM;
	}
	ca->ti = ti;

	r = parse_cache_args(ca, argc, argv, &ti->error);
	if (r)
		goto out;

	r = cache_create(ca, &cache);
	if (r)
		goto out;

	r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3);
	if (r) {
		destroy(cache);
		goto out;
	}

	ti->private = cache;

out:
	destroy_cache_args(ca);
	return r;
}

static int cache_map(struct dm_target *ti, struct bio *bio)
{
	struct cache *cache = ti->private;

	int r;
	dm_oblock_t block = get_bio_block(cache, bio);
	size_t pb_data_size = get_per_bio_data_size(cache);
	bool can_migrate = false;
	bool discarded_block;
	struct dm_bio_prison_cell *cell;
	struct policy_result lookup_result;
	struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size);

	/*
	 * Per-bio data is initialised up front so that cache_end_io() sees
	 * valid fields even when we take one of the early returns below.
	 */
	if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) {
		/*
		 * This can only occur if the io goes to a partial block at
		 * the end of the origin device.  We don't cache these.
		 * Just remap to the origin and carry on (this block has no
		 * entry in the discard bitset, so don't try to clear one).
		 */
		remap_to_origin(cache, bio);
		return DM_MAPIO_REMAPPED;
	}

	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) {
		defer_bio(cache, bio);
		return DM_MAPIO_SUBMITTED;
	}

	/*
	 * Check to see if that block is currently migrating.
	 */
	cell = alloc_prison_cell(cache);
	if (!cell) {
		defer_bio(cache, bio);
		return DM_MAPIO_SUBMITTED;
	}

	r = bio_detain(cache, block, bio, cell,
		       (cell_free_fn) free_prison_cell,
		       cache, &cell);
	if (r) {
		if (r < 0)
			defer_bio(cache, bio);

		return DM_MAPIO_SUBMITTED;
	}

	discarded_block = is_discarded_oblock(cache, block);

	r = policy_map(cache->policy, block, false, can_migrate, discarded_block,
		       bio, &lookup_result);
	if (r == -EWOULDBLOCK) {
		cell_defer(cache, cell, true);
		return DM_MAPIO_SUBMITTED;

	} else if (r) {
		DMERR_LIMIT("Unexpected return from cache replacement policy: %d", r);
		bio_io_error(bio);
		return DM_MAPIO_SUBMITTED;
	}

	r = DM_MAPIO_REMAPPED;
	switch (lookup_result.op) {
	case POLICY_HIT:
		if (passthrough_mode(&cache->features)) {
			if (bio_data_dir(bio) == WRITE) {
				/*
				 * We need to invalidate this block, so
				 * defer for the worker thread.
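				 * (In passthrough mode the cache must never
				 * hold data newer than the origin, so a write
				 * hit invalidates the cblock before the write
				 * goes to the origin.)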
2516 */ 2517 cell_defer(cache, cell, true); 2518 r = DM_MAPIO_SUBMITTED; 2519 2520 } else { 2521 pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); 2522 inc_miss_counter(cache, bio); 2523 remap_to_origin_clear_discard(cache, bio, block); 2524 2525 cell_defer(cache, cell, false); 2526 } 2527 2528 } else { 2529 inc_hit_counter(cache, bio); 2530 2531 if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) && 2532 !is_dirty(cache, lookup_result.cblock)) 2533 remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock); 2534 else 2535 remap_to_cache_dirty(cache, bio, block, lookup_result.cblock); 2536 2537 cell_defer(cache, cell, false); 2538 } 2539 break; 2540 2541 case POLICY_MISS: 2542 inc_miss_counter(cache, bio); 2543 pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); 2544 2545 if (pb->req_nr != 0) { 2546 /* 2547 * This is a duplicate writethrough io that is no 2548 * longer needed because the block has been demoted. 2549 */ 2550 bio_endio(bio, 0); 2551 cell_defer(cache, cell, false); 2552 return DM_MAPIO_SUBMITTED; 2553 } else { 2554 remap_to_origin_clear_discard(cache, bio, block); 2555 cell_defer(cache, cell, false); 2556 } 2557 break; 2558 2559 default: 2560 DMERR_LIMIT("%s: erroring bio: unknown policy op: %u", __func__, 2561 (unsigned) lookup_result.op); 2562 bio_io_error(bio); 2563 r = DM_MAPIO_SUBMITTED; 2564 } 2565 2566 return r; 2567 } 2568 2569 static int cache_end_io(struct dm_target *ti, struct bio *bio, int error) 2570 { 2571 struct cache *cache = ti->private; 2572 unsigned long flags; 2573 size_t pb_data_size = get_per_bio_data_size(cache); 2574 struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); 2575 2576 if (pb->tick) { 2577 policy_tick(cache->policy); 2578 2579 spin_lock_irqsave(&cache->lock, flags); 2580 cache->need_tick_bio = true; 2581 spin_unlock_irqrestore(&cache->lock, flags); 2582 } 2583 2584 check_for_quiesced_migrations(cache, pb); 2585 2586 return 0; 2587 } 2588 2589 static int write_dirty_bitset(struct cache *cache) 2590 { 2591 unsigned i, r; 2592 2593 for (i = 0; i < from_cblock(cache->cache_size); i++) { 2594 r = dm_cache_set_dirty(cache->cmd, to_cblock(i), 2595 is_dirty(cache, to_cblock(i))); 2596 if (r) 2597 return r; 2598 } 2599 2600 return 0; 2601 } 2602 2603 static int write_discard_bitset(struct cache *cache) 2604 { 2605 unsigned i, r; 2606 2607 r = dm_cache_discard_bitset_resize(cache->cmd, cache->discard_block_size, 2608 cache->discard_nr_blocks); 2609 if (r) { 2610 DMERR("could not resize on-disk discard bitset"); 2611 return r; 2612 } 2613 2614 for (i = 0; i < from_dblock(cache->discard_nr_blocks); i++) { 2615 r = dm_cache_set_discard(cache->cmd, to_dblock(i), 2616 is_discarded(cache, to_dblock(i))); 2617 if (r) 2618 return r; 2619 } 2620 2621 return 0; 2622 } 2623 2624 static int save_hint(void *context, dm_cblock_t cblock, dm_oblock_t oblock, 2625 uint32_t hint) 2626 { 2627 struct cache *cache = context; 2628 return dm_cache_save_hint(cache->cmd, cblock, hint); 2629 } 2630 2631 static int write_hints(struct cache *cache) 2632 { 2633 int r; 2634 2635 r = dm_cache_begin_hints(cache->cmd, cache->policy); 2636 if (r) { 2637 DMERR("dm_cache_begin_hints failed"); 2638 return r; 2639 } 2640 2641 r = policy_walk_mappings(cache->policy, save_hint, cache); 2642 if (r) 2643 DMERR("policy_walk_mappings failed"); 2644 2645 return r; 2646 } 2647 2648 /* 2649 * returns true on success 2650 */ 2651 static bool sync_metadata(struct cache *cache) 2652 { 2653 int r1, r2, r3, r4; 2654 2655 r1 = 
write_dirty_bitset(cache); 2656 if (r1) 2657 DMERR("could not write dirty bitset"); 2658 2659 r2 = write_discard_bitset(cache); 2660 if (r2) 2661 DMERR("could not write discard bitset"); 2662 2663 save_stats(cache); 2664 2665 r3 = write_hints(cache); 2666 if (r3) 2667 DMERR("could not write hints"); 2668 2669 /* 2670 * If writing the above metadata failed, we still commit, but don't 2671 * set the clean shutdown flag. This will effectively force every 2672 * dirty bit to be set on reload. 2673 */ 2674 r4 = dm_cache_commit(cache->cmd, !r1 && !r2 && !r3); 2675 if (r4) 2676 DMERR("could not write cache metadata. Data loss may occur."); 2677 2678 return !r1 && !r2 && !r3 && !r4; 2679 } 2680 2681 static void cache_postsuspend(struct dm_target *ti) 2682 { 2683 struct cache *cache = ti->private; 2684 2685 start_quiescing(cache); 2686 wait_for_migrations(cache); 2687 stop_worker(cache); 2688 requeue_deferred_io(cache); 2689 stop_quiescing(cache); 2690 2691 (void) sync_metadata(cache); 2692 } 2693 2694 static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock, 2695 bool dirty, uint32_t hint, bool hint_valid) 2696 { 2697 int r; 2698 struct cache *cache = context; 2699 2700 r = policy_load_mapping(cache->policy, oblock, cblock, hint, hint_valid); 2701 if (r) 2702 return r; 2703 2704 if (dirty) 2705 set_dirty(cache, oblock, cblock); 2706 else 2707 clear_dirty(cache, oblock, cblock); 2708 2709 return 0; 2710 } 2711 2712 static int load_discard(void *context, sector_t discard_block_size, 2713 dm_dblock_t dblock, bool discard) 2714 { 2715 struct cache *cache = context; 2716 2717 /* FIXME: handle mis-matched block size */ 2718 2719 if (discard) 2720 set_discard(cache, dblock); 2721 else 2722 clear_discard(cache, dblock); 2723 2724 return 0; 2725 } 2726 2727 static dm_cblock_t get_cache_dev_size(struct cache *cache) 2728 { 2729 sector_t size = get_dev_size(cache->cache_dev); 2730 (void) sector_div(size, cache->sectors_per_block); 2731 return to_cblock(size); 2732 } 2733 2734 static bool can_resize(struct cache *cache, dm_cblock_t new_size) 2735 { 2736 if (from_cblock(new_size) > from_cblock(cache->cache_size)) 2737 return true; 2738 2739 /* 2740 * We can't drop a dirty block when shrinking the cache. 2741 */ 2742 while (from_cblock(new_size) < from_cblock(cache->cache_size)) { 2743 new_size = to_cblock(from_cblock(new_size) + 1); 2744 if (is_dirty(cache, new_size)) { 2745 DMERR("unable to shrink cache; cache block %llu is dirty", 2746 (unsigned long long) from_cblock(new_size)); 2747 return false; 2748 } 2749 } 2750 2751 return true; 2752 } 2753 2754 static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size) 2755 { 2756 int r; 2757 2758 r = dm_cache_resize(cache->cmd, new_size); 2759 if (r) { 2760 DMERR("could not resize cache metadata"); 2761 return r; 2762 } 2763 2764 cache->cache_size = new_size; 2765 2766 return 0; 2767 } 2768 2769 static int cache_preresume(struct dm_target *ti) 2770 { 2771 int r = 0; 2772 struct cache *cache = ti->private; 2773 dm_cblock_t csize = get_cache_dev_size(cache); 2774 2775 /* 2776 * Check to see if the cache has resized. 
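	 * On first resume we simply size the metadata to match the cache
	 * device.  On later resumes a grow is always allowed, but a shrink
	 * is refused while any block that would be dropped is dirty
	 * (see can_resize()).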
2777 */ 2778 if (!cache->sized) { 2779 r = resize_cache_dev(cache, csize); 2780 if (r) 2781 return r; 2782 2783 cache->sized = true; 2784 2785 } else if (csize != cache->cache_size) { 2786 if (!can_resize(cache, csize)) 2787 return -EINVAL; 2788 2789 r = resize_cache_dev(cache, csize); 2790 if (r) 2791 return r; 2792 } 2793 2794 if (!cache->loaded_mappings) { 2795 r = dm_cache_load_mappings(cache->cmd, cache->policy, 2796 load_mapping, cache); 2797 if (r) { 2798 DMERR("could not load cache mappings"); 2799 return r; 2800 } 2801 2802 cache->loaded_mappings = true; 2803 } 2804 2805 if (!cache->loaded_discards) { 2806 r = dm_cache_load_discards(cache->cmd, load_discard, cache); 2807 if (r) { 2808 DMERR("could not load origin discards"); 2809 return r; 2810 } 2811 2812 cache->loaded_discards = true; 2813 } 2814 2815 return r; 2816 } 2817 2818 static void cache_resume(struct dm_target *ti) 2819 { 2820 struct cache *cache = ti->private; 2821 2822 cache->need_tick_bio = true; 2823 do_waker(&cache->waker.work); 2824 } 2825 2826 /* 2827 * Status format: 2828 * 2829 * <#used metadata blocks>/<#total metadata blocks> 2830 * <#read hits> <#read misses> <#write hits> <#write misses> 2831 * <#demotions> <#promotions> <#blocks in cache> <#dirty> 2832 * <#features> <features>* 2833 * <#core args> <core args> 2834 * <#policy args> <policy args>* 2835 */ 2836 static void cache_status(struct dm_target *ti, status_type_t type, 2837 unsigned status_flags, char *result, unsigned maxlen) 2838 { 2839 int r = 0; 2840 unsigned i; 2841 ssize_t sz = 0; 2842 dm_block_t nr_free_blocks_metadata = 0; 2843 dm_block_t nr_blocks_metadata = 0; 2844 char buf[BDEVNAME_SIZE]; 2845 struct cache *cache = ti->private; 2846 dm_cblock_t residency; 2847 2848 switch (type) { 2849 case STATUSTYPE_INFO: 2850 /* Commit to ensure statistics aren't out-of-date */ 2851 if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti)) { 2852 r = dm_cache_commit(cache->cmd, false); 2853 if (r) 2854 DMERR("could not commit metadata for accurate status"); 2855 } 2856 2857 r = dm_cache_get_free_metadata_block_count(cache->cmd, 2858 &nr_free_blocks_metadata); 2859 if (r) { 2860 DMERR("could not get metadata free block count"); 2861 goto err; 2862 } 2863 2864 r = dm_cache_get_metadata_dev_size(cache->cmd, &nr_blocks_metadata); 2865 if (r) { 2866 DMERR("could not get metadata device size"); 2867 goto err; 2868 } 2869 2870 residency = policy_residency(cache->policy); 2871 2872 DMEMIT("%llu/%llu %u %u %u %u %u %u %llu %u ", 2873 (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata), 2874 (unsigned long long)nr_blocks_metadata, 2875 (unsigned) atomic_read(&cache->stats.read_hit), 2876 (unsigned) atomic_read(&cache->stats.read_miss), 2877 (unsigned) atomic_read(&cache->stats.write_hit), 2878 (unsigned) atomic_read(&cache->stats.write_miss), 2879 (unsigned) atomic_read(&cache->stats.demotion), 2880 (unsigned) atomic_read(&cache->stats.promotion), 2881 (unsigned long long) from_cblock(residency), 2882 cache->nr_dirty); 2883 2884 if (writethrough_mode(&cache->features)) 2885 DMEMIT("1 writethrough "); 2886 2887 else if (passthrough_mode(&cache->features)) 2888 DMEMIT("1 passthrough "); 2889 2890 else if (writeback_mode(&cache->features)) 2891 DMEMIT("1 writeback "); 2892 2893 else { 2894 DMERR("internal error: unknown io mode: %d", (int) cache->features.io_mode); 2895 goto err; 2896 } 2897 2898 DMEMIT("2 migration_threshold %llu ", (unsigned long long) cache->migration_threshold); 2899 if (sz < maxlen) { 2900 r = 
policy_emit_config_values(cache->policy, result + sz, maxlen - sz); 2901 if (r) 2902 DMERR("policy_emit_config_values returned %d", r); 2903 } 2904 2905 break; 2906 2907 case STATUSTYPE_TABLE: 2908 format_dev_t(buf, cache->metadata_dev->bdev->bd_dev); 2909 DMEMIT("%s ", buf); 2910 format_dev_t(buf, cache->cache_dev->bdev->bd_dev); 2911 DMEMIT("%s ", buf); 2912 format_dev_t(buf, cache->origin_dev->bdev->bd_dev); 2913 DMEMIT("%s", buf); 2914 2915 for (i = 0; i < cache->nr_ctr_args - 1; i++) 2916 DMEMIT(" %s", cache->ctr_args[i]); 2917 if (cache->nr_ctr_args) 2918 DMEMIT(" %s", cache->ctr_args[cache->nr_ctr_args - 1]); 2919 } 2920 2921 return; 2922 2923 err: 2924 DMEMIT("Error"); 2925 } 2926 2927 /* 2928 * A cache block range can take two forms: 2929 * 2930 * i) A single cblock, eg. '3456' 2931 * ii) A begin and end cblock with dots between, eg. 123-234 2932 */ 2933 static int parse_cblock_range(struct cache *cache, const char *str, 2934 struct cblock_range *result) 2935 { 2936 char dummy; 2937 uint64_t b, e; 2938 int r; 2939 2940 /* 2941 * Try and parse form (ii) first. 2942 */ 2943 r = sscanf(str, "%llu-%llu%c", &b, &e, &dummy); 2944 if (r < 0) 2945 return r; 2946 2947 if (r == 2) { 2948 result->begin = to_cblock(b); 2949 result->end = to_cblock(e); 2950 return 0; 2951 } 2952 2953 /* 2954 * That didn't work, try form (i). 2955 */ 2956 r = sscanf(str, "%llu%c", &b, &dummy); 2957 if (r < 0) 2958 return r; 2959 2960 if (r == 1) { 2961 result->begin = to_cblock(b); 2962 result->end = to_cblock(from_cblock(result->begin) + 1u); 2963 return 0; 2964 } 2965 2966 DMERR("invalid cblock range '%s'", str); 2967 return -EINVAL; 2968 } 2969 2970 static int validate_cblock_range(struct cache *cache, struct cblock_range *range) 2971 { 2972 uint64_t b = from_cblock(range->begin); 2973 uint64_t e = from_cblock(range->end); 2974 uint64_t n = from_cblock(cache->cache_size); 2975 2976 if (b >= n) { 2977 DMERR("begin cblock out of range: %llu >= %llu", b, n); 2978 return -EINVAL; 2979 } 2980 2981 if (e > n) { 2982 DMERR("end cblock out of range: %llu > %llu", e, n); 2983 return -EINVAL; 2984 } 2985 2986 if (b >= e) { 2987 DMERR("invalid cblock range: %llu >= %llu", b, e); 2988 return -EINVAL; 2989 } 2990 2991 return 0; 2992 } 2993 2994 static int request_invalidation(struct cache *cache, struct cblock_range *range) 2995 { 2996 struct invalidation_request req; 2997 2998 INIT_LIST_HEAD(&req.list); 2999 req.cblocks = range; 3000 atomic_set(&req.complete, 0); 3001 req.err = 0; 3002 init_waitqueue_head(&req.result_wait); 3003 3004 spin_lock(&cache->invalidation_lock); 3005 list_add(&req.list, &cache->invalidation_requests); 3006 spin_unlock(&cache->invalidation_lock); 3007 wake_worker(cache); 3008 3009 wait_event(req.result_wait, atomic_read(&req.complete)); 3010 return req.err; 3011 } 3012 3013 static int process_invalidate_cblocks_message(struct cache *cache, unsigned count, 3014 const char **cblock_ranges) 3015 { 3016 int r = 0; 3017 unsigned i; 3018 struct cblock_range range; 3019 3020 if (!passthrough_mode(&cache->features)) { 3021 DMERR("cache has to be in passthrough mode for invalidation"); 3022 return -EPERM; 3023 } 3024 3025 for (i = 0; i < count; i++) { 3026 r = parse_cblock_range(cache, cblock_ranges[i], &range); 3027 if (r) 3028 break; 3029 3030 r = validate_cblock_range(cache, &range); 3031 if (r) 3032 break; 3033 3034 /* 3035 * Pass begin and end origin blocks to the worker and wake it. 
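		 * request_invalidation() waits for the worker to complete
		 * each range, so ranges are serviced one at a time and the
		 * first failure terminates the loop.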
3036 */ 3037 r = request_invalidation(cache, &range); 3038 if (r) 3039 break; 3040 } 3041 3042 return r; 3043 } 3044 3045 /* 3046 * Supports 3047 * "<key> <value>" 3048 * and 3049 * "invalidate_cblocks [(<begin>)|(<begin>-<end>)]* 3050 * 3051 * The key migration_threshold is supported by the cache target core. 3052 */ 3053 static int cache_message(struct dm_target *ti, unsigned argc, char **argv) 3054 { 3055 struct cache *cache = ti->private; 3056 3057 if (!argc) 3058 return -EINVAL; 3059 3060 if (!strcasecmp(argv[0], "invalidate_cblocks")) 3061 return process_invalidate_cblocks_message(cache, argc - 1, (const char **) argv + 1); 3062 3063 if (argc != 2) 3064 return -EINVAL; 3065 3066 return set_config_value(cache, argv[0], argv[1]); 3067 } 3068 3069 static int cache_iterate_devices(struct dm_target *ti, 3070 iterate_devices_callout_fn fn, void *data) 3071 { 3072 int r = 0; 3073 struct cache *cache = ti->private; 3074 3075 r = fn(ti, cache->cache_dev, 0, get_dev_size(cache->cache_dev), data); 3076 if (!r) 3077 r = fn(ti, cache->origin_dev, 0, ti->len, data); 3078 3079 return r; 3080 } 3081 3082 /* 3083 * We assume I/O is going to the origin (which is the volume 3084 * more likely to have restrictions e.g. by being striped). 3085 * (Looking up the exact location of the data would be expensive 3086 * and could always be out of date by the time the bio is submitted.) 3087 */ 3088 static int cache_bvec_merge(struct dm_target *ti, 3089 struct bvec_merge_data *bvm, 3090 struct bio_vec *biovec, int max_size) 3091 { 3092 struct cache *cache = ti->private; 3093 struct request_queue *q = bdev_get_queue(cache->origin_dev->bdev); 3094 3095 if (!q->merge_bvec_fn) 3096 return max_size; 3097 3098 bvm->bi_bdev = cache->origin_dev->bdev; 3099 return min(max_size, q->merge_bvec_fn(q, bvm, biovec)); 3100 } 3101 3102 static void set_discard_limits(struct cache *cache, struct queue_limits *limits) 3103 { 3104 /* 3105 * FIXME: these limits may be incompatible with the cache device 3106 */ 3107 limits->max_discard_sectors = cache->discard_block_size * 1024; 3108 limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT; 3109 } 3110 3111 static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits) 3112 { 3113 struct cache *cache = ti->private; 3114 uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT; 3115 3116 /* 3117 * If the system-determined stacked limits are compatible with the 3118 * cache's blocksize (io_opt is a factor) do not override them. 
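	 * Otherwise reset io_min and advertise io_opt as one cache block so
	 * that upper layers issue I/O sized and aligned to the cache's
	 * block size.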
3119 */ 3120 if (io_opt_sectors < cache->sectors_per_block || 3121 do_div(io_opt_sectors, cache->sectors_per_block)) { 3122 blk_limits_io_min(limits, 0); 3123 blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT); 3124 } 3125 set_discard_limits(cache, limits); 3126 } 3127 3128 /*----------------------------------------------------------------*/ 3129 3130 static struct target_type cache_target = { 3131 .name = "cache", 3132 .version = {1, 2, 0}, 3133 .module = THIS_MODULE, 3134 .ctr = cache_ctr, 3135 .dtr = cache_dtr, 3136 .map = cache_map, 3137 .end_io = cache_end_io, 3138 .postsuspend = cache_postsuspend, 3139 .preresume = cache_preresume, 3140 .resume = cache_resume, 3141 .status = cache_status, 3142 .message = cache_message, 3143 .iterate_devices = cache_iterate_devices, 3144 .merge = cache_bvec_merge, 3145 .io_hints = cache_io_hints, 3146 }; 3147 3148 static int __init dm_cache_init(void) 3149 { 3150 int r; 3151 3152 r = dm_register_target(&cache_target); 3153 if (r) { 3154 DMERR("cache target registration failed: %d", r); 3155 return r; 3156 } 3157 3158 migration_cache = KMEM_CACHE(dm_cache_migration, 0); 3159 if (!migration_cache) { 3160 dm_unregister_target(&cache_target); 3161 return -ENOMEM; 3162 } 3163 3164 return 0; 3165 } 3166 3167 static void __exit dm_cache_exit(void) 3168 { 3169 dm_unregister_target(&cache_target); 3170 kmem_cache_destroy(migration_cache); 3171 } 3172 3173 module_init(dm_cache_init); 3174 module_exit(dm_cache_exit); 3175 3176 MODULE_DESCRIPTION(DM_NAME " cache target"); 3177 MODULE_AUTHOR("Joe Thornber <ejt@redhat.com>"); 3178 MODULE_LICENSE("GPL"); 3179
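/*
 * Example usage (illustrative only; the device names, sizes and policy
 * shown here are assumptions, not anything defined in this file).  The
 * table line follows the argument order parsed by parse_cache_args():
 *
 *   <metadata dev> <cache dev> <origin dev> <block size>
 *   <#feature args> [<feature>]* <policy> <#policy args> [<policy arg>]*
 *
 *   dmsetup create cached --table \
 *     "0 41943040 cache /dev/mapper/meta /dev/mapper/fast /dev/mapper/slow \
 *      512 1 writeback default 0"
 *
 * Runtime keys and invalidation go through cache_message(), eg.
 *
 *   dmsetup message cached 0 migration_threshold 2048
 *   dmsetup message cached 0 invalidate_cblocks 0-1024
 *
 * (invalidate_cblocks is only accepted while the cache is in passthrough
 * mode.)
 */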