/*
 * dm-snapshot.c
 *
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/device-mapper.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>
#include <linux/semaphore.h>

#include "dm.h"

#include "dm-exception-store.h"

#define DM_MSG_PREFIX "snapshots"

static const char dm_snapshot_merge_target_name[] = "snapshot-merge";

#define dm_target_is_snapshot_merge(ti) \
	((ti)->type->name == dm_snapshot_merge_target_name)

/*
 * The size of the mempool used to track chunks in use.
 */
#define MIN_IOS 256

#define DM_TRACKED_CHUNK_HASH_SIZE	16
#define DM_TRACKED_CHUNK_HASH(x)	((unsigned long)(x) & \
					 (DM_TRACKED_CHUNK_HASH_SIZE - 1))

struct dm_exception_table {
	uint32_t hash_mask;
	unsigned hash_shift;
	struct list_head *table;
};

struct dm_snapshot {
	struct mutex lock;

	struct dm_dev *origin;
	struct dm_dev *cow;

	struct dm_target *ti;

	/* List of snapshots per Origin */
	struct list_head list;

	/*
	 * You can't use a snapshot if this is 0 (e.g. if full).
	 * A snapshot-merge target never clears this.
	 */
	int valid;

	/*
	 * The snapshot overflowed because of a write to the snapshot device.
	 * We don't have to invalidate the snapshot in this case, but we need
	 * to prevent further writes.
	 */
	int snapshot_overflowed;

	/* Origin writes don't trigger exceptions until this is set */
	int active;

	atomic_t pending_exceptions_count;

	/* Protected by "lock" */
	sector_t exception_start_sequence;

	/* Protected by kcopyd single-threaded callback */
	sector_t exception_complete_sequence;

	/*
	 * A list of pending exceptions that completed out of order.
	 * Protected by kcopyd single-threaded callback.
	 */
	struct rb_root out_of_order_tree;

	mempool_t pending_pool;

	struct dm_exception_table pending;
	struct dm_exception_table complete;

	/*
	 * pe_lock protects all pending_exception operations and access
	 * as well as the snapshot_bios list.
	 */
	spinlock_t pe_lock;

	/* Chunks with outstanding reads */
	spinlock_t tracked_chunk_lock;
	struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];

	/* The on disk metadata handler */
	struct dm_exception_store *store;

	/* Maximum number of in-flight COW jobs. */
	struct semaphore cow_count;

	struct dm_kcopyd_client *kcopyd_client;

	/* Wait for events based on state_bits */
	unsigned long state_bits;

	/* Range of chunks currently being merged. */
	chunk_t first_merging_chunk;
	int num_merging_chunks;

	/*
	 * The merge operation failed if this flag is set.
	 * Failure modes are handled as follows:
	 * - I/O error reading the header
	 *	=> don't load the target; abort.
	 * - Header does not have "valid" flag set
	 *	=> use the origin; forget about the snapshot.
	 * - I/O error when reading exceptions
	 *	=> don't load the target; abort.
	 *	   (We can't use the intermediate origin state.)
	 * - I/O error while merging
	 *	=> stop merging; set merge_failed; process I/O normally.
	 */
	int merge_failed;

	/*
	 * Incoming bios that overlap with chunks being merged must wait
	 * for them to be committed.
	 */
	struct bio_list bios_queued_during_merge;
};

/*
 * state_bits:
 *   RUNNING_MERGE  - Merge operation is in progress.
 *   SHUTDOWN_MERGE - Set to signal that merge needs to be stopped;
 *                    cleared afterwards.
 */
#define RUNNING_MERGE          0
#define SHUTDOWN_MERGE         1

/*
 * Maximum number of chunks being copied on write.
 *
 * The value was decided experimentally as a trade-off between memory
 * consumption, stalling the kernel's workqueues and maintaining a high enough
 * throughput.
 */
#define DEFAULT_COW_THRESHOLD 2048

static int cow_threshold = DEFAULT_COW_THRESHOLD;
module_param_named(snapshot_cow_threshold, cow_threshold, int, 0644);
MODULE_PARM_DESC(snapshot_cow_threshold, "Maximum number of chunks being copied on write");

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
		"A percentage of time allocated for copy on write");

struct dm_dev *dm_snap_origin(struct dm_snapshot *s)
{
	return s->origin;
}
EXPORT_SYMBOL(dm_snap_origin);

struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
{
	return s->cow;
}
EXPORT_SYMBOL(dm_snap_cow);

static sector_t chunk_to_sector(struct dm_exception_store *store,
				chunk_t chunk)
{
	return chunk << store->chunk_shift;
}

static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
{
	/*
	 * There is only ever one instance of a particular block
	 * device so we can compare pointers safely.
	 */
	return lhs == rhs;
}

struct dm_snap_pending_exception {
	struct dm_exception e;

	/*
	 * Origin buffers waiting for this to complete are held
	 * in a bio list
	 */
	struct bio_list origin_bios;
	struct bio_list snapshot_bios;

	/* Pointer back to snapshot context */
	struct dm_snapshot *snap;

	/*
	 * 1 indicates the exception has already been sent to
	 * kcopyd.
	 */
	int started;

	/* There was a copying error. */
	int copy_error;

	/* A sequence number, it is used for in-order completion. */
	sector_t exception_sequence;

	struct rb_node out_of_order_node;

	/*
	 * For writing a complete chunk, bypassing the copy.
	 */
	struct bio *full_bio;
	bio_end_io_t *full_bio_end_io;
};

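/*
 * Note on full_bio: when a write to the snapshot covers an entire chunk,
 * start_full_bio() below submits the bio directly to the COW device
 * instead of first copying the chunk with kcopyd and then overwriting it
 * (see snapshot_map()).
 */
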
/*
 * Hash table mapping origin volumes to lists of snapshots and
 * a lock to protect it
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;

struct dm_snap_tracked_chunk {
	struct hlist_node node;
	chunk_t chunk;
};

static void init_tracked_chunk(struct bio *bio)
{
	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
	INIT_HLIST_NODE(&c->node);
}

static bool is_bio_tracked(struct bio *bio)
{
	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
	return !hlist_unhashed(&c->node);
}

static void track_chunk(struct dm_snapshot *s, struct bio *bio, chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));

	c->chunk = chunk;

	spin_lock_irq(&s->tracked_chunk_lock);
	hlist_add_head(&c->node,
		       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
	spin_unlock_irq(&s->tracked_chunk_lock);
}

static void stop_tracking_chunk(struct dm_snapshot *s, struct bio *bio)
{
	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
	unsigned long flags;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_del(&c->node);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
}

static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c;
	int found = 0;

	spin_lock_irq(&s->tracked_chunk_lock);

	hlist_for_each_entry(c,
	    &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
		if (c->chunk == chunk) {
			found = 1;
			break;
		}
	}

	spin_unlock_irq(&s->tracked_chunk_lock);

	return found;
}

/*
 * This conflicting I/O is extremely improbable in the caller,
 * so msleep(1) is sufficient and there is no need for a wait queue.
 */
static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
{
	while (__chunk_is_tracked(s, chunk))
		msleep(1);
}

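/*
 * The tracked-chunk hash records bios that snapshot_map() or
 * snapshot_merge_map() sent straight through to the underlying devices.
 * pending_complete() and the merge path use __check_for_conflicting_io()
 * to wait for such bios to drain before an exception for the same chunk
 * is made visible.
 */
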
/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
	/* The origin device */
	struct block_device *bdev;

	struct list_head hash_list;

	/* List of snapshots for this origin */
	struct list_head snapshots;
};

/*
 * This structure is allocated for each origin target
 */
struct dm_origin {
	struct dm_dev *dev;
	struct dm_target *ti;
	unsigned split_boundary;
	struct list_head hash_list;
};

/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF
static struct list_head *_origins;
static struct list_head *_dm_origins;
static struct rw_semaphore _origins_lock;

static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
static DEFINE_SPINLOCK(_pending_exceptions_done_spinlock);
static uint64_t _pending_exceptions_done_count;

static int init_origin_hash(void)
{
	int i;

	_origins = kmalloc_array(ORIGIN_HASH_SIZE, sizeof(struct list_head),
				 GFP_KERNEL);
	if (!_origins) {
		DMERR("unable to allocate memory for _origins");
		return -ENOMEM;
	}
	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
		INIT_LIST_HEAD(_origins + i);

	_dm_origins = kmalloc_array(ORIGIN_HASH_SIZE,
				    sizeof(struct list_head),
				    GFP_KERNEL);
	if (!_dm_origins) {
		DMERR("unable to allocate memory for _dm_origins");
		kfree(_origins);
		return -ENOMEM;
	}
	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
		INIT_LIST_HEAD(_dm_origins + i);

	init_rwsem(&_origins_lock);

	return 0;
}

static void exit_origin_hash(void)
{
	kfree(_origins);
	kfree(_dm_origins);
}

static unsigned origin_hash(struct block_device *bdev)
{
	return bdev->bd_dev & ORIGIN_MASK;
}

static struct origin *__lookup_origin(struct block_device *origin)
{
	struct list_head *ol;
	struct origin *o;

	ol = &_origins[origin_hash(origin)];
	list_for_each_entry (o, ol, hash_list)
		if (bdev_equal(o->bdev, origin))
			return o;

	return NULL;
}

static void __insert_origin(struct origin *o)
{
	struct list_head *sl = &_origins[origin_hash(o->bdev)];
	list_add_tail(&o->hash_list, sl);
}

static struct dm_origin *__lookup_dm_origin(struct block_device *origin)
{
	struct list_head *ol;
	struct dm_origin *o;

	ol = &_dm_origins[origin_hash(origin)];
	list_for_each_entry (o, ol, hash_list)
		if (bdev_equal(o->dev->bdev, origin))
			return o;

	return NULL;
}

static void __insert_dm_origin(struct dm_origin *o)
{
	struct list_head *sl = &_dm_origins[origin_hash(o->dev->bdev)];
	list_add_tail(&o->hash_list, sl);
}

static void __remove_dm_origin(struct dm_origin *o)
{
	list_del(&o->hash_list);
}

/*
 * _origins_lock must be held when calling this function.
 * Returns number of snapshots registered using the supplied cow device, plus:
 * snap_src - a snapshot suitable for use as a source of exception handover
 * snap_dest - a snapshot capable of receiving exception handover.
 * snap_merge - an existing snapshot-merge target linked to the same origin.
 * There can be at most one snapshot-merge target. The parameter is optional.
 *
 * Possible return values and states of snap_src and snap_dest.
 *   0: NULL, NULL  - first new snapshot
 *   1: snap_src, NULL - normal snapshot
 *   2: snap_src, snap_dest  - waiting for handover
 *   2: snap_src, NULL - handed over, waiting for old to be deleted
 *   1: NULL, snap_dest - source got destroyed without handover
 */
static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
					struct dm_snapshot **snap_src,
					struct dm_snapshot **snap_dest,
					struct dm_snapshot **snap_merge)
{
	struct dm_snapshot *s;
	struct origin *o;
	int count = 0;
	int active;

	o = __lookup_origin(snap->origin->bdev);
	if (!o)
		goto out;

	list_for_each_entry(s, &o->snapshots, list) {
		if (dm_target_is_snapshot_merge(s->ti) && snap_merge)
			*snap_merge = s;
		if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
			continue;

		mutex_lock(&s->lock);
		active = s->active;
		mutex_unlock(&s->lock);

		if (active) {
			if (snap_src)
				*snap_src = s;
		} else if (snap_dest)
			*snap_dest = s;

		count++;
	}

out:
	return count;
}

/*
 * On success, returns 1 if this snapshot is a handover destination,
 * otherwise returns 0.
 */
static int __validate_exception_handover(struct dm_snapshot *snap)
{
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
	struct dm_snapshot *snap_merge = NULL;

	/* Does snapshot need exceptions handed over to it? */
	if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest,
					  &snap_merge) == 2) ||
	    snap_dest) {
		snap->ti->error = "Snapshot cow pairing for exception "
				  "table handover failed";
		return -EINVAL;
	}

	/*
	 * If no snap_src was found, snap cannot become a handover
	 * destination.
	 */
	if (!snap_src)
		return 0;

	/*
	 * Non-snapshot-merge handover?
	 */
	if (!dm_target_is_snapshot_merge(snap->ti))
		return 1;

	/*
	 * Do not allow more than one merging snapshot.
	 */
	if (snap_merge) {
		snap->ti->error = "A snapshot is already merging.";
		return -EINVAL;
	}

	if (!snap_src->store->type->prepare_merge ||
	    !snap_src->store->type->commit_merge) {
		snap->ti->error = "Snapshot exception store does not "
				  "support snapshot-merge.";
		return -EINVAL;
	}

	return 1;
}

static void __insert_snapshot(struct origin *o, struct dm_snapshot *s)
{
	struct dm_snapshot *l;

	/* Sort the list according to chunk size, largest-first smallest-last */
	list_for_each_entry(l, &o->snapshots, list)
		if (l->store->chunk_size < s->store->chunk_size)
			break;
	list_add_tail(&s->list, &l->list);
}

/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 *
 * Also validate snapshot exception store handovers.
 * On success, returns 1 if this registration is a handover destination,
 * otherwise returns 0.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
	struct origin *o, *new_o = NULL;
	struct block_device *bdev = snap->origin->bdev;
	int r = 0;

	new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
	if (!new_o)
		return -ENOMEM;

	down_write(&_origins_lock);

	r = __validate_exception_handover(snap);
	if (r < 0) {
		kfree(new_o);
		goto out;
	}

	o = __lookup_origin(bdev);
	if (o)
		kfree(new_o);
	else {
		/* New origin */
		o = new_o;

		/* Initialise the struct */
		INIT_LIST_HEAD(&o->snapshots);
		o->bdev = bdev;

		__insert_origin(o);
	}

	__insert_snapshot(o, snap);

out:
	up_write(&_origins_lock);

	return r;
}

/*
 * Move snapshot to correct place in list according to chunk size.
 */
static void reregister_snapshot(struct dm_snapshot *s)
{
	struct block_device *bdev = s->origin->bdev;

	down_write(&_origins_lock);

	list_del(&s->list);
	__insert_snapshot(__lookup_origin(bdev), s);

	up_write(&_origins_lock);
}

static void unregister_snapshot(struct dm_snapshot *s)
{
	struct origin *o;

	down_write(&_origins_lock);
	o = __lookup_origin(s->origin->bdev);

	list_del(&s->list);
	if (o && list_empty(&o->snapshots)) {
		list_del(&o->hash_list);
		kfree(o);
	}

	up_write(&_origins_lock);
}

/*
 * Implementation of the exception hash tables.
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
 */
static int dm_exception_table_init(struct dm_exception_table *et,
				   uint32_t size, unsigned hash_shift)
{
	unsigned int i;

	et->hash_shift = hash_shift;
	et->hash_mask = size - 1;
	et->table = dm_vcalloc(size, sizeof(struct list_head));
	if (!et->table)
		return -ENOMEM;

	for (i = 0; i < size; i++)
		INIT_LIST_HEAD(et->table + i);

	return 0;
}

static void dm_exception_table_exit(struct dm_exception_table *et,
				    struct kmem_cache *mem)
{
	struct list_head *slot;
	struct dm_exception *ex, *next;
	int i, size;

	size = et->hash_mask + 1;
	for (i = 0; i < size; i++) {
		slot = et->table + i;

		list_for_each_entry_safe (ex, next, slot, hash_list)
			kmem_cache_free(mem, ex);
	}

	vfree(et->table);
}

static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
{
	return (chunk >> et->hash_shift) & et->hash_mask;
}

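/*
 * Ignoring the lowest hash_shift bits means chunk numbers that differ only
 * in those bits hash to the same bucket.  A run of consecutive chunks
 * coalesced by dm_insert_exception() below therefore stays within a single
 * bucket, and dm_lookup_exception() finds the run by hashing any chunk
 * number inside it.
 */
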
static void dm_remove_exception(struct dm_exception *e)
{
	list_del(&e->hash_list);
}

/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
						chunk_t chunk)
{
	struct list_head *slot;
	struct dm_exception *e;

	slot = &et->table[exception_hash(et, chunk)];
	list_for_each_entry (e, slot, hash_list)
		if (chunk >= e->old_chunk &&
		    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
			return e;

	return NULL;
}

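/*
 * For example, an exception with old_chunk == 10 and a consecutive count
 * of 2 covers chunks 10, 11 and 12, so a lookup of chunk 11 returns it;
 * the caller derives the remapped chunk as new_chunk + (chunk - old_chunk),
 * as remap_exception() does below.
 */
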
static struct dm_exception *alloc_completed_exception(gfp_t gfp)
{
	struct dm_exception *e;

	e = kmem_cache_alloc(exception_cache, gfp);
	if (!e && gfp == GFP_NOIO)
		e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

	return e;
}

static void free_completed_exception(struct dm_exception *e)
{
	kmem_cache_free(exception_cache, e);
}

static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{
	struct dm_snap_pending_exception *pe = mempool_alloc(&s->pending_pool,
							     GFP_NOIO);

	atomic_inc(&s->pending_exceptions_count);
	pe->snap = s;

	return pe;
}

static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;

	mempool_free(pe, &s->pending_pool);
	smp_mb__before_atomic();
	atomic_dec(&s->pending_exceptions_count);
}

static void dm_insert_exception(struct dm_exception_table *eh,
				struct dm_exception *new_e)
{
	struct list_head *l;
	struct dm_exception *e = NULL;

	l = &eh->table[exception_hash(eh, new_e->old_chunk)];

	/* Add immediately if this table doesn't support consecutive chunks */
	if (!eh->hash_shift)
		goto out;

	/* List is ordered by old_chunk */
	list_for_each_entry_reverse(e, l, hash_list) {
		/* Insert after an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk +
					 dm_consecutive_chunk_count(e) + 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
					 dm_consecutive_chunk_count(e) + 1)) {
			dm_consecutive_chunk_count_inc(e);
			free_completed_exception(new_e);
			return;
		}

		/* Insert before an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk - 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
			dm_consecutive_chunk_count_inc(e);
			e->old_chunk--;
			e->new_chunk--;
			free_completed_exception(new_e);
			return;
		}

		if (new_e->old_chunk > e->old_chunk)
			break;
	}

out:
	list_add(&new_e->hash_list, e ? &e->hash_list : l);
}

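/*
 * Coalescing example for the tests above: the table holds old 10 -> new 20
 * with a consecutive count of 1 (i.e. 10->20, 11->21).  Inserting
 * old 12 -> new 22 matches the "insert after" test, so the count simply
 * becomes 2 and the new entry is freed.  Inserting old 9 -> new 19 matches
 * the "insert before" test and shifts the start of the run down to 9->19.
 */
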
/*
 * Callback used by the exception stores to load exceptions when
 * initialising.
 */
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{
	struct dm_snapshot *s = context;
	struct dm_exception *e;

	e = alloc_completed_exception(GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->old_chunk = old;

	/* Consecutive_count is implicitly initialised to zero */
	e->new_chunk = new;

	dm_insert_exception(&s->complete, e);

	return 0;
}

/*
 * Return a minimum chunk size of all snapshots that have the specified origin.
 * Return zero if the origin has no snapshots.
 */
static uint32_t __minimum_chunk_size(struct origin *o)
{
	struct dm_snapshot *snap;
	unsigned chunk_size = 0;

	if (o)
		list_for_each_entry(snap, &o->snapshots, list)
			chunk_size = min_not_zero(chunk_size,
						  snap->store->chunk_size);

	return (uint32_t) chunk_size;
}

/*
 * Hard coded magic.
 */
static int calc_max_buckets(void)
{
	/* use a fixed size of 2MB */
	unsigned long mem = 2 * 1024 * 1024;
	mem /= sizeof(struct list_head);

	return mem;
}

/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
	sector_t hash_size, cow_dev_size, max_buckets;

	/*
	 * Calculate based on the size of the original volume or
	 * the COW volume...
	 */
	cow_dev_size = get_dev_size(s->cow->bdev);
	max_buckets = calc_max_buckets();

	hash_size = cow_dev_size >> s->store->chunk_shift;
	hash_size = min(hash_size, max_buckets);

	if (hash_size < 64)
		hash_size = 64;
	hash_size = rounddown_pow_of_two(hash_size);
	if (dm_exception_table_init(&s->complete, hash_size,
				    DM_CHUNK_CONSECUTIVE_BITS))
		return -ENOMEM;

	/*
	 * Allocate hash table for in-flight exceptions
	 * Make this smaller than the real hash table
	 */
	hash_size >>= 3;
	if (hash_size < 64)
		hash_size = 64;

	if (dm_exception_table_init(&s->pending, hash_size, 0)) {
		dm_exception_table_exit(&s->complete, exception_cache);
		return -ENOMEM;
	}

	return 0;
}

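/*
 * Sizing example: a 16GiB COW device with 8KiB chunks has 2M chunks, which
 * calc_max_buckets() caps at 2MB / sizeof(struct list_head) buckets (128K
 * on 64-bit).  The pending table is then 8 times smaller, since only a
 * handful of exceptions are normally in flight at once.
 */
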
static void merge_shutdown(struct dm_snapshot *s)
{
	clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
	smp_mb__after_atomic();
	wake_up_bit(&s->state_bits, RUNNING_MERGE);
}

static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s)
{
	s->first_merging_chunk = 0;
	s->num_merging_chunks = 0;

	return bio_list_get(&s->bios_queued_during_merge);
}

/*
 * Remove one chunk from the index of completed exceptions.
 */
static int __remove_single_exception_chunk(struct dm_snapshot *s,
					   chunk_t old_chunk)
{
	struct dm_exception *e;

	e = dm_lookup_exception(&s->complete, old_chunk);
	if (!e) {
		DMERR("Corruption detected: exception for block %llu is "
		      "on disk but not in memory",
		      (unsigned long long)old_chunk);
		return -EINVAL;
	}

	/*
	 * If this is the only chunk using this exception, remove exception.
	 */
	if (!dm_consecutive_chunk_count(e)) {
		dm_remove_exception(e);
		free_completed_exception(e);
		return 0;
	}

	/*
	 * The chunk may be either at the beginning or the end of a
	 * group of consecutive chunks - never in the middle.  We are
	 * removing chunks in the opposite order to that in which they
	 * were added, so this should always be true.
	 * Decrement the consecutive chunk counter and adjust the
	 * starting point if necessary.
	 */
	if (old_chunk == e->old_chunk) {
		e->old_chunk++;
		e->new_chunk++;
	} else if (old_chunk != e->old_chunk +
		   dm_consecutive_chunk_count(e)) {
		DMERR("Attempt to merge block %llu from the "
		      "middle of a chunk range [%llu - %llu]",
		      (unsigned long long)old_chunk,
		      (unsigned long long)e->old_chunk,
		      (unsigned long long)
		      e->old_chunk + dm_consecutive_chunk_count(e));
		return -EINVAL;
	}

	dm_consecutive_chunk_count_dec(e);

	return 0;
}

static void flush_bios(struct bio *bio);

static int remove_single_exception_chunk(struct dm_snapshot *s)
{
	struct bio *b = NULL;
	int r;
	chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;

	mutex_lock(&s->lock);

	/*
	 * Process chunks (and associated exceptions) in reverse order
	 * so that dm_consecutive_chunk_count_dec() accounting works.
	 */
	do {
		r = __remove_single_exception_chunk(s, old_chunk);
		if (r)
			goto out;
	} while (old_chunk-- > s->first_merging_chunk);

	b = __release_queued_bios_after_merge(s);

out:
	mutex_unlock(&s->lock);
	if (b)
		flush_bios(b);

	return r;
}

static int origin_write_extent(struct dm_snapshot *merging_snap,
			       sector_t sector, unsigned chunk_size);

static void merge_callback(int read_err, unsigned long write_err,
			   void *context);

static uint64_t read_pending_exceptions_done_count(void)
{
	uint64_t pending_exceptions_done;

	spin_lock(&_pending_exceptions_done_spinlock);
	pending_exceptions_done = _pending_exceptions_done_count;
	spin_unlock(&_pending_exceptions_done_spinlock);

	return pending_exceptions_done;
}

static void increment_pending_exceptions_done_count(void)
{
	spin_lock(&_pending_exceptions_done_spinlock);
	_pending_exceptions_done_count++;
	spin_unlock(&_pending_exceptions_done_spinlock);

	wake_up_all(&_pending_exceptions_done);
}

static void snapshot_merge_next_chunks(struct dm_snapshot *s)
{
	int i, linear_chunks;
	chunk_t old_chunk, new_chunk;
	struct dm_io_region src, dest;
	sector_t io_size;
	uint64_t previous_count;

	BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits));
	if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits)))
		goto shut;

	/*
	 * valid flag never changes during merge, so no lock required.
	 */
	if (!s->valid) {
		DMERR("Snapshot is invalid: can't merge");
		goto shut;
	}

	linear_chunks = s->store->type->prepare_merge(s->store, &old_chunk,
						      &new_chunk);
	if (linear_chunks <= 0) {
		if (linear_chunks < 0) {
			DMERR("Read error in exception store: "
			      "shutting down merge");
			mutex_lock(&s->lock);
			s->merge_failed = 1;
			mutex_unlock(&s->lock);
		}
		goto shut;
	}

	/* Adjust old_chunk and new_chunk to reflect start of linear region */
	old_chunk = old_chunk + 1 - linear_chunks;
	new_chunk = new_chunk + 1 - linear_chunks;

	/*
	 * Use one (potentially large) I/O to copy all 'linear_chunks'
	 * from the exception store to the origin
	 */
	io_size = linear_chunks * s->store->chunk_size;

	dest.bdev = s->origin->bdev;
	dest.sector = chunk_to_sector(s->store, old_chunk);
	dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector);

	src.bdev = s->cow->bdev;
	src.sector = chunk_to_sector(s->store, new_chunk);
	src.count = dest.count;

	/*
	 * Reallocate any exceptions needed in other snapshots then
	 * wait for the pending exceptions to complete.
	 * Each time any pending exception (globally on the system)
	 * completes we are woken and repeat the process to find out
	 * if we can proceed.  While this may not seem a particularly
	 * efficient algorithm, it is not expected to have any
	 * significant impact on performance.
	 */
	previous_count = read_pending_exceptions_done_count();
	while (origin_write_extent(s, dest.sector, io_size)) {
		wait_event(_pending_exceptions_done,
			   (read_pending_exceptions_done_count() !=
			    previous_count));
		/* Retry after the wait, until all exceptions are done. */
		previous_count = read_pending_exceptions_done_count();
	}

	mutex_lock(&s->lock);
	s->first_merging_chunk = old_chunk;
	s->num_merging_chunks = linear_chunks;
	mutex_unlock(&s->lock);

	/* Wait until writes to all 'linear_chunks' drain */
	for (i = 0; i < linear_chunks; i++)
		__check_for_conflicting_io(s, old_chunk + i);

	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
	return;

shut:
	merge_shutdown(s);
}

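/*
 * prepare_merge() reports the *last* chunk of a linear run together with
 * the length of the run, e.g. old_chunk == 14 with linear_chunks == 3
 * describes chunks 12, 13 and 14.  The "+ 1 - linear_chunks" adjustment
 * above therefore rewinds both chunk numbers to the start of the run
 * before the single large copy is issued.
 */
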
static void error_bios(struct bio *bio);

static void merge_callback(int read_err, unsigned long write_err, void *context)
{
	struct dm_snapshot *s = context;
	struct bio *b = NULL;

	if (read_err || write_err) {
		if (read_err)
			DMERR("Read error: shutting down merge.");
		else
			DMERR("Write error: shutting down merge.");
		goto shut;
	}

	if (s->store->type->commit_merge(s->store,
					 s->num_merging_chunks) < 0) {
		DMERR("Write error in exception store: shutting down merge");
		goto shut;
	}

	if (remove_single_exception_chunk(s) < 0)
		goto shut;

	snapshot_merge_next_chunks(s);

	return;

shut:
	mutex_lock(&s->lock);
	s->merge_failed = 1;
	b = __release_queued_bios_after_merge(s);
	mutex_unlock(&s->lock);
	error_bios(b);

	merge_shutdown(s);
}

static void start_merge(struct dm_snapshot *s)
{
	if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits))
		snapshot_merge_next_chunks(s);
}

/*
 * Stop the merging process and wait until it finishes.
 */
static void stop_merge(struct dm_snapshot *s)
{
	set_bit(SHUTDOWN_MERGE, &s->state_bits);
	wait_on_bit(&s->state_bits, RUNNING_MERGE, TASK_UNINTERRUPTIBLE);
	clear_bit(SHUTDOWN_MERGE, &s->state_bits);
}

/*
 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p|po|n> <chunk-size>
 */
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_snapshot *s;
	int i;
	int r = -EINVAL;
	char *origin_path, *cow_path;
	dev_t origin_dev, cow_dev;
	unsigned args_used, num_flush_bios = 1;
	fmode_t origin_mode = FMODE_READ;

	if (argc != 4) {
		ti->error = "requires exactly 4 arguments";
		r = -EINVAL;
		goto bad;
	}

	if (dm_target_is_snapshot_merge(ti)) {
		num_flush_bios = 2;
		origin_mode = FMODE_WRITE;
	}

	s = kzalloc(sizeof(*s), GFP_KERNEL);
	if (!s) {
		ti->error = "Cannot allocate private snapshot structure";
		r = -ENOMEM;
		goto bad;
	}

	origin_path = argv[0];
	argv++;
	argc--;

	r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
	if (r) {
		ti->error = "Cannot get origin device";
		goto bad_origin;
	}
	origin_dev = s->origin->bdev->bd_dev;

	cow_path = argv[0];
	argv++;
	argc--;

	cow_dev = dm_get_dev_t(cow_path);
	if (cow_dev && cow_dev == origin_dev) {
		ti->error = "COW device cannot be the same as origin device";
		r = -EINVAL;
		goto bad_cow;
	}

	r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow);
	if (r) {
		ti->error = "Cannot get COW device";
		goto bad_cow;
	}

	r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store);
	if (r) {
		ti->error = "Couldn't create exception store";
		r = -EINVAL;
		goto bad_store;
	}

	argv += args_used;
	argc -= args_used;

	s->ti = ti;
	s->valid = 1;
	s->snapshot_overflowed = 0;
	s->active = 0;
	atomic_set(&s->pending_exceptions_count, 0);
	s->exception_start_sequence = 0;
	s->exception_complete_sequence = 0;
	s->out_of_order_tree = RB_ROOT;
	mutex_init(&s->lock);
	INIT_LIST_HEAD(&s->list);
	spin_lock_init(&s->pe_lock);
	s->state_bits = 0;
	s->merge_failed = 0;
	s->first_merging_chunk = 0;
	s->num_merging_chunks = 0;
	bio_list_init(&s->bios_queued_during_merge);

	/* Allocate hash table for COW data */
	if (init_hash_tables(s)) {
		ti->error = "Unable to allocate hash table space";
		r = -ENOMEM;
		goto bad_hash_tables;
	}

	sema_init(&s->cow_count, (cow_threshold > 0) ? cow_threshold : INT_MAX);

	s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
	if (IS_ERR(s->kcopyd_client)) {
		r = PTR_ERR(s->kcopyd_client);
		ti->error = "Could not create kcopyd client";
		goto bad_kcopyd;
	}

	r = mempool_init_slab_pool(&s->pending_pool, MIN_IOS, pending_cache);
	if (r) {
		ti->error = "Could not allocate mempool for pending exceptions";
		goto bad_pending_pool;
	}

	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);

	spin_lock_init(&s->tracked_chunk_lock);

	ti->private = s;
	ti->num_flush_bios = num_flush_bios;
	ti->per_io_data_size = sizeof(struct dm_snap_tracked_chunk);

	/* Add snapshot to the list of snapshots for this origin */
	/* Exceptions aren't triggered till snapshot_resume() is called */
	r = register_snapshot(s);
	if (r == -ENOMEM) {
		ti->error = "Snapshot origin struct allocation failed";
		goto bad_load_and_register;
	} else if (r < 0) {
		/* invalid handover, register_snapshot has set ti->error */
		goto bad_load_and_register;
	}

	/*
	 * Metadata must only be loaded into one table at once, so skip this
	 * if metadata will be handed over during resume.
	 * Chunk size will be set during the handover - set it to zero to
	 * ensure it's ignored.
	 */
	if (r > 0) {
		s->store->chunk_size = 0;
		return 0;
	}

	r = s->store->type->read_metadata(s->store, dm_add_exception,
					  (void *)s);
	if (r < 0) {
		ti->error = "Failed to read snapshot metadata";
		goto bad_read_metadata;
	} else if (r > 0) {
		s->valid = 0;
		DMWARN("Snapshot is marked invalid.");
	}

	if (!s->store->chunk_size) {
		ti->error = "Chunk size not set";
		goto bad_read_metadata;
	}

	r = dm_set_target_max_io_len(ti, s->store->chunk_size);
	if (r)
		goto bad_read_metadata;

	return 0;

bad_read_metadata:
	unregister_snapshot(s);

bad_load_and_register:
	mempool_exit(&s->pending_pool);

bad_pending_pool:
	dm_kcopyd_client_destroy(s->kcopyd_client);

bad_kcopyd:
	dm_exception_table_exit(&s->pending, pending_cache);
	dm_exception_table_exit(&s->complete, exception_cache);

bad_hash_tables:
	dm_exception_store_destroy(s->store);

bad_store:
	dm_put_device(ti, s->cow);

bad_cow:
	dm_put_device(ti, s->origin);

bad_origin:
	kfree(s);

bad:
	return r;
}

static void __free_exceptions(struct dm_snapshot *s)
{
	dm_kcopyd_client_destroy(s->kcopyd_client);
	s->kcopyd_client = NULL;

	dm_exception_table_exit(&s->pending, pending_cache);
	dm_exception_table_exit(&s->complete, exception_cache);
}

static void __handover_exceptions(struct dm_snapshot *snap_src,
				  struct dm_snapshot *snap_dest)
{
	union {
		struct dm_exception_table table_swap;
		struct dm_exception_store *store_swap;
	} u;

	/*
	 * Swap all snapshot context information between the two instances.
	 */
	u.table_swap = snap_dest->complete;
	snap_dest->complete = snap_src->complete;
	snap_src->complete = u.table_swap;

	u.store_swap = snap_dest->store;
	snap_dest->store = snap_src->store;
	snap_dest->store->userspace_supports_overflow = u.store_swap->userspace_supports_overflow;
	snap_src->store = u.store_swap;

	snap_dest->store->snap = snap_dest;
	snap_src->store->snap = snap_src;

	snap_dest->ti->max_io_len = snap_dest->store->chunk_size;
	snap_dest->valid = snap_src->valid;
	snap_dest->snapshot_overflowed = snap_src->snapshot_overflowed;

	/*
	 * Set source invalid to ensure it receives no further I/O.
	 */
	snap_src->valid = 0;
}

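/*
 * The union above is just scratch space: only one member is live at a
 * time, first while swapping the exception tables and then while swapping
 * the exception store pointers.
 */
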
static void snapshot_dtr(struct dm_target *ti)
{
#ifdef CONFIG_DM_DEBUG
	int i;
#endif
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	down_read(&_origins_lock);
	/* Check whether exception handover must be cancelled */
	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest && (s == snap_src)) {
		mutex_lock(&snap_dest->lock);
		snap_dest->valid = 0;
		mutex_unlock(&snap_dest->lock);
		DMERR("Cancelling snapshot handover.");
	}
	up_read(&_origins_lock);

	if (dm_target_is_snapshot_merge(ti))
		stop_merge(s);

	/* Prevent further origin writes from using this snapshot. */
	/* After this returns there can be no new kcopyd jobs. */
	unregister_snapshot(s);

	while (atomic_read(&s->pending_exceptions_count))
		msleep(1);
	/*
	 * Ensure instructions in mempool_exit aren't reordered
	 * before atomic_read.
	 */
	smp_mb();

#ifdef CONFIG_DM_DEBUG
	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
#endif

	__free_exceptions(s);

	mempool_exit(&s->pending_pool);

	dm_exception_store_destroy(s->store);

	mutex_destroy(&s->lock);

	dm_put_device(ti, s->cow);

	dm_put_device(ti, s->origin);

	kfree(s);
}

/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		generic_make_request(bio);
		bio = n;
	}
}

static int do_origin(struct dm_dev *origin, struct bio *bio);

/*
 * Retry a list of origin bios.
 */
static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
{
	struct bio *n;
	int r;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		r = do_origin(s->origin, bio);
		if (r == DM_MAPIO_REMAPPED)
			generic_make_request(bio);
		bio = n;
	}
}

/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		bio_io_error(bio);
		bio = n;
	}
}

static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
	if (!s->valid)
		return;

	if (err == -EIO)
		DMERR("Invalidating snapshot: Error reading/writing.");
	else if (err == -ENOMEM)
		DMERR("Invalidating snapshot: Unable to allocate exception.");

	if (s->store->type->drop_snapshot)
		s->store->type->drop_snapshot(s->store);

	s->valid = 0;

	dm_table_event(s->ti->table);
}

static void pending_complete(void *context, int success)
{
	struct dm_snap_pending_exception *pe = context;
	struct dm_exception *e;
	struct dm_snapshot *s = pe->snap;
	struct bio *origin_bios = NULL;
	struct bio *snapshot_bios = NULL;
	struct bio *full_bio = NULL;
	int error = 0;

	if (!success) {
		/* Read/write error - snapshot is unusable */
		mutex_lock(&s->lock);
		__invalidate_snapshot(s, -EIO);
		error = 1;
		goto out;
	}

	e = alloc_completed_exception(GFP_NOIO);
	if (!e) {
		mutex_lock(&s->lock);
		__invalidate_snapshot(s, -ENOMEM);
		error = 1;
		goto out;
	}
	*e = pe->e;

	mutex_lock(&s->lock);
	if (!s->valid) {
		free_completed_exception(e);
		error = 1;
		goto out;
	}

	/* Check for conflicting reads */
	__check_for_conflicting_io(s, pe->e.old_chunk);

	/*
	 * Add a proper exception, and remove the
	 * in-flight exception from the list.
	 */
	dm_insert_exception(&s->complete, e);

out:
	dm_remove_exception(&pe->e);
	snapshot_bios = bio_list_get(&pe->snapshot_bios);
	origin_bios = bio_list_get(&pe->origin_bios);
	full_bio = pe->full_bio;
	if (full_bio)
		full_bio->bi_end_io = pe->full_bio_end_io;
	increment_pending_exceptions_done_count();

	mutex_unlock(&s->lock);

	/* Submit any pending write bios */
	if (error) {
		if (full_bio)
			bio_io_error(full_bio);
		error_bios(snapshot_bios);
	} else {
		if (full_bio)
			bio_endio(full_bio);
		flush_bios(snapshot_bios);
	}

	retry_origin_bios(s, origin_bios);

	free_pending_exception(pe);
}

static void complete_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;

	/* Update the metadata if we are persistent */
	s->store->type->commit_exception(s->store, &pe->e, !pe->copy_error,
					 pending_complete, pe);
}

/*
 * Called when the copy I/O has finished.  kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned long write_err, void *context)
{
	struct dm_snap_pending_exception *pe = context;
	struct dm_snapshot *s = pe->snap;

	pe->copy_error = read_err || write_err;

	if (pe->exception_sequence == s->exception_complete_sequence) {
		struct rb_node *next;

		s->exception_complete_sequence++;
		complete_exception(pe);

		next = rb_first(&s->out_of_order_tree);
		while (next) {
			pe = rb_entry(next, struct dm_snap_pending_exception,
					out_of_order_node);
			if (pe->exception_sequence != s->exception_complete_sequence)
				break;
			next = rb_next(next);
			s->exception_complete_sequence++;
			rb_erase(&pe->out_of_order_node, &s->out_of_order_tree);
			complete_exception(pe);
			cond_resched();
		}
	} else {
		struct rb_node *parent = NULL;
		struct rb_node **p = &s->out_of_order_tree.rb_node;
		struct dm_snap_pending_exception *pe2;

		while (*p) {
			pe2 = rb_entry(*p, struct dm_snap_pending_exception, out_of_order_node);
			parent = *p;

			BUG_ON(pe->exception_sequence == pe2->exception_sequence);
			if (pe->exception_sequence < pe2->exception_sequence)
				p = &((*p)->rb_left);
			else
				p = &((*p)->rb_right);
		}

		rb_link_node(&pe->out_of_order_node, parent, p);
		rb_insert_color(&pe->out_of_order_node, &s->out_of_order_tree);
	}
	up(&s->cow_count);
}

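/*
 * In-order completion example: if copies with sequence numbers 0, 2 and 3
 * finish before 1, then 0 is committed immediately while 2 and 3 are
 * parked in out_of_order_tree.  When 1 finally completes, the tree is
 * drained in sequence order, committing 1, 2 and 3 in one pass.
 */
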
/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;
	struct dm_io_region src, dest;
	struct block_device *bdev = s->origin->bdev;
	sector_t dev_size;

	dev_size = get_dev_size(bdev);

	src.bdev = bdev;
	src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
	src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);

	dest.bdev = s->cow->bdev;
	dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
	dest.count = src.count;

	/* Hand over to kcopyd */
	down(&s->cow_count);
	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
}

static void full_bio_end_io(struct bio *bio)
{
	void *callback_data = bio->bi_private;

	dm_kcopyd_do_callback(callback_data, 0, bio->bi_status ? 1 : 0);
}

static void start_full_bio(struct dm_snap_pending_exception *pe,
			   struct bio *bio)
{
	struct dm_snapshot *s = pe->snap;
	void *callback_data;

	pe->full_bio = bio;
	pe->full_bio_end_io = bio->bi_end_io;

	down(&s->cow_count);
	callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
						   copy_callback, pe);

	bio->bi_end_io = full_bio_end_io;
	bio->bi_private = callback_data;

	generic_make_request(bio);
}

static struct dm_snap_pending_exception *
__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);

	if (!e)
		return NULL;

	return container_of(e, struct dm_snap_pending_exception, e);
}

/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on snap->lock before calling
 * this.
 */
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s,
			 struct dm_snap_pending_exception *pe, chunk_t chunk)
{
	struct dm_snap_pending_exception *pe2;

	pe2 = __lookup_pending_exception(s, chunk);
	if (pe2) {
		free_pending_exception(pe);
		return pe2;
	}

	pe->e.old_chunk = chunk;
	bio_list_init(&pe->origin_bios);
	bio_list_init(&pe->snapshot_bios);
	pe->started = 0;
	pe->full_bio = NULL;

	if (s->store->type->prepare_exception(s->store, &pe->e)) {
		free_pending_exception(pe);
		return NULL;
	}

	pe->exception_sequence = s->exception_start_sequence++;

	dm_insert_exception(&s->pending, &pe->e);

	return pe;
}

static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
			    struct bio *bio, chunk_t chunk)
{
	bio_set_dev(bio, s->cow->bdev);
	bio->bi_iter.bi_sector =
		chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) +
				(chunk - e->old_chunk)) +
		(bio->bi_iter.bi_sector & s->store->chunk_mask);
}

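/*
 * Remapping example with a 16-sector chunk (chunk_shift == 4,
 * chunk_mask == 15): a bio for sector 37 falls in chunk 2 at offset 5.
 * With an exception old 2 -> new 9, the bio is redirected to the COW
 * device at sector 9 * 16 + 5 = 149.
 */
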
static int snapshot_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;
	struct dm_snap_pending_exception *pe = NULL;

	init_tracked_chunk(bio);

	if (bio->bi_opf & REQ_PREFLUSH) {
		bio_set_dev(bio, s->cow->bdev);
		return DM_MAPIO_REMAPPED;
	}

	chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);

	/* Full snapshots are not usable */
	/* To get here the table must be live so s->active is always set. */
	if (!s->valid)
		return DM_MAPIO_KILL;

	mutex_lock(&s->lock);

	if (!s->valid || (unlikely(s->snapshot_overflowed) &&
	    bio_data_dir(bio) == WRITE)) {
		r = DM_MAPIO_KILL;
		goto out_unlock;
	}

	/* If the block is already remapped - use that, else remap it */
	e = dm_lookup_exception(&s->complete, chunk);
	if (e) {
		remap_exception(s, e, bio, chunk);
		goto out_unlock;
	}

	/*
	 * Write to snapshot - higher level takes care of RW/RO
	 * flags so we should only get this if we are
	 * writeable.
	 */
	if (bio_data_dir(bio) == WRITE) {
		pe = __lookup_pending_exception(s, chunk);
		if (!pe) {
			mutex_unlock(&s->lock);
			pe = alloc_pending_exception(s);
			mutex_lock(&s->lock);

			if (!s->valid || s->snapshot_overflowed) {
				free_pending_exception(pe);
				r = DM_MAPIO_KILL;
				goto out_unlock;
			}

			e = dm_lookup_exception(&s->complete, chunk);
			if (e) {
				free_pending_exception(pe);
				remap_exception(s, e, bio, chunk);
				goto out_unlock;
			}

			pe = __find_pending_exception(s, pe, chunk);
			if (!pe) {
				if (s->store->userspace_supports_overflow) {
					s->snapshot_overflowed = 1;
					DMERR("Snapshot overflowed: Unable to allocate exception.");
				} else
					__invalidate_snapshot(s, -ENOMEM);
				r = DM_MAPIO_KILL;
				goto out_unlock;
			}
		}

		remap_exception(s, &pe->e, bio, chunk);

		r = DM_MAPIO_SUBMITTED;

		if (!pe->started &&
		    bio->bi_iter.bi_size ==
		    (s->store->chunk_size << SECTOR_SHIFT)) {
			pe->started = 1;
			mutex_unlock(&s->lock);
			start_full_bio(pe, bio);
			goto out;
		}

		bio_list_add(&pe->snapshot_bios, bio);

		if (!pe->started) {
			/* this is protected by snap->lock */
			pe->started = 1;
			mutex_unlock(&s->lock);
			start_copy(pe);
			goto out;
		}
	} else {
		bio_set_dev(bio, s->origin->bdev);
		track_chunk(s, bio, chunk);
	}

out_unlock:
	mutex_unlock(&s->lock);
out:
	return r;
}

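/*
 * Note the lock dance around alloc_pending_exception() above: the mempool
 * allocation may block, so snap->lock is dropped while allocating, and the
 * validity and exception-table checks are repeated once it is reacquired,
 * since another thread may have installed the exception in the meantime.
 */
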
/*
 * A snapshot-merge target behaves like a combination of a snapshot
 * target and a snapshot-origin target.  It only generates new
 * exceptions in other snapshots and not in the one that is being
 * merged.
 *
 * For each chunk, if there is an existing exception, it is used to
 * redirect I/O to the cow device.  Otherwise I/O is sent to the origin,
 * which in turn might generate exceptions in other snapshots.
 * If merging is currently taking place on the chunk in question, the
 * I/O is deferred by adding it to s->bios_queued_during_merge.
 */
static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;

	init_tracked_chunk(bio);

	if (bio->bi_opf & REQ_PREFLUSH) {
		if (!dm_bio_get_target_bio_nr(bio))
			bio_set_dev(bio, s->origin->bdev);
		else
			bio_set_dev(bio, s->cow->bdev);
		return DM_MAPIO_REMAPPED;
	}

	chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);

	mutex_lock(&s->lock);

	/* Full merging snapshots are redirected to the origin */
	if (!s->valid)
		goto redirect_to_origin;

	/* If the block is already remapped - use that */
	e = dm_lookup_exception(&s->complete, chunk);
	if (e) {
		/* Queue writes overlapping with chunks being merged */
		if (bio_data_dir(bio) == WRITE &&
		    chunk >= s->first_merging_chunk &&
		    chunk < (s->first_merging_chunk +
			     s->num_merging_chunks)) {
			bio_set_dev(bio, s->origin->bdev);
			bio_list_add(&s->bios_queued_during_merge, bio);
			r = DM_MAPIO_SUBMITTED;
			goto out_unlock;
		}

		remap_exception(s, e, bio, chunk);

		if (bio_data_dir(bio) == WRITE)
			track_chunk(s, bio, chunk);
		goto out_unlock;
	}

redirect_to_origin:
	bio_set_dev(bio, s->origin->bdev);

	if (bio_data_dir(bio) == WRITE) {
		mutex_unlock(&s->lock);
		return do_origin(s->origin, bio);
	}

out_unlock:
	mutex_unlock(&s->lock);

	return r;
}

static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
		blk_status_t *error)
{
	struct dm_snapshot *s = ti->private;

	if (is_bio_tracked(bio))
		stop_tracking_chunk(s, bio);

	return DM_ENDIO_DONE;
}

static void snapshot_merge_presuspend(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	stop_merge(s);
}

static int snapshot_preresume(struct dm_target *ti)
{
	int r = 0;
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	down_read(&_origins_lock);
	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest) {
		mutex_lock(&snap_src->lock);
		if (s == snap_src) {
			DMERR("Unable to resume snapshot source until "
			      "handover completes.");
			r = -EINVAL;
		} else if (!dm_suspended(snap_src->ti)) {
			DMERR("Unable to perform snapshot handover until "
			      "source is suspended.");
			r = -EINVAL;
		}
		mutex_unlock(&snap_src->lock);
	}
	up_read(&_origins_lock);

	return r;
}

static void snapshot_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL, *snap_merging = NULL;
	struct dm_origin *o;
	struct mapped_device *origin_md = NULL;
	bool must_restart_merging = false;

	down_read(&_origins_lock);

	o = __lookup_dm_origin(s->origin->bdev);
	if (o)
		origin_md = dm_table_get_md(o->ti->table);
	if (!origin_md) {
		(void) __find_snapshots_sharing_cow(s, NULL, NULL, &snap_merging);
		if (snap_merging)
			origin_md = dm_table_get_md(snap_merging->ti->table);
	}
	if (origin_md == dm_table_get_md(ti->table))
		origin_md = NULL;
	if (origin_md) {
		if (dm_hold(origin_md))
			origin_md = NULL;
	}

	up_read(&_origins_lock);

	if (origin_md) {
		dm_internal_suspend_fast(origin_md);
		if (snap_merging && test_bit(RUNNING_MERGE, &snap_merging->state_bits)) {
			must_restart_merging = true;
			stop_merge(snap_merging);
		}
	}

	down_read(&_origins_lock);

	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest) {
		mutex_lock(&snap_src->lock);
		mutex_lock_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
		__handover_exceptions(snap_src, snap_dest);
		mutex_unlock(&snap_dest->lock);
		mutex_unlock(&snap_src->lock);
	}

	up_read(&_origins_lock);

	if (origin_md) {
		if (must_restart_merging)
			start_merge(snap_merging);
		dm_internal_resume_fast(origin_md);
		dm_put(origin_md);
	}

	/* Now we have correct chunk size, reregister */
	reregister_snapshot(s);

	mutex_lock(&s->lock);
	s->active = 1;
	mutex_unlock(&s->lock);
}

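/*
 * snapshot_resume() thus proceeds in three phases: quiesce the origin
 * device (suspending it and pausing any running merge), perform the
 * exception handover under both snapshot locks, and finally restart the
 * merge and resume the origin before activating this snapshot.
 */
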
static uint32_t get_origin_minimum_chunksize(struct block_device *bdev)
{
	uint32_t min_chunksize;

	down_read(&_origins_lock);
	min_chunksize = __minimum_chunk_size(__lookup_origin(bdev));
	up_read(&_origins_lock);

	return min_chunksize;
}

static void snapshot_merge_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	/*
	 * Handover exceptions from existing snapshot.
	 */
	snapshot_resume(ti);

	/*
	 * snapshot-merge acts as an origin, so set ti->max_io_len
	 */
	ti->max_io_len = get_origin_minimum_chunksize(s->origin->bdev);

	start_merge(s);
}

static void snapshot_status(struct dm_target *ti, status_type_t type,
			    unsigned status_flags, char *result, unsigned maxlen)
{
	unsigned sz = 0;
	struct dm_snapshot *snap = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:

		mutex_lock(&snap->lock);

		if (!snap->valid)
			DMEMIT("Invalid");
		else if (snap->merge_failed)
			DMEMIT("Merge failed");
		else if (snap->snapshot_overflowed)
			DMEMIT("Overflow");
		else {
			if (snap->store->type->usage) {
				sector_t total_sectors, sectors_allocated,
					 metadata_sectors;
				snap->store->type->usage(snap->store,
							 &total_sectors,
							 &sectors_allocated,
							 &metadata_sectors);
				DMEMIT("%llu/%llu %llu",
				       (unsigned long long)sectors_allocated,
				       (unsigned long long)total_sectors,
				       (unsigned long long)metadata_sectors);
			}
			else
				DMEMIT("Unknown");
		}

		mutex_unlock(&snap->lock);

		break;

	case STATUSTYPE_TABLE:
		/*
		 * kdevname returns a static pointer so we need
		 * to make private copies if the output is to
		 * make sense.
		 */
		DMEMIT("%s %s", snap->origin->name, snap->cow->name);
		snap->store->type->status(snap->store, type, result + sz,
					  maxlen - sz);
		break;
	}
}

static int snapshot_iterate_devices(struct dm_target *ti,
				    iterate_devices_callout_fn fn, void *data)
{
	struct dm_snapshot *snap = ti->private;
	int r;

	r = fn(ti, snap->origin, 0, ti->len, data);

	if (!r)
		r = fn(ti, snap->cow, 0, get_dev_size(snap->cow->bdev), data);

	return r;
}


/*-----------------------------------------------------------------
 * Origin methods
 *---------------------------------------------------------------*/

/*
 * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any
 * supplied bio was ignored.  The caller may submit it immediately.
 * (No remapping actually occurs as the origin is always a direct linear
 * map.)
 *
 * If further exceptions are required, DM_MAPIO_SUBMITTED is returned
 * and any supplied bio is added to a list to be submitted once all
 * the necessary exceptions exist.
 */
static int __origin_write(struct list_head *snapshots, sector_t sector,
			  struct bio *bio)
{
	int r = DM_MAPIO_REMAPPED;
	struct dm_snapshot *snap;
	struct dm_exception *e;
	struct dm_snap_pending_exception *pe;
	struct dm_snap_pending_exception *pe_to_start_now = NULL;
	struct dm_snap_pending_exception *pe_to_start_last = NULL;
	chunk_t chunk;

	/* Do all the snapshots on this origin */
	list_for_each_entry (snap, snapshots, list) {
		/*
		 * Don't make new exceptions in a merging snapshot
		 * because it has effectively been deleted
		 */
		if (dm_target_is_snapshot_merge(snap->ti))
			continue;

		mutex_lock(&snap->lock);

		/* Only deal with valid and active snapshots */
		if (!snap->valid || !snap->active)
			goto next_snapshot;

		/* Nothing to do if writing beyond end of snapshot */
		if (sector >= dm_table_get_size(snap->ti->table))
			goto next_snapshot;

		/*
		 * Remember, different snapshots can have
		 * different chunk sizes.
		 */
		chunk = sector_to_chunk(snap->store, sector);

		/*
		 * Check exception table to see if block
		 * is already remapped in this snapshot
		 * and trigger an exception if not.
		 */
		e = dm_lookup_exception(&snap->complete, chunk);
		if (e)
			goto next_snapshot;

		pe = __lookup_pending_exception(snap, chunk);
		if (!pe) {
			mutex_unlock(&snap->lock);
			pe = alloc_pending_exception(snap);
			mutex_lock(&snap->lock);

			if (!snap->valid) {
				free_pending_exception(pe);
				goto next_snapshot;
			}

			e = dm_lookup_exception(&snap->complete, chunk);
			if (e) {
				free_pending_exception(pe);
				goto next_snapshot;
			}

			pe = __find_pending_exception(snap, pe, chunk);
			if (!pe) {
				__invalidate_snapshot(snap, -ENOMEM);
				goto next_snapshot;
			}
		}

		r = DM_MAPIO_SUBMITTED;

		/*
		 * If an origin bio was supplied, queue it to wait for the
		 * completion of this exception, and start this one last,
		 * at the end of the function.
		 */
		if (bio) {
			bio_list_add(&pe->origin_bios, bio);
			bio = NULL;

			if (!pe->started) {
				pe->started = 1;
				pe_to_start_last = pe;
			}
		}

		if (!pe->started) {
			pe->started = 1;
			pe_to_start_now = pe;
		}

next_snapshot:
		mutex_unlock(&snap->lock);

		if (pe_to_start_now) {
			start_copy(pe_to_start_now);
			pe_to_start_now = NULL;
		}
	}

	/*
	 * Submit the exception against which the bio is queued last,
	 * to give the other exceptions a head start.
	 */
	if (pe_to_start_last)
		start_copy(pe_to_start_last);

	return r;
}

/*
 * Called on a write from the origin driver.
 */
static int do_origin(struct dm_dev *origin, struct bio *bio)
{
	struct origin *o;
	int r = DM_MAPIO_REMAPPED;

	down_read(&_origins_lock);
	o = __lookup_origin(origin->bdev);
	if (o)
		r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
	up_read(&_origins_lock);

	return r;
}

/*
 * Trigger exceptions in all non-merging snapshots.
 *
 * The chunk size of the merging snapshot may be larger than the chunk
 * size of some other snapshot so we may need to reallocate multiple
 * chunks in other snapshots.
 *
 * We scan all the overlapping exceptions in the other snapshots.
 * Returns 1 if anything was reallocated and must be waited for,
 * otherwise returns 0.
 *
 * size must be a multiple of merging_snap's chunk_size.
 */
static int origin_write_extent(struct dm_snapshot *merging_snap,
			       sector_t sector, unsigned size)
{
	int must_wait = 0;
	sector_t n;
	struct origin *o;

	/*
	 * The origin's __minimum_chunk_size() got stored in max_io_len
	 * by snapshot_merge_resume().
	 */
	down_read(&_origins_lock);
	o = __lookup_origin(merging_snap->origin->bdev);
	for (n = 0; n < size; n += merging_snap->ti->max_io_len)
		if (__origin_write(&o->snapshots, sector + n, NULL) ==
		    DM_MAPIO_SUBMITTED)
			must_wait = 1;
	up_read(&_origins_lock);

	return must_wait;
}

/*
 * Origin: maps a linear range of a device, with hooks for snapshotting.
 */

/*
 * Construct an origin mapping: <dev_path>
 * The context for an origin is merely a 'struct dm_dev *'
 * pointing to the real device.
 */
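/*
 * Illustrative use (device name hypothetical):
 *
 *   # echo "0 $(blockdev --getsz /dev/vg0/base) snapshot-origin /dev/vg0/base" \
 *       | dmsetup create base
 */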
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	struct dm_origin *o;

	if (argc != 1) {
		ti->error = "origin: incorrect number of arguments";
		return -EINVAL;
	}

	o = kmalloc(sizeof(struct dm_origin), GFP_KERNEL);
	if (!o) {
		ti->error = "Cannot allocate private origin structure";
		r = -ENOMEM;
		goto bad_alloc;
	}

	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &o->dev);
	if (r) {
		ti->error = "Cannot get target device";
		goto bad_open;
	}

	o->ti = ti;
	ti->private = o;
	ti->num_flush_bios = 1;

	return 0;

bad_open:
	kfree(o);
bad_alloc:
	return r;
}

static void origin_dtr(struct dm_target *ti)
{
	struct dm_origin *o = ti->private;

	dm_put_device(ti, o->dev);
	kfree(o);
}

static int origin_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_origin *o = ti->private;
	unsigned available_sectors;

	bio_set_dev(bio, o->dev->bdev);

	if (unlikely(bio->bi_opf & REQ_PREFLUSH))
		return DM_MAPIO_REMAPPED;

	if (bio_data_dir(bio) != WRITE)
		return DM_MAPIO_REMAPPED;

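	/*
	 * split_boundary is the smallest chunk size of any snapshot on
	 * this origin; chunk sizes are validated to be powers of two,
	 * so the mask below yields the offset within the current chunk.
	 */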
	available_sectors = o->split_boundary -
		((unsigned)bio->bi_iter.bi_sector & (o->split_boundary - 1));

	if (bio_sectors(bio) > available_sectors)
		dm_accept_partial_bio(bio, available_sectors);

	/* Only tell snapshots if this is a write */
	return do_origin(o->dev, bio);
}

static long origin_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn)
{
	DMWARN("device does not support dax.");
	return -EIO;
}

/*
 * Set the target "max_io_len" field to the minimum of all the snapshots'
 * chunk sizes.
 */
static void origin_resume(struct dm_target *ti)
{
	struct dm_origin *o = ti->private;

	o->split_boundary = get_origin_minimum_chunksize(o->dev->bdev);

	down_write(&_origins_lock);
	__insert_dm_origin(o);
	up_write(&_origins_lock);
}

static void origin_postsuspend(struct dm_target *ti)
{
	struct dm_origin *o = ti->private;

	down_write(&_origins_lock);
	__remove_dm_origin(o);
	up_write(&_origins_lock);
}

static void origin_status(struct dm_target *ti, status_type_t type,
			  unsigned status_flags, char *result, unsigned maxlen)
{
	struct dm_origin *o = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		snprintf(result, maxlen, "%s", o->dev->name);
		break;
	}
}

static int origin_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct dm_origin *o = ti->private;

	return fn(ti, o->dev, 0, ti->len, data);
}

static struct target_type origin_target = {
	.name = "snapshot-origin",
	.version = {1, 9, 0},
	.module = THIS_MODULE,
	.ctr = origin_ctr,
	.dtr = origin_dtr,
	.map = origin_map,
	.resume = origin_resume,
	.postsuspend = origin_postsuspend,
	.status = origin_status,
	.iterate_devices = origin_iterate_devices,
	.direct_access = origin_dax_direct_access,
};

static struct target_type snapshot_target = {
	.name = "snapshot",
	.version = {1, 15, 0},
	.module = THIS_MODULE,
	.ctr = snapshot_ctr,
	.dtr = snapshot_dtr,
	.map = snapshot_map,
	.end_io = snapshot_end_io,
	.preresume = snapshot_preresume,
	.resume = snapshot_resume,
	.status = snapshot_status,
	.iterate_devices = snapshot_iterate_devices,
};

static struct target_type merge_target = {
	.name = dm_snapshot_merge_target_name,
	.version = {1, 4, 0},
	.module = THIS_MODULE,
	.ctr = snapshot_ctr,
	.dtr = snapshot_dtr,
	.map = snapshot_merge_map,
	.end_io = snapshot_end_io,
	.presuspend = snapshot_merge_presuspend,
	.preresume = snapshot_preresume,
	.resume = snapshot_merge_resume,
	.status = snapshot_status,
	.iterate_devices = snapshot_iterate_devices,
};
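
/*
 * Registration order matters only for cleanup: the error labels in
 * dm_snapshot_init() below unwind strictly in reverse order of setup,
 * so a failure at any step releases exactly what was already
 * initialized.
 */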
%d", r); 2484 goto bad_register_merge_target; 2485 } 2486 2487 return 0; 2488 2489 bad_register_merge_target: 2490 dm_unregister_target(&origin_target); 2491 bad_register_origin_target: 2492 dm_unregister_target(&snapshot_target); 2493 bad_register_snapshot_target: 2494 kmem_cache_destroy(pending_cache); 2495 bad_pending_cache: 2496 kmem_cache_destroy(exception_cache); 2497 bad_exception_cache: 2498 exit_origin_hash(); 2499 bad_origin_hash: 2500 dm_exception_store_exit(); 2501 2502 return r; 2503 } 2504 2505 static void __exit dm_snapshot_exit(void) 2506 { 2507 dm_unregister_target(&snapshot_target); 2508 dm_unregister_target(&origin_target); 2509 dm_unregister_target(&merge_target); 2510 2511 exit_origin_hash(); 2512 kmem_cache_destroy(pending_cache); 2513 kmem_cache_destroy(exception_cache); 2514 2515 dm_exception_store_exit(); 2516 } 2517 2518 /* Module hooks */ 2519 module_init(dm_snapshot_init); 2520 module_exit(dm_snapshot_exit); 2521 2522 MODULE_DESCRIPTION(DM_NAME " snapshot target"); 2523 MODULE_AUTHOR("Joe Thornber"); 2524 MODULE_LICENSE("GPL"); 2525 MODULE_ALIAS("dm-snapshot-origin"); 2526 MODULE_ALIAS("dm-snapshot-merge"); 2527