1 /* 2 * dm-snapshot.c 3 * 4 * Copyright (C) 2001-2002 Sistina Software (UK) Limited. 5 * 6 * This file is released under the GPL. 7 */ 8 9 #include <linux/blkdev.h> 10 #include <linux/device-mapper.h> 11 #include <linux/delay.h> 12 #include <linux/fs.h> 13 #include <linux/init.h> 14 #include <linux/kdev_t.h> 15 #include <linux/list.h> 16 #include <linux/mempool.h> 17 #include <linux/module.h> 18 #include <linux/slab.h> 19 #include <linux/vmalloc.h> 20 #include <linux/log2.h> 21 #include <linux/dm-kcopyd.h> 22 #include <linux/workqueue.h> 23 24 #include "dm-exception-store.h" 25 26 #define DM_MSG_PREFIX "snapshots" 27 28 /* 29 * The percentage increment we will wake up users at 30 */ 31 #define WAKE_UP_PERCENT 5 32 33 /* 34 * kcopyd priority of snapshot operations 35 */ 36 #define SNAPSHOT_COPY_PRIORITY 2 37 38 /* 39 * Reserve 1MB for each snapshot initially (with minimum of 1 page). 40 */ 41 #define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1) 42 43 /* 44 * The size of the mempool used to track chunks in use. 45 */ 46 #define MIN_IOS 256 47 48 #define DM_TRACKED_CHUNK_HASH_SIZE 16 49 #define DM_TRACKED_CHUNK_HASH(x) ((unsigned long)(x) & \ 50 (DM_TRACKED_CHUNK_HASH_SIZE - 1)) 51 52 struct exception_table { 53 uint32_t hash_mask; 54 unsigned hash_shift; 55 struct list_head *table; 56 }; 57 58 struct dm_snapshot { 59 struct rw_semaphore lock; 60 61 struct dm_dev *origin; 62 63 /* List of snapshots per Origin */ 64 struct list_head list; 65 66 /* You can't use a snapshot if this is 0 (e.g. if full) */ 67 int valid; 68 69 /* Origin writes don't trigger exceptions until this is set */ 70 int active; 71 72 mempool_t *pending_pool; 73 74 atomic_t pending_exceptions_count; 75 76 struct exception_table pending; 77 struct exception_table complete; 78 79 /* 80 * pe_lock protects all pending_exception operations and access 81 * as well as the snapshot_bios list. 82 */ 83 spinlock_t pe_lock; 84 85 /* The on disk metadata handler */ 86 struct dm_exception_store *store; 87 88 struct dm_kcopyd_client *kcopyd_client; 89 90 /* Queue of snapshot writes for ksnapd to flush */ 91 struct bio_list queued_bios; 92 struct work_struct queued_bios_work; 93 94 /* Chunks with outstanding reads */ 95 mempool_t *tracked_chunk_pool; 96 spinlock_t tracked_chunk_lock; 97 struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE]; 98 }; 99 100 static struct workqueue_struct *ksnapd; 101 static void flush_queued_bios(struct work_struct *work); 102 103 static sector_t chunk_to_sector(struct dm_exception_store *store, 104 chunk_t chunk) 105 { 106 return chunk << store->chunk_shift; 107 } 108 109 static int bdev_equal(struct block_device *lhs, struct block_device *rhs) 110 { 111 /* 112 * There is only ever one instance of a particular block 113 * device so we can compare pointers safely. 114 */ 115 return lhs == rhs; 116 } 117 118 struct dm_snap_pending_exception { 119 struct dm_snap_exception e; 120 121 /* 122 * Origin buffers waiting for this to complete are held 123 * in a bio list 124 */ 125 struct bio_list origin_bios; 126 struct bio_list snapshot_bios; 127 128 /* 129 * Short-term queue of pending exceptions prior to submission. 130 */ 131 struct list_head list; 132 133 /* 134 * The primary pending_exception is the one that holds 135 * the ref_count and the list of origin_bios for a 136 * group of pending_exceptions. It is always last to get freed. 137 * These fields get set up when writing to the origin. 138 */ 139 struct dm_snap_pending_exception *primary_pe; 140 141 /* 142 * Number of pending_exceptions processing this chunk. 143 * When this drops to zero we must complete the origin bios. 144 * If incrementing or decrementing this, hold pe->snap->lock for 145 * the sibling concerned and not pe->primary_pe->snap->lock unless 146 * they are the same. 147 */ 148 atomic_t ref_count; 149 150 /* Pointer back to snapshot context */ 151 struct dm_snapshot *snap; 152 153 /* 154 * 1 indicates the exception has already been sent to 155 * kcopyd. 156 */ 157 int started; 158 }; 159 160 /* 161 * Hash table mapping origin volumes to lists of snapshots and 162 * a lock to protect it 163 */ 164 static struct kmem_cache *exception_cache; 165 static struct kmem_cache *pending_cache; 166 167 struct dm_snap_tracked_chunk { 168 struct hlist_node node; 169 chunk_t chunk; 170 }; 171 172 static struct kmem_cache *tracked_chunk_cache; 173 174 static struct dm_snap_tracked_chunk *track_chunk(struct dm_snapshot *s, 175 chunk_t chunk) 176 { 177 struct dm_snap_tracked_chunk *c = mempool_alloc(s->tracked_chunk_pool, 178 GFP_NOIO); 179 unsigned long flags; 180 181 c->chunk = chunk; 182 183 spin_lock_irqsave(&s->tracked_chunk_lock, flags); 184 hlist_add_head(&c->node, 185 &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]); 186 spin_unlock_irqrestore(&s->tracked_chunk_lock, flags); 187 188 return c; 189 } 190 191 static void stop_tracking_chunk(struct dm_snapshot *s, 192 struct dm_snap_tracked_chunk *c) 193 { 194 unsigned long flags; 195 196 spin_lock_irqsave(&s->tracked_chunk_lock, flags); 197 hlist_del(&c->node); 198 spin_unlock_irqrestore(&s->tracked_chunk_lock, flags); 199 200 mempool_free(c, s->tracked_chunk_pool); 201 } 202 203 static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk) 204 { 205 struct dm_snap_tracked_chunk *c; 206 struct hlist_node *hn; 207 int found = 0; 208 209 spin_lock_irq(&s->tracked_chunk_lock); 210 211 hlist_for_each_entry(c, hn, 212 &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) { 213 if (c->chunk == chunk) { 214 found = 1; 215 break; 216 } 217 } 218 219 spin_unlock_irq(&s->tracked_chunk_lock); 220 221 return found; 222 } 223 224 /* 225 * One of these per registered origin, held in the snapshot_origins hash 226 */ 227 struct origin { 228 /* The origin device */ 229 struct block_device *bdev; 230 231 struct list_head hash_list; 232 233 /* List of snapshots for this origin */ 234 struct list_head snapshots; 235 }; 236 237 /* 238 * Size of the hash table for origin volumes. If we make this 239 * the size of the minors list then it should be nearly perfect 240 */ 241 #define ORIGIN_HASH_SIZE 256 242 #define ORIGIN_MASK 0xFF 243 static struct list_head *_origins; 244 static struct rw_semaphore _origins_lock; 245 246 static int init_origin_hash(void) 247 { 248 int i; 249 250 _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head), 251 GFP_KERNEL); 252 if (!_origins) { 253 DMERR("unable to allocate memory"); 254 return -ENOMEM; 255 } 256 257 for (i = 0; i < ORIGIN_HASH_SIZE; i++) 258 INIT_LIST_HEAD(_origins + i); 259 init_rwsem(&_origins_lock); 260 261 return 0; 262 } 263 264 static void exit_origin_hash(void) 265 { 266 kfree(_origins); 267 } 268 269 static unsigned origin_hash(struct block_device *bdev) 270 { 271 return bdev->bd_dev & ORIGIN_MASK; 272 } 273 274 static struct origin *__lookup_origin(struct block_device *origin) 275 { 276 struct list_head *ol; 277 struct origin *o; 278 279 ol = &_origins[origin_hash(origin)]; 280 list_for_each_entry (o, ol, hash_list) 281 if (bdev_equal(o->bdev, origin)) 282 return o; 283 284 return NULL; 285 } 286 287 static void __insert_origin(struct origin *o) 288 { 289 struct list_head *sl = &_origins[origin_hash(o->bdev)]; 290 list_add_tail(&o->hash_list, sl); 291 } 292 293 /* 294 * Make a note of the snapshot and its origin so we can look it 295 * up when the origin has a write on it. 296 */ 297 static int register_snapshot(struct dm_snapshot *snap) 298 { 299 struct origin *o, *new_o; 300 struct block_device *bdev = snap->origin->bdev; 301 302 new_o = kmalloc(sizeof(*new_o), GFP_KERNEL); 303 if (!new_o) 304 return -ENOMEM; 305 306 down_write(&_origins_lock); 307 o = __lookup_origin(bdev); 308 309 if (o) 310 kfree(new_o); 311 else { 312 /* New origin */ 313 o = new_o; 314 315 /* Initialise the struct */ 316 INIT_LIST_HEAD(&o->snapshots); 317 o->bdev = bdev; 318 319 __insert_origin(o); 320 } 321 322 list_add_tail(&snap->list, &o->snapshots); 323 324 up_write(&_origins_lock); 325 return 0; 326 } 327 328 static void unregister_snapshot(struct dm_snapshot *s) 329 { 330 struct origin *o; 331 332 down_write(&_origins_lock); 333 o = __lookup_origin(s->origin->bdev); 334 335 list_del(&s->list); 336 if (list_empty(&o->snapshots)) { 337 list_del(&o->hash_list); 338 kfree(o); 339 } 340 341 up_write(&_origins_lock); 342 } 343 344 /* 345 * Implementation of the exception hash tables. 346 * The lowest hash_shift bits of the chunk number are ignored, allowing 347 * some consecutive chunks to be grouped together. 348 */ 349 static int init_exception_table(struct exception_table *et, uint32_t size, 350 unsigned hash_shift) 351 { 352 unsigned int i; 353 354 et->hash_shift = hash_shift; 355 et->hash_mask = size - 1; 356 et->table = dm_vcalloc(size, sizeof(struct list_head)); 357 if (!et->table) 358 return -ENOMEM; 359 360 for (i = 0; i < size; i++) 361 INIT_LIST_HEAD(et->table + i); 362 363 return 0; 364 } 365 366 static void exit_exception_table(struct exception_table *et, struct kmem_cache *mem) 367 { 368 struct list_head *slot; 369 struct dm_snap_exception *ex, *next; 370 int i, size; 371 372 size = et->hash_mask + 1; 373 for (i = 0; i < size; i++) { 374 slot = et->table + i; 375 376 list_for_each_entry_safe (ex, next, slot, hash_list) 377 kmem_cache_free(mem, ex); 378 } 379 380 vfree(et->table); 381 } 382 383 static uint32_t exception_hash(struct exception_table *et, chunk_t chunk) 384 { 385 return (chunk >> et->hash_shift) & et->hash_mask; 386 } 387 388 static void insert_exception(struct exception_table *eh, 389 struct dm_snap_exception *e) 390 { 391 struct list_head *l = &eh->table[exception_hash(eh, e->old_chunk)]; 392 list_add(&e->hash_list, l); 393 } 394 395 static void remove_exception(struct dm_snap_exception *e) 396 { 397 list_del(&e->hash_list); 398 } 399 400 /* 401 * Return the exception data for a sector, or NULL if not 402 * remapped. 403 */ 404 static struct dm_snap_exception *lookup_exception(struct exception_table *et, 405 chunk_t chunk) 406 { 407 struct list_head *slot; 408 struct dm_snap_exception *e; 409 410 slot = &et->table[exception_hash(et, chunk)]; 411 list_for_each_entry (e, slot, hash_list) 412 if (chunk >= e->old_chunk && 413 chunk <= e->old_chunk + dm_consecutive_chunk_count(e)) 414 return e; 415 416 return NULL; 417 } 418 419 static struct dm_snap_exception *alloc_exception(void) 420 { 421 struct dm_snap_exception *e; 422 423 e = kmem_cache_alloc(exception_cache, GFP_NOIO); 424 if (!e) 425 e = kmem_cache_alloc(exception_cache, GFP_ATOMIC); 426 427 return e; 428 } 429 430 static void free_exception(struct dm_snap_exception *e) 431 { 432 kmem_cache_free(exception_cache, e); 433 } 434 435 static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s) 436 { 437 struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool, 438 GFP_NOIO); 439 440 atomic_inc(&s->pending_exceptions_count); 441 pe->snap = s; 442 443 return pe; 444 } 445 446 static void free_pending_exception(struct dm_snap_pending_exception *pe) 447 { 448 struct dm_snapshot *s = pe->snap; 449 450 mempool_free(pe, s->pending_pool); 451 smp_mb__before_atomic_dec(); 452 atomic_dec(&s->pending_exceptions_count); 453 } 454 455 static void insert_completed_exception(struct dm_snapshot *s, 456 struct dm_snap_exception *new_e) 457 { 458 struct exception_table *eh = &s->complete; 459 struct list_head *l; 460 struct dm_snap_exception *e = NULL; 461 462 l = &eh->table[exception_hash(eh, new_e->old_chunk)]; 463 464 /* Add immediately if this table doesn't support consecutive chunks */ 465 if (!eh->hash_shift) 466 goto out; 467 468 /* List is ordered by old_chunk */ 469 list_for_each_entry_reverse(e, l, hash_list) { 470 /* Insert after an existing chunk? */ 471 if (new_e->old_chunk == (e->old_chunk + 472 dm_consecutive_chunk_count(e) + 1) && 473 new_e->new_chunk == (dm_chunk_number(e->new_chunk) + 474 dm_consecutive_chunk_count(e) + 1)) { 475 dm_consecutive_chunk_count_inc(e); 476 free_exception(new_e); 477 return; 478 } 479 480 /* Insert before an existing chunk? */ 481 if (new_e->old_chunk == (e->old_chunk - 1) && 482 new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) { 483 dm_consecutive_chunk_count_inc(e); 484 e->old_chunk--; 485 e->new_chunk--; 486 free_exception(new_e); 487 return; 488 } 489 490 if (new_e->old_chunk > e->old_chunk) 491 break; 492 } 493 494 out: 495 list_add(&new_e->hash_list, e ? &e->hash_list : l); 496 } 497 498 /* 499 * Callback used by the exception stores to load exceptions when 500 * initialising. 501 */ 502 static int dm_add_exception(void *context, chunk_t old, chunk_t new) 503 { 504 struct dm_snapshot *s = context; 505 struct dm_snap_exception *e; 506 507 e = alloc_exception(); 508 if (!e) 509 return -ENOMEM; 510 511 e->old_chunk = old; 512 513 /* Consecutive_count is implicitly initialised to zero */ 514 e->new_chunk = new; 515 516 insert_completed_exception(s, e); 517 518 return 0; 519 } 520 521 /* 522 * Hard coded magic. 523 */ 524 static int calc_max_buckets(void) 525 { 526 /* use a fixed size of 2MB */ 527 unsigned long mem = 2 * 1024 * 1024; 528 mem /= sizeof(struct list_head); 529 530 return mem; 531 } 532 533 /* 534 * Allocate room for a suitable hash table. 535 */ 536 static int init_hash_tables(struct dm_snapshot *s) 537 { 538 sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets; 539 540 /* 541 * Calculate based on the size of the original volume or 542 * the COW volume... 543 */ 544 cow_dev_size = get_dev_size(s->store->cow->bdev); 545 origin_dev_size = get_dev_size(s->origin->bdev); 546 max_buckets = calc_max_buckets(); 547 548 hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift; 549 hash_size = min(hash_size, max_buckets); 550 551 hash_size = rounddown_pow_of_two(hash_size); 552 if (init_exception_table(&s->complete, hash_size, 553 DM_CHUNK_CONSECUTIVE_BITS)) 554 return -ENOMEM; 555 556 /* 557 * Allocate hash table for in-flight exceptions 558 * Make this smaller than the real hash table 559 */ 560 hash_size >>= 3; 561 if (hash_size < 64) 562 hash_size = 64; 563 564 if (init_exception_table(&s->pending, hash_size, 0)) { 565 exit_exception_table(&s->complete, exception_cache); 566 return -ENOMEM; 567 } 568 569 return 0; 570 } 571 572 /* 573 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size> 574 */ 575 static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) 576 { 577 struct dm_snapshot *s; 578 int i; 579 int r = -EINVAL; 580 char *origin_path; 581 struct dm_exception_store *store; 582 unsigned args_used; 583 584 if (argc != 4) { 585 ti->error = "requires exactly 4 arguments"; 586 r = -EINVAL; 587 goto bad_args; 588 } 589 590 origin_path = argv[0]; 591 argv++; 592 argc--; 593 594 r = dm_exception_store_create(ti, argc, argv, &args_used, &store); 595 if (r) { 596 ti->error = "Couldn't create exception store"; 597 r = -EINVAL; 598 goto bad_args; 599 } 600 601 argv += args_used; 602 argc -= args_used; 603 604 s = kmalloc(sizeof(*s), GFP_KERNEL); 605 if (!s) { 606 ti->error = "Cannot allocate snapshot context private " 607 "structure"; 608 r = -ENOMEM; 609 goto bad_snap; 610 } 611 612 r = dm_get_device(ti, origin_path, 0, ti->len, FMODE_READ, &s->origin); 613 if (r) { 614 ti->error = "Cannot get origin device"; 615 goto bad_origin; 616 } 617 618 s->store = store; 619 s->valid = 1; 620 s->active = 0; 621 atomic_set(&s->pending_exceptions_count, 0); 622 init_rwsem(&s->lock); 623 spin_lock_init(&s->pe_lock); 624 625 /* Allocate hash table for COW data */ 626 if (init_hash_tables(s)) { 627 ti->error = "Unable to allocate hash table space"; 628 r = -ENOMEM; 629 goto bad_hash_tables; 630 } 631 632 r = dm_kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client); 633 if (r) { 634 ti->error = "Could not create kcopyd client"; 635 goto bad_kcopyd; 636 } 637 638 s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache); 639 if (!s->pending_pool) { 640 ti->error = "Could not allocate mempool for pending exceptions"; 641 goto bad_pending_pool; 642 } 643 644 s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS, 645 tracked_chunk_cache); 646 if (!s->tracked_chunk_pool) { 647 ti->error = "Could not allocate tracked_chunk mempool for " 648 "tracking reads"; 649 goto bad_tracked_chunk_pool; 650 } 651 652 for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++) 653 INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]); 654 655 spin_lock_init(&s->tracked_chunk_lock); 656 657 /* Metadata must only be loaded into one table at once */ 658 r = s->store->type->read_metadata(s->store, dm_add_exception, 659 (void *)s); 660 if (r < 0) { 661 ti->error = "Failed to read snapshot metadata"; 662 goto bad_load_and_register; 663 } else if (r > 0) { 664 s->valid = 0; 665 DMWARN("Snapshot is marked invalid."); 666 } 667 668 bio_list_init(&s->queued_bios); 669 INIT_WORK(&s->queued_bios_work, flush_queued_bios); 670 671 /* Add snapshot to the list of snapshots for this origin */ 672 /* Exceptions aren't triggered till snapshot_resume() is called */ 673 if (register_snapshot(s)) { 674 r = -EINVAL; 675 ti->error = "Cannot register snapshot origin"; 676 goto bad_load_and_register; 677 } 678 679 ti->private = s; 680 ti->split_io = s->store->chunk_size; 681 682 return 0; 683 684 bad_load_and_register: 685 mempool_destroy(s->tracked_chunk_pool); 686 687 bad_tracked_chunk_pool: 688 mempool_destroy(s->pending_pool); 689 690 bad_pending_pool: 691 dm_kcopyd_client_destroy(s->kcopyd_client); 692 693 bad_kcopyd: 694 exit_exception_table(&s->pending, pending_cache); 695 exit_exception_table(&s->complete, exception_cache); 696 697 bad_hash_tables: 698 dm_put_device(ti, s->origin); 699 700 bad_origin: 701 kfree(s); 702 703 bad_snap: 704 dm_exception_store_destroy(store); 705 706 bad_args: 707 return r; 708 } 709 710 static void __free_exceptions(struct dm_snapshot *s) 711 { 712 dm_kcopyd_client_destroy(s->kcopyd_client); 713 s->kcopyd_client = NULL; 714 715 exit_exception_table(&s->pending, pending_cache); 716 exit_exception_table(&s->complete, exception_cache); 717 } 718 719 static void snapshot_dtr(struct dm_target *ti) 720 { 721 #ifdef CONFIG_DM_DEBUG 722 int i; 723 #endif 724 struct dm_snapshot *s = ti->private; 725 726 flush_workqueue(ksnapd); 727 728 /* Prevent further origin writes from using this snapshot. */ 729 /* After this returns there can be no new kcopyd jobs. */ 730 unregister_snapshot(s); 731 732 while (atomic_read(&s->pending_exceptions_count)) 733 msleep(1); 734 /* 735 * Ensure instructions in mempool_destroy aren't reordered 736 * before atomic_read. 737 */ 738 smp_mb(); 739 740 #ifdef CONFIG_DM_DEBUG 741 for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++) 742 BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i])); 743 #endif 744 745 mempool_destroy(s->tracked_chunk_pool); 746 747 __free_exceptions(s); 748 749 mempool_destroy(s->pending_pool); 750 751 dm_put_device(ti, s->origin); 752 753 dm_exception_store_destroy(s->store); 754 755 kfree(s); 756 } 757 758 /* 759 * Flush a list of buffers. 760 */ 761 static void flush_bios(struct bio *bio) 762 { 763 struct bio *n; 764 765 while (bio) { 766 n = bio->bi_next; 767 bio->bi_next = NULL; 768 generic_make_request(bio); 769 bio = n; 770 } 771 } 772 773 static void flush_queued_bios(struct work_struct *work) 774 { 775 struct dm_snapshot *s = 776 container_of(work, struct dm_snapshot, queued_bios_work); 777 struct bio *queued_bios; 778 unsigned long flags; 779 780 spin_lock_irqsave(&s->pe_lock, flags); 781 queued_bios = bio_list_get(&s->queued_bios); 782 spin_unlock_irqrestore(&s->pe_lock, flags); 783 784 flush_bios(queued_bios); 785 } 786 787 /* 788 * Error a list of buffers. 789 */ 790 static void error_bios(struct bio *bio) 791 { 792 struct bio *n; 793 794 while (bio) { 795 n = bio->bi_next; 796 bio->bi_next = NULL; 797 bio_io_error(bio); 798 bio = n; 799 } 800 } 801 802 static void __invalidate_snapshot(struct dm_snapshot *s, int err) 803 { 804 if (!s->valid) 805 return; 806 807 if (err == -EIO) 808 DMERR("Invalidating snapshot: Error reading/writing."); 809 else if (err == -ENOMEM) 810 DMERR("Invalidating snapshot: Unable to allocate exception."); 811 812 if (s->store->type->drop_snapshot) 813 s->store->type->drop_snapshot(s->store); 814 815 s->valid = 0; 816 817 dm_table_event(s->store->ti->table); 818 } 819 820 static void get_pending_exception(struct dm_snap_pending_exception *pe) 821 { 822 atomic_inc(&pe->ref_count); 823 } 824 825 static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe) 826 { 827 struct dm_snap_pending_exception *primary_pe; 828 struct bio *origin_bios = NULL; 829 830 primary_pe = pe->primary_pe; 831 832 /* 833 * If this pe is involved in a write to the origin and 834 * it is the last sibling to complete then release 835 * the bios for the original write to the origin. 836 */ 837 if (primary_pe && 838 atomic_dec_and_test(&primary_pe->ref_count)) { 839 origin_bios = bio_list_get(&primary_pe->origin_bios); 840 free_pending_exception(primary_pe); 841 } 842 843 /* 844 * Free the pe if it's not linked to an origin write or if 845 * it's not itself a primary pe. 846 */ 847 if (!primary_pe || primary_pe != pe) 848 free_pending_exception(pe); 849 850 return origin_bios; 851 } 852 853 static void pending_complete(struct dm_snap_pending_exception *pe, int success) 854 { 855 struct dm_snap_exception *e; 856 struct dm_snapshot *s = pe->snap; 857 struct bio *origin_bios = NULL; 858 struct bio *snapshot_bios = NULL; 859 int error = 0; 860 861 if (!success) { 862 /* Read/write error - snapshot is unusable */ 863 down_write(&s->lock); 864 __invalidate_snapshot(s, -EIO); 865 error = 1; 866 goto out; 867 } 868 869 e = alloc_exception(); 870 if (!e) { 871 down_write(&s->lock); 872 __invalidate_snapshot(s, -ENOMEM); 873 error = 1; 874 goto out; 875 } 876 *e = pe->e; 877 878 down_write(&s->lock); 879 if (!s->valid) { 880 free_exception(e); 881 error = 1; 882 goto out; 883 } 884 885 /* 886 * Check for conflicting reads. This is extremely improbable, 887 * so msleep(1) is sufficient and there is no need for a wait queue. 888 */ 889 while (__chunk_is_tracked(s, pe->e.old_chunk)) 890 msleep(1); 891 892 /* 893 * Add a proper exception, and remove the 894 * in-flight exception from the list. 895 */ 896 insert_completed_exception(s, e); 897 898 out: 899 remove_exception(&pe->e); 900 snapshot_bios = bio_list_get(&pe->snapshot_bios); 901 origin_bios = put_pending_exception(pe); 902 903 up_write(&s->lock); 904 905 /* Submit any pending write bios */ 906 if (error) 907 error_bios(snapshot_bios); 908 else 909 flush_bios(snapshot_bios); 910 911 flush_bios(origin_bios); 912 } 913 914 static void commit_callback(void *context, int success) 915 { 916 struct dm_snap_pending_exception *pe = context; 917 918 pending_complete(pe, success); 919 } 920 921 /* 922 * Called when the copy I/O has finished. kcopyd actually runs 923 * this code so don't block. 924 */ 925 static void copy_callback(int read_err, unsigned long write_err, void *context) 926 { 927 struct dm_snap_pending_exception *pe = context; 928 struct dm_snapshot *s = pe->snap; 929 930 if (read_err || write_err) 931 pending_complete(pe, 0); 932 933 else 934 /* Update the metadata if we are persistent */ 935 s->store->type->commit_exception(s->store, &pe->e, 936 commit_callback, pe); 937 } 938 939 /* 940 * Dispatches the copy operation to kcopyd. 941 */ 942 static void start_copy(struct dm_snap_pending_exception *pe) 943 { 944 struct dm_snapshot *s = pe->snap; 945 struct dm_io_region src, dest; 946 struct block_device *bdev = s->origin->bdev; 947 sector_t dev_size; 948 949 dev_size = get_dev_size(bdev); 950 951 src.bdev = bdev; 952 src.sector = chunk_to_sector(s->store, pe->e.old_chunk); 953 src.count = min(s->store->chunk_size, dev_size - src.sector); 954 955 dest.bdev = s->store->cow->bdev; 956 dest.sector = chunk_to_sector(s->store, pe->e.new_chunk); 957 dest.count = src.count; 958 959 /* Hand over to kcopyd */ 960 dm_kcopyd_copy(s->kcopyd_client, 961 &src, 1, &dest, 0, copy_callback, pe); 962 } 963 964 static struct dm_snap_pending_exception * 965 __lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk) 966 { 967 struct dm_snap_exception *e = lookup_exception(&s->pending, chunk); 968 969 if (!e) 970 return NULL; 971 972 return container_of(e, struct dm_snap_pending_exception, e); 973 } 974 975 /* 976 * Looks to see if this snapshot already has a pending exception 977 * for this chunk, otherwise it allocates a new one and inserts 978 * it into the pending table. 979 * 980 * NOTE: a write lock must be held on snap->lock before calling 981 * this. 982 */ 983 static struct dm_snap_pending_exception * 984 __find_pending_exception(struct dm_snapshot *s, 985 struct dm_snap_pending_exception *pe, chunk_t chunk) 986 { 987 struct dm_snap_pending_exception *pe2; 988 989 pe2 = __lookup_pending_exception(s, chunk); 990 if (pe2) { 991 free_pending_exception(pe); 992 return pe2; 993 } 994 995 pe->e.old_chunk = chunk; 996 bio_list_init(&pe->origin_bios); 997 bio_list_init(&pe->snapshot_bios); 998 pe->primary_pe = NULL; 999 atomic_set(&pe->ref_count, 0); 1000 pe->started = 0; 1001 1002 if (s->store->type->prepare_exception(s->store, &pe->e)) { 1003 free_pending_exception(pe); 1004 return NULL; 1005 } 1006 1007 get_pending_exception(pe); 1008 insert_exception(&s->pending, &pe->e); 1009 1010 return pe; 1011 } 1012 1013 static void remap_exception(struct dm_snapshot *s, struct dm_snap_exception *e, 1014 struct bio *bio, chunk_t chunk) 1015 { 1016 bio->bi_bdev = s->store->cow->bdev; 1017 bio->bi_sector = chunk_to_sector(s->store, 1018 dm_chunk_number(e->new_chunk) + 1019 (chunk - e->old_chunk)) + 1020 (bio->bi_sector & 1021 s->store->chunk_mask); 1022 } 1023 1024 static int snapshot_map(struct dm_target *ti, struct bio *bio, 1025 union map_info *map_context) 1026 { 1027 struct dm_snap_exception *e; 1028 struct dm_snapshot *s = ti->private; 1029 int r = DM_MAPIO_REMAPPED; 1030 chunk_t chunk; 1031 struct dm_snap_pending_exception *pe = NULL; 1032 1033 chunk = sector_to_chunk(s->store, bio->bi_sector); 1034 1035 /* Full snapshots are not usable */ 1036 /* To get here the table must be live so s->active is always set. */ 1037 if (!s->valid) 1038 return -EIO; 1039 1040 /* FIXME: should only take write lock if we need 1041 * to copy an exception */ 1042 down_write(&s->lock); 1043 1044 if (!s->valid) { 1045 r = -EIO; 1046 goto out_unlock; 1047 } 1048 1049 /* If the block is already remapped - use that, else remap it */ 1050 e = lookup_exception(&s->complete, chunk); 1051 if (e) { 1052 remap_exception(s, e, bio, chunk); 1053 goto out_unlock; 1054 } 1055 1056 /* 1057 * Write to snapshot - higher level takes care of RW/RO 1058 * flags so we should only get this if we are 1059 * writeable. 1060 */ 1061 if (bio_rw(bio) == WRITE) { 1062 pe = __lookup_pending_exception(s, chunk); 1063 if (!pe) { 1064 up_write(&s->lock); 1065 pe = alloc_pending_exception(s); 1066 down_write(&s->lock); 1067 1068 if (!s->valid) { 1069 free_pending_exception(pe); 1070 r = -EIO; 1071 goto out_unlock; 1072 } 1073 1074 e = lookup_exception(&s->complete, chunk); 1075 if (e) { 1076 free_pending_exception(pe); 1077 remap_exception(s, e, bio, chunk); 1078 goto out_unlock; 1079 } 1080 1081 pe = __find_pending_exception(s, pe, chunk); 1082 if (!pe) { 1083 __invalidate_snapshot(s, -ENOMEM); 1084 r = -EIO; 1085 goto out_unlock; 1086 } 1087 } 1088 1089 remap_exception(s, &pe->e, bio, chunk); 1090 bio_list_add(&pe->snapshot_bios, bio); 1091 1092 r = DM_MAPIO_SUBMITTED; 1093 1094 if (!pe->started) { 1095 /* this is protected by snap->lock */ 1096 pe->started = 1; 1097 up_write(&s->lock); 1098 start_copy(pe); 1099 goto out; 1100 } 1101 } else { 1102 bio->bi_bdev = s->origin->bdev; 1103 map_context->ptr = track_chunk(s, chunk); 1104 } 1105 1106 out_unlock: 1107 up_write(&s->lock); 1108 out: 1109 return r; 1110 } 1111 1112 static int snapshot_end_io(struct dm_target *ti, struct bio *bio, 1113 int error, union map_info *map_context) 1114 { 1115 struct dm_snapshot *s = ti->private; 1116 struct dm_snap_tracked_chunk *c = map_context->ptr; 1117 1118 if (c) 1119 stop_tracking_chunk(s, c); 1120 1121 return 0; 1122 } 1123 1124 static void snapshot_resume(struct dm_target *ti) 1125 { 1126 struct dm_snapshot *s = ti->private; 1127 1128 down_write(&s->lock); 1129 s->active = 1; 1130 up_write(&s->lock); 1131 } 1132 1133 static int snapshot_status(struct dm_target *ti, status_type_t type, 1134 char *result, unsigned int maxlen) 1135 { 1136 unsigned sz = 0; 1137 struct dm_snapshot *snap = ti->private; 1138 1139 switch (type) { 1140 case STATUSTYPE_INFO: 1141 if (!snap->valid) 1142 DMEMIT("Invalid"); 1143 else { 1144 if (snap->store->type->fraction_full) { 1145 sector_t numerator, denominator; 1146 snap->store->type->fraction_full(snap->store, 1147 &numerator, 1148 &denominator); 1149 DMEMIT("%llu/%llu", 1150 (unsigned long long)numerator, 1151 (unsigned long long)denominator); 1152 } 1153 else 1154 DMEMIT("Unknown"); 1155 } 1156 break; 1157 1158 case STATUSTYPE_TABLE: 1159 /* 1160 * kdevname returns a static pointer so we need 1161 * to make private copies if the output is to 1162 * make sense. 1163 */ 1164 DMEMIT("%s", snap->origin->name); 1165 snap->store->type->status(snap->store, type, result + sz, 1166 maxlen - sz); 1167 break; 1168 } 1169 1170 return 0; 1171 } 1172 1173 /*----------------------------------------------------------------- 1174 * Origin methods 1175 *---------------------------------------------------------------*/ 1176 static int __origin_write(struct list_head *snapshots, struct bio *bio) 1177 { 1178 int r = DM_MAPIO_REMAPPED, first = 0; 1179 struct dm_snapshot *snap; 1180 struct dm_snap_exception *e; 1181 struct dm_snap_pending_exception *pe, *next_pe, *primary_pe = NULL; 1182 chunk_t chunk; 1183 LIST_HEAD(pe_queue); 1184 1185 /* Do all the snapshots on this origin */ 1186 list_for_each_entry (snap, snapshots, list) { 1187 1188 down_write(&snap->lock); 1189 1190 /* Only deal with valid and active snapshots */ 1191 if (!snap->valid || !snap->active) 1192 goto next_snapshot; 1193 1194 /* Nothing to do if writing beyond end of snapshot */ 1195 if (bio->bi_sector >= dm_table_get_size(snap->store->ti->table)) 1196 goto next_snapshot; 1197 1198 /* 1199 * Remember, different snapshots can have 1200 * different chunk sizes. 1201 */ 1202 chunk = sector_to_chunk(snap->store, bio->bi_sector); 1203 1204 /* 1205 * Check exception table to see if block 1206 * is already remapped in this snapshot 1207 * and trigger an exception if not. 1208 * 1209 * ref_count is initialised to 1 so pending_complete() 1210 * won't destroy the primary_pe while we're inside this loop. 1211 */ 1212 e = lookup_exception(&snap->complete, chunk); 1213 if (e) 1214 goto next_snapshot; 1215 1216 pe = __lookup_pending_exception(snap, chunk); 1217 if (!pe) { 1218 up_write(&snap->lock); 1219 pe = alloc_pending_exception(snap); 1220 down_write(&snap->lock); 1221 1222 if (!snap->valid) { 1223 free_pending_exception(pe); 1224 goto next_snapshot; 1225 } 1226 1227 e = lookup_exception(&snap->complete, chunk); 1228 if (e) { 1229 free_pending_exception(pe); 1230 goto next_snapshot; 1231 } 1232 1233 pe = __find_pending_exception(snap, pe, chunk); 1234 if (!pe) { 1235 __invalidate_snapshot(snap, -ENOMEM); 1236 goto next_snapshot; 1237 } 1238 } 1239 1240 if (!primary_pe) { 1241 /* 1242 * Either every pe here has same 1243 * primary_pe or none has one yet. 1244 */ 1245 if (pe->primary_pe) 1246 primary_pe = pe->primary_pe; 1247 else { 1248 primary_pe = pe; 1249 first = 1; 1250 } 1251 1252 bio_list_add(&primary_pe->origin_bios, bio); 1253 1254 r = DM_MAPIO_SUBMITTED; 1255 } 1256 1257 if (!pe->primary_pe) { 1258 pe->primary_pe = primary_pe; 1259 get_pending_exception(primary_pe); 1260 } 1261 1262 if (!pe->started) { 1263 pe->started = 1; 1264 list_add_tail(&pe->list, &pe_queue); 1265 } 1266 1267 next_snapshot: 1268 up_write(&snap->lock); 1269 } 1270 1271 if (!primary_pe) 1272 return r; 1273 1274 /* 1275 * If this is the first time we're processing this chunk and 1276 * ref_count is now 1 it means all the pending exceptions 1277 * got completed while we were in the loop above, so it falls to 1278 * us here to remove the primary_pe and submit any origin_bios. 1279 */ 1280 1281 if (first && atomic_dec_and_test(&primary_pe->ref_count)) { 1282 flush_bios(bio_list_get(&primary_pe->origin_bios)); 1283 free_pending_exception(primary_pe); 1284 /* If we got here, pe_queue is necessarily empty. */ 1285 return r; 1286 } 1287 1288 /* 1289 * Now that we have a complete pe list we can start the copying. 1290 */ 1291 list_for_each_entry_safe(pe, next_pe, &pe_queue, list) 1292 start_copy(pe); 1293 1294 return r; 1295 } 1296 1297 /* 1298 * Called on a write from the origin driver. 1299 */ 1300 static int do_origin(struct dm_dev *origin, struct bio *bio) 1301 { 1302 struct origin *o; 1303 int r = DM_MAPIO_REMAPPED; 1304 1305 down_read(&_origins_lock); 1306 o = __lookup_origin(origin->bdev); 1307 if (o) 1308 r = __origin_write(&o->snapshots, bio); 1309 up_read(&_origins_lock); 1310 1311 return r; 1312 } 1313 1314 /* 1315 * Origin: maps a linear range of a device, with hooks for snapshotting. 1316 */ 1317 1318 /* 1319 * Construct an origin mapping: <dev_path> 1320 * The context for an origin is merely a 'struct dm_dev *' 1321 * pointing to the real device. 1322 */ 1323 static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv) 1324 { 1325 int r; 1326 struct dm_dev *dev; 1327 1328 if (argc != 1) { 1329 ti->error = "origin: incorrect number of arguments"; 1330 return -EINVAL; 1331 } 1332 1333 r = dm_get_device(ti, argv[0], 0, ti->len, 1334 dm_table_get_mode(ti->table), &dev); 1335 if (r) { 1336 ti->error = "Cannot get target device"; 1337 return r; 1338 } 1339 1340 ti->private = dev; 1341 return 0; 1342 } 1343 1344 static void origin_dtr(struct dm_target *ti) 1345 { 1346 struct dm_dev *dev = ti->private; 1347 dm_put_device(ti, dev); 1348 } 1349 1350 static int origin_map(struct dm_target *ti, struct bio *bio, 1351 union map_info *map_context) 1352 { 1353 struct dm_dev *dev = ti->private; 1354 bio->bi_bdev = dev->bdev; 1355 1356 /* Only tell snapshots if this is a write */ 1357 return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED; 1358 } 1359 1360 #define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r)) 1361 1362 /* 1363 * Set the target "split_io" field to the minimum of all the snapshots' 1364 * chunk sizes. 1365 */ 1366 static void origin_resume(struct dm_target *ti) 1367 { 1368 struct dm_dev *dev = ti->private; 1369 struct dm_snapshot *snap; 1370 struct origin *o; 1371 chunk_t chunk_size = 0; 1372 1373 down_read(&_origins_lock); 1374 o = __lookup_origin(dev->bdev); 1375 if (o) 1376 list_for_each_entry (snap, &o->snapshots, list) 1377 chunk_size = min_not_zero(chunk_size, 1378 snap->store->chunk_size); 1379 up_read(&_origins_lock); 1380 1381 ti->split_io = chunk_size; 1382 } 1383 1384 static int origin_status(struct dm_target *ti, status_type_t type, char *result, 1385 unsigned int maxlen) 1386 { 1387 struct dm_dev *dev = ti->private; 1388 1389 switch (type) { 1390 case STATUSTYPE_INFO: 1391 result[0] = '\0'; 1392 break; 1393 1394 case STATUSTYPE_TABLE: 1395 snprintf(result, maxlen, "%s", dev->name); 1396 break; 1397 } 1398 1399 return 0; 1400 } 1401 1402 static struct target_type origin_target = { 1403 .name = "snapshot-origin", 1404 .version = {1, 6, 0}, 1405 .module = THIS_MODULE, 1406 .ctr = origin_ctr, 1407 .dtr = origin_dtr, 1408 .map = origin_map, 1409 .resume = origin_resume, 1410 .status = origin_status, 1411 }; 1412 1413 static struct target_type snapshot_target = { 1414 .name = "snapshot", 1415 .version = {1, 6, 0}, 1416 .module = THIS_MODULE, 1417 .ctr = snapshot_ctr, 1418 .dtr = snapshot_dtr, 1419 .map = snapshot_map, 1420 .end_io = snapshot_end_io, 1421 .resume = snapshot_resume, 1422 .status = snapshot_status, 1423 }; 1424 1425 static int __init dm_snapshot_init(void) 1426 { 1427 int r; 1428 1429 r = dm_exception_store_init(); 1430 if (r) { 1431 DMERR("Failed to initialize exception stores"); 1432 return r; 1433 } 1434 1435 r = dm_register_target(&snapshot_target); 1436 if (r) { 1437 DMERR("snapshot target register failed %d", r); 1438 return r; 1439 } 1440 1441 r = dm_register_target(&origin_target); 1442 if (r < 0) { 1443 DMERR("Origin target register failed %d", r); 1444 goto bad1; 1445 } 1446 1447 r = init_origin_hash(); 1448 if (r) { 1449 DMERR("init_origin_hash failed."); 1450 goto bad2; 1451 } 1452 1453 exception_cache = KMEM_CACHE(dm_snap_exception, 0); 1454 if (!exception_cache) { 1455 DMERR("Couldn't create exception cache."); 1456 r = -ENOMEM; 1457 goto bad3; 1458 } 1459 1460 pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0); 1461 if (!pending_cache) { 1462 DMERR("Couldn't create pending cache."); 1463 r = -ENOMEM; 1464 goto bad4; 1465 } 1466 1467 tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0); 1468 if (!tracked_chunk_cache) { 1469 DMERR("Couldn't create cache to track chunks in use."); 1470 r = -ENOMEM; 1471 goto bad5; 1472 } 1473 1474 ksnapd = create_singlethread_workqueue("ksnapd"); 1475 if (!ksnapd) { 1476 DMERR("Failed to create ksnapd workqueue."); 1477 r = -ENOMEM; 1478 goto bad_pending_pool; 1479 } 1480 1481 return 0; 1482 1483 bad_pending_pool: 1484 kmem_cache_destroy(tracked_chunk_cache); 1485 bad5: 1486 kmem_cache_destroy(pending_cache); 1487 bad4: 1488 kmem_cache_destroy(exception_cache); 1489 bad3: 1490 exit_origin_hash(); 1491 bad2: 1492 dm_unregister_target(&origin_target); 1493 bad1: 1494 dm_unregister_target(&snapshot_target); 1495 return r; 1496 } 1497 1498 static void __exit dm_snapshot_exit(void) 1499 { 1500 destroy_workqueue(ksnapd); 1501 1502 dm_unregister_target(&snapshot_target); 1503 dm_unregister_target(&origin_target); 1504 1505 exit_origin_hash(); 1506 kmem_cache_destroy(pending_cache); 1507 kmem_cache_destroy(exception_cache); 1508 kmem_cache_destroy(tracked_chunk_cache); 1509 1510 dm_exception_store_exit(); 1511 } 1512 1513 /* Module hooks */ 1514 module_init(dm_snapshot_init); 1515 module_exit(dm_snapshot_exit); 1516 1517 MODULE_DESCRIPTION(DM_NAME " snapshot target"); 1518 MODULE_AUTHOR("Joe Thornber"); 1519 MODULE_LICENSE("GPL"); 1520