/*
 * dm-snapshot.c
 *
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/ctype.h>
#include <linux/device-mapper.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>

#include "dm-snap.h"
#include "dm-bio-list.h"

#define DM_MSG_PREFIX "snapshots"

/*
 * The percentage increment we will wake up users at
 */
#define WAKE_UP_PERCENT 5

/*
 * kcopyd priority of snapshot operations
 */
#define SNAPSHOT_COPY_PRIORITY 2

/*
 * Reserve 1MB for each snapshot initially (with minimum of 1 page).
 */
#define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1)

/*
 * The size of the mempool used to track chunks in use.
 */
#define MIN_IOS 256

static struct workqueue_struct *ksnapd;
static void flush_queued_bios(struct work_struct *work);

struct dm_snap_pending_exception {
	struct dm_snap_exception e;

	/*
	 * Origin buffers waiting for this to complete are held
	 * in a bio list
	 */
	struct bio_list origin_bios;
	struct bio_list snapshot_bios;

	/*
	 * Short-term queue of pending exceptions prior to submission.
	 */
	struct list_head list;

	/*
	 * The primary pending_exception is the one that holds
	 * the ref_count and the list of origin_bios for a
	 * group of pending_exceptions.  It is always last to get freed.
	 * These fields get set up when writing to the origin.
	 */
	struct dm_snap_pending_exception *primary_pe;

	/*
	 * Number of pending_exceptions processing this chunk.
	 * When this drops to zero we must complete the origin bios.
	 * If incrementing or decrementing this, hold pe->snap->lock for
	 * the sibling concerned and not pe->primary_pe->snap->lock unless
	 * they are the same.
	 */
	atomic_t ref_count;

	/* Pointer back to snapshot context */
	struct dm_snapshot *snap;

	/*
	 * 1 indicates the exception has already been sent to
	 * kcopyd.
	 */
	int started;
};
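
/*
 * Rough lifecycle of a pending exception, summarising the code below:
 * the first write to a chunk that has not yet been copied allocates one
 * (__find_pending_exception()), bios are queued on it, start_copy()
 * hands the chunk to kcopyd, and pending_complete() finally turns it
 * into a completed exception, releases the queued bios and frees it.
 */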

/*
 * Hash table mapping origin volumes to lists of snapshots and
 * a lock to protect it
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;

struct dm_snap_tracked_chunk {
	struct hlist_node node;
	chunk_t chunk;
};

static struct kmem_cache *tracked_chunk_cache;

static struct dm_snap_tracked_chunk *track_chunk(struct dm_snapshot *s,
						 chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c = mempool_alloc(s->tracked_chunk_pool,
							GFP_NOIO);
	unsigned long flags;

	c->chunk = chunk;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_add_head(&c->node,
		       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

	return c;
}

static void stop_tracking_chunk(struct dm_snapshot *s,
				struct dm_snap_tracked_chunk *c)
{
	unsigned long flags;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_del(&c->node);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

	mempool_free(c, s->tracked_chunk_pool);
}

static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c;
	struct hlist_node *hn;
	int found = 0;

	spin_lock_irq(&s->tracked_chunk_lock);

	hlist_for_each_entry(c, hn,
	    &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
		if (c->chunk == chunk) {
			found = 1;
			break;
		}
	}

	spin_unlock_irq(&s->tracked_chunk_lock);

	return found;
}

/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
	/* The origin device */
	struct block_device *bdev;

	struct list_head hash_list;

	/* List of snapshots for this origin */
	struct list_head snapshots;
};

/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF
static struct list_head *_origins;
static struct rw_semaphore _origins_lock;

static int init_origin_hash(void)
{
	int i;

	_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
			   GFP_KERNEL);
	if (!_origins) {
		DMERR("unable to allocate memory");
		return -ENOMEM;
	}

	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
		INIT_LIST_HEAD(_origins + i);
	init_rwsem(&_origins_lock);

	return 0;
}

static void exit_origin_hash(void)
{
	kfree(_origins);
}

static unsigned origin_hash(struct block_device *bdev)
{
	return bdev->bd_dev & ORIGIN_MASK;
}

static struct origin *__lookup_origin(struct block_device *origin)
{
	struct list_head *ol;
	struct origin *o;

	ol = &_origins[origin_hash(origin)];
	list_for_each_entry (o, ol, hash_list)
		if (bdev_equal(o->bdev, origin))
			return o;

	return NULL;
}

static void __insert_origin(struct origin *o)
{
	struct list_head *sl = &_origins[origin_hash(o->bdev)];
	list_add_tail(&o->hash_list, sl);
}
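
/*
 * Illustrative example: origin_hash() keys on the low 8 bits of the
 * device number, so an origin with dev_t MKDEV(8, 17) (0x800011) always
 * lands in bucket 0x11.  Distinct origins may share a bucket;
 * __lookup_origin() resolves collisions by comparing block devices.
 */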

/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
	struct origin *o, *new_o;
	struct block_device *bdev = snap->origin->bdev;

	new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
	if (!new_o)
		return -ENOMEM;

	down_write(&_origins_lock);
	o = __lookup_origin(bdev);

	if (o)
		kfree(new_o);
	else {
		/* New origin */
		o = new_o;

		/* Initialise the struct */
		INIT_LIST_HEAD(&o->snapshots);
		o->bdev = bdev;

		__insert_origin(o);
	}

	list_add_tail(&snap->list, &o->snapshots);

	up_write(&_origins_lock);
	return 0;
}

static void unregister_snapshot(struct dm_snapshot *s)
{
	struct origin *o;

	down_write(&_origins_lock);
	o = __lookup_origin(s->origin->bdev);

	list_del(&s->list);
	if (list_empty(&o->snapshots)) {
		list_del(&o->hash_list);
		kfree(o);
	}

	up_write(&_origins_lock);
}

/*
 * Implementation of the exception hash tables.
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
 */
static int init_exception_table(struct exception_table *et, uint32_t size,
				unsigned hash_shift)
{
	unsigned int i;

	et->hash_shift = hash_shift;
	et->hash_mask = size - 1;
	et->table = dm_vcalloc(size, sizeof(struct list_head));
	if (!et->table)
		return -ENOMEM;

	for (i = 0; i < size; i++)
		INIT_LIST_HEAD(et->table + i);

	return 0;
}

static void exit_exception_table(struct exception_table *et, struct kmem_cache *mem)
{
	struct list_head *slot;
	struct dm_snap_exception *ex, *next;
	int i, size;

	size = et->hash_mask + 1;
	for (i = 0; i < size; i++) {
		slot = et->table + i;

		list_for_each_entry_safe (ex, next, slot, hash_list)
			kmem_cache_free(mem, ex);
	}

	vfree(et->table);
}

static uint32_t exception_hash(struct exception_table *et, chunk_t chunk)
{
	return (chunk >> et->hash_shift) & et->hash_mask;
}

static void insert_exception(struct exception_table *eh,
			     struct dm_snap_exception *e)
{
	struct list_head *l = &eh->table[exception_hash(eh, e->old_chunk)];
	list_add(&e->hash_list, l);
}

static void remove_exception(struct dm_snap_exception *e)
{
	list_del(&e->hash_list);
}
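
/*
 * Illustrative example of the hashing above: with hash_shift == 4 and a
 * 256-bucket table (hash_mask == 0xff), chunks 0x120-0x12f all hash to
 * bucket 0x12, so a run of consecutive remapped chunks stays in one
 * bucket and can be represented by a single exception entry (see
 * insert_completed_exception() below).
 */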

/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct dm_snap_exception *lookup_exception(struct exception_table *et,
						  chunk_t chunk)
{
	struct list_head *slot;
	struct dm_snap_exception *e;

	slot = &et->table[exception_hash(et, chunk)];
	list_for_each_entry (e, slot, hash_list)
		if (chunk >= e->old_chunk &&
		    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
			return e;

	return NULL;
}

static struct dm_snap_exception *alloc_exception(void)
{
	struct dm_snap_exception *e;

	e = kmem_cache_alloc(exception_cache, GFP_NOIO);
	if (!e)
		e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

	return e;
}

static void free_exception(struct dm_snap_exception *e)
{
	kmem_cache_free(exception_cache, e);
}

static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{
	struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
							     GFP_NOIO);

	atomic_inc(&s->pending_exceptions_count);
	pe->snap = s;

	return pe;
}

static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;

	mempool_free(pe, s->pending_pool);
	smp_mb__before_atomic_dec();
	atomic_dec(&s->pending_exceptions_count);
}

static void insert_completed_exception(struct dm_snapshot *s,
				       struct dm_snap_exception *new_e)
{
	struct exception_table *eh = &s->complete;
	struct list_head *l;
	struct dm_snap_exception *e = NULL;

	l = &eh->table[exception_hash(eh, new_e->old_chunk)];

	/* Add immediately if this table doesn't support consecutive chunks */
	if (!eh->hash_shift)
		goto out;

	/* List is ordered by old_chunk */
	list_for_each_entry_reverse(e, l, hash_list) {
		/* Insert after an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk +
					 dm_consecutive_chunk_count(e) + 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
					 dm_consecutive_chunk_count(e) + 1)) {
			dm_consecutive_chunk_count_inc(e);
			free_exception(new_e);
			return;
		}

		/* Insert before an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk - 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
			dm_consecutive_chunk_count_inc(e);
			e->old_chunk--;
			e->new_chunk--;
			free_exception(new_e);
			return;
		}

		if (new_e->old_chunk > e->old_chunk)
			break;
	}

out:
	list_add(&new_e->hash_list, e ? &e->hash_list : l);
}

int dm_add_exception(struct dm_snapshot *s, chunk_t old, chunk_t new)
{
	struct dm_snap_exception *e;

	e = alloc_exception();
	if (!e)
		return -ENOMEM;

	e->old_chunk = old;

	/* Consecutive_count is implicitly initialised to zero */
	e->new_chunk = new;

	insert_completed_exception(s, e);

	return 0;
}
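
/*
 * Illustrative example of the consecutive-chunk merging above: loading
 * exceptions 10->100, 11->101 and 12->102 through dm_add_exception()
 * leaves one entry describing old chunks 10-12 mapped to new chunks
 * 100-102 (consecutive count 2); adding 9->99 afterwards extends the
 * same entry downwards to cover 9-12 -> 99-102.
 */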

/*
 * Hard coded magic.
 */
static int calc_max_buckets(void)
{
	/* use a fixed size of 2MB */
	unsigned long mem = 2 * 1024 * 1024;
	mem /= sizeof(struct list_head);

	return mem;
}

/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
	sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;

	/*
	 * Calculate based on the size of the original volume or
	 * the COW volume...
	 */
	cow_dev_size = get_dev_size(s->cow->bdev);
	origin_dev_size = get_dev_size(s->origin->bdev);
	max_buckets = calc_max_buckets();

	hash_size = min(origin_dev_size, cow_dev_size) >> s->chunk_shift;
	hash_size = min(hash_size, max_buckets);

	hash_size = rounddown_pow_of_two(hash_size);
	if (init_exception_table(&s->complete, hash_size,
				 DM_CHUNK_CONSECUTIVE_BITS))
		return -ENOMEM;

	/*
	 * Allocate hash table for in-flight exceptions
	 * Make this smaller than the real hash table
	 */
	hash_size >>= 3;
	if (hash_size < 64)
		hash_size = 64;

	if (init_exception_table(&s->pending, hash_size, 0)) {
		exit_exception_table(&s->complete, exception_cache);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Round a number up to the nearest 'size' boundary.  size must
 * be a power of 2.
 */
static ulong round_up(ulong n, ulong size)
{
	size--;
	return (n + size) & ~size;
}

static int set_chunk_size(struct dm_snapshot *s, const char *chunk_size_arg,
			  char **error)
{
	unsigned long chunk_size;
	char *value;

	chunk_size = simple_strtoul(chunk_size_arg, &value, 10);
	if (*chunk_size_arg == '\0' || *value != '\0') {
		*error = "Invalid chunk size";
		return -EINVAL;
	}

	if (!chunk_size) {
		s->chunk_size = s->chunk_mask = s->chunk_shift = 0;
		return 0;
	}

	/*
	 * Chunk size must be multiple of page size.  Silently
	 * round up if it's not.
	 */
	chunk_size = round_up(chunk_size, PAGE_SIZE >> 9);

	/* Check chunk_size is a power of 2 */
	if (!is_power_of_2(chunk_size)) {
		*error = "Chunk size is not a power of 2";
		return -EINVAL;
	}

	/* Validate the chunk size against the device block size */
	if (chunk_size % (bdev_hardsect_size(s->cow->bdev) >> 9)) {
		*error = "Chunk size is not a multiple of device blocksize";
		return -EINVAL;
	}

	s->chunk_size = chunk_size;
	s->chunk_mask = chunk_size - 1;
	s->chunk_shift = ffs(chunk_size) - 1;

	return 0;
}
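
/*
 * Worked example for the chunk size handling above (illustrative): the
 * argument is given in 512-byte sectors, so "16" means 8KiB chunks.
 * With 4KiB pages, round_up(16, PAGE_SIZE >> 9) == round_up(16, 8) == 16,
 * leaving chunk_size == 16, chunk_mask == 0xf and chunk_shift == 4.
 */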

/*
 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
 */
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_snapshot *s;
	int i;
	int r = -EINVAL;
	char persistent;
	char *origin_path;
	char *cow_path;

	if (argc != 4) {
		ti->error = "requires exactly 4 arguments";
		r = -EINVAL;
		goto bad1;
	}

	origin_path = argv[0];
	cow_path = argv[1];
	persistent = toupper(*argv[2]);

	if (persistent != 'P' && persistent != 'N') {
		ti->error = "Persistent flag is not P or N";
		r = -EINVAL;
		goto bad1;
	}

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (s == NULL) {
		ti->error = "Cannot allocate snapshot context private "
			    "structure";
		r = -ENOMEM;
		goto bad1;
	}

	r = dm_get_device(ti, origin_path, 0, ti->len, FMODE_READ, &s->origin);
	if (r) {
		ti->error = "Cannot get origin device";
		goto bad2;
	}

	r = dm_get_device(ti, cow_path, 0, 0,
			  FMODE_READ | FMODE_WRITE, &s->cow);
	if (r) {
		dm_put_device(ti, s->origin);
		ti->error = "Cannot get COW device";
		goto bad2;
	}

	r = set_chunk_size(s, argv[3], &ti->error);
	if (r)
		goto bad3;

	s->type = persistent;

	s->valid = 1;
	s->active = 0;
	atomic_set(&s->pending_exceptions_count, 0);
	init_rwsem(&s->lock);
	spin_lock_init(&s->pe_lock);
	s->ti = ti;

	/* Allocate hash table for COW data */
	if (init_hash_tables(s)) {
		ti->error = "Unable to allocate hash table space";
		r = -ENOMEM;
		goto bad3;
	}

	s->store.snap = s;

	if (persistent == 'P')
		r = dm_create_persistent(&s->store);
	else
		r = dm_create_transient(&s->store);

	if (r) {
		ti->error = "Couldn't create exception store";
		r = -EINVAL;
		goto bad4;
	}

	r = dm_kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
	if (r) {
		ti->error = "Could not create kcopyd client";
		goto bad5;
	}

	s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
	if (!s->pending_pool) {
		ti->error = "Could not allocate mempool for pending exceptions";
		goto bad6;
	}

	s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS,
							 tracked_chunk_cache);
	if (!s->tracked_chunk_pool) {
		ti->error = "Could not allocate tracked_chunk mempool for "
			    "tracking reads";
		goto bad_tracked_chunk_pool;
	}

	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);

	spin_lock_init(&s->tracked_chunk_lock);

	/* Metadata must only be loaded into one table at once */
	r = s->store.read_metadata(&s->store);
	if (r < 0) {
		ti->error = "Failed to read snapshot metadata";
		goto bad_load_and_register;
	} else if (r > 0) {
		s->valid = 0;
		DMWARN("Snapshot is marked invalid.");
	}

	bio_list_init(&s->queued_bios);
	INIT_WORK(&s->queued_bios_work, flush_queued_bios);

	/* Add snapshot to the list of snapshots for this origin */
	/* Exceptions aren't triggered till snapshot_resume() is called */
	if (register_snapshot(s)) {
		r = -EINVAL;
		ti->error = "Cannot register snapshot origin";
		goto bad_load_and_register;
	}

	ti->private = s;
	ti->split_io = s->chunk_size;

	return 0;

 bad_load_and_register:
	mempool_destroy(s->tracked_chunk_pool);

 bad_tracked_chunk_pool:
	mempool_destroy(s->pending_pool);

 bad6:
	dm_kcopyd_client_destroy(s->kcopyd_client);

 bad5:
	s->store.destroy(&s->store);

 bad4:
	exit_exception_table(&s->pending, pending_cache);
	exit_exception_table(&s->complete, exception_cache);

 bad3:
	dm_put_device(ti, s->cow);
	dm_put_device(ti, s->origin);

 bad2:
	kfree(s);

 bad1:
	return r;
}

static void __free_exceptions(struct dm_snapshot *s)
{
	dm_kcopyd_client_destroy(s->kcopyd_client);
	s->kcopyd_client = NULL;

	exit_exception_table(&s->pending, pending_cache);
	exit_exception_table(&s->complete, exception_cache);

	s->store.destroy(&s->store);
}

static void snapshot_dtr(struct dm_target *ti)
{
#ifdef CONFIG_DM_DEBUG
	int i;
#endif
	struct dm_snapshot *s = ti->private;

	flush_workqueue(ksnapd);

	/* Prevent further origin writes from using this snapshot. */
	/* After this returns there can be no new kcopyd jobs. */
	unregister_snapshot(s);

	while (atomic_read(&s->pending_exceptions_count))
		msleep(1);
	/*
	 * Ensure instructions in mempool_destroy aren't reordered
	 * before atomic_read.
	 */
	smp_mb();

#ifdef CONFIG_DM_DEBUG
	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
#endif

	mempool_destroy(s->tracked_chunk_pool);

	__free_exceptions(s);

	mempool_destroy(s->pending_pool);

	dm_put_device(ti, s->origin);
	dm_put_device(ti, s->cow);

	kfree(s);
}

/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		generic_make_request(bio);
		bio = n;
	}
}

static void flush_queued_bios(struct work_struct *work)
{
	struct dm_snapshot *s =
		container_of(work, struct dm_snapshot, queued_bios_work);
	struct bio *queued_bios;
	unsigned long flags;

	spin_lock_irqsave(&s->pe_lock, flags);
	queued_bios = bio_list_get(&s->queued_bios);
	spin_unlock_irqrestore(&s->pe_lock, flags);

	flush_bios(queued_bios);
}

/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		bio_io_error(bio);
		bio = n;
	}
}

static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
	if (!s->valid)
		return;

	if (err == -EIO)
		DMERR("Invalidating snapshot: Error reading/writing.");
	else if (err == -ENOMEM)
		DMERR("Invalidating snapshot: Unable to allocate exception.");

	if (s->store.drop_snapshot)
		s->store.drop_snapshot(&s->store);

	s->valid = 0;

	dm_table_event(s->ti->table);
}

static void get_pending_exception(struct dm_snap_pending_exception *pe)
{
	atomic_inc(&pe->ref_count);
}

static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snap_pending_exception *primary_pe;
	struct bio *origin_bios = NULL;

	primary_pe = pe->primary_pe;

	/*
	 * If this pe is involved in a write to the origin and
	 * it is the last sibling to complete then release
	 * the bios for the original write to the origin.
	 */
	if (primary_pe &&
	    atomic_dec_and_test(&primary_pe->ref_count)) {
		origin_bios = bio_list_get(&primary_pe->origin_bios);
		free_pending_exception(primary_pe);
	}

	/*
	 * Free the pe if it's not linked to an origin write or if
	 * it's not itself a primary pe.
	 */
	if (!primary_pe || primary_pe != pe)
		free_pending_exception(pe);

	return origin_bios;
}

static void pending_complete(struct dm_snap_pending_exception *pe, int success)
{
	struct dm_snap_exception *e;
	struct dm_snapshot *s = pe->snap;
	struct bio *origin_bios = NULL;
	struct bio *snapshot_bios = NULL;
	int error = 0;

	if (!success) {
		/* Read/write error - snapshot is unusable */
		down_write(&s->lock);
		__invalidate_snapshot(s, -EIO);
		error = 1;
		goto out;
	}

	e = alloc_exception();
	if (!e) {
		down_write(&s->lock);
		__invalidate_snapshot(s, -ENOMEM);
		error = 1;
		goto out;
	}
	*e = pe->e;

	down_write(&s->lock);
	if (!s->valid) {
		free_exception(e);
		error = 1;
		goto out;
	}

	/*
	 * Check for conflicting reads. This is extremely improbable,
	 * so msleep(1) is sufficient and there is no need for a wait queue.
	 */
	while (__chunk_is_tracked(s, pe->e.old_chunk))
		msleep(1);

	/*
	 * Add a proper exception, and remove the
	 * in-flight exception from the list.
	 */
	insert_completed_exception(s, e);

 out:
	remove_exception(&pe->e);
	snapshot_bios = bio_list_get(&pe->snapshot_bios);
	origin_bios = put_pending_exception(pe);

	up_write(&s->lock);

	/* Submit any pending write bios */
	if (error)
		error_bios(snapshot_bios);
	else
		flush_bios(snapshot_bios);

	flush_bios(origin_bios);
}

static void commit_callback(void *context, int success)
{
	struct dm_snap_pending_exception *pe = context;

	pending_complete(pe, success);
}

/*
 * Called when the copy I/O has finished.  kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned long write_err, void *context)
{
	struct dm_snap_pending_exception *pe = context;
	struct dm_snapshot *s = pe->snap;

	if (read_err || write_err)
		pending_complete(pe, 0);

	else
		/* Update the metadata if we are persistent */
		s->store.commit_exception(&s->store, &pe->e, commit_callback,
					  pe);
}

/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;
	struct dm_io_region src, dest;
	struct block_device *bdev = s->origin->bdev;
	sector_t dev_size;

	dev_size = get_dev_size(bdev);

	src.bdev = bdev;
	src.sector = chunk_to_sector(s, pe->e.old_chunk);
	src.count = min(s->chunk_size, dev_size - src.sector);

	dest.bdev = s->cow->bdev;
	dest.sector = chunk_to_sector(s, pe->e.new_chunk);
	dest.count = src.count;

	/* Hand over to kcopyd */
	dm_kcopyd_copy(s->kcopyd_client,
		       &src, 1, &dest, 0, copy_callback, pe);
}
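
/*
 * Summary of the copy path, for orientation: start_copy() above hands
 * the chunk to kcopyd, which copies it from the origin to the COW device
 * and calls copy_callback(); on success the exception store commits the
 * new mapping and invokes commit_callback(), which runs
 * pending_complete() to publish the completed exception and release the
 * queued bios.  start_copy() is reached from snapshot_map() and
 * __origin_write() below.
 */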

/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on snap->lock before calling
 * this.
 */
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s, struct bio *bio)
{
	struct dm_snap_exception *e;
	struct dm_snap_pending_exception *pe;
	chunk_t chunk = sector_to_chunk(s, bio->bi_sector);

	/*
	 * Is there a pending exception for this already ?
	 */
	e = lookup_exception(&s->pending, chunk);
	if (e) {
		/* cast the exception to a pending exception */
		pe = container_of(e, struct dm_snap_pending_exception, e);
		goto out;
	}

	/*
	 * Create a new pending exception, we don't want
	 * to hold the lock while we do this.
	 */
	up_write(&s->lock);
	pe = alloc_pending_exception(s);
	down_write(&s->lock);

	if (!s->valid) {
		free_pending_exception(pe);
		return NULL;
	}

	e = lookup_exception(&s->pending, chunk);
	if (e) {
		free_pending_exception(pe);
		pe = container_of(e, struct dm_snap_pending_exception, e);
		goto out;
	}

	pe->e.old_chunk = chunk;
	bio_list_init(&pe->origin_bios);
	bio_list_init(&pe->snapshot_bios);
	pe->primary_pe = NULL;
	atomic_set(&pe->ref_count, 0);
	pe->started = 0;

	if (s->store.prepare_exception(&s->store, &pe->e)) {
		free_pending_exception(pe);
		return NULL;
	}

	get_pending_exception(pe);
	insert_exception(&s->pending, &pe->e);

 out:
	return pe;
}

static void remap_exception(struct dm_snapshot *s, struct dm_snap_exception *e,
			    struct bio *bio, chunk_t chunk)
{
	bio->bi_bdev = s->cow->bdev;
	bio->bi_sector = chunk_to_sector(s, dm_chunk_number(e->new_chunk) +
					 (chunk - e->old_chunk)) +
			 (bio->bi_sector & s->chunk_mask);
}
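
/*
 * Illustrative example of the remapping above: with a 16-sector chunk
 * size and an exception covering old chunks 100-103 mapped to new chunks
 * 7-10, a bio for sector 1635 (chunk 102, offset 3 within the chunk) is
 * redirected to the COW device at sector (7 + 2) * 16 + 3 == 147.
 */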

static int snapshot_map(struct dm_target *ti, struct bio *bio,
			union map_info *map_context)
{
	struct dm_snap_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;
	struct dm_snap_pending_exception *pe = NULL;

	chunk = sector_to_chunk(s, bio->bi_sector);

	/* Full snapshots are not usable */
	/* To get here the table must be live so s->active is always set. */
	if (!s->valid)
		return -EIO;

	/* FIXME: should only take write lock if we need
	 * to copy an exception */
	down_write(&s->lock);

	if (!s->valid) {
		r = -EIO;
		goto out_unlock;
	}

	/* If the block is already remapped - use that, else remap it */
	e = lookup_exception(&s->complete, chunk);
	if (e) {
		remap_exception(s, e, bio, chunk);
		goto out_unlock;
	}

	/*
	 * Write to snapshot - higher level takes care of RW/RO
	 * flags so we should only get this if we are
	 * writeable.
	 */
	if (bio_rw(bio) == WRITE) {
		pe = __find_pending_exception(s, bio);
		if (!pe) {
			__invalidate_snapshot(s, -ENOMEM);
			r = -EIO;
			goto out_unlock;
		}

		remap_exception(s, &pe->e, bio, chunk);
		bio_list_add(&pe->snapshot_bios, bio);

		r = DM_MAPIO_SUBMITTED;

		if (!pe->started) {
			/* this is protected by snap->lock */
			pe->started = 1;
			up_write(&s->lock);
			start_copy(pe);
			goto out;
		}
	} else {
		bio->bi_bdev = s->origin->bdev;
		map_context->ptr = track_chunk(s, chunk);
	}

 out_unlock:
	up_write(&s->lock);
 out:
	return r;
}

static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
			   int error, union map_info *map_context)
{
	struct dm_snapshot *s = ti->private;
	struct dm_snap_tracked_chunk *c = map_context->ptr;

	if (c)
		stop_tracking_chunk(s, c);

	return 0;
}

static void snapshot_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	down_write(&s->lock);
	s->active = 1;
	up_write(&s->lock);
}

static int snapshot_status(struct dm_target *ti, status_type_t type,
			   char *result, unsigned int maxlen)
{
	struct dm_snapshot *snap = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		if (!snap->valid)
			snprintf(result, maxlen, "Invalid");
		else {
			if (snap->store.fraction_full) {
				sector_t numerator, denominator;
				snap->store.fraction_full(&snap->store,
							  &numerator,
							  &denominator);
				snprintf(result, maxlen, "%llu/%llu",
					 (unsigned long long)numerator,
					 (unsigned long long)denominator);
			}
			else
				snprintf(result, maxlen, "Unknown");
		}
		break;

	case STATUSTYPE_TABLE:
		/*
		 * kdevname returns a static pointer so we need
		 * to make private copies if the output is to
		 * make sense.
		 */
		snprintf(result, maxlen, "%s %s %c %llu",
			 snap->origin->name, snap->cow->name,
			 snap->type,
			 (unsigned long long)snap->chunk_size);
		break;
	}

	return 0;
}

/*-----------------------------------------------------------------
 * Origin methods
 *---------------------------------------------------------------*/
static int __origin_write(struct list_head *snapshots, struct bio *bio)
{
	int r = DM_MAPIO_REMAPPED, first = 0;
	struct dm_snapshot *snap;
	struct dm_snap_exception *e;
	struct dm_snap_pending_exception *pe, *next_pe, *primary_pe = NULL;
	chunk_t chunk;
	LIST_HEAD(pe_queue);

	/* Do all the snapshots on this origin */
	list_for_each_entry (snap, snapshots, list) {

		down_write(&snap->lock);

		/* Only deal with valid and active snapshots */
		if (!snap->valid || !snap->active)
			goto next_snapshot;

		/* Nothing to do if writing beyond end of snapshot */
		if (bio->bi_sector >= dm_table_get_size(snap->ti->table))
			goto next_snapshot;

		/*
		 * Remember, different snapshots can have
		 * different chunk sizes.
		 */
		chunk = sector_to_chunk(snap, bio->bi_sector);

		/*
		 * Check exception table to see if block
		 * is already remapped in this snapshot
		 * and trigger an exception if not.
		 *
		 * ref_count is initialised to 1 so pending_complete()
		 * won't destroy the primary_pe while we're inside this loop.
		 */
		e = lookup_exception(&snap->complete, chunk);
		if (e)
			goto next_snapshot;

		pe = __find_pending_exception(snap, bio);
		if (!pe) {
			__invalidate_snapshot(snap, -ENOMEM);
			goto next_snapshot;
		}

		if (!primary_pe) {
			/*
			 * Either every pe here has same
			 * primary_pe or none has one yet.
			 */
			if (pe->primary_pe)
				primary_pe = pe->primary_pe;
			else {
				primary_pe = pe;
				first = 1;
			}

			bio_list_add(&primary_pe->origin_bios, bio);

			r = DM_MAPIO_SUBMITTED;
		}

		if (!pe->primary_pe) {
			pe->primary_pe = primary_pe;
			get_pending_exception(primary_pe);
		}

		if (!pe->started) {
			pe->started = 1;
			list_add_tail(&pe->list, &pe_queue);
		}

 next_snapshot:
		up_write(&snap->lock);
	}

	if (!primary_pe)
		return r;

	/*
	 * If this is the first time we're processing this chunk and
	 * ref_count is now 1 it means all the pending exceptions
	 * got completed while we were in the loop above, so it falls to
	 * us here to remove the primary_pe and submit any origin_bios.
	 */

	if (first && atomic_dec_and_test(&primary_pe->ref_count)) {
		flush_bios(bio_list_get(&primary_pe->origin_bios));
		free_pending_exception(primary_pe);
		/* If we got here, pe_queue is necessarily empty. */
		return r;
	}

	/*
	 * Now that we have a complete pe list we can start the copying.
	 */
	list_for_each_entry_safe(pe, next_pe, &pe_queue, list)
		start_copy(pe);

	return r;
}
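
/*
 * Illustrative walk-through of the reference counting above: an origin
 * write hitting two snapshots that both need new exceptions leaves the
 * primary_pe with ref_count == 3 (one taken in __find_pending_exception()
 * plus one per pe that links to it, including the primary itself).  The
 * atomic_dec_and_test() above drops the transient reference, and each
 * pending_complete() drops one more via put_pending_exception(), so the
 * origin bio is released once the last copy finishes.
 */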

/*
 * Called on a write from the origin driver.
 */
static int do_origin(struct dm_dev *origin, struct bio *bio)
{
	struct origin *o;
	int r = DM_MAPIO_REMAPPED;

	down_read(&_origins_lock);
	o = __lookup_origin(origin->bdev);
	if (o)
		r = __origin_write(&o->snapshots, bio);
	up_read(&_origins_lock);

	return r;
}

/*
 * Origin: maps a linear range of a device, with hooks for snapshotting.
 */

/*
 * Construct an origin mapping: <dev_path>
 * The context for an origin is merely a 'struct dm_dev *'
 * pointing to the real device.
 */
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	struct dm_dev *dev;

	if (argc != 1) {
		ti->error = "origin: incorrect number of arguments";
		return -EINVAL;
	}

	r = dm_get_device(ti, argv[0], 0, ti->len,
			  dm_table_get_mode(ti->table), &dev);
	if (r) {
		ti->error = "Cannot get target device";
		return r;
	}

	ti->private = dev;
	return 0;
}

static void origin_dtr(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;
	dm_put_device(ti, dev);
}

static int origin_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	struct dm_dev *dev = ti->private;
	bio->bi_bdev = dev->bdev;

	/* Only tell snapshots if this is a write */
	return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
}

#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))

/*
 * Set the target "split_io" field to the minimum of all the snapshots'
 * chunk sizes.
 */
static void origin_resume(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;
	struct dm_snapshot *snap;
	struct origin *o;
	chunk_t chunk_size = 0;

	down_read(&_origins_lock);
	o = __lookup_origin(dev->bdev);
	if (o)
		list_for_each_entry (snap, &o->snapshots, list)
			chunk_size = min_not_zero(chunk_size, snap->chunk_size);
	up_read(&_origins_lock);

	ti->split_io = chunk_size;
}

static int origin_status(struct dm_target *ti, status_type_t type, char *result,
			 unsigned int maxlen)
{
	struct dm_dev *dev = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		snprintf(result, maxlen, "%s", dev->name);
		break;
	}

	return 0;
}

static struct target_type origin_target = {
	.name = "snapshot-origin",
	.version = {1, 6, 0},
	.module = THIS_MODULE,
	.ctr = origin_ctr,
	.dtr = origin_dtr,
	.map = origin_map,
	.resume = origin_resume,
	.status = origin_status,
};

static struct target_type snapshot_target = {
	.name = "snapshot",
	.version = {1, 6, 0},
	.module = THIS_MODULE,
	.ctr = snapshot_ctr,
	.dtr = snapshot_dtr,
	.map = snapshot_map,
	.end_io = snapshot_end_io,
	.resume = snapshot_resume,
	.status = snapshot_status,
};

static int __init dm_snapshot_init(void)
{
	int r;

	r = dm_register_target(&snapshot_target);
	if (r) {
		DMERR("snapshot target register failed %d", r);
		return r;
	}

	r = dm_register_target(&origin_target);
	if (r < 0) {
		DMERR("Origin target register failed %d", r);
		goto bad1;
	}

	r = init_origin_hash();
	if (r) {
		DMERR("init_origin_hash failed.");
		goto bad2;
	}

	exception_cache = KMEM_CACHE(dm_snap_exception, 0);
	if (!exception_cache) {
		DMERR("Couldn't create exception cache.");
		r = -ENOMEM;
		goto bad3;
	}

	pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
	if (!pending_cache) {
		DMERR("Couldn't create pending cache.");
		r = -ENOMEM;
		goto bad4;
	}

	tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0);
	if (!tracked_chunk_cache) {
		DMERR("Couldn't create cache to track chunks in use.");
		r = -ENOMEM;
		goto bad5;
	}

	ksnapd = create_singlethread_workqueue("ksnapd");
	if (!ksnapd) {
		DMERR("Failed to create ksnapd workqueue.");
		r = -ENOMEM;
		goto bad_pending_pool;
	}

	return 0;

 bad_pending_pool:
	kmem_cache_destroy(tracked_chunk_cache);
 bad5:
	kmem_cache_destroy(pending_cache);
 bad4:
	kmem_cache_destroy(exception_cache);
 bad3:
	exit_origin_hash();
 bad2:
	dm_unregister_target(&origin_target);
 bad1:
	dm_unregister_target(&snapshot_target);
	return r;
}

static void __exit dm_snapshot_exit(void)
{
	int r;

	destroy_workqueue(ksnapd);

	r = dm_unregister_target(&snapshot_target);
	if (r)
		DMERR("snapshot unregister failed %d", r);

	r = dm_unregister_target(&origin_target);
	if (r)
		DMERR("origin unregister failed %d", r);

	exit_origin_hash();
	kmem_cache_destroy(pending_cache);
	kmem_cache_destroy(exception_cache);
	kmem_cache_destroy(tracked_chunk_cache);
}
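
/*
 * Usage example (illustrative; device names are placeholders): the
 * snapshot constructor documented above takes
 * <origin_dev> <COW-dev> <p/n> <chunk-size>, so a persistent snapshot
 * with 8KiB (16-sector) chunks could be set up with something like:
 *
 *   echo "0 `blockdev --getsz /dev/vg/base` snapshot /dev/vg/base /dev/vg/cow P 16" \
 *       | dmsetup create snap
 */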

/* Module hooks */
module_init(dm_snapshot_init);
module_exit(dm_snapshot_exit);

MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");