/*
 * dm-snapshot.c
 *
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/ctype.h>
#include <linux/device-mapper.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>

#include "dm-snap.h"
#include "dm-bio-list.h"

#define DM_MSG_PREFIX "snapshots"

/*
 * The percentage increment we will wake up users at
 */
#define WAKE_UP_PERCENT 5

/*
 * kcopyd priority of snapshot operations
 */
#define SNAPSHOT_COPY_PRIORITY 2

/*
 * Reserve 1MB for each snapshot initially (with minimum of 1 page).
 */
#define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1)

/*
 * The size of the mempool used to track chunks in use.
 */
#define MIN_IOS 256

static struct workqueue_struct *ksnapd;
static void flush_queued_bios(struct work_struct *work);

struct dm_snap_pending_exception {
	struct dm_snap_exception e;

	/*
	 * Origin buffers waiting for this to complete are held
	 * in a bio list
	 */
	struct bio_list origin_bios;
	struct bio_list snapshot_bios;

	/*
	 * Short-term queue of pending exceptions prior to submission.
	 */
	struct list_head list;

	/*
	 * The primary pending_exception is the one that holds
	 * the ref_count and the list of origin_bios for a
	 * group of pending_exceptions.  It is always last to get freed.
	 * These fields get set up when writing to the origin.
	 */
	struct dm_snap_pending_exception *primary_pe;

	/*
	 * Number of pending_exceptions processing this chunk.
	 * When this drops to zero we must complete the origin bios.
	 * If incrementing or decrementing this, hold pe->snap->lock for
	 * the sibling concerned and not pe->primary_pe->snap->lock unless
	 * they are the same.
	 */
	atomic_t ref_count;

	/* Pointer back to snapshot context */
	struct dm_snapshot *snap;

	/*
	 * 1 indicates the exception has already been sent to
	 * kcopyd.
	 */
	int started;
};

/*
 * Hash table mapping origin volumes to lists of snapshots and
 * a lock to protect it
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;

struct dm_snap_tracked_chunk {
	struct hlist_node node;
	chunk_t chunk;
};

static struct kmem_cache *tracked_chunk_cache;

static struct dm_snap_tracked_chunk *track_chunk(struct dm_snapshot *s,
						 chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c = mempool_alloc(s->tracked_chunk_pool,
							GFP_NOIO);
	unsigned long flags;

	c->chunk = chunk;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_add_head(&c->node,
		       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

	return c;
}

static void stop_tracking_chunk(struct dm_snapshot *s,
				struct dm_snap_tracked_chunk *c)
{
	unsigned long flags;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_del(&c->node);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

	mempool_free(c, s->tracked_chunk_pool);
}

static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c;
	struct hlist_node *hn;
	int found = 0;

	spin_lock_irq(&s->tracked_chunk_lock);

	hlist_for_each_entry(c, hn,
	    &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
		if (c->chunk == chunk) {
			found = 1;
			break;
		}
	}

	spin_unlock_irq(&s->tracked_chunk_lock);

	return found;
}

/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
	/* The origin device */
	struct block_device *bdev;

	struct list_head hash_list;

	/* List of snapshots for this origin */
	struct list_head snapshots;
};

/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF
static struct list_head *_origins;
static struct rw_semaphore _origins_lock;

static int init_origin_hash(void)
{
	int i;

	_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
			   GFP_KERNEL);
	if (!_origins) {
		DMERR("unable to allocate memory");
		return -ENOMEM;
	}

	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
		INIT_LIST_HEAD(_origins + i);
	init_rwsem(&_origins_lock);

	return 0;
}

static void exit_origin_hash(void)
{
	kfree(_origins);
}

static unsigned origin_hash(struct block_device *bdev)
{
	return bdev->bd_dev & ORIGIN_MASK;
}

static struct origin *__lookup_origin(struct block_device *origin)
{
	struct list_head *ol;
	struct origin *o;

	ol = &_origins[origin_hash(origin)];
	list_for_each_entry (o, ol, hash_list)
		if (bdev_equal(o->bdev, origin))
			return o;

	return NULL;
}

static void __insert_origin(struct origin *o)
{
	struct list_head *sl = &_origins[origin_hash(o->bdev)];
	list_add_tail(&o->hash_list, sl);
}
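/*
 * Summary of the structures above (descriptive note): _origins is a
 * 256-bucket hash of struct origin, keyed on the low eight bits of the
 * origin device's dev_t (origin_hash).  Each struct origin carries the
 * list of snapshots stacked on that device, so an origin write can find
 * every snapshot that may need an exception with one hash lookup.
 */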
/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
	struct origin *o;
	struct block_device *bdev = snap->origin->bdev;

	down_write(&_origins_lock);
	o = __lookup_origin(bdev);

	if (!o) {
		/* New origin */
		o = kmalloc(sizeof(*o), GFP_KERNEL);
		if (!o) {
			up_write(&_origins_lock);
			return -ENOMEM;
		}

		/* Initialise the struct */
		INIT_LIST_HEAD(&o->snapshots);
		o->bdev = bdev;

		__insert_origin(o);
	}

	list_add_tail(&snap->list, &o->snapshots);

	up_write(&_origins_lock);
	return 0;
}

static void unregister_snapshot(struct dm_snapshot *s)
{
	struct origin *o;

	down_write(&_origins_lock);
	o = __lookup_origin(s->origin->bdev);

	list_del(&s->list);
	if (list_empty(&o->snapshots)) {
		list_del(&o->hash_list);
		kfree(o);
	}

	up_write(&_origins_lock);
}

/*
 * Implementation of the exception hash tables.
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
 */
static int init_exception_table(struct exception_table *et, uint32_t size,
				unsigned hash_shift)
{
	unsigned int i;

	et->hash_shift = hash_shift;
	et->hash_mask = size - 1;
	et->table = dm_vcalloc(size, sizeof(struct list_head));
	if (!et->table)
		return -ENOMEM;

	for (i = 0; i < size; i++)
		INIT_LIST_HEAD(et->table + i);

	return 0;
}

static void exit_exception_table(struct exception_table *et, struct kmem_cache *mem)
{
	struct list_head *slot;
	struct dm_snap_exception *ex, *next;
	int i, size;

	size = et->hash_mask + 1;
	for (i = 0; i < size; i++) {
		slot = et->table + i;

		list_for_each_entry_safe (ex, next, slot, hash_list)
			kmem_cache_free(mem, ex);
	}

	vfree(et->table);
}

static uint32_t exception_hash(struct exception_table *et, chunk_t chunk)
{
	return (chunk >> et->hash_shift) & et->hash_mask;
}

static void insert_exception(struct exception_table *eh,
			     struct dm_snap_exception *e)
{
	struct list_head *l = &eh->table[exception_hash(eh, e->old_chunk)];
	list_add(&e->hash_list, l);
}

static void remove_exception(struct dm_snap_exception *e)
{
	list_del(&e->hash_list);
}
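/*
 * Note on the hash_shift grouping used above (descriptive note):
 * exception_hash() drops the low hash_shift bits, so chunks that differ
 * only in those bits share a bucket.  insert_completed_exception() only
 * merges runs within a single bucket, which means a merged run of
 * consecutive chunks never crosses a bucket boundary, and
 * lookup_exception() below can find it by hashing any chunk inside
 * the run.
 */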
/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct dm_snap_exception *lookup_exception(struct exception_table *et,
						  chunk_t chunk)
{
	struct list_head *slot;
	struct dm_snap_exception *e;

	slot = &et->table[exception_hash(et, chunk)];
	list_for_each_entry (e, slot, hash_list)
		if (chunk >= e->old_chunk &&
		    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
			return e;

	return NULL;
}

static struct dm_snap_exception *alloc_exception(void)
{
	struct dm_snap_exception *e;

	e = kmem_cache_alloc(exception_cache, GFP_NOIO);
	if (!e)
		e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

	return e;
}

static void free_exception(struct dm_snap_exception *e)
{
	kmem_cache_free(exception_cache, e);
}

static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{
	struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
							     GFP_NOIO);

	pe->snap = s;

	return pe;
}

static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
	mempool_free(pe, pe->snap->pending_pool);
}

static void insert_completed_exception(struct dm_snapshot *s,
				       struct dm_snap_exception *new_e)
{
	struct exception_table *eh = &s->complete;
	struct list_head *l;
	struct dm_snap_exception *e = NULL;

	l = &eh->table[exception_hash(eh, new_e->old_chunk)];

	/* Add immediately if this table doesn't support consecutive chunks */
	if (!eh->hash_shift)
		goto out;

	/* List is ordered by old_chunk */
	list_for_each_entry_reverse(e, l, hash_list) {
		/* Insert after an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk +
					 dm_consecutive_chunk_count(e) + 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
					 dm_consecutive_chunk_count(e) + 1)) {
			dm_consecutive_chunk_count_inc(e);
			free_exception(new_e);
			return;
		}

		/* Insert before an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk - 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
			dm_consecutive_chunk_count_inc(e);
			e->old_chunk--;
			e->new_chunk--;
			free_exception(new_e);
			return;
		}

		if (new_e->old_chunk > e->old_chunk)
			break;
	}

out:
	list_add(&new_e->hash_list, e ? &e->hash_list : l);
}

int dm_add_exception(struct dm_snapshot *s, chunk_t old, chunk_t new)
{
	struct dm_snap_exception *e;

	e = alloc_exception();
	if (!e)
		return -ENOMEM;

	e->old_chunk = old;

	/* Consecutive_count is implicitly initialised to zero */
	e->new_chunk = new;

	insert_completed_exception(s, e);

	return 0;
}

/*
 * Hard coded magic.
 */
static int calc_max_buckets(void)
{
	/* use a fixed size of 2MB */
	unsigned long mem = 2 * 1024 * 1024;
	mem /= sizeof(struct list_head);

	return mem;
}
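/*
 * Rough sizing example for init_hash_tables() below (illustrative,
 * assuming a 64-bit build where sizeof(struct list_head) is 16 and the
 * COW device is at least as large as the origin): calc_max_buckets()
 * allows 2MB / 16 = 131072 bucket heads.  A 64GB origin with 8-sector
 * (4KB) chunks has about 16M chunks, so hash_size is clamped to 131072
 * for the completed-exception table, and the pending table gets
 * 131072 >> 3 = 16384 buckets.
 */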
/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
	sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;

	/*
	 * Calculate based on the size of the original volume or
	 * the COW volume...
	 */
	cow_dev_size = get_dev_size(s->cow->bdev);
	origin_dev_size = get_dev_size(s->origin->bdev);
	max_buckets = calc_max_buckets();

	hash_size = min(origin_dev_size, cow_dev_size) >> s->chunk_shift;
	hash_size = min(hash_size, max_buckets);

	hash_size = rounddown_pow_of_two(hash_size);
	if (init_exception_table(&s->complete, hash_size,
				 DM_CHUNK_CONSECUTIVE_BITS))
		return -ENOMEM;

	/*
	 * Allocate hash table for in-flight exceptions
	 * Make this smaller than the real hash table
	 */
	hash_size >>= 3;
	if (hash_size < 64)
		hash_size = 64;

	if (init_exception_table(&s->pending, hash_size, 0)) {
		exit_exception_table(&s->complete, exception_cache);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Round a number up to the nearest 'size' boundary.  size must
 * be a power of 2.
 */
static ulong round_up(ulong n, ulong size)
{
	size--;
	return (n + size) & ~size;
}

static int set_chunk_size(struct dm_snapshot *s, const char *chunk_size_arg,
			  char **error)
{
	unsigned long chunk_size;
	char *value;

	chunk_size = simple_strtoul(chunk_size_arg, &value, 10);
	if (*chunk_size_arg == '\0' || *value != '\0') {
		*error = "Invalid chunk size";
		return -EINVAL;
	}

	if (!chunk_size) {
		s->chunk_size = s->chunk_mask = s->chunk_shift = 0;
		return 0;
	}

	/*
	 * Chunk size must be multiple of page size.  Silently
	 * round up if it's not.
	 */
	chunk_size = round_up(chunk_size, PAGE_SIZE >> 9);

	/* Check chunk_size is a power of 2 */
	if (!is_power_of_2(chunk_size)) {
		*error = "Chunk size is not a power of 2";
		return -EINVAL;
	}

	/* Validate the chunk size against the device block size */
	if (chunk_size % (bdev_hardsect_size(s->cow->bdev) >> 9)) {
		*error = "Chunk size is not a multiple of device blocksize";
		return -EINVAL;
	}

	s->chunk_size = chunk_size;
	s->chunk_mask = chunk_size - 1;
	s->chunk_shift = ffs(chunk_size) - 1;

	return 0;
}
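/*
 * Example of the fields set_chunk_size() derives (illustrative): a
 * chunk size argument of "16" means 16 512-byte sectors, i.e. 8KB.
 * That gives chunk_size = 16, chunk_mask = 15 and chunk_shift = 4, so
 * sector_to_chunk() is a shift by 4 and the offset of a sector within
 * its chunk is (sector & chunk_mask).
 */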
/*
 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
 */
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_snapshot *s;
	int i;
	int r = -EINVAL;
	char persistent;
	char *origin_path;
	char *cow_path;

	if (argc != 4) {
		ti->error = "requires exactly 4 arguments";
		r = -EINVAL;
		goto bad1;
	}

	origin_path = argv[0];
	cow_path = argv[1];
	persistent = toupper(*argv[2]);

	if (persistent != 'P' && persistent != 'N') {
		ti->error = "Persistent flag is not P or N";
		r = -EINVAL;
		goto bad1;
	}

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (s == NULL) {
		ti->error = "Cannot allocate snapshot context private "
			    "structure";
		r = -ENOMEM;
		goto bad1;
	}

	r = dm_get_device(ti, origin_path, 0, ti->len, FMODE_READ, &s->origin);
	if (r) {
		ti->error = "Cannot get origin device";
		goto bad2;
	}

	r = dm_get_device(ti, cow_path, 0, 0,
			  FMODE_READ | FMODE_WRITE, &s->cow);
	if (r) {
		dm_put_device(ti, s->origin);
		ti->error = "Cannot get COW device";
		goto bad2;
	}

	r = set_chunk_size(s, argv[3], &ti->error);
	if (r)
		goto bad3;

	s->type = persistent;

	s->valid = 1;
	s->active = 0;
	s->last_percent = 0;
	init_rwsem(&s->lock);
	spin_lock_init(&s->pe_lock);
	s->ti = ti;

	/* Allocate hash table for COW data */
	if (init_hash_tables(s)) {
		ti->error = "Unable to allocate hash table space";
		r = -ENOMEM;
		goto bad3;
	}

	s->store.snap = s;

	if (persistent == 'P')
		r = dm_create_persistent(&s->store);
	else
		r = dm_create_transient(&s->store);

	if (r) {
		ti->error = "Couldn't create exception store";
		r = -EINVAL;
		goto bad4;
	}

	r = dm_kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
	if (r) {
		ti->error = "Could not create kcopyd client";
		goto bad5;
	}

	s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
	if (!s->pending_pool) {
		ti->error = "Could not allocate mempool for pending exceptions";
		goto bad6;
	}

	s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS,
							 tracked_chunk_cache);
	if (!s->tracked_chunk_pool) {
		ti->error = "Could not allocate tracked_chunk mempool for "
			    "tracking reads";
		goto bad_tracked_chunk_pool;
	}

	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);

	spin_lock_init(&s->tracked_chunk_lock);

	/* Metadata must only be loaded into one table at once */
	r = s->store.read_metadata(&s->store);
	if (r < 0) {
		ti->error = "Failed to read snapshot metadata";
		goto bad_load_and_register;
	} else if (r > 0) {
		s->valid = 0;
		DMWARN("Snapshot is marked invalid.");
	}

	bio_list_init(&s->queued_bios);
	INIT_WORK(&s->queued_bios_work, flush_queued_bios);

	/* Add snapshot to the list of snapshots for this origin */
	/* Exceptions aren't triggered till snapshot_resume() is called */
	if (register_snapshot(s)) {
		r = -EINVAL;
		ti->error = "Cannot register snapshot origin";
		goto bad_load_and_register;
	}

	ti->private = s;
	ti->split_io = s->chunk_size;

	return 0;

 bad_load_and_register:
	mempool_destroy(s->tracked_chunk_pool);

 bad_tracked_chunk_pool:
	mempool_destroy(s->pending_pool);

 bad6:
	dm_kcopyd_client_destroy(s->kcopyd_client);

 bad5:
	s->store.destroy(&s->store);

 bad4:
	exit_exception_table(&s->pending, pending_cache);
	exit_exception_table(&s->complete, exception_cache);

 bad3:
	dm_put_device(ti, s->cow);
	dm_put_device(ti, s->origin);

 bad2:
	kfree(s);

 bad1:
	return r;
}

static void __free_exceptions(struct dm_snapshot *s)
{
	dm_kcopyd_client_destroy(s->kcopyd_client);
	s->kcopyd_client = NULL;

	exit_exception_table(&s->pending, pending_cache);
	exit_exception_table(&s->complete, exception_cache);

	s->store.destroy(&s->store);
}

static void snapshot_dtr(struct dm_target *ti)
{
#ifdef CONFIG_DM_DEBUG
	int i;
#endif
	struct dm_snapshot *s = ti->private;

	flush_workqueue(ksnapd);

	/* Prevent further origin writes from using this snapshot. */
	/* After this returns there can be no new kcopyd jobs. */
	unregister_snapshot(s);

#ifdef CONFIG_DM_DEBUG
	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
#endif

	mempool_destroy(s->tracked_chunk_pool);

	__free_exceptions(s);

	mempool_destroy(s->pending_pool);

	dm_put_device(ti, s->origin);
	dm_put_device(ti, s->cow);

	kfree(s);
}

/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		generic_make_request(bio);
		bio = n;
	}
}

static void flush_queued_bios(struct work_struct *work)
{
	struct dm_snapshot *s =
		container_of(work, struct dm_snapshot, queued_bios_work);
	struct bio *queued_bios;
	unsigned long flags;

	spin_lock_irqsave(&s->pe_lock, flags);
	queued_bios = bio_list_get(&s->queued_bios);
	spin_unlock_irqrestore(&s->pe_lock, flags);

	flush_bios(queued_bios);
}

/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		bio_io_error(bio);
		bio = n;
	}
}

static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
	if (!s->valid)
		return;

	if (err == -EIO)
		DMERR("Invalidating snapshot: Error reading/writing.");
	else if (err == -ENOMEM)
		DMERR("Invalidating snapshot: Unable to allocate exception.");

	if (s->store.drop_snapshot)
		s->store.drop_snapshot(&s->store);

	s->valid = 0;

	dm_table_event(s->ti->table);
}

static void get_pending_exception(struct dm_snap_pending_exception *pe)
{
	atomic_inc(&pe->ref_count);
}

static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snap_pending_exception *primary_pe;
	struct bio *origin_bios = NULL;

	primary_pe = pe->primary_pe;

	/*
	 * If this pe is involved in a write to the origin and
	 * it is the last sibling to complete then release
	 * the bios for the original write to the origin.
	 */
	if (primary_pe &&
	    atomic_dec_and_test(&primary_pe->ref_count))
		origin_bios = bio_list_get(&primary_pe->origin_bios);

	/*
	 * Free the pe if it's not linked to an origin write or if
	 * it's not itself a primary pe.
	 */
	if (!primary_pe || primary_pe != pe)
		free_pending_exception(pe);

	/*
	 * Free the primary pe if nothing references it.
	 */
	if (primary_pe && !atomic_read(&primary_pe->ref_count))
		free_pending_exception(primary_pe);

	return origin_bios;
}

static void pending_complete(struct dm_snap_pending_exception *pe, int success)
{
	struct dm_snap_exception *e;
	struct dm_snapshot *s = pe->snap;
	struct bio *origin_bios = NULL;
	struct bio *snapshot_bios = NULL;
	int error = 0;

	if (!success) {
		/* Read/write error - snapshot is unusable */
		down_write(&s->lock);
		__invalidate_snapshot(s, -EIO);
		error = 1;
		goto out;
	}

	e = alloc_exception();
	if (!e) {
		down_write(&s->lock);
		__invalidate_snapshot(s, -ENOMEM);
		error = 1;
		goto out;
	}
	*e = pe->e;

	down_write(&s->lock);
	if (!s->valid) {
		free_exception(e);
		error = 1;
		goto out;
	}

	/*
	 * Check for conflicting reads.  This is extremely improbable,
	 * so yield() is sufficient and there is no need for a wait queue.
	 */
	while (__chunk_is_tracked(s, pe->e.old_chunk))
		yield();

	/*
	 * Add a proper exception, and remove the
	 * in-flight exception from the list.
	 */
	insert_completed_exception(s, e);

 out:
	remove_exception(&pe->e);
	snapshot_bios = bio_list_get(&pe->snapshot_bios);
	origin_bios = put_pending_exception(pe);

	up_write(&s->lock);

	/* Submit any pending write bios */
	if (error)
		error_bios(snapshot_bios);
	else
		flush_bios(snapshot_bios);

	flush_bios(origin_bios);
}

static void commit_callback(void *context, int success)
{
	struct dm_snap_pending_exception *pe = context;

	pending_complete(pe, success);
}

/*
 * Called when the copy I/O has finished.  kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned long write_err, void *context)
{
	struct dm_snap_pending_exception *pe = context;
	struct dm_snapshot *s = pe->snap;

	if (read_err || write_err)
		pending_complete(pe, 0);

	else
		/* Update the metadata if we are persistent */
		s->store.commit_exception(&s->store, &pe->e, commit_callback,
					  pe);
}

/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;
	struct dm_io_region src, dest;
	struct block_device *bdev = s->origin->bdev;
	sector_t dev_size;

	dev_size = get_dev_size(bdev);

	src.bdev = bdev;
	src.sector = chunk_to_sector(s, pe->e.old_chunk);
	src.count = min(s->chunk_size, dev_size - src.sector);

	dest.bdev = s->cow->bdev;
	dest.sector = chunk_to_sector(s, pe->e.new_chunk);
	dest.count = src.count;

	/* Hand over to kcopyd */
	dm_kcopyd_copy(s->kcopyd_client,
		       &src, 1, &dest, 0, copy_callback, pe);
}

/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on snap->lock before calling
 * this.
 */
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s, struct bio *bio)
{
	struct dm_snap_exception *e;
	struct dm_snap_pending_exception *pe;
	chunk_t chunk = sector_to_chunk(s, bio->bi_sector);

	/*
	 * Is there a pending exception for this already ?
	 */
	e = lookup_exception(&s->pending, chunk);
	if (e) {
		/* cast the exception to a pending exception */
		pe = container_of(e, struct dm_snap_pending_exception, e);
		goto out;
	}

	/*
	 * Create a new pending exception, we don't want
	 * to hold the lock while we do this.
	 */
	up_write(&s->lock);
	pe = alloc_pending_exception(s);
	down_write(&s->lock);

	if (!s->valid) {
		free_pending_exception(pe);
		return NULL;
	}

	e = lookup_exception(&s->pending, chunk);
	if (e) {
		free_pending_exception(pe);
		pe = container_of(e, struct dm_snap_pending_exception, e);
		goto out;
	}

	pe->e.old_chunk = chunk;
	bio_list_init(&pe->origin_bios);
	bio_list_init(&pe->snapshot_bios);
	pe->primary_pe = NULL;
	atomic_set(&pe->ref_count, 0);
	pe->started = 0;

	if (s->store.prepare_exception(&s->store, &pe->e)) {
		free_pending_exception(pe);
		return NULL;
	}

	get_pending_exception(pe);
	insert_exception(&s->pending, &pe->e);

 out:
	return pe;
}

static void remap_exception(struct dm_snapshot *s, struct dm_snap_exception *e,
			    struct bio *bio, chunk_t chunk)
{
	bio->bi_bdev = s->cow->bdev;
	bio->bi_sector = chunk_to_sector(s, dm_chunk_number(e->new_chunk) +
			 (chunk - e->old_chunk)) +
			 (bio->bi_sector & s->chunk_mask);
}
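/*
 * Worked example for remap_exception() (illustrative): with a chunk
 * size of 8 sectors (chunk_mask 7), an exception mapping old_chunk 100
 * onto new_chunk 50 with a consecutive count covering chunk 101, a bio
 * at sector 811 (chunk 101, offset 3) is redirected to the COW device
 * at sector (50 + (101 - 100)) * 8 + 3 = 411.
 */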
static int snapshot_map(struct dm_target *ti, struct bio *bio,
			union map_info *map_context)
{
	struct dm_snap_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;
	struct dm_snap_pending_exception *pe = NULL;

	chunk = sector_to_chunk(s, bio->bi_sector);

	/* Full snapshots are not usable */
	/* To get here the table must be live so s->active is always set. */
	if (!s->valid)
		return -EIO;

	/* FIXME: should only take write lock if we need
	 * to copy an exception */
	down_write(&s->lock);

	if (!s->valid) {
		r = -EIO;
		goto out_unlock;
	}

	/* If the block is already remapped - use that, else remap it */
	e = lookup_exception(&s->complete, chunk);
	if (e) {
		remap_exception(s, e, bio, chunk);
		goto out_unlock;
	}

	/*
	 * Write to snapshot - higher level takes care of RW/RO
	 * flags so we should only get this if we are
	 * writeable.
	 */
	if (bio_rw(bio) == WRITE) {
		pe = __find_pending_exception(s, bio);
		if (!pe) {
			__invalidate_snapshot(s, -ENOMEM);
			r = -EIO;
			goto out_unlock;
		}

		remap_exception(s, &pe->e, bio, chunk);
		bio_list_add(&pe->snapshot_bios, bio);

		r = DM_MAPIO_SUBMITTED;

		if (!pe->started) {
			/* this is protected by snap->lock */
			pe->started = 1;
			up_write(&s->lock);
			start_copy(pe);
			goto out;
		}
	} else {
		bio->bi_bdev = s->origin->bdev;
		map_context->ptr = track_chunk(s, chunk);
	}

 out_unlock:
	up_write(&s->lock);
 out:
	return r;
}

static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
			   int error, union map_info *map_context)
{
	struct dm_snapshot *s = ti->private;
	struct dm_snap_tracked_chunk *c = map_context->ptr;

	if (c)
		stop_tracking_chunk(s, c);

	return 0;
}

static void snapshot_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	down_write(&s->lock);
	s->active = 1;
	up_write(&s->lock);
}

static int snapshot_status(struct dm_target *ti, status_type_t type,
			   char *result, unsigned int maxlen)
{
	struct dm_snapshot *snap = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		if (!snap->valid)
			snprintf(result, maxlen, "Invalid");
		else {
			if (snap->store.fraction_full) {
				sector_t numerator, denominator;
				snap->store.fraction_full(&snap->store,
							  &numerator,
							  &denominator);
				snprintf(result, maxlen, "%llu/%llu",
					 (unsigned long long)numerator,
					 (unsigned long long)denominator);
			}
			else
				snprintf(result, maxlen, "Unknown");
		}
		break;

	case STATUSTYPE_TABLE:
		/*
		 * kdevname returns a static pointer so we need
		 * to make private copies if the output is to
		 * make sense.
		 */
		snprintf(result, maxlen, "%s %s %c %llu",
			 snap->origin->name, snap->cow->name,
			 snap->type,
			 (unsigned long long)snap->chunk_size);
		break;
	}

	return 0;
}

/*-----------------------------------------------------------------
 * Origin methods
 *---------------------------------------------------------------*/
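/*
 * How the pending exceptions for one origin write hang together
 * (illustrative): suppose a write hits a chunk that two active
 * snapshots still need to copy.  The first snapshot's pe becomes the
 * primary_pe; its ref_count goes to 1 on creation, to 2 when it is
 * linked to itself and to 3 when the second snapshot's pe links to it.
 * __origin_write() drops the extra "first" reference after its loop,
 * and each pending_complete() drops one more, so the origin bio is
 * only released once both copies have finished.
 */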
static int __origin_write(struct list_head *snapshots, struct bio *bio)
{
	int r = DM_MAPIO_REMAPPED, first = 0;
	struct dm_snapshot *snap;
	struct dm_snap_exception *e;
	struct dm_snap_pending_exception *pe, *next_pe, *primary_pe = NULL;
	chunk_t chunk;
	LIST_HEAD(pe_queue);

	/* Do all the snapshots on this origin */
	list_for_each_entry (snap, snapshots, list) {

		down_write(&snap->lock);

		/* Only deal with valid and active snapshots */
		if (!snap->valid || !snap->active)
			goto next_snapshot;

		/* Nothing to do if writing beyond end of snapshot */
		if (bio->bi_sector >= dm_table_get_size(snap->ti->table))
			goto next_snapshot;

		/*
		 * Remember, different snapshots can have
		 * different chunk sizes.
		 */
		chunk = sector_to_chunk(snap, bio->bi_sector);

		/*
		 * Check exception table to see if block
		 * is already remapped in this snapshot
		 * and trigger an exception if not.
		 *
		 * ref_count is initialised to 1 so pending_complete()
		 * won't destroy the primary_pe while we're inside this loop.
		 */
		e = lookup_exception(&snap->complete, chunk);
		if (e)
			goto next_snapshot;

		pe = __find_pending_exception(snap, bio);
		if (!pe) {
			__invalidate_snapshot(snap, -ENOMEM);
			goto next_snapshot;
		}

		if (!primary_pe) {
			/*
			 * Either every pe here has same
			 * primary_pe or none has one yet.
			 */
			if (pe->primary_pe)
				primary_pe = pe->primary_pe;
			else {
				primary_pe = pe;
				first = 1;
			}

			bio_list_add(&primary_pe->origin_bios, bio);

			r = DM_MAPIO_SUBMITTED;
		}

		if (!pe->primary_pe) {
			pe->primary_pe = primary_pe;
			get_pending_exception(primary_pe);
		}

		if (!pe->started) {
			pe->started = 1;
			list_add_tail(&pe->list, &pe_queue);
		}

 next_snapshot:
		up_write(&snap->lock);
	}

	if (!primary_pe)
		return r;

	/*
	 * If this is the first time we're processing this chunk and
	 * ref_count is now 1 it means all the pending exceptions
	 * got completed while we were in the loop above, so it falls to
	 * us here to remove the primary_pe and submit any origin_bios.
	 */

	if (first && atomic_dec_and_test(&primary_pe->ref_count)) {
		flush_bios(bio_list_get(&primary_pe->origin_bios));
		free_pending_exception(primary_pe);
		/* If we got here, pe_queue is necessarily empty. */
		return r;
	}

	/*
	 * Now that we have a complete pe list we can start the copying.
	 */
	list_for_each_entry_safe(pe, next_pe, &pe_queue, list)
		start_copy(pe);

	return r;
}

/*
 * Called on a write from the origin driver.
 */
static int do_origin(struct dm_dev *origin, struct bio *bio)
{
	struct origin *o;
	int r = DM_MAPIO_REMAPPED;

	down_read(&_origins_lock);
	o = __lookup_origin(origin->bdev);
	if (o)
		r = __origin_write(&o->snapshots, bio);
	up_read(&_origins_lock);

	return r;
}

/*
 * Origin: maps a linear range of a device, with hooks for snapshotting.
 */

/*
 * Construct an origin mapping: <dev_path>
 * The context for an origin is merely a 'struct dm_dev *'
 * pointing to the real device.
 */
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	struct dm_dev *dev;

	if (argc != 1) {
		ti->error = "origin: incorrect number of arguments";
		return -EINVAL;
	}

	r = dm_get_device(ti, argv[0], 0, ti->len,
			  dm_table_get_mode(ti->table), &dev);
	if (r) {
		ti->error = "Cannot get target device";
		return r;
	}

	ti->private = dev;
	return 0;
}

static void origin_dtr(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;
	dm_put_device(ti, dev);
}

static int origin_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	struct dm_dev *dev = ti->private;
	bio->bi_bdev = dev->bdev;

	/* Only tell snapshots if this is a write */
	return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
}

#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))
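/*
 * Example of the min_not_zero() result used below (illustrative): an
 * origin with two snapshots using chunk sizes of 16 and 8 sectors ends
 * up with split_io = 8, so no bio handed down by the core spans a chunk
 * boundary of either snapshot and __origin_write() only ever has to
 * trigger one exception per snapshot per bio.
 */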
/*
 * Set the target "split_io" field to the minimum of all the snapshots'
 * chunk sizes.
 */
static void origin_resume(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;
	struct dm_snapshot *snap;
	struct origin *o;
	chunk_t chunk_size = 0;

	down_read(&_origins_lock);
	o = __lookup_origin(dev->bdev);
	if (o)
		list_for_each_entry (snap, &o->snapshots, list)
			chunk_size = min_not_zero(chunk_size, snap->chunk_size);
	up_read(&_origins_lock);

	ti->split_io = chunk_size;
}

static int origin_status(struct dm_target *ti, status_type_t type, char *result,
			 unsigned int maxlen)
{
	struct dm_dev *dev = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		snprintf(result, maxlen, "%s", dev->name);
		break;
	}

	return 0;
}

static struct target_type origin_target = {
	.name    = "snapshot-origin",
	.version = {1, 6, 0},
	.module  = THIS_MODULE,
	.ctr     = origin_ctr,
	.dtr     = origin_dtr,
	.map     = origin_map,
	.resume  = origin_resume,
	.status  = origin_status,
};

static struct target_type snapshot_target = {
	.name    = "snapshot",
	.version = {1, 6, 0},
	.module  = THIS_MODULE,
	.ctr     = snapshot_ctr,
	.dtr     = snapshot_dtr,
	.map     = snapshot_map,
	.end_io  = snapshot_end_io,
	.resume  = snapshot_resume,
	.status  = snapshot_status,
};

static int __init dm_snapshot_init(void)
{
	int r;

	r = dm_register_target(&snapshot_target);
	if (r) {
		DMERR("snapshot target register failed %d", r);
		return r;
	}

	r = dm_register_target(&origin_target);
	if (r < 0) {
		DMERR("Origin target register failed %d", r);
		goto bad1;
	}

	r = init_origin_hash();
	if (r) {
		DMERR("init_origin_hash failed.");
		goto bad2;
	}

	exception_cache = KMEM_CACHE(dm_snap_exception, 0);
	if (!exception_cache) {
		DMERR("Couldn't create exception cache.");
		r = -ENOMEM;
		goto bad3;
	}

	pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
	if (!pending_cache) {
		DMERR("Couldn't create pending cache.");
		r = -ENOMEM;
		goto bad4;
	}

	tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0);
	if (!tracked_chunk_cache) {
		DMERR("Couldn't create cache to track chunks in use.");
		r = -ENOMEM;
		goto bad5;
	}

	ksnapd = create_singlethread_workqueue("ksnapd");
	if (!ksnapd) {
		DMERR("Failed to create ksnapd workqueue.");
		r = -ENOMEM;
		goto bad_pending_pool;
	}

	return 0;

 bad_pending_pool:
	kmem_cache_destroy(tracked_chunk_cache);
 bad5:
	kmem_cache_destroy(pending_cache);
 bad4:
	kmem_cache_destroy(exception_cache);
 bad3:
	exit_origin_hash();
 bad2:
	dm_unregister_target(&origin_target);
 bad1:
	dm_unregister_target(&snapshot_target);
	return r;
}

static void __exit dm_snapshot_exit(void)
{
	int r;

	destroy_workqueue(ksnapd);

	r = dm_unregister_target(&snapshot_target);
	if (r)
		DMERR("snapshot unregister failed %d", r);

	r = dm_unregister_target(&origin_target);
	if (r)
		DMERR("origin unregister failed %d", r);

	exit_origin_hash();
	kmem_cache_destroy(pending_cache);
	kmem_cache_destroy(exception_cache);
	kmem_cache_destroy(tracked_chunk_cache);
}

/* Module hooks */
module_init(dm_snapshot_init);
module_exit(dm_snapshot_exit);

MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");