/*
 * dm-snapshot.c
 *
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/ctype.h>
#include <linux/device-mapper.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>

#include "dm-snap.h"
#include "dm-bio-list.h"

#define DM_MSG_PREFIX "snapshots"

/*
 * The percentage increment we will wake up users at
 */
#define WAKE_UP_PERCENT 5

/*
 * kcopyd priority of snapshot operations
 */
#define SNAPSHOT_COPY_PRIORITY 2

/*
 * Reserve 1MB for each snapshot initially (with minimum of 1 page).
 */
#define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1)

/*
 * The size of the mempool used to track chunks in use.
 */
#define MIN_IOS 256

static struct workqueue_struct *ksnapd;
static void flush_queued_bios(struct work_struct *work);

struct dm_snap_pending_exception {
	struct dm_snap_exception e;

	/*
	 * Origin buffers waiting for this to complete are held
	 * in a bio list
	 */
	struct bio_list origin_bios;
	struct bio_list snapshot_bios;

	/*
	 * Short-term queue of pending exceptions prior to submission.
	 */
	struct list_head list;

	/*
	 * The primary pending_exception is the one that holds
	 * the ref_count and the list of origin_bios for a
	 * group of pending_exceptions. It is always last to get freed.
	 * These fields get set up when writing to the origin.
	 */
	struct dm_snap_pending_exception *primary_pe;

	/*
	 * Number of pending_exceptions processing this chunk.
	 * When this drops to zero we must complete the origin bios.
	 * If incrementing or decrementing this, hold pe->snap->lock for
	 * the sibling concerned and not pe->primary_pe->snap->lock unless
	 * they are the same.
	 */
	atomic_t ref_count;

	/* Pointer back to snapshot context */
	struct dm_snapshot *snap;

	/*
	 * 1 indicates the exception has already been sent to
	 * kcopyd.
	 */
	int started;
};

/*
 * Hash table mapping origin volumes to lists of snapshots and
 * a lock to protect it
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;

struct dm_snap_tracked_chunk {
	struct hlist_node node;
	chunk_t chunk;
};

static struct kmem_cache *tracked_chunk_cache;

static struct dm_snap_tracked_chunk *track_chunk(struct dm_snapshot *s,
						 chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c = mempool_alloc(s->tracked_chunk_pool,
							GFP_NOIO);
	unsigned long flags;

	c->chunk = chunk;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_add_head(&c->node,
		       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

	return c;
}

static void stop_tracking_chunk(struct dm_snapshot *s,
				struct dm_snap_tracked_chunk *c)
{
	unsigned long flags;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_del(&c->node);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

	mempool_free(c, s->tracked_chunk_pool);
}

static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c;
	struct hlist_node *hn;
	int found = 0;

	spin_lock_irq(&s->tracked_chunk_lock);

	hlist_for_each_entry(c, hn,
	    &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
		if (c->chunk == chunk) {
			found = 1;
			break;
		}
	}

	spin_unlock_irq(&s->tracked_chunk_lock);

	return found;
}

/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
	/* The origin device */
	struct block_device *bdev;

	struct list_head hash_list;

	/* List of snapshots for this origin */
	struct list_head snapshots;
};

/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF
static struct list_head *_origins;
static struct rw_semaphore _origins_lock;

static int init_origin_hash(void)
{
	int i;

	_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
			   GFP_KERNEL);
	if (!_origins) {
		DMERR("unable to allocate memory");
		return -ENOMEM;
	}

	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
		INIT_LIST_HEAD(_origins + i);
	init_rwsem(&_origins_lock);

	return 0;
}

static void exit_origin_hash(void)
{
	kfree(_origins);
}

static unsigned origin_hash(struct block_device *bdev)
{
	return bdev->bd_dev & ORIGIN_MASK;
}

static struct origin *__lookup_origin(struct block_device *origin)
{
	struct list_head *ol;
	struct origin *o;

	ol = &_origins[origin_hash(origin)];
	list_for_each_entry (o, ol, hash_list)
		if (bdev_equal(o->bdev, origin))
			return o;

	return NULL;
}

static void __insert_origin(struct origin *o)
{
	struct list_head *sl = &_origins[origin_hash(o->bdev)];
	list_add_tail(&o->hash_list, sl);
}

/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
	struct origin *o, *new_o;
	struct block_device *bdev = snap->origin->bdev;

	new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
	if (!new_o)
		return -ENOMEM;

	down_write(&_origins_lock);
	o = __lookup_origin(bdev);

	if (o)
		kfree(new_o);
	else {
		/* New origin */
		o = new_o;

		/* Initialise the struct */
		INIT_LIST_HEAD(&o->snapshots);
		o->bdev = bdev;

		__insert_origin(o);
	}

	list_add_tail(&snap->list, &o->snapshots);

	up_write(&_origins_lock);
	return 0;
}

static void unregister_snapshot(struct dm_snapshot *s)
{
	struct origin *o;

	down_write(&_origins_lock);
	o = __lookup_origin(s->origin->bdev);

	list_del(&s->list);
	if (list_empty(&o->snapshots)) {
		list_del(&o->hash_list);
		kfree(o);
	}

	up_write(&_origins_lock);
}

/*
 * Implementation of the exception hash tables.
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
 */
static int init_exception_table(struct exception_table *et, uint32_t size,
				unsigned hash_shift)
{
	unsigned int i;

	et->hash_shift = hash_shift;
	et->hash_mask = size - 1;
	et->table = dm_vcalloc(size, sizeof(struct list_head));
	if (!et->table)
		return -ENOMEM;

	for (i = 0; i < size; i++)
		INIT_LIST_HEAD(et->table + i);

	return 0;
}

static void exit_exception_table(struct exception_table *et, struct kmem_cache *mem)
{
	struct list_head *slot;
	struct dm_snap_exception *ex, *next;
	int i, size;

	size = et->hash_mask + 1;
	for (i = 0; i < size; i++) {
		slot = et->table + i;

		list_for_each_entry_safe (ex, next, slot, hash_list)
			kmem_cache_free(mem, ex);
	}

	vfree(et->table);
}

static uint32_t exception_hash(struct exception_table *et, chunk_t chunk)
{
	return (chunk >> et->hash_shift) & et->hash_mask;
}

static void insert_exception(struct exception_table *eh,
			     struct dm_snap_exception *e)
{
	struct list_head *l = &eh->table[exception_hash(eh, e->old_chunk)];
	list_add(&e->hash_list, l);
}

static void remove_exception(struct dm_snap_exception *e)
{
	list_del(&e->hash_list);
}
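
/*
 * Worked example of the consecutive-chunk grouping (illustrative numbers;
 * dm_chunk_number(), dm_consecutive_chunk_count() and
 * dm_consecutive_chunk_count_inc() are the helpers from dm-snap.h used
 * below): an exception with old_chunk 10, new_chunk 20 and a consecutive
 * count of 2 covers origin chunks 10-12 remapped to COW chunks 20-22.
 * insert_completed_exception() merges a new 13 -> 23 mapping into it by
 * bumping the count, merges a new 9 -> 19 mapping by decrementing both
 * base chunks and bumping the count, and lookup_exception() matches any
 * old_chunk that falls within the run.
 */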

/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct dm_snap_exception *lookup_exception(struct exception_table *et,
						  chunk_t chunk)
{
	struct list_head *slot;
	struct dm_snap_exception *e;

	slot = &et->table[exception_hash(et, chunk)];
	list_for_each_entry (e, slot, hash_list)
		if (chunk >= e->old_chunk &&
		    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
			return e;

	return NULL;
}

static struct dm_snap_exception *alloc_exception(void)
{
	struct dm_snap_exception *e;

	e = kmem_cache_alloc(exception_cache, GFP_NOIO);
	if (!e)
		e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

	return e;
}

static void free_exception(struct dm_snap_exception *e)
{
	kmem_cache_free(exception_cache, e);
}

static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{
	struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
							     GFP_NOIO);

	atomic_inc(&s->pending_exceptions_count);
	pe->snap = s;

	return pe;
}

static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;

	mempool_free(pe, s->pending_pool);
	smp_mb__before_atomic_dec();
	atomic_dec(&s->pending_exceptions_count);
}

static void insert_completed_exception(struct dm_snapshot *s,
				       struct dm_snap_exception *new_e)
{
	struct exception_table *eh = &s->complete;
	struct list_head *l;
	struct dm_snap_exception *e = NULL;

	l = &eh->table[exception_hash(eh, new_e->old_chunk)];

	/* Add immediately if this table doesn't support consecutive chunks */
	if (!eh->hash_shift)
		goto out;

	/* List is ordered by old_chunk */
	list_for_each_entry_reverse(e, l, hash_list) {
		/* Insert after an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk +
					 dm_consecutive_chunk_count(e) + 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
					 dm_consecutive_chunk_count(e) + 1)) {
			dm_consecutive_chunk_count_inc(e);
			free_exception(new_e);
			return;
		}

		/* Insert before an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk - 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
			dm_consecutive_chunk_count_inc(e);
			e->old_chunk--;
			e->new_chunk--;
			free_exception(new_e);
			return;
		}

		if (new_e->old_chunk > e->old_chunk)
			break;
	}

out:
	list_add(&new_e->hash_list, e ? &e->hash_list : l);
}

int dm_add_exception(struct dm_snapshot *s, chunk_t old, chunk_t new)
{
	struct dm_snap_exception *e;

	e = alloc_exception();
	if (!e)
		return -ENOMEM;

	e->old_chunk = old;

	/* Consecutive_count is implicitly initialised to zero */
	e->new_chunk = new;

	insert_completed_exception(s, e);

	return 0;
}

/*
 * Hard coded magic.
 */
static int calc_max_buckets(void)
{
	/* use a fixed size of 2MB */
	unsigned long mem = 2 * 1024 * 1024;
	mem /= sizeof(struct list_head);

	return mem;
}

/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
	sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;

	/*
	 * Calculate based on the size of the original volume or
	 * the COW volume...
	 */
	cow_dev_size = get_dev_size(s->cow->bdev);
	origin_dev_size = get_dev_size(s->origin->bdev);
	max_buckets = calc_max_buckets();

	hash_size = min(origin_dev_size, cow_dev_size) >> s->chunk_shift;
	hash_size = min(hash_size, max_buckets);

	hash_size = rounddown_pow_of_two(hash_size);
	if (init_exception_table(&s->complete, hash_size,
				 DM_CHUNK_CONSECUTIVE_BITS))
		return -ENOMEM;

	/*
	 * Allocate hash table for in-flight exceptions
	 * Make this smaller than the real hash table
	 */
	hash_size >>= 3;
	if (hash_size < 64)
		hash_size = 64;

	if (init_exception_table(&s->pending, hash_size, 0)) {
		exit_exception_table(&s->complete, exception_cache);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Round a number up to the nearest 'size' boundary. size must
 * be a power of 2.
 */
static ulong round_up(ulong n, ulong size)
{
	size--;
	return (n + size) & ~size;
}

static int set_chunk_size(struct dm_snapshot *s, const char *chunk_size_arg,
			  char **error)
{
	unsigned long chunk_size;
	char *value;

	chunk_size = simple_strtoul(chunk_size_arg, &value, 10);
	if (*chunk_size_arg == '\0' || *value != '\0') {
		*error = "Invalid chunk size";
		return -EINVAL;
	}

	if (!chunk_size) {
		s->chunk_size = s->chunk_mask = s->chunk_shift = 0;
		return 0;
	}

	/*
	 * Chunk size must be multiple of page size. Silently
	 * round up if it's not.
	 */
	chunk_size = round_up(chunk_size, PAGE_SIZE >> 9);

	/* Check chunk_size is a power of 2 */
	if (!is_power_of_2(chunk_size)) {
		*error = "Chunk size is not a power of 2";
		return -EINVAL;
	}

	/* Validate the chunk size against the device block size */
	if (chunk_size % (bdev_hardsect_size(s->cow->bdev) >> 9)) {
		*error = "Chunk size is not a multiple of device blocksize";
		return -EINVAL;
	}

	s->chunk_size = chunk_size;
	s->chunk_mask = chunk_size - 1;
	s->chunk_shift = ffs(chunk_size) - 1;

	return 0;
}
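
/*
 * Illustrative table line for the constructor below (device names are
 * hypothetical): a persistent snapshot of /dev/vg/base onto /dev/vg/cow
 * with 16-sector (8KiB) chunks would be loaded as
 *
 *     0 <origin_size_in_sectors> snapshot /dev/vg/base /dev/vg/cow P 16
 *
 * for example via "dmsetup create snap" with that line on stdin.
 */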

/*
 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
 */
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_snapshot *s;
	int i;
	int r = -EINVAL;
	char persistent;
	char *origin_path;
	char *cow_path;

	if (argc != 4) {
		ti->error = "requires exactly 4 arguments";
		r = -EINVAL;
		goto bad1;
	}

	origin_path = argv[0];
	cow_path = argv[1];
	persistent = toupper(*argv[2]);

	if (persistent != 'P' && persistent != 'N') {
		ti->error = "Persistent flag is not P or N";
		r = -EINVAL;
		goto bad1;
	}

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (s == NULL) {
		ti->error = "Cannot allocate snapshot context private "
			    "structure";
		r = -ENOMEM;
		goto bad1;
	}

	r = dm_get_device(ti, origin_path, 0, ti->len, FMODE_READ, &s->origin);
	if (r) {
		ti->error = "Cannot get origin device";
		goto bad2;
	}

	r = dm_get_device(ti, cow_path, 0, 0,
			  FMODE_READ | FMODE_WRITE, &s->cow);
	if (r) {
		dm_put_device(ti, s->origin);
		ti->error = "Cannot get COW device";
		goto bad2;
	}

	r = set_chunk_size(s, argv[3], &ti->error);
	if (r)
		goto bad3;

	s->type = persistent;

	s->valid = 1;
	s->active = 0;
	atomic_set(&s->pending_exceptions_count, 0);
	init_rwsem(&s->lock);
	spin_lock_init(&s->pe_lock);
	s->ti = ti;

	/* Allocate hash table for COW data */
	if (init_hash_tables(s)) {
		ti->error = "Unable to allocate hash table space";
		r = -ENOMEM;
		goto bad3;
	}

	s->store.snap = s;

	if (persistent == 'P')
		r = dm_create_persistent(&s->store);
	else
		r = dm_create_transient(&s->store);

	if (r) {
		ti->error = "Couldn't create exception store";
		r = -EINVAL;
		goto bad4;
	}

	r = dm_kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
	if (r) {
		ti->error = "Could not create kcopyd client";
		goto bad5;
	}

	s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
	if (!s->pending_pool) {
		ti->error = "Could not allocate mempool for pending exceptions";
		goto bad6;
	}

	s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS,
							 tracked_chunk_cache);
	if (!s->tracked_chunk_pool) {
		ti->error = "Could not allocate tracked_chunk mempool for "
			    "tracking reads";
		goto bad_tracked_chunk_pool;
	}

	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);

	spin_lock_init(&s->tracked_chunk_lock);

	/* Metadata must only be loaded into one table at once */
	r = s->store.read_metadata(&s->store);
	if (r < 0) {
		ti->error = "Failed to read snapshot metadata";
		goto bad_load_and_register;
	} else if (r > 0) {
		s->valid = 0;
		DMWARN("Snapshot is marked invalid.");
	}

	bio_list_init(&s->queued_bios);
	INIT_WORK(&s->queued_bios_work, flush_queued_bios);

	/* Add snapshot to the list of snapshots for this origin */
	/* Exceptions aren't triggered till snapshot_resume() is called */
	if (register_snapshot(s)) {
		r = -EINVAL;
		ti->error = "Cannot register snapshot origin";
		goto bad_load_and_register;
	}

	ti->private = s;
	ti->split_io = s->chunk_size;

	return 0;

 bad_load_and_register:
	mempool_destroy(s->tracked_chunk_pool);

 bad_tracked_chunk_pool:
	mempool_destroy(s->pending_pool);

 bad6:
	dm_kcopyd_client_destroy(s->kcopyd_client);

 bad5:
	s->store.destroy(&s->store);

 bad4:
	exit_exception_table(&s->pending, pending_cache);
	exit_exception_table(&s->complete, exception_cache);

 bad3:
	dm_put_device(ti, s->cow);
	dm_put_device(ti, s->origin);

 bad2:
	kfree(s);

 bad1:
	return r;
}

static void __free_exceptions(struct dm_snapshot *s)
{
	dm_kcopyd_client_destroy(s->kcopyd_client);
	s->kcopyd_client = NULL;

	exit_exception_table(&s->pending, pending_cache);
	exit_exception_table(&s->complete, exception_cache);

	s->store.destroy(&s->store);
}

static void snapshot_dtr(struct dm_target *ti)
{
#ifdef CONFIG_DM_DEBUG
	int i;
#endif
	struct dm_snapshot *s = ti->private;

	flush_workqueue(ksnapd);

	/* Prevent further origin writes from using this snapshot. */
	/* After this returns there can be no new kcopyd jobs. */
	unregister_snapshot(s);

	while (atomic_read(&s->pending_exceptions_count))
		yield();
	/*
	 * Ensure instructions in mempool_destroy aren't reordered
	 * before atomic_read.
	 */
	smp_mb();

#ifdef CONFIG_DM_DEBUG
	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
#endif

	mempool_destroy(s->tracked_chunk_pool);

	__free_exceptions(s);

	mempool_destroy(s->pending_pool);

	dm_put_device(ti, s->origin);
	dm_put_device(ti, s->cow);

	kfree(s);
}

/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		generic_make_request(bio);
		bio = n;
	}
}

static void flush_queued_bios(struct work_struct *work)
{
	struct dm_snapshot *s =
		container_of(work, struct dm_snapshot, queued_bios_work);
	struct bio *queued_bios;
	unsigned long flags;

	spin_lock_irqsave(&s->pe_lock, flags);
	queued_bios = bio_list_get(&s->queued_bios);
	spin_unlock_irqrestore(&s->pe_lock, flags);

	flush_bios(queued_bios);
}

/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		bio_io_error(bio);
		bio = n;
	}
}

static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
	if (!s->valid)
		return;

	if (err == -EIO)
		DMERR("Invalidating snapshot: Error reading/writing.");
	else if (err == -ENOMEM)
		DMERR("Invalidating snapshot: Unable to allocate exception.");

	if (s->store.drop_snapshot)
		s->store.drop_snapshot(&s->store);

	s->valid = 0;

	dm_table_event(s->ti->table);
}

static void get_pending_exception(struct dm_snap_pending_exception *pe)
{
	atomic_inc(&pe->ref_count);
}

static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snap_pending_exception *primary_pe;
	struct bio *origin_bios = NULL;

	primary_pe = pe->primary_pe;

	/*
	 * If this pe is involved in a write to the origin and
	 * it is the last sibling to complete then release
	 * the bios for the original write to the origin.
	 */
	if (primary_pe &&
	    atomic_dec_and_test(&primary_pe->ref_count)) {
		origin_bios = bio_list_get(&primary_pe->origin_bios);
		free_pending_exception(primary_pe);
	}

	/*
	 * Free the pe if it's not linked to an origin write or if
	 * it's not itself a primary pe.
	 */
	if (!primary_pe || primary_pe != pe)
		free_pending_exception(pe);

	return origin_bios;
}
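
/*
 * Illustrative walkthrough of the primary_pe/ref_count scheme (a reading
 * of put_pending_exception() above and __origin_write() below, for one
 * origin write hitting two snapshots that have not yet remapped the
 * chunk): each snapshot gets its own pending exception and the first one
 * becomes the primary_pe, queues the origin bio on its origin_bios list
 * and already holds one reference from __find_pending_exception().  Both
 * pending exceptions then take a further reference on the primary, giving
 * a count of three.  __origin_write() drops the initial reference once its
 * loop is done and each copy completion drops one more here; whichever
 * path brings the count to zero releases the queued origin bios and frees
 * the primary_pe, so the origin write only proceeds after every snapshot
 * has copied out the old data.
 */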

static void pending_complete(struct dm_snap_pending_exception *pe, int success)
{
	struct dm_snap_exception *e;
	struct dm_snapshot *s = pe->snap;
	struct bio *origin_bios = NULL;
	struct bio *snapshot_bios = NULL;
	int error = 0;

	if (!success) {
		/* Read/write error - snapshot is unusable */
		down_write(&s->lock);
		__invalidate_snapshot(s, -EIO);
		error = 1;
		goto out;
	}

	e = alloc_exception();
	if (!e) {
		down_write(&s->lock);
		__invalidate_snapshot(s, -ENOMEM);
		error = 1;
		goto out;
	}
	*e = pe->e;

	down_write(&s->lock);
	if (!s->valid) {
		free_exception(e);
		error = 1;
		goto out;
	}

	/*
	 * Check for conflicting reads. This is extremely improbable,
	 * so yield() is sufficient and there is no need for a wait queue.
	 */
	while (__chunk_is_tracked(s, pe->e.old_chunk))
		yield();

	/*
	 * Add a proper exception, and remove the
	 * in-flight exception from the list.
	 */
	insert_completed_exception(s, e);

 out:
	remove_exception(&pe->e);
	snapshot_bios = bio_list_get(&pe->snapshot_bios);
	origin_bios = put_pending_exception(pe);

	up_write(&s->lock);

	/* Submit any pending write bios */
	if (error)
		error_bios(snapshot_bios);
	else
		flush_bios(snapshot_bios);

	flush_bios(origin_bios);
}

static void commit_callback(void *context, int success)
{
	struct dm_snap_pending_exception *pe = context;

	pending_complete(pe, success);
}

/*
 * Called when the copy I/O has finished. kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned long write_err, void *context)
{
	struct dm_snap_pending_exception *pe = context;
	struct dm_snapshot *s = pe->snap;

	if (read_err || write_err)
		pending_complete(pe, 0);

	else
		/* Update the metadata if we are persistent */
		s->store.commit_exception(&s->store, &pe->e, commit_callback,
					  pe);
}

/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;
	struct dm_io_region src, dest;
	struct block_device *bdev = s->origin->bdev;
	sector_t dev_size;

	dev_size = get_dev_size(bdev);

	src.bdev = bdev;
	src.sector = chunk_to_sector(s, pe->e.old_chunk);
	src.count = min(s->chunk_size, dev_size - src.sector);

	dest.bdev = s->cow->bdev;
	dest.sector = chunk_to_sector(s, pe->e.new_chunk);
	dest.count = src.count;

	/* Hand over to kcopyd */
	dm_kcopyd_copy(s->kcopyd_client,
		       &src, 1, &dest, 0, copy_callback, pe);
}

/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on snap->lock before calling
 * this.
 */
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s, struct bio *bio)
{
	struct dm_snap_exception *e;
	struct dm_snap_pending_exception *pe;
	chunk_t chunk = sector_to_chunk(s, bio->bi_sector);

	/*
	 * Is there a pending exception for this already ?
	 */
	e = lookup_exception(&s->pending, chunk);
	if (e) {
		/* cast the exception to a pending exception */
		pe = container_of(e, struct dm_snap_pending_exception, e);
		goto out;
	}

	/*
	 * Create a new pending exception, we don't want
	 * to hold the lock while we do this.
	 */
	up_write(&s->lock);
	pe = alloc_pending_exception(s);
	down_write(&s->lock);

	if (!s->valid) {
		free_pending_exception(pe);
		return NULL;
	}

	e = lookup_exception(&s->pending, chunk);
	if (e) {
		free_pending_exception(pe);
		pe = container_of(e, struct dm_snap_pending_exception, e);
		goto out;
	}

	pe->e.old_chunk = chunk;
	bio_list_init(&pe->origin_bios);
	bio_list_init(&pe->snapshot_bios);
	pe->primary_pe = NULL;
	atomic_set(&pe->ref_count, 0);
	pe->started = 0;

	if (s->store.prepare_exception(&s->store, &pe->e)) {
		free_pending_exception(pe);
		return NULL;
	}

	get_pending_exception(pe);
	insert_exception(&s->pending, &pe->e);

 out:
	return pe;
}

static void remap_exception(struct dm_snapshot *s, struct dm_snap_exception *e,
			    struct bio *bio, chunk_t chunk)
{
	bio->bi_bdev = s->cow->bdev;
	bio->bi_sector = chunk_to_sector(s, dm_chunk_number(e->new_chunk) +
			 (chunk - e->old_chunk)) +
			 (bio->bi_sector & s->chunk_mask);
}
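
/*
 * Worked example of the remapping above (illustrative numbers, assuming
 * chunk_to_sector() is the usual shift by chunk_shift): with a 16-sector
 * (8KiB) chunk size, chunk_shift is 4 and chunk_mask is 15.  A bio for
 * origin sector 37 lies in chunk 2 at offset 5; if that chunk has been
 * remapped to COW chunk 9, the bio is redirected to the COW device at
 * sector (9 << 4) + 5 = 149.  For an exception describing a run of
 * consecutive chunks, (chunk - e->old_chunk) selects the right member of
 * the run.
 */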

static int snapshot_map(struct dm_target *ti, struct bio *bio,
			union map_info *map_context)
{
	struct dm_snap_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;
	struct dm_snap_pending_exception *pe = NULL;

	chunk = sector_to_chunk(s, bio->bi_sector);

	/* Full snapshots are not usable */
	/* To get here the table must be live so s->active is always set. */
	if (!s->valid)
		return -EIO;

	/* FIXME: should only take write lock if we need
	 * to copy an exception */
	down_write(&s->lock);

	if (!s->valid) {
		r = -EIO;
		goto out_unlock;
	}

	/* If the block is already remapped - use that, else remap it */
	e = lookup_exception(&s->complete, chunk);
	if (e) {
		remap_exception(s, e, bio, chunk);
		goto out_unlock;
	}

	/*
	 * Write to snapshot - higher level takes care of RW/RO
	 * flags so we should only get this if we are
	 * writeable.
	 */
	if (bio_rw(bio) == WRITE) {
		pe = __find_pending_exception(s, bio);
		if (!pe) {
			__invalidate_snapshot(s, -ENOMEM);
			r = -EIO;
			goto out_unlock;
		}

		remap_exception(s, &pe->e, bio, chunk);
		bio_list_add(&pe->snapshot_bios, bio);

		r = DM_MAPIO_SUBMITTED;

		if (!pe->started) {
			/* this is protected by snap->lock */
			pe->started = 1;
			up_write(&s->lock);
			start_copy(pe);
			goto out;
		}
	} else {
		bio->bi_bdev = s->origin->bdev;
		map_context->ptr = track_chunk(s, chunk);
	}

 out_unlock:
	up_write(&s->lock);
 out:
	return r;
}

static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
			   int error, union map_info *map_context)
{
	struct dm_snapshot *s = ti->private;
	struct dm_snap_tracked_chunk *c = map_context->ptr;

	if (c)
		stop_tracking_chunk(s, c);

	return 0;
}

static void snapshot_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	down_write(&s->lock);
	s->active = 1;
	up_write(&s->lock);
}

static int snapshot_status(struct dm_target *ti, status_type_t type,
			   char *result, unsigned int maxlen)
{
	struct dm_snapshot *snap = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		if (!snap->valid)
			snprintf(result, maxlen, "Invalid");
		else {
			if (snap->store.fraction_full) {
				sector_t numerator, denominator;
				snap->store.fraction_full(&snap->store,
							  &numerator,
							  &denominator);
				snprintf(result, maxlen, "%llu/%llu",
					 (unsigned long long)numerator,
					 (unsigned long long)denominator);
			}
			else
				snprintf(result, maxlen, "Unknown");
		}
		break;

	case STATUSTYPE_TABLE:
		/*
		 * kdevname returns a static pointer so we need
		 * to make private copies if the output is to
		 * make sense.
		 */
		snprintf(result, maxlen, "%s %s %c %llu",
			 snap->origin->name, snap->cow->name,
			 snap->type,
			 (unsigned long long)snap->chunk_size);
		break;
	}

	return 0;
}

/*-----------------------------------------------------------------
 * Origin methods
 *---------------------------------------------------------------*/
static int __origin_write(struct list_head *snapshots, struct bio *bio)
{
	int r = DM_MAPIO_REMAPPED, first = 0;
	struct dm_snapshot *snap;
	struct dm_snap_exception *e;
	struct dm_snap_pending_exception *pe, *next_pe, *primary_pe = NULL;
	chunk_t chunk;
	LIST_HEAD(pe_queue);

	/* Do all the snapshots on this origin */
	list_for_each_entry (snap, snapshots, list) {

		down_write(&snap->lock);

		/* Only deal with valid and active snapshots */
		if (!snap->valid || !snap->active)
			goto next_snapshot;

		/* Nothing to do if writing beyond end of snapshot */
		if (bio->bi_sector >= dm_table_get_size(snap->ti->table))
			goto next_snapshot;

		/*
		 * Remember, different snapshots can have
		 * different chunk sizes.
		 */
		chunk = sector_to_chunk(snap, bio->bi_sector);

		/*
		 * Check exception table to see if block
		 * is already remapped in this snapshot
		 * and trigger an exception if not.
		 *
		 * ref_count is initialised to 1 so pending_complete()
		 * won't destroy the primary_pe while we're inside this loop.
		 */
		e = lookup_exception(&snap->complete, chunk);
		if (e)
			goto next_snapshot;

		pe = __find_pending_exception(snap, bio);
		if (!pe) {
			__invalidate_snapshot(snap, -ENOMEM);
			goto next_snapshot;
		}

		if (!primary_pe) {
			/*
			 * Either every pe here has same
			 * primary_pe or none has one yet.
			 */
			if (pe->primary_pe)
				primary_pe = pe->primary_pe;
			else {
				primary_pe = pe;
				first = 1;
			}

			bio_list_add(&primary_pe->origin_bios, bio);

			r = DM_MAPIO_SUBMITTED;
		}

		if (!pe->primary_pe) {
			pe->primary_pe = primary_pe;
			get_pending_exception(primary_pe);
		}

		if (!pe->started) {
			pe->started = 1;
			list_add_tail(&pe->list, &pe_queue);
		}

 next_snapshot:
		up_write(&snap->lock);
	}

	if (!primary_pe)
		return r;

	/*
	 * If this is the first time we're processing this chunk and
	 * ref_count is now 1 it means all the pending exceptions
	 * got completed while we were in the loop above, so it falls to
	 * us here to remove the primary_pe and submit any origin_bios.
	 */

	if (first && atomic_dec_and_test(&primary_pe->ref_count)) {
		flush_bios(bio_list_get(&primary_pe->origin_bios));
		free_pending_exception(primary_pe);
		/* If we got here, pe_queue is necessarily empty. */
		return r;
	}

	/*
	 * Now that we have a complete pe list we can start the copying.
	 */
	list_for_each_entry_safe(pe, next_pe, &pe_queue, list)
		start_copy(pe);

	return r;
}

/*
 * Called on a write from the origin driver.
 */
static int do_origin(struct dm_dev *origin, struct bio *bio)
{
	struct origin *o;
	int r = DM_MAPIO_REMAPPED;

	down_read(&_origins_lock);
	o = __lookup_origin(origin->bdev);
	if (o)
		r = __origin_write(&o->snapshots, bio);
	up_read(&_origins_lock);

	return r;
}

/*
 * Origin: maps a linear range of a device, with hooks for snapshotting.
 */

/*
 * Construct an origin mapping: <dev_path>
 * The context for an origin is merely a 'struct dm_dev *'
 * pointing to the real device.
 */
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	struct dm_dev *dev;

	if (argc != 1) {
		ti->error = "origin: incorrect number of arguments";
		return -EINVAL;
	}

	r = dm_get_device(ti, argv[0], 0, ti->len,
			  dm_table_get_mode(ti->table), &dev);
	if (r) {
		ti->error = "Cannot get target device";
		return r;
	}

	ti->private = dev;
	return 0;
}

static void origin_dtr(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;
	dm_put_device(ti, dev);
}

static int origin_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	struct dm_dev *dev = ti->private;
	bio->bi_bdev = dev->bdev;

	/* Only tell snapshots if this is a write */
	return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
}

#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))

/*
 * Set the target "split_io" field to the minimum of all the snapshots'
 * chunk sizes.
 */
static void origin_resume(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;
	struct dm_snapshot *snap;
	struct origin *o;
	chunk_t chunk_size = 0;

	down_read(&_origins_lock);
	o = __lookup_origin(dev->bdev);
	if (o)
		list_for_each_entry (snap, &o->snapshots, list)
			chunk_size = min_not_zero(chunk_size, snap->chunk_size);
	up_read(&_origins_lock);

	ti->split_io = chunk_size;
}

static int origin_status(struct dm_target *ti, status_type_t type, char *result,
			 unsigned int maxlen)
{
	struct dm_dev *dev = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		snprintf(result, maxlen, "%s", dev->name);
		break;
	}

	return 0;
}

static struct target_type origin_target = {
	.name    = "snapshot-origin",
	.version = {1, 6, 0},
	.module  = THIS_MODULE,
	.ctr     = origin_ctr,
	.dtr     = origin_dtr,
	.map     = origin_map,
	.resume  = origin_resume,
	.status  = origin_status,
};

static struct target_type snapshot_target = {
	.name    = "snapshot",
	.version = {1, 6, 0},
	.module  = THIS_MODULE,
	.ctr     = snapshot_ctr,
	.dtr     = snapshot_dtr,
	.map     = snapshot_map,
	.end_io  = snapshot_end_io,
	.resume  = snapshot_resume,
	.status  = snapshot_status,
};

static int __init dm_snapshot_init(void)
{
	int r;

	r = dm_register_target(&snapshot_target);
	if (r) {
		DMERR("snapshot target register failed %d", r);
		return r;
	}

	r = dm_register_target(&origin_target);
	if (r < 0) {
		DMERR("Origin target register failed %d", r);
		goto bad1;
	}

	r = init_origin_hash();
	if (r) {
		DMERR("init_origin_hash failed.");
		goto bad2;
	}

	exception_cache = KMEM_CACHE(dm_snap_exception, 0);
	if (!exception_cache) {
		DMERR("Couldn't create exception cache.");
		r = -ENOMEM;
		goto bad3;
	}

	pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
	if (!pending_cache) {
		DMERR("Couldn't create pending cache.");
		r = -ENOMEM;
		goto bad4;
	}

	tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0);
	if (!tracked_chunk_cache) {
		DMERR("Couldn't create cache to track chunks in use.");
		r = -ENOMEM;
		goto bad5;
	}

	ksnapd = create_singlethread_workqueue("ksnapd");
	if (!ksnapd) {
		DMERR("Failed to create ksnapd workqueue.");
		r = -ENOMEM;
		goto bad_pending_pool;
	}

	return 0;

bad_pending_pool:
	kmem_cache_destroy(tracked_chunk_cache);
bad5:
	kmem_cache_destroy(pending_cache);
bad4:
	kmem_cache_destroy(exception_cache);
bad3:
	exit_origin_hash();
bad2:
	dm_unregister_target(&origin_target);
bad1:
	dm_unregister_target(&snapshot_target);
	return r;
}

static void __exit dm_snapshot_exit(void)
{
	int r;

	destroy_workqueue(ksnapd);

	r = dm_unregister_target(&snapshot_target);
	if (r)
		DMERR("snapshot unregister failed %d", r);

	r = dm_unregister_target(&origin_target);
	if (r)
		DMERR("origin unregister failed %d", r);

	exit_origin_hash();
	kmem_cache_destroy(pending_cache);
	kmem_cache_destroy(exception_cache);
	kmem_cache_destroy(tracked_chunk_cache);
}

/* Module hooks */
module_init(dm_snapshot_init);
module_exit(dm_snapshot_exit);

MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");