/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-list.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/blktrace_api.h>
#include <linux/smp_lock.h>

#define DM_MSG_PREFIX "core"

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_SPINLOCK(_minor_lock);

/*
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	struct bio *bio;
	atomic_t io_count;
	unsigned long start_time;
};

/*
 * One of these is allocated per target within a bio.  Hopefully
 * this will be simplified out one day.
 */
struct dm_target_io {
	struct dm_io *io;
	struct dm_target *ti;
	union map_info info;
};

union map_info *dm_get_mapinfo(struct bio *bio)
{
	if (bio && bio->bi_private)
		return &((struct dm_target_io *)bio->bi_private)->info;
	return NULL;
}
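/*
 * Illustrative sketch only (not part of this driver): a hypothetical
 * target could recover the per-io map_info from a bio it previously
 * mapped, e.g. in a custom completion path:
 *
 *	union map_info *info = dm_get_mapinfo(bio);
 *	if (info)
 *		do_something_with(info->ptr);	(hypothetical helper)
 *
 * The bio's bi_private must still point at the struct dm_target_io,
 * so this is only valid between __map_bio() and clone_endio().
 */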
#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5

struct mapped_device {
	struct rw_semaphore io_lock;
	struct semaphore suspend_lock;
	spinlock_t pushback_lock;
	rwlock_t map_lock;
	atomic_t holders;
	atomic_t open_count;

	unsigned long flags;

	struct request_queue *queue;
	struct gendisk *disk;
	char name[16];

	void *interface_ptr;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	atomic_t pending;
	wait_queue_head_t wait;
	struct bio_list deferred;
	struct bio_list pushback;

	/*
	 * The current mapping.
	 */
	struct dm_table *map;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;
	mempool_t *tio_pool;

	struct bio_set *bs;

	/*
	 * Event handling.
	 */
	atomic_t event_nr;
	wait_queue_head_t eventq;

	/*
	 * freeze/thaw support requires holding onto a super block
	 */
	struct super_block *frozen_sb;
	struct block_device *suspended_bdev;

	/* forced geometry settings */
	struct hd_geometry geometry;
};

#define MIN_IOS 256
static struct kmem_cache *_io_cache;
static struct kmem_cache *_tio_cache;

static int __init local_init(void)
{
	int r;

	/* allocate a slab for the dm_ios */
	_io_cache = KMEM_CACHE(dm_io, 0);
	if (!_io_cache)
		return -ENOMEM;

	/* allocate a slab for the target ios */
	_tio_cache = KMEM_CACHE(dm_target_io, 0);
	if (!_tio_cache) {
		kmem_cache_destroy(_io_cache);
		return -ENOMEM;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0) {
		kmem_cache_destroy(_tio_cache);
		kmem_cache_destroy(_io_cache);
		return r;
	}

	if (!_major)
		_major = r;

	return 0;
}

static void local_exit(void)
{
	kmem_cache_destroy(_tio_cache);
	kmem_cache_destroy(_io_cache);
	unregister_blkdev(_major, _name);

	_major = 0;

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_interface_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_interface_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();
}
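/*
 * Illustrative sketch only: a new built-in subsystem would be wired up
 * by adding matching entries to _inits[] and _exits[], keeping the two
 * arrays in the same order so the unwind loop in dm_init() tears down
 * exactly the subsystems that were brought up (hypothetical names):
 *
 *	static int (*_inits[])(void) __initdata = {
 *		local_init,
 *		...
 *		dm_foo_init,	(hypothetical)
 *	};
 */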
/*
 * Block device functions
 */
static int dm_blk_open(struct inode *inode, struct file *file)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = inode->i_bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    test_bit(DMF_DELETING, &md->flags)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);

out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}

static int dm_blk_close(struct inode *inode, struct file *file)
{
	struct mapped_device *md;

	md = inode->i_bdev->bd_disk->private_data;
	atomic_dec(&md->open_count);
	dm_put(md);
	return 0;
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md))
		r = -EBUSY;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

static int dm_blk_ioctl(struct inode *inode, struct file *file,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md;
	struct dm_table *map;
	struct dm_target *tgt;
	int r = -ENOTTY;

	/* We don't really need this lock, but we do need 'inode'. */
	unlock_kernel();

	md = inode->i_bdev->bd_disk->private_data;

	map = dm_get_table(md);

	if (!map || !dm_table_get_size(map))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		goto out;

	tgt = dm_table_get_target(map, 0);

	if (dm_suspended(md)) {
		r = -EAGAIN;
		goto out;
	}

	if (tgt->type->ioctl)
		r = tgt->type->ioctl(tgt, inode, file, cmd, arg);

out:
	dm_table_put(map);

	lock_kernel();
	return r;
}

static struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static struct dm_target_io *alloc_tio(struct mapped_device *md)
{
	return mempool_alloc(md->tio_pool, GFP_NOIO);
}

static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
	mempool_free(tio, md->tio_pool);
}

static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;

	io->start_time = jiffies;

	preempt_disable();
	disk_round_stats(dm_disk(md));
	preempt_enable();
	dm_disk(md)->in_flight = atomic_inc_return(&md->pending);
}

static int end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	unsigned long duration = jiffies - io->start_time;
	int pending;
	int rw = bio_data_dir(bio);

	preempt_disable();
	disk_round_stats(dm_disk(md));
	preempt_enable();
	dm_disk(md)->in_flight = pending = atomic_dec_return(&md->pending);

	disk_stat_add(dm_disk(md), ticks[rw], duration);

	return !pending;
}

/*
 * Add the bio to the list of deferred io.
 */
static int queue_io(struct mapped_device *md, struct bio *bio)
{
	down_write(&md->io_lock);

	if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_write(&md->io_lock);
		return 1;
	}

	bio_list_add(&md->deferred, bio);

	up_write(&md->io_lock);
	return 0; /* deferred successfully */
}

/*
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_table_put() when finished.
 */
struct dm_table *dm_get_table(struct mapped_device *md)
{
	struct dm_table *t;

	read_lock(&md->map_lock);
	t = md->map;
	if (t)
		dm_table_get(t);
	read_unlock(&md->map_lock);

	return t;
}
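/*
 * Illustrative sketch only: the canonical access pattern, as used by
 * dm_blk_ioctl() above and dm_request() below:
 *
 *	struct dm_table *map = dm_get_table(md);
 *
 *	if (map) {
 *		... act on the table ...
 *		dm_table_put(map);
 *	}
 *
 * Holding the reference keeps the table alive even if the mapping is
 * swapped or the device suspended in the meantime.
 */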
/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

/*-----------------------------------------------------------------
 * CRUD START:
 *   A more elegant solution is in the works that uses the queue
 *   merge fn, unfortunately there are a couple of changes to
 *   the block layer that I want to make for this.  So in the
 *   interests of getting something for people to use I give
 *   you this clearly demarcated crap.
 *---------------------------------------------------------------*/

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, int error)
{
	unsigned long flags;

	/* Push-back supersedes any I/O errors */
	if (error && !(io->error > 0 && __noflush_suspending(io->md)))
		io->error = error;

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->error == DM_ENDIO_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 * This must be handled before the sleeper on
			 * suspend queue merges the pushback list.
			 */
			spin_lock_irqsave(&io->md->pushback_lock, flags);
			if (__noflush_suspending(io->md))
				bio_list_add(&io->md->pushback, io->bio);
			else
				/* noflush suspend was interrupted. */
				io->error = -EIO;
			spin_unlock_irqrestore(&io->md->pushback_lock, flags);
		}

		if (end_io_acct(io))
			/* nudge anyone waiting on suspend queue */
			wake_up(&io->md->wait);

		if (io->error != DM_ENDIO_REQUEUE) {
			blk_add_trace_bio(io->md->queue, io->bio,
					  BLK_TA_COMPLETE);

			bio_endio(io->bio, io->error);
		}

		free_io(io->md, io);
	}
}

static void clone_endio(struct bio *bio, int error)
{
	int r = 0;
	struct dm_target_io *tio = bio->bi_private;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		error = -EIO;

	if (endio) {
		r = endio(tio->ti, bio, error, &tio->info);
		if (r < 0 || r == DM_ENDIO_REQUEUE)
			/*
			 * error and requeue request are handled
			 * in dec_pending().
			 */
			error = r;
		else if (r == DM_ENDIO_INCOMPLETE)
			/* The target will handle the io */
			return;
		else if (r) {
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	dec_pending(tio->io, error);

	/*
	 * Store md for cleanup instead of tio which is about to get freed.
	 */
	bio->bi_private = md->bs;

	bio_put(bio);
	free_tio(md, tio);
}
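/*
 * Illustrative sketch only: the return values a target's end_io method
 * can feed into clone_endio() above (hypothetical target code):
 *
 *	static int foo_end_io(struct dm_target *ti, struct bio *bio,
 *			      int error, union map_info *info)
 *	{
 *		if (error && path_has_failed(...))	(hypothetical check)
 *			return DM_ENDIO_REQUEUE;	push back for retry
 *		if (io_still_in_progress(...))		(hypothetical check)
 *			return DM_ENDIO_INCOMPLETE;	target finishes it later
 *		return 0;				done, error passed through
 *	}
 */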
static sector_t max_io_len(struct mapped_device *md,
			   sector_t sector, struct dm_target *ti)
{
	sector_t offset = sector - ti->begin;
	sector_t len = ti->len - offset;

	/*
	 * Does the target need to split even further ?
	 */
	if (ti->split_io) {
		sector_t boundary;
		boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
			   - offset;
		if (len > boundary)
			len = boundary;
	}

	return len;
}

static void __map_bio(struct dm_target *ti, struct bio *clone,
		      struct dm_target_io *tio)
{
	int r;
	sector_t sector;
	struct mapped_device *md;

	/*
	 * Sanity checks.
	 */
	BUG_ON(!clone->bi_size);

	clone->bi_end_io = clone_endio;
	clone->bi_private = tio;

	/*
	 * Map the clone.  If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&tio->io->io_count);
	sector = clone->bi_sector;
	r = ti->type->map(ti, clone, &tio->info);
	if (r == DM_MAPIO_REMAPPED) {
		/* the bio has been remapped so dispatch it */

		blk_add_trace_remap(bdev_get_queue(clone->bi_bdev), clone,
				    tio->io->bio->bi_bdev->bd_dev,
				    clone->bi_sector, sector);

		generic_make_request(clone);
	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
		/* error the io and bail out, or requeue it if needed */
		md = tio->io->md;
		dec_pending(tio->io, r);
		/*
		 * Store bio_set for cleanup.
		 */
		clone->bi_private = md->bs;
		bio_put(clone);
		free_tio(md, tio);
	} else if (r) {
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}
}
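/*
 * Illustrative sketch only: the simplest possible target map method and
 * how its return value is interpreted by __map_bio() above (this mirrors
 * what a linear-style target does, but the code here is hypothetical):
 *
 *	static int foo_map(struct dm_target *ti, struct bio *bio,
 *			   union map_info *map_context)
 *	{
 *		bio->bi_bdev = foo_dev(ti);			redirect the clone
 *		bio->bi_sector = foo_sector(ti, bio->bi_sector);
 *		return DM_MAPIO_REMAPPED;			dispatch it for me
 *	}
 *
 * Returning DM_MAPIO_SUBMITTED (0) instead means the target has taken
 * ownership of the clone; DM_MAPIO_REQUEUE pushes the io back.
 */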
struct clone_info {
	struct mapped_device *md;
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	sector_t sector_count;
	unsigned short idx;
};

static void dm_bio_destructor(struct bio *bio)
{
	struct bio_set *bs = bio->bi_private;

	bio_free(bio, bs);
}

/*
 * Creates a little bio that just does part of a bvec.
 */
static struct bio *split_bvec(struct bio *bio, sector_t sector,
			      unsigned short idx, unsigned int offset,
			      unsigned int len, struct bio_set *bs)
{
	struct bio *clone;
	struct bio_vec *bv = bio->bi_io_vec + idx;

	clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
	clone->bi_destructor = dm_bio_destructor;
	*clone->bi_io_vec = *bv;

	clone->bi_sector = sector;
	clone->bi_bdev = bio->bi_bdev;
	clone->bi_rw = bio->bi_rw;
	clone->bi_vcnt = 1;
	clone->bi_size = to_bytes(len);
	clone->bi_io_vec->bv_offset = offset;
	clone->bi_io_vec->bv_len = clone->bi_size;

	return clone;
}

/*
 * Creates a bio that consists of a range of complete bvecs.
 */
static struct bio *clone_bio(struct bio *bio, sector_t sector,
			     unsigned short idx, unsigned short bv_count,
			     unsigned int len, struct bio_set *bs)
{
	struct bio *clone;

	clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
	__bio_clone(clone, bio);
	clone->bi_destructor = dm_bio_destructor;
	clone->bi_sector = sector;
	clone->bi_idx = idx;
	clone->bi_vcnt = idx + bv_count;
	clone->bi_size = to_bytes(len);
	clone->bi_flags &= ~(1 << BIO_SEG_VALID);

	return clone;
}

static void __clone_and_map(struct clone_info *ci)
{
	struct bio *clone, *bio = ci->bio;
	struct dm_target *ti = dm_table_find_target(ci->map, ci->sector);
	sector_t len = 0, max = max_io_len(ci->md, ci->sector, ti);
	struct dm_target_io *tio;

	/*
	 * Allocate a target io object.
	 */
	tio = alloc_tio(ci->md);
	tio->io = ci->io;
	tio->ti = ti;
	memset(&tio->info, 0, sizeof(tio->info));

	if (ci->sector_count <= max) {
		/*
		 * Optimise for the simple case where we can do all of
		 * the remaining io with a single clone.
		 */
		clone = clone_bio(bio, ci->sector, ci->idx,
				  bio->bi_vcnt - ci->idx, ci->sector_count,
				  ci->md->bs);
		__map_bio(ti, clone, tio);
		ci->sector_count = 0;

	} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
		/*
		 * There are some bvecs that don't span targets.
		 * Do as many of these as possible.
		 */
		int i;
		sector_t remaining = max;
		sector_t bv_len;

		for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
			bv_len = to_sector(bio->bi_io_vec[i].bv_len);

			if (bv_len > remaining)
				break;

			remaining -= bv_len;
			len += bv_len;
		}

		clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len,
				  ci->md->bs);
		__map_bio(ti, clone, tio);

		ci->sector += len;
		ci->sector_count -= len;
		ci->idx = i;

	} else {
		/*
		 * Handle a bvec that must be split between two or more targets.
		 */
		struct bio_vec *bv = bio->bi_io_vec + ci->idx;
		sector_t remaining = to_sector(bv->bv_len);
		unsigned int offset = 0;

		do {
			if (offset) {
				ti = dm_table_find_target(ci->map, ci->sector);
				max = max_io_len(ci->md, ci->sector, ti);

				tio = alloc_tio(ci->md);
				tio->io = ci->io;
				tio->ti = ti;
				memset(&tio->info, 0, sizeof(tio->info));
			}

			len = min(remaining, max);

			clone = split_bvec(bio, ci->sector, ci->idx,
					   bv->bv_offset + offset, len,
					   ci->md->bs);

			__map_bio(ti, clone, tio);

			ci->sector += len;
			ci->sector_count -= len;
			offset += to_bytes(len);
		} while (remaining -= len);

		ci->idx++;
	}
}
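/*
 * Worked example (illustrative numbers only): a bio covering sectors
 * 0-15 with two 8-sector bvecs, mapped by a table that puts sectors
 * 0-11 on target A and 12-15 on target B:
 *
 *	pass 1: max = 12; the whole bio (16 sectors) doesn't fit, but
 *		the first bvec (8) does, so clone_bio() covers sectors
 *		0-7 for A.
 *	pass 2: max = 4; the second bvec (8) spans the boundary, so the
 *		do-loop calls split_bvec() twice: sectors 8-11 for A,
 *		then 12-15 for B.
 *
 * After pass 2, sector_count is 0 and the loop in __split_bio() below
 * terminates.
 */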
/*
 * Split the bio into several clones.
 */
static void __split_bio(struct mapped_device *md, struct bio *bio)
{
	struct clone_info ci;

	ci.map = dm_get_table(md);
	if (!ci.map) {
		bio_io_error(bio);
		return;
	}

	ci.md = md;
	ci.bio = bio;
	ci.io = alloc_io(md);
	ci.io->error = 0;
	atomic_set(&ci.io->io_count, 1);
	ci.io->bio = bio;
	ci.io->md = md;
	ci.sector = bio->bi_sector;
	ci.sector_count = bio_sectors(bio);
	ci.idx = bio->bi_idx;

	start_io_acct(ci.io);
	while (ci.sector_count)
		__clone_and_map(&ci);

	/* drop the extra reference count */
	dec_pending(ci.io, 0);
	dm_table_put(ci.map);
}
/*-----------------------------------------------------------------
 * CRUD END
 *---------------------------------------------------------------*/

/*
 * The request function that just remaps the bio built up by
 * dm_merge_bvec.
 */
static int dm_request(struct request_queue *q, struct bio *bio)
{
	int r;
	int rw = bio_data_dir(bio);
	struct mapped_device *md = q->queuedata;

	/*
	 * There is no use in forwarding any barrier request since we can't
	 * guarantee it is (or can be) handled by the targets correctly.
	 */
	if (unlikely(bio_barrier(bio))) {
		bio_endio(bio, -EOPNOTSUPP);
		return 0;
	}

	down_read(&md->io_lock);

	disk_stat_inc(dm_disk(md), ios[rw]);
	disk_stat_add(dm_disk(md), sectors[rw], bio_sectors(bio));

	/*
	 * If we're suspended we have to queue
	 * this io for later.
	 */
	while (test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_read(&md->io_lock);

		if (bio_rw(bio) == READA) {
			bio_io_error(bio);
			return 0;
		}

		r = queue_io(md, bio);
		if (r < 0) {
			bio_io_error(bio);
			return 0;

		} else if (r == 0)
			return 0; /* deferred successfully */

		/*
		 * We're in a while loop, because someone could suspend
		 * before we get to the following read lock.
		 */
		down_read(&md->io_lock);
	}

	__split_bio(md, bio);
	up_read(&md->io_lock);
	return 0;
}

static void dm_unplug_all(struct request_queue *q)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);

	if (map) {
		dm_table_unplug_all(map);
		dm_table_put(map);
	}
}

static int dm_any_congested(void *congested_data, int bdi_bits)
{
	int r;
	struct mapped_device *md = (struct mapped_device *) congested_data;
	struct dm_table *map = dm_get_table(md);

	if (!map || test_bit(DMF_BLOCK_IO, &md->flags))
		r = bdi_bits;
	else
		r = dm_table_any_congested(map, bdi_bits);

	dm_table_put(map);
	return r;
}

/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static DEFINE_IDR(_minor_idr);

static void free_minor(int minor)
{
	spin_lock(&_minor_lock);
	idr_remove(&_minor_idr, minor);
	spin_unlock(&_minor_lock);
}

/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(struct mapped_device *md, int minor)
{
	int r, m;

	if (minor >= (1 << MINORBITS))
		return -EINVAL;

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&_minor_lock);

	if (idr_find(&_minor_idr, minor)) {
		r = -EBUSY;
		goto out;
	}

	r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);
	if (r)
		goto out;

	if (m != minor) {
		idr_remove(&_minor_idr, m);
		r = -EBUSY;
		goto out;
	}

out:
	spin_unlock(&_minor_lock);
	return r;
}

static int next_free_minor(struct mapped_device *md, int *minor)
{
	int r, m;

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&_minor_lock);

	r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
	if (r)
		goto out;

	if (m >= (1 << MINORBITS)) {
		idr_remove(&_minor_idr, m);
		r = -ENOSPC;
		goto out;
	}

	*minor = m;

out:
	spin_unlock(&_minor_lock);
	return r;
}
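/*
 * Note on the pattern above: idr_pre_get() preallocates with GFP_KERNEL
 * while we may still sleep; the actual idr_get_new*() insertion then
 * happens under the _minor_lock spinlock.  The slot is first claimed
 * with the MINOR_ALLOCED sentinel and only later, in alloc_dev(), made
 * to point at the real mapped_device:
 *
 *	idr_pre_get(&_minor_idr, GFP_KERNEL);	may sleep, outside the lock
 *	spin_lock(&_minor_lock);
 *	idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
 *	spin_unlock(&_minor_lock);
 *	...
 *	idr_replace(&_minor_idr, md, minor);	publish the real pointer
 */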
static struct block_device_operations dm_blk_dops;

/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(int minor)
{
	int r;
	struct mapped_device *md = kmalloc(sizeof(*md), GFP_KERNEL);
	void *old_md;

	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	if (!try_module_get(THIS_MODULE))
		goto bad0;

	/* get a minor number for the dev */
	if (minor == DM_ANY_MINOR)
		r = next_free_minor(md, &minor);
	else
		r = specific_minor(md, minor);
	if (r < 0)
		goto bad1;

	memset(md, 0, sizeof(*md));
	init_rwsem(&md->io_lock);
	init_MUTEX(&md->suspend_lock);
	spin_lock_init(&md->pushback_lock);
	rwlock_init(&md->map_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->open_count, 0);
	atomic_set(&md->event_nr, 0);

	md->queue = blk_alloc_queue(GFP_KERNEL);
	if (!md->queue)
		goto bad1_free_minor;

	md->queue->queuedata = md;
	md->queue->backing_dev_info.congested_fn = dm_any_congested;
	md->queue->backing_dev_info.congested_data = md;
	blk_queue_make_request(md->queue, dm_request);
	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
	md->queue->unplug_fn = dm_unplug_all;

	md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
	if (!md->io_pool)
		goto bad2;

	md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache);
	if (!md->tio_pool)
		goto bad3;

	md->bs = bioset_create(16, 16);
	if (!md->bs)
		goto bad_no_bioset;

	md->disk = alloc_disk(1);
	if (!md->disk)
		goto bad4;

	atomic_set(&md->pending, 0);
	init_waitqueue_head(&md->wait);
	init_waitqueue_head(&md->eventq);

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);
	add_disk(md->disk);
	format_dev_t(md->name, MKDEV(_major, minor));

	/* Populate the mapping, nobody knows we exist yet */
	spin_lock(&_minor_lock);
	old_md = idr_replace(&_minor_idr, md, minor);
	spin_unlock(&_minor_lock);

	BUG_ON(old_md != MINOR_ALLOCED);

	return md;

bad4:
	bioset_free(md->bs);
bad_no_bioset:
	mempool_destroy(md->tio_pool);
bad3:
	mempool_destroy(md->io_pool);
bad2:
	blk_cleanup_queue(md->queue);
bad1_free_minor:
	free_minor(minor);
bad1:
	module_put(THIS_MODULE);
bad0:
	kfree(md);
	return NULL;
}

static void unlock_fs(struct mapped_device *md);

static void free_dev(struct mapped_device *md)
{
	int minor = md->disk->first_minor;

	if (md->suspended_bdev) {
		unlock_fs(md);
		bdput(md->suspended_bdev);
	}
	mempool_destroy(md->tio_pool);
	mempool_destroy(md->io_pool);
	bioset_free(md->bs);
	del_gendisk(md->disk);
	free_minor(minor);

	spin_lock(&_minor_lock);
	md->disk->private_data = NULL;
	spin_unlock(&_minor_lock);

	put_disk(md->disk);
	blk_cleanup_queue(md->queue);
	module_put(THIS_MODULE);
	kfree(md);
}
/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
{
	struct mapped_device *md = (struct mapped_device *) context;

	atomic_inc(&md->event_nr);
	wake_up(&md->eventq);
}

static void __set_size(struct mapped_device *md, sector_t size)
{
	set_capacity(md->disk, size);

	mutex_lock(&md->suspended_bdev->bd_inode->i_mutex);
	i_size_write(md->suspended_bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
	mutex_unlock(&md->suspended_bdev->bd_inode->i_mutex);
}

static int __bind(struct mapped_device *md, struct dm_table *t)
{
	struct request_queue *q = md->queue;
	sector_t size;

	size = dm_table_get_size(t);

	/*
	 * Wipe any geometry if the size of the table changed.
	 */
	if (size != get_capacity(md->disk))
		memset(&md->geometry, 0, sizeof(md->geometry));

	if (md->suspended_bdev)
		__set_size(md, size);
	if (size == 0)
		return 0;

	dm_table_get(t);
	dm_table_event_callback(t, event_callback, md);

	write_lock(&md->map_lock);
	md->map = t;
	dm_table_set_restrictions(t, q);
	write_unlock(&md->map_lock);

	return 0;
}

static void __unbind(struct mapped_device *md)
{
	struct dm_table *map = md->map;

	if (!map)
		return;

	dm_table_event_callback(map, NULL, NULL);
	write_lock(&md->map_lock);
	md->map = NULL;
	write_unlock(&md->map_lock);
	dm_table_put(map);
}

/*
 * Constructor for a new device.
 */
int dm_create(int minor, struct mapped_device **result)
{
	struct mapped_device *md;

	md = alloc_dev(minor);
	if (!md)
		return -ENXIO;

	*result = md;
	return 0;
}

static struct mapped_device *dm_find_md(dev_t dev)
{
	struct mapped_device *md;
	unsigned minor = MINOR(dev);

	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
		return NULL;

	spin_lock(&_minor_lock);

	md = idr_find(&_minor_idr, minor);
	if (md && (md == MINOR_ALLOCED ||
		   (dm_disk(md)->first_minor != minor) ||
		   test_bit(DMF_FREEING, &md->flags))) {
		md = NULL;
		goto out;
	}

out:
	spin_unlock(&_minor_lock);

	return md;
}

struct mapped_device *dm_get_md(dev_t dev)
{
	struct mapped_device *md = dm_find_md(dev);

	if (md)
		dm_get(md);

	return md;
}

void *dm_get_mdptr(struct mapped_device *md)
{
	return md->interface_ptr;
}

void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
	md->interface_ptr = ptr;
}

void dm_get(struct mapped_device *md)
{
	atomic_inc(&md->holders);
}

const char *dm_device_name(struct mapped_device *md)
{
	return md->name;
}
EXPORT_SYMBOL_GPL(dm_device_name);

void dm_put(struct mapped_device *md)
{
	struct dm_table *map;

	BUG_ON(test_bit(DMF_FREEING, &md->flags));

	if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
		map = dm_get_table(md);
		idr_replace(&_minor_idr, MINOR_ALLOCED, dm_disk(md)->first_minor);
		set_bit(DMF_FREEING, &md->flags);
		spin_unlock(&_minor_lock);
		if (!dm_suspended(md)) {
			dm_table_presuspend_targets(map);
			dm_table_postsuspend_targets(map);
		}
		__unbind(md);
		dm_table_put(map);
		free_dev(md);
	}
}
EXPORT_SYMBOL_GPL(dm_put);

/*
 * Process the deferred bios
 */
static void __flush_deferred_io(struct mapped_device *md, struct bio *c)
{
	struct bio *n;

	while (c) {
		n = c->bi_next;
		c->bi_next = NULL;
		__split_bio(md, c);
		c = n;
	}
}

/*
 * Swap in a new table (destroying old one).
 */
int dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
	int r = -EINVAL;

	down(&md->suspend_lock);

	/* device must be suspended */
	if (!dm_suspended(md))
		goto out;

	/* without bdev, the device size cannot be changed */
	if (!md->suspended_bdev)
		if (get_capacity(md->disk) != dm_table_get_size(table))
			goto out;

	__unbind(md);
	r = __bind(md, table);

out:
	up(&md->suspend_lock);
	return r;
}
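/*
 * Illustrative sketch only: the sequence a caller (e.g. the ioctl
 * interface) drives when replacing a live table, error handling
 * omitted:
 *
 *	dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);	flush and block io
 *	dm_swap_table(md, new_table);		bind the new mapping
 *	dm_resume(md);				replay deferred io
 */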
/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int lock_fs(struct mapped_device *md)
{
	int r;

	WARN_ON(md->frozen_sb);

	md->frozen_sb = freeze_bdev(md->suspended_bdev);
	if (IS_ERR(md->frozen_sb)) {
		r = PTR_ERR(md->frozen_sb);
		md->frozen_sb = NULL;
		return r;
	}

	set_bit(DMF_FROZEN, &md->flags);

	/* don't bdput right now, we don't want the bdev
	 * to go away while it is locked.
	 */
	return 0;
}

static void unlock_fs(struct mapped_device *md)
{
	if (!test_bit(DMF_FROZEN, &md->flags))
		return;

	thaw_bdev(md->suspended_bdev, md->frozen_sb);
	md->frozen_sb = NULL;
	clear_bit(DMF_FROZEN, &md->flags);
}

/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);
	struct bio *def;
	int r = -EINVAL;
	int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
	int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;

	down(&md->suspend_lock);

	if (dm_suspended(md))
		goto out_unlock;

	map = dm_get_table(md);

	/*
	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
	 * This flag is cleared before dm_suspend returns.
	 */
	if (noflush)
		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);

	/* This does not get reverted if there's an error later. */
	dm_table_presuspend_targets(map);

	/* bdget() can stall if the pending I/Os are not flushed */
	if (!noflush) {
		md->suspended_bdev = bdget_disk(md->disk, 0);
		if (!md->suspended_bdev) {
			DMWARN("bdget failed in dm_suspend");
			r = -ENOMEM;
			goto flush_and_out;
		}
	}

	/*
	 * Flush I/O to the device.
	 * noflush supersedes do_lockfs, because lock_fs() needs to flush I/Os.
	 */
	if (do_lockfs && !noflush) {
		r = lock_fs(md);
		if (r)
			goto out;
	}

	/*
	 * First we set the BLOCK_IO flag so no more ios will be mapped.
	 */
	down_write(&md->io_lock);
	set_bit(DMF_BLOCK_IO, &md->flags);

	add_wait_queue(&md->wait, &wait);
	up_write(&md->io_lock);

	/* unplug */
	if (map)
		dm_table_unplug_all(map);

	/*
	 * Then we wait for the already mapped ios to
	 * complete.
	 */
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (!atomic_read(&md->pending) || signal_pending(current))
			break;

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	down_write(&md->io_lock);
	remove_wait_queue(&md->wait, &wait);

	if (noflush) {
		spin_lock_irqsave(&md->pushback_lock, flags);
		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
		bio_list_merge_head(&md->deferred, &md->pushback);
		bio_list_init(&md->pushback);
		spin_unlock_irqrestore(&md->pushback_lock, flags);
	}

	/* were we interrupted ? */
	r = -EINTR;
	if (atomic_read(&md->pending)) {
		clear_bit(DMF_BLOCK_IO, &md->flags);
		def = bio_list_get(&md->deferred);
		__flush_deferred_io(md, def);
		up_write(&md->io_lock);
		unlock_fs(md);
		goto out; /* pushback list is already flushed, so skip flush */
	}
	up_write(&md->io_lock);

	dm_table_postsuspend_targets(map);

	set_bit(DMF_SUSPENDED, &md->flags);

	r = 0;

flush_and_out:
	if (r && noflush) {
		/*
		 * Because there may be already I/Os in the pushback list,
		 * flush them before return.
		 */
		down_write(&md->io_lock);

		spin_lock_irqsave(&md->pushback_lock, flags);
		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
		bio_list_merge_head(&md->deferred, &md->pushback);
		bio_list_init(&md->pushback);
		spin_unlock_irqrestore(&md->pushback_lock, flags);

		def = bio_list_get(&md->deferred);
		__flush_deferred_io(md, def);
		up_write(&md->io_lock);
	}

out:
	if (r && md->suspended_bdev) {
		bdput(md->suspended_bdev);
		md->suspended_bdev = NULL;
	}

	dm_table_put(map);

out_unlock:
	up(&md->suspend_lock);
	return r;
}
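/*
 * Note on suspend_flags (values defined elsewhere in the dm headers):
 *
 *	DM_SUSPEND_LOCKFS_FLAG	freeze any filesystem on the device so
 *				the on-disk state is consistent;
 *	DM_SUSPEND_NOFLUSH_FLAG	don't wait for in-flight io; targets
 *				push it back to md->pushback instead.
 *
 * As the code above enforces, noflush supersedes lockfs because
 * lock_fs() itself has to flush io.
 */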
int dm_resume(struct mapped_device *md)
{
	int r = -EINVAL;
	struct bio *def;
	struct dm_table *map = NULL;

	down(&md->suspend_lock);
	if (!dm_suspended(md))
		goto out;

	map = dm_get_table(md);
	if (!map || !dm_table_get_size(map))
		goto out;

	r = dm_table_resume_targets(map);
	if (r)
		goto out;

	down_write(&md->io_lock);
	clear_bit(DMF_BLOCK_IO, &md->flags);

	def = bio_list_get(&md->deferred);
	__flush_deferred_io(md, def);
	up_write(&md->io_lock);

	unlock_fs(md);

	if (md->suspended_bdev) {
		bdput(md->suspended_bdev);
		md->suspended_bdev = NULL;
	}

	clear_bit(DMF_SUSPENDED, &md->flags);

	dm_table_unplug_all(map);

	kobject_uevent(&md->disk->kobj, KOBJ_CHANGE);

	r = 0;

out:
	dm_table_put(map);
	up(&md->suspend_lock);

	return r;
}

/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}

/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}

int dm_suspended(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}

int dm_noflush_suspending(struct dm_target *ti)
{
	struct mapped_device *md = dm_table_get_md(ti->table);
	int r = __noflush_suspending(md);

	dm_put(md);

	return r;
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);

static struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
	.owner = THIS_MODULE
};

EXPORT_SYMBOL(dm_get_mapinfo);

/*
 * module hooks
 */
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");
MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");