/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-list.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/blktrace_api.h>

#define DM_MSG_PREFIX "core"

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_SPINLOCK(_minor_lock);

/*
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	atomic_t io_count;
	struct bio *bio;
	unsigned long start_time;
};

/*
 * One of these is allocated per target within a bio.  Hopefully
 * this will be simplified out one day.
 */
struct dm_target_io {
	struct dm_io *io;
	struct dm_target *ti;
	union map_info info;
};

union map_info *dm_get_mapinfo(struct bio *bio)
{
	if (bio && bio->bi_private)
		return &((struct dm_target_io *)bio->bi_private)->info;
	return NULL;
}

#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5

/*
 * Work processed by per-device workqueue.
 */
struct dm_wq_req {
	enum {
		DM_WQ_FLUSH_DEFERRED,
	} type;
	struct work_struct work;
	struct mapped_device *md;
	void *context;
};

struct mapped_device {
	struct rw_semaphore io_lock;
	struct mutex suspend_lock;
	spinlock_t pushback_lock;
	rwlock_t map_lock;
	atomic_t holders;
	atomic_t open_count;

	unsigned long flags;

	struct request_queue *queue;
	struct gendisk *disk;
	char name[16];

	void *interface_ptr;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	atomic_t pending;
	wait_queue_head_t wait;
	struct bio_list deferred;
	struct bio_list pushback;

	/*
	 * Processing queue (flush/barriers)
	 */
	struct workqueue_struct *wq;

	/*
	 * The current mapping.
	 */
	struct dm_table *map;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;
	mempool_t *tio_pool;

	struct bio_set *bs;

	/*
	 * Event handling.
	 */
	atomic_t event_nr;
	wait_queue_head_t eventq;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/*
	 * freeze/thaw support requires holding onto a super block
	 */
	struct super_block *frozen_sb;
	struct block_device *suspended_bdev;

	/* forced geometry settings */
	struct hd_geometry geometry;
};
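/*
 * MIN_IOS below is the number of objects preallocated in each per-device
 * mempool (created in alloc_dev()).  alloc_io()/alloc_tio() allocate with
 * GFP_NOIO and fall back to this reserve, so bio mapping can keep making
 * forward progress under memory pressure; it is a reserve size, not a
 * limit on the number of in-flight ios.
 */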
#define MIN_IOS 256
static struct kmem_cache *_io_cache;
static struct kmem_cache *_tio_cache;

static int __init local_init(void)
{
	int r = -ENOMEM;

	/* allocate a slab for the dm_ios */
	_io_cache = KMEM_CACHE(dm_io, 0);
	if (!_io_cache)
		return r;

	/* allocate a slab for the target ios */
	_tio_cache = KMEM_CACHE(dm_target_io, 0);
	if (!_tio_cache)
		goto out_free_io_cache;

	r = dm_uevent_init();
	if (r)
		goto out_free_tio_cache;

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_uevent_exit;

	if (!_major)
		_major = r;

	return 0;

out_uevent_exit:
	dm_uevent_exit();
out_free_tio_cache:
	kmem_cache_destroy(_tio_cache);
out_free_io_cache:
	kmem_cache_destroy(_io_cache);

	return r;
}

static void local_exit(void)
{
	kmem_cache_destroy(_tio_cache);
	kmem_cache_destroy(_io_cache);
	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_kcopyd_init,
	dm_interface_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();
}

/*
 * Block device functions
 */
static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    test_bit(DMF_DELETING, &md->flags)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);

out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}

static int dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md = disk->private_data;

	atomic_dec(&md->open_count);
	dm_put(md);

	return 0;
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md))
		r = -EBUSY;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}
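/*
 * dm_blk_open() and dm_lock_for_deletion() both run under _minor_lock,
 * so their checks pair up atomically: once DMF_DELETING is set no new
 * open succeeds, and deletion is refused with -EBUSY while open_count
 * is non-zero.
 */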
static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	struct dm_table *map = dm_get_table(md);
	struct dm_target *tgt;
	int r = -ENOTTY;

	if (!map || !dm_table_get_size(map))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		goto out;

	tgt = dm_table_get_target(map, 0);

	if (dm_suspended(md)) {
		r = -EAGAIN;
		goto out;
	}

	if (tgt->type->ioctl)
		r = tgt->type->ioctl(tgt, cmd, arg);

out:
	dm_table_put(map);

	return r;
}

static struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static struct dm_target_io *alloc_tio(struct mapped_device *md)
{
	return mempool_alloc(md->tio_pool, GFP_NOIO);
}

static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
	mempool_free(tio, md->tio_pool);
}

static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	int cpu;

	io->start_time = jiffies;

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_unlock();
	dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
}

static int end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	unsigned long duration = jiffies - io->start_time;
	int pending, cpu;
	int rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
	part_stat_unlock();

	dm_disk(md)->part0.in_flight = pending =
		atomic_dec_return(&md->pending);

	return !pending;
}

/*
 * Add the bio to the list of deferred io.
 */
static int queue_io(struct mapped_device *md, struct bio *bio)
{
	down_write(&md->io_lock);

	if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_write(&md->io_lock);
		return 1;
	}

	bio_list_add(&md->deferred, bio);

	up_write(&md->io_lock);
	return 0; /* deferred successfully */
}

/*
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_table_put() when finished.
 */
struct dm_table *dm_get_table(struct mapped_device *md)
{
	struct dm_table *t;

	read_lock(&md->map_lock);
	t = md->map;
	if (t)
		dm_table_get(t);
	read_unlock(&md->map_lock);

	return t;
}
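/*
 * Illustrative only (not called anywhere): the get/put pattern every
 * user of dm_get_table() is expected to follow.
 *
 *	struct dm_table *map = dm_get_table(md);
 *
 *	if (map) {
 *		... use the table, e.g. dm_table_get_size(map) ...
 *		dm_table_put(map);
 *	}
 */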
/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

/*-----------------------------------------------------------------
 * CRUD START:
 *   A more elegant solution is in the works that uses the queue
 *   merge fn, unfortunately there are a couple of changes to
 *   the block layer that I want to make for this.  So in the
 *   interests of getting something for people to use I give
 *   you this clearly demarcated crap.
 *---------------------------------------------------------------*/

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, int error)
{
	unsigned long flags;

	/* Push-back supersedes any I/O errors */
	if (error && !(io->error > 0 && __noflush_suspending(io->md)))
		io->error = error;

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->error == DM_ENDIO_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 * This must be handled before the sleeper on
			 * suspend queue merges the pushback list.
			 */
			spin_lock_irqsave(&io->md->pushback_lock, flags);
			if (__noflush_suspending(io->md))
				bio_list_add(&io->md->pushback, io->bio);
			else
				/* noflush suspend was interrupted. */
				io->error = -EIO;
			spin_unlock_irqrestore(&io->md->pushback_lock, flags);
		}

		if (end_io_acct(io))
			/* nudge anyone waiting on suspend queue */
			wake_up(&io->md->wait);

		if (io->error != DM_ENDIO_REQUEUE) {
			blk_add_trace_bio(io->md->queue, io->bio,
					  BLK_TA_COMPLETE);

			bio_endio(io->bio, io->error);
		}

		free_io(io->md, io);
	}
}

static void clone_endio(struct bio *bio, int error)
{
	int r = 0;
	struct dm_target_io *tio = bio->bi_private;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		error = -EIO;

	if (endio) {
		r = endio(tio->ti, bio, error, &tio->info);
		if (r < 0 || r == DM_ENDIO_REQUEUE)
			/*
			 * error and requeue request are handled
			 * in dec_pending().
			 */
			error = r;
		else if (r == DM_ENDIO_INCOMPLETE)
			/* The target will handle the io */
			return;
		else if (r) {
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	dec_pending(tio->io, error);

	/*
	 * Store md for cleanup instead of tio which is about to get freed.
	 */
	bio->bi_private = md->bs;

	bio_put(bio);
	free_tio(md, tio);
}
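/*
 * max_io_len() below returns how many sectors can be mapped to the
 * current target in one clone, starting at 'sector'.  When ti->split_io
 * is set, the mask arithmetic computes the distance from 'offset' to the
 * next split_io boundary, so split_io is assumed to be a power of two.
 * Worked example: with split_io = 8 and offset = 5,
 * boundary = ((5 + 8) & ~7) - 5 = 3, i.e. at most 3 sectors are issued
 * before the boundary.
 */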
static sector_t max_io_len(struct mapped_device *md,
			   sector_t sector, struct dm_target *ti)
{
	sector_t offset = sector - ti->begin;
	sector_t len = ti->len - offset;

	/*
	 * Does the target need to split even further?
	 */
	if (ti->split_io) {
		sector_t boundary;
		boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
			   - offset;
		if (len > boundary)
			len = boundary;
	}

	return len;
}

static void __map_bio(struct dm_target *ti, struct bio *clone,
		      struct dm_target_io *tio)
{
	int r;
	sector_t sector;
	struct mapped_device *md;

	/*
	 * Sanity checks.
	 */
	BUG_ON(!clone->bi_size);

	clone->bi_end_io = clone_endio;
	clone->bi_private = tio;

	/*
	 * Map the clone.  If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&tio->io->io_count);
	sector = clone->bi_sector;
	r = ti->type->map(ti, clone, &tio->info);
	if (r == DM_MAPIO_REMAPPED) {
		/* the bio has been remapped so dispatch it */

		blk_add_trace_remap(bdev_get_queue(clone->bi_bdev), clone,
				    tio->io->bio->bi_bdev->bd_dev,
				    clone->bi_sector, sector);

		generic_make_request(clone);
	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
		/* error the io and bail out, or requeue it if needed */
		md = tio->io->md;
		dec_pending(tio->io, r);
		/*
		 * Store bio_set for cleanup.
		 */
		clone->bi_private = md->bs;
		bio_put(clone);
		free_tio(md, tio);
	} else if (r) {
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}
}

struct clone_info {
	struct mapped_device *md;
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	sector_t sector_count;
	unsigned short idx;
};

static void dm_bio_destructor(struct bio *bio)
{
	struct bio_set *bs = bio->bi_private;

	bio_free(bio, bs);
}

/*
 * Creates a little bio that just does part of a bvec.
 */
static struct bio *split_bvec(struct bio *bio, sector_t sector,
			      unsigned short idx, unsigned int offset,
			      unsigned int len, struct bio_set *bs)
{
	struct bio *clone;
	struct bio_vec *bv = bio->bi_io_vec + idx;

	clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
	clone->bi_destructor = dm_bio_destructor;
	*clone->bi_io_vec = *bv;

	clone->bi_sector = sector;
	clone->bi_bdev = bio->bi_bdev;
	clone->bi_rw = bio->bi_rw;
	clone->bi_vcnt = 1;
	clone->bi_size = to_bytes(len);
	clone->bi_io_vec->bv_offset = offset;
	clone->bi_io_vec->bv_len = clone->bi_size;
	clone->bi_flags |= 1 << BIO_CLONED;

	return clone;
}

/*
 * Creates a bio that consists of range of complete bvecs.
 */
static struct bio *clone_bio(struct bio *bio, sector_t sector,
			     unsigned short idx, unsigned short bv_count,
			     unsigned int len, struct bio_set *bs)
{
	struct bio *clone;

	clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
	__bio_clone(clone, bio);
	clone->bi_destructor = dm_bio_destructor;
	clone->bi_sector = sector;
	clone->bi_idx = idx;
	clone->bi_vcnt = idx + bv_count;
	clone->bi_size = to_bytes(len);
	clone->bi_flags &= ~(1 << BIO_SEG_VALID);

	return clone;
}
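/*
 * __clone_and_map() below issues one clone per call and handles three
 * cases: (1) the rest of the bio fits within the current target, so a
 * single clone_bio() covers it; (2) several complete bvecs fit, so they
 * are cloned together and the split falls on a bvec boundary; (3) a
 * single bvec straddles a target boundary and has to be carved up with
 * split_bvec(), allocating a fresh tio for each additional target.
 */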
static int __clone_and_map(struct clone_info *ci)
{
	struct bio *clone, *bio = ci->bio;
	struct dm_target *ti;
	sector_t len = 0, max;
	struct dm_target_io *tio;

	ti = dm_table_find_target(ci->map, ci->sector);
	if (!dm_target_is_valid(ti))
		return -EIO;

	max = max_io_len(ci->md, ci->sector, ti);

	/*
	 * Allocate a target io object.
	 */
	tio = alloc_tio(ci->md);
	tio->io = ci->io;
	tio->ti = ti;
	memset(&tio->info, 0, sizeof(tio->info));

	if (ci->sector_count <= max) {
		/*
		 * Optimise for the simple case where we can do all of
		 * the remaining io with a single clone.
		 */
		clone = clone_bio(bio, ci->sector, ci->idx,
				  bio->bi_vcnt - ci->idx, ci->sector_count,
				  ci->md->bs);
		__map_bio(ti, clone, tio);
		ci->sector_count = 0;

	} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
		/*
		 * There are some bvecs that don't span targets.
		 * Do as many of these as possible.
		 */
		int i;
		sector_t remaining = max;
		sector_t bv_len;

		for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
			bv_len = to_sector(bio->bi_io_vec[i].bv_len);

			if (bv_len > remaining)
				break;

			remaining -= bv_len;
			len += bv_len;
		}

		clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len,
				  ci->md->bs);
		__map_bio(ti, clone, tio);

		ci->sector += len;
		ci->sector_count -= len;
		ci->idx = i;

	} else {
		/*
		 * Handle a bvec that must be split between two or more targets.
		 */
		struct bio_vec *bv = bio->bi_io_vec + ci->idx;
		sector_t remaining = to_sector(bv->bv_len);
		unsigned int offset = 0;

		do {
			if (offset) {
				ti = dm_table_find_target(ci->map, ci->sector);
				if (!dm_target_is_valid(ti))
					return -EIO;

				max = max_io_len(ci->md, ci->sector, ti);

				tio = alloc_tio(ci->md);
				tio->io = ci->io;
				tio->ti = ti;
				memset(&tio->info, 0, sizeof(tio->info));
			}

			len = min(remaining, max);

			clone = split_bvec(bio, ci->sector, ci->idx,
					   bv->bv_offset + offset, len,
					   ci->md->bs);

			__map_bio(ti, clone, tio);

			ci->sector += len;
			ci->sector_count -= len;
			offset += to_bytes(len);
		} while (remaining -= len);

		ci->idx++;
	}

	return 0;
}

/*
 * Split the bio into several clones.
 */
static int __split_bio(struct mapped_device *md, struct bio *bio)
{
	struct clone_info ci;
	int error = 0;

	ci.map = dm_get_table(md);
	if (unlikely(!ci.map))
		return -EIO;

	ci.md = md;
	ci.bio = bio;
	ci.io = alloc_io(md);
	ci.io->error = 0;
	atomic_set(&ci.io->io_count, 1);
	ci.io->bio = bio;
	ci.io->md = md;
	ci.sector = bio->bi_sector;
	ci.sector_count = bio_sectors(bio);
	ci.idx = bio->bi_idx;

	start_io_acct(ci.io);
	while (ci.sector_count && !error)
		error = __clone_and_map(&ci);

	/* drop the extra reference count */
	dec_pending(ci.io, error);
	dm_table_put(ci.map);

	return 0;
}
/*-----------------------------------------------------------------
 * CRUD END
 *---------------------------------------------------------------*/

static int dm_merge_bvec(struct request_queue *q,
			 struct bvec_merge_data *bvm,
			 struct bio_vec *biovec)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);
	struct dm_target *ti;
	sector_t max_sectors;
	int max_size = 0;

	if (unlikely(!map))
		goto out;

	ti = dm_table_find_target(map, bvm->bi_sector);
	if (!dm_target_is_valid(ti))
		goto out_table;

	/*
	 * Find maximum amount of I/O that won't need splitting
	 */
	max_sectors = min(max_io_len(md, bvm->bi_sector, ti),
			  (sector_t) BIO_MAX_SECTORS);
	max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
	if (max_size < 0)
		max_size = 0;

	/*
	 * merge_bvec_fn() returns number of bytes
	 * it can accept at this offset
	 * max is precomputed maximal io size
	 */
	if (max_size && ti->type->merge)
		max_size = ti->type->merge(ti, bvm, biovec, max_size);

out_table:
	dm_table_put(map);

out:
	/*
	 * Always allow an entire first page
	 */
	if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
		max_size = biovec->bv_len;

	return max_size;
}

/*
 * The request function that just remaps the bio built up by
 * dm_merge_bvec.
 */
static int dm_request(struct request_queue *q, struct bio *bio)
{
	int r = -EIO;
	int rw = bio_data_dir(bio);
	struct mapped_device *md = q->queuedata;
	int cpu;

	/*
	 * There is no use in forwarding any barrier request since we can't
	 * guarantee it is (or can be) handled by the targets correctly.
	 */
	if (unlikely(bio_barrier(bio))) {
		bio_endio(bio, -EOPNOTSUPP);
		return 0;
	}

	down_read(&md->io_lock);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
	part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	/*
	 * If we're suspended we have to queue
	 * this io for later.
	 */
	while (test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_read(&md->io_lock);

		if (bio_rw(bio) != READA)
			r = queue_io(md, bio);

		if (r <= 0)
			goto out_req;

		/*
		 * We're in a while loop, because someone could suspend
		 * before we get to the following read lock.
		 */
		down_read(&md->io_lock);
	}

	r = __split_bio(md, bio);
	up_read(&md->io_lock);

out_req:
	if (r < 0)
		bio_io_error(bio);

	return 0;
}

static void dm_unplug_all(struct request_queue *q)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);

	if (map) {
		dm_table_unplug_all(map);
		dm_table_put(map);
	}
}

static int dm_any_congested(void *congested_data, int bdi_bits)
{
	int r;
	struct mapped_device *md = (struct mapped_device *) congested_data;
	struct dm_table *map = dm_get_table(md);

	if (!map || test_bit(DMF_BLOCK_IO, &md->flags))
		r = bdi_bits;
	else
		r = dm_table_any_congested(map, bdi_bits);

	dm_table_put(map);
	return r;
}

/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static DEFINE_IDR(_minor_idr);

static void free_minor(int minor)
{
	spin_lock(&_minor_lock);
	idr_remove(&_minor_idr, minor);
	spin_unlock(&_minor_lock);
}

/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(int minor)
{
	int r, m;

	if (minor >= (1 << MINORBITS))
		return -EINVAL;

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&_minor_lock);

	if (idr_find(&_minor_idr, minor)) {
		r = -EBUSY;
		goto out;
	}

	r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);
	if (r)
		goto out;

	if (m != minor) {
		idr_remove(&_minor_idr, m);
		r = -EBUSY;
		goto out;
	}

out:
	spin_unlock(&_minor_lock);
	return r;
}

static int next_free_minor(int *minor)
{
	int r, m;

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&_minor_lock);

	r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
	if (r)
		goto out;

	if (m >= (1 << MINORBITS)) {
		idr_remove(&_minor_idr, m);
		r = -ENOSPC;
		goto out;
	}

	*minor = m;

out:
	spin_unlock(&_minor_lock);
	return r;
}
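/*
 * Note that both minor allocators above call idr_pre_get() with
 * GFP_KERNEL before taking _minor_lock: the preallocation may sleep,
 * while the idr_get_new*() calls made under the spinlock must not.
 */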
static struct block_device_operations dm_blk_dops;

/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(int minor)
{
	int r;
	struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
	void *old_md;

	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	if (!try_module_get(THIS_MODULE))
		goto bad_module_get;

	/* get a minor number for the dev */
	if (minor == DM_ANY_MINOR)
		r = next_free_minor(&minor);
	else
		r = specific_minor(minor);
	if (r < 0)
		goto bad_minor;

	init_rwsem(&md->io_lock);
	mutex_init(&md->suspend_lock);
	spin_lock_init(&md->pushback_lock);
	rwlock_init(&md->map_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->open_count, 0);
	atomic_set(&md->event_nr, 0);
	atomic_set(&md->uevent_seq, 0);
	INIT_LIST_HEAD(&md->uevent_list);
	spin_lock_init(&md->uevent_lock);

	md->queue = blk_alloc_queue(GFP_KERNEL);
	if (!md->queue)
		goto bad_queue;

	md->queue->queuedata = md;
	md->queue->backing_dev_info.congested_fn = dm_any_congested;
	md->queue->backing_dev_info.congested_data = md;
	blk_queue_make_request(md->queue, dm_request);
	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
	md->queue->unplug_fn = dm_unplug_all;
	blk_queue_merge_bvec(md->queue, dm_merge_bvec);

	md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
	if (!md->io_pool)
		goto bad_io_pool;

	md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache);
	if (!md->tio_pool)
		goto bad_tio_pool;

	md->bs = bioset_create(16, 16);
	if (!md->bs)
		goto bad_no_bioset;

	md->disk = alloc_disk(1);
	if (!md->disk)
		goto bad_disk;

	atomic_set(&md->pending, 0);
	init_waitqueue_head(&md->wait);
	init_waitqueue_head(&md->eventq);

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);
	add_disk(md->disk);
	format_dev_t(md->name, MKDEV(_major, minor));

	md->wq = create_singlethread_workqueue("kdmflush");
	if (!md->wq)
		goto bad_thread;

	/* Populate the mapping, nobody knows we exist yet */
	spin_lock(&_minor_lock);
	old_md = idr_replace(&_minor_idr, md, minor);
	spin_unlock(&_minor_lock);

	BUG_ON(old_md != MINOR_ALLOCED);

	return md;

bad_thread:
	put_disk(md->disk);
bad_disk:
	bioset_free(md->bs);
bad_no_bioset:
	mempool_destroy(md->tio_pool);
bad_tio_pool:
	mempool_destroy(md->io_pool);
bad_io_pool:
	blk_cleanup_queue(md->queue);
bad_queue:
	free_minor(minor);
bad_minor:
	module_put(THIS_MODULE);
bad_module_get:
	kfree(md);
	return NULL;
}

static void unlock_fs(struct mapped_device *md);

static void free_dev(struct mapped_device *md)
{
	int minor = MINOR(disk_devt(md->disk));

	if (md->suspended_bdev) {
		unlock_fs(md);
		bdput(md->suspended_bdev);
	}
	destroy_workqueue(md->wq);
	mempool_destroy(md->tio_pool);
	mempool_destroy(md->io_pool);
	bioset_free(md->bs);
	del_gendisk(md->disk);
	free_minor(minor);

	spin_lock(&_minor_lock);
	md->disk->private_data = NULL;
	spin_unlock(&_minor_lock);

	put_disk(md->disk);
	blk_cleanup_queue(md->queue);
	module_put(THIS_MODULE);
	kfree(md);
}
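/*
 * Lifecycle of a minor in _minor_idr: specific_minor()/next_free_minor()
 * reserve the slot with the MINOR_ALLOCED placeholder, alloc_dev()
 * publishes the real md with idr_replace() once it is fully set up, and
 * dm_put() swaps the placeholder back in (and sets DMF_FREEING) before
 * free_dev() finally releases the slot via free_minor().
 */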
/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
{
	unsigned long flags;
	LIST_HEAD(uevents);
	struct mapped_device *md = (struct mapped_device *) context;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_splice_init(&md->uevent_list, &uevents);
	spin_unlock_irqrestore(&md->uevent_lock, flags);

	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);

	atomic_inc(&md->event_nr);
	wake_up(&md->eventq);
}

static void __set_size(struct mapped_device *md, sector_t size)
{
	set_capacity(md->disk, size);

	mutex_lock(&md->suspended_bdev->bd_inode->i_mutex);
	i_size_write(md->suspended_bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
	mutex_unlock(&md->suspended_bdev->bd_inode->i_mutex);
}

static int __bind(struct mapped_device *md, struct dm_table *t)
{
	struct request_queue *q = md->queue;
	sector_t size;

	size = dm_table_get_size(t);

	/*
	 * Wipe any geometry if the size of the table changed.
	 */
	if (size != get_capacity(md->disk))
		memset(&md->geometry, 0, sizeof(md->geometry));

	if (md->suspended_bdev)
		__set_size(md, size);
	if (size == 0)
		return 0;

	dm_table_get(t);
	dm_table_event_callback(t, event_callback, md);

	write_lock(&md->map_lock);
	md->map = t;
	dm_table_set_restrictions(t, q);
	write_unlock(&md->map_lock);

	return 0;
}

static void __unbind(struct mapped_device *md)
{
	struct dm_table *map = md->map;

	if (!map)
		return;

	dm_table_event_callback(map, NULL, NULL);
	write_lock(&md->map_lock);
	md->map = NULL;
	write_unlock(&md->map_lock);
	dm_table_put(map);
}

/*
 * Constructor for a new device.
 */
int dm_create(int minor, struct mapped_device **result)
{
	struct mapped_device *md;

	md = alloc_dev(minor);
	if (!md)
		return -ENXIO;

	*result = md;
	return 0;
}

static struct mapped_device *dm_find_md(dev_t dev)
{
	struct mapped_device *md;
	unsigned minor = MINOR(dev);

	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
		return NULL;

	spin_lock(&_minor_lock);

	md = idr_find(&_minor_idr, minor);
	if (md && (md == MINOR_ALLOCED ||
		   (MINOR(disk_devt(dm_disk(md))) != minor) ||
		   test_bit(DMF_FREEING, &md->flags))) {
		md = NULL;
		goto out;
	}

out:
	spin_unlock(&_minor_lock);

	return md;
}

struct mapped_device *dm_get_md(dev_t dev)
{
	struct mapped_device *md = dm_find_md(dev);

	if (md)
		dm_get(md);

	return md;
}

void *dm_get_mdptr(struct mapped_device *md)
{
	return md->interface_ptr;
}

void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
	md->interface_ptr = ptr;
}

void dm_get(struct mapped_device *md)
{
	atomic_inc(&md->holders);
}

const char *dm_device_name(struct mapped_device *md)
{
	return md->name;
}
EXPORT_SYMBOL_GPL(dm_device_name);

void dm_put(struct mapped_device *md)
{
	struct dm_table *map;

	BUG_ON(test_bit(DMF_FREEING, &md->flags));

	if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
		map = dm_get_table(md);
		idr_replace(&_minor_idr, MINOR_ALLOCED,
			    MINOR(disk_devt(dm_disk(md))));
		set_bit(DMF_FREEING, &md->flags);
		spin_unlock(&_minor_lock);
		if (!dm_suspended(md)) {
			dm_table_presuspend_targets(map);
			dm_table_postsuspend_targets(map);
		}
		__unbind(md);
		dm_table_put(map);
		free_dev(md);
	}
}
EXPORT_SYMBOL_GPL(dm_put);

static int dm_wait_for_completion(struct mapped_device *md)
{
	int r = 0;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);

		smp_mb();
		if (!atomic_read(&md->pending))
			break;

		if (signal_pending(current)) {
			r = -EINTR;
			break;
		}

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	return r;
}

/*
 * Process the deferred bios
 */
static void __flush_deferred_io(struct mapped_device *md)
{
	struct bio *c;

	while ((c = bio_list_pop(&md->deferred))) {
		if (__split_bio(md, c))
			bio_io_error(c);
	}

	clear_bit(DMF_BLOCK_IO, &md->flags);
}

static void __merge_pushback_list(struct mapped_device *md)
{
	unsigned long flags;

	spin_lock_irqsave(&md->pushback_lock, flags);
	clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
	bio_list_merge_head(&md->deferred, &md->pushback);
	bio_list_init(&md->pushback);
	spin_unlock_irqrestore(&md->pushback_lock, flags);
}

static void dm_wq_work(struct work_struct *work)
{
	struct dm_wq_req *req = container_of(work, struct dm_wq_req, work);
	struct mapped_device *md = req->md;

	down_write(&md->io_lock);
	switch (req->type) {
	case DM_WQ_FLUSH_DEFERRED:
		__flush_deferred_io(md);
		break;
	default:
		DMERR("dm_wq_work: unrecognised work type %d", req->type);
		BUG();
	}
	up_write(&md->io_lock);
}
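/*
 * dm_wq_queue()/dm_queue_flush() below queue a struct dm_wq_req on the
 * per-device workqueue and then wait with flush_workqueue(), so the
 * request structure can safely live on the caller's stack.
 */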
static void dm_wq_queue(struct mapped_device *md, int type, void *context,
			struct dm_wq_req *req)
{
	req->type = type;
	req->md = md;
	req->context = context;
	INIT_WORK(&req->work, dm_wq_work);
	queue_work(md->wq, &req->work);
}

static void dm_queue_flush(struct mapped_device *md, int type, void *context)
{
	struct dm_wq_req req;

	dm_wq_queue(md, type, context, &req);
	flush_workqueue(md->wq);
}

/*
 * Swap in a new table (destroying old one).
 */
int dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
	int r = -EINVAL;

	mutex_lock(&md->suspend_lock);

	/* device must be suspended */
	if (!dm_suspended(md))
		goto out;

	/* without bdev, the device size cannot be changed */
	if (!md->suspended_bdev)
		if (get_capacity(md->disk) != dm_table_get_size(table))
			goto out;

	__unbind(md);
	r = __bind(md, table);

out:
	mutex_unlock(&md->suspend_lock);
	return r;
}

/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int lock_fs(struct mapped_device *md)
{
	int r;

	WARN_ON(md->frozen_sb);

	md->frozen_sb = freeze_bdev(md->suspended_bdev);
	if (IS_ERR(md->frozen_sb)) {
		r = PTR_ERR(md->frozen_sb);
		md->frozen_sb = NULL;
		return r;
	}

	set_bit(DMF_FROZEN, &md->flags);

	/* don't bdput right now, we don't want the bdev
	 * to go away while it is locked.
	 */
	return 0;
}

static void unlock_fs(struct mapped_device *md)
{
	if (!test_bit(DMF_FROZEN, &md->flags))
		return;

	thaw_bdev(md->suspended_bdev, md->frozen_sb);
	md->frozen_sb = NULL;
	clear_bit(DMF_FROZEN, &md->flags);
}

/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;
	DECLARE_WAITQUEUE(wait, current);
	int r = 0;
	int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
	int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;

	mutex_lock(&md->suspend_lock);

	if (dm_suspended(md)) {
		r = -EINVAL;
		goto out_unlock;
	}

	map = dm_get_table(md);

	/*
	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
	 * This flag is cleared before dm_suspend returns.
	 */
	if (noflush)
		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);

	/* This does not get reverted if there's an error later. */
	dm_table_presuspend_targets(map);

	/* bdget() can stall if the pending I/Os are not flushed */
	if (!noflush) {
		md->suspended_bdev = bdget_disk(md->disk, 0);
		if (!md->suspended_bdev) {
			DMWARN("bdget failed in dm_suspend");
			r = -ENOMEM;
			goto out;
		}

		/*
		 * Flush I/O to the device. noflush supersedes do_lockfs,
		 * because lock_fs() needs to flush I/Os.
		 */
		if (do_lockfs) {
			r = lock_fs(md);
			if (r)
				goto out;
		}
	}

	/*
	 * First we set the BLOCK_IO flag so no more ios will be mapped.
	 */
	down_write(&md->io_lock);
	set_bit(DMF_BLOCK_IO, &md->flags);

	add_wait_queue(&md->wait, &wait);
	up_write(&md->io_lock);

	/* unplug */
	if (map)
		dm_table_unplug_all(map);

	/*
	 * Wait for the already-mapped ios to complete.
	 */
	r = dm_wait_for_completion(md);

	down_write(&md->io_lock);
	remove_wait_queue(&md->wait, &wait);

	if (noflush)
		__merge_pushback_list(md);
	up_write(&md->io_lock);

	/* were we interrupted? */
	if (r < 0) {
		dm_queue_flush(md, DM_WQ_FLUSH_DEFERRED, NULL);

		unlock_fs(md);
		goto out; /* pushback list is already flushed, so skip flush */
	}

	dm_table_postsuspend_targets(map);

	set_bit(DMF_SUSPENDED, &md->flags);

out:
	if (r && md->suspended_bdev) {
		bdput(md->suspended_bdev);
		md->suspended_bdev = NULL;
	}

	dm_table_put(map);

out_unlock:
	mutex_unlock(&md->suspend_lock);
	return r;
}

int dm_resume(struct mapped_device *md)
{
	int r = -EINVAL;
	struct dm_table *map = NULL;

	mutex_lock(&md->suspend_lock);
	if (!dm_suspended(md))
		goto out;

	map = dm_get_table(md);
	if (!map || !dm_table_get_size(map))
		goto out;

	r = dm_table_resume_targets(map);
	if (r)
		goto out;

	dm_queue_flush(md, DM_WQ_FLUSH_DEFERRED, NULL);

	unlock_fs(md);

	if (md->suspended_bdev) {
		bdput(md->suspended_bdev);
		md->suspended_bdev = NULL;
	}

	clear_bit(DMF_SUSPENDED, &md->flags);

	dm_table_unplug_all(map);

	dm_kobject_uevent(md);

	r = 0;

out:
	dm_table_put(map);
	mutex_unlock(&md->suspend_lock);

	return r;
}

/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
void dm_kobject_uevent(struct mapped_device *md)
{
	kobject_uevent(&disk_to_dev(md->disk)->kobj, KOBJ_CHANGE);
}

uint32_t dm_next_uevent_seq(struct mapped_device *md)
{
	return atomic_add_return(1, &md->uevent_seq);
}

uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}

void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
{
	unsigned long flags;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_add(elist, &md->uevent_list);
	spin_unlock_irqrestore(&md->uevent_lock, flags);
}

/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}
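/*
 * Illustrative only: a caller that needs the gendisk must keep 'md'
 * pinned for as long as it uses the result, e.g.
 *
 *	struct mapped_device *md = dm_get_md(dev);
 *
 *	if (md) {
 *		struct gendisk *disk = dm_disk(md);
 *		... use disk ...
 *		dm_put(md);
 *	}
 */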
int dm_suspended(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}

int dm_noflush_suspending(struct dm_target *ti)
{
	struct mapped_device *md = dm_table_get_md(ti->table);
	int r = __noflush_suspending(md);

	dm_put(md);

	return r;
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);

static struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
	.owner = THIS_MODULE
};

EXPORT_SYMBOL(dm_get_mapinfo);

/*
 * module hooks
 */
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");
MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");