/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched/signal.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/uio.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/pr.h>
#include <linux/refcount.h>

#define DM_MSG_PREFIX "core"

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_IDR(_minor_idr);

static DEFINE_SPINLOCK(_minor_lock);

static void do_deferred_remove(struct work_struct *w);

static DECLARE_WORK(deferred_remove_work, do_deferred_remove);

static struct workqueue_struct *deferred_remove_workqueue;

atomic_t dm_global_event_nr = ATOMIC_INIT(0);
DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);

void dm_issue_global_event(void)
{
	atomic_inc(&dm_global_event_nr);
	wake_up(&dm_global_eventq);
}

/*
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	blk_status_t status;
	atomic_t io_count;
	struct bio *bio;
	unsigned long start_time;
	spinlock_t endio_lock;
	struct dm_stats_aux stats_aux;
};

#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7

#define DM_NUMA_NODE NUMA_NO_NODE
static int dm_numa_node = DM_NUMA_NODE;

/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
	mempool_t *io_pool;
	struct bio_set *bs;
};

struct table_device {
	struct list_head list;
	refcount_t count;
	struct dm_dev dm_dev;
};

static struct kmem_cache *_io_cache;
static struct kmem_cache *_rq_tio_cache;
static struct kmem_cache *_rq_cache;
/*
 * Bio-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_BIO_BASED_IOS		16
static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;

static int __dm_get_module_param_int(int *module_param, int min, int max)
{
	int param = ACCESS_ONCE(*module_param);
	int modified_param = 0;
	bool modified = true;

	if (param < min)
		modified_param = min;
	else if (param > max)
		modified_param = max;
	else
		modified = false;

	if (modified) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned __dm_get_module_param(unsigned *module_param,
			       unsigned def, unsigned max)
{
	unsigned param = ACCESS_ONCE(*module_param);
	unsigned modified_param = 0;

	if (!param)
		modified_param = def;
	else if (param > max)
		modified_param = max;

	if (modified_param) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned dm_get_reserved_bio_based_ios(void)
{
	return __dm_get_module_param(&reserved_bio_based_ios,
				     RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);

static unsigned dm_get_numa_node(void)
{
	return __dm_get_module_param_int(&dm_numa_node,
					 DM_NUMA_NODE, num_online_nodes() - 1);
}
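/*
 * Illustrative sketch (not part of dm core): how the clamping helpers
 * above behave for a hypothetical module parameter. A zero value falls
 * back to the default; values above the ceiling are capped, and the
 * clamped value is written back with cmpxchg so concurrent readers
 * converge on it:
 *
 *	static unsigned my_ios = 0;		// hypothetical parameter
 *
 *	unsigned get_my_ios(void)
 *	{
 *		// 0 -> 16 (default); huge values -> DM_RESERVED_MAX_IOS (cap)
 *		return __dm_get_module_param(&my_ios, 16, DM_RESERVED_MAX_IOS);
 *	}
 */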
static int __init local_init(void)
{
	int r = -ENOMEM;

	/* allocate a slab for the dm_ios */
	_io_cache = KMEM_CACHE(dm_io, 0);
	if (!_io_cache)
		return r;

	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
	if (!_rq_tio_cache)
		goto out_free_io_cache;

	_rq_cache = kmem_cache_create("dm_old_clone_request", sizeof(struct request),
				      __alignof__(struct request), 0, NULL);
	if (!_rq_cache)
		goto out_free_rq_tio_cache;

	r = dm_uevent_init();
	if (r)
		goto out_free_rq_cache;

	deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
	if (!deferred_remove_workqueue) {
		r = -ENOMEM;
		goto out_uevent_exit;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_free_workqueue;

	if (!_major)
		_major = r;

	return 0;

out_free_workqueue:
	destroy_workqueue(deferred_remove_workqueue);
out_uevent_exit:
	dm_uevent_exit();
out_free_rq_cache:
	kmem_cache_destroy(_rq_cache);
out_free_rq_tio_cache:
	kmem_cache_destroy(_rq_tio_cache);
out_free_io_cache:
	kmem_cache_destroy(_io_cache);

	return r;
}

static void local_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(deferred_remove_workqueue);

	kmem_cache_destroy(_rq_cache);
	kmem_cache_destroy(_rq_tio_cache);
	kmem_cache_destroy(_io_cache);
	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_io_init,
	dm_kcopyd_init,
	dm_interface_init,
	dm_statistics_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_io_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
	dm_statistics_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();

	/*
	 * Should be empty by this point.
	 */
	idr_destroy(&_minor_idr);
}

/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{
	return test_bit(DMF_DELETING, &md->flags);
}

static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);
out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}

static void dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = disk->private_data;
	if (WARN_ON(!md))
		goto out;

	if (atomic_dec_and_test(&md->open_count) &&
	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
		queue_work(deferred_remove_workqueue, &deferred_remove_work);

	dm_put(md);
out:
	spin_unlock(&_minor_lock);
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md)) {
		r = -EBUSY;
		if (mark_deferred)
			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
		r = -EEXIST;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

int dm_cancel_deferred_remove(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (test_bit(DMF_DELETING, &md->flags))
		r = -EBUSY;
	else
		clear_bit(DMF_DEFERRED_REMOVE, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static void do_deferred_remove(struct work_struct *w)
{
	dm_deferred_remove();
}

sector_t dm_get_size(struct mapped_device *md)
{
	return get_capacity(md->disk);
}

struct request_queue *dm_get_md_queue(struct mapped_device *md)
{
	return md->queue;
}

struct dm_stats *dm_get_stats(struct mapped_device *md)
{
	return &md->stats;
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

static int dm_grab_bdev_for_ioctl(struct mapped_device *md,
				  struct block_device **bdev,
				  fmode_t *mode)
{
	struct dm_target *tgt;
	struct dm_table *map;
	int srcu_idx, r;

retry:
	r = -ENOTTY;
	map = dm_get_live_table(md, &srcu_idx);
	if (!map || !dm_table_get_size(map))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		goto out;

	tgt = dm_table_get_target(map, 0);
	if (!tgt->type->prepare_ioctl)
		goto out;

	if (dm_suspended_md(md)) {
		r = -EAGAIN;
		goto out;
	}
	r = tgt->type->prepare_ioctl(tgt, bdev, mode);
	if (r < 0)
		goto out;

	bdgrab(*bdev);
	dm_put_live_table(md, srcu_idx);
	return r;

out:
	dm_put_live_table(md, srcu_idx);
	if (r == -ENOTCONN && !fatal_signal_pending(current)) {
		msleep(10);
		goto retry;
	}
	return r;
}

static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	int r;

	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
	if (r < 0)
		return r;

	if (r > 0) {
		/*
		 * Target determined this ioctl is being issued against a
		 * subset of the parent bdev; require extra privileges.
		 */
		if (!capable(CAP_SYS_RAWIO)) {
			DMWARN_LIMIT("%s: sending ioctl %x to DM device without required privilege.",
				     current->comm, cmd);
			r = -ENOIOCTLCMD;
			goto out;
		}
	}

	r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
out:
	bdput(bdev);
	return r;
}

static struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static void free_tio(struct dm_target_io *tio)
{
	bio_put(&tio->clone);
}

int md_in_flight(struct mapped_device *md)
{
	return atomic_read(&md->pending[READ]) +
	       atomic_read(&md->pending[WRITE]);
}

static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	int cpu;
	int rw = bio_data_dir(bio);

	io->start_time = jiffies;

	cpu = part_stat_lock();
	part_round_stats(md->queue, cpu, &dm_disk(md)->part0);
	part_stat_unlock();
	atomic_set(&dm_disk(md)->part0.in_flight[rw],
		   atomic_inc_return(&md->pending[rw]));

	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio_data_dir(bio),
				    bio->bi_iter.bi_sector, bio_sectors(bio),
				    false, 0, &io->stats_aux);
}

static void end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	unsigned long duration = jiffies - io->start_time;
	int pending;
	int rw = bio_data_dir(bio);

	generic_end_io_acct(md->queue, rw, &dm_disk(md)->part0, io->start_time);

	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio_data_dir(bio),
				    bio->bi_iter.bi_sector, bio_sectors(bio),
				    true, duration, &io->stats_aux);

	/*
	 * After this is decremented the bio must not be touched if it is
	 * a flush.
	 */
	pending = atomic_dec_return(&md->pending[rw]);
	atomic_set(&dm_disk(md)->part0.in_flight[rw], pending);
	pending += atomic_read(&md->pending[rw^0x1]);

	/* nudge anyone waiting on suspend queue */
	if (!pending)
		wake_up(&md->wait);
}

/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&md->deferred_lock, flags);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irqrestore(&md->deferred_lock, flags);
	queue_work(md->wq, &md->work);
}
/*
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_put_live_table() when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
{
	*srcu_idx = srcu_read_lock(&md->io_barrier);

	return srcu_dereference(md->map, &md->io_barrier);
}

void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
{
	srcu_read_unlock(&md->io_barrier, srcu_idx);
}

void dm_sync_table(struct mapped_device *md)
{
	synchronize_srcu(&md->io_barrier);
	synchronize_rcu_expedited();
}

/*
 * A fast alternative to dm_get_live_table/dm_put_live_table.
 * The caller must not block between these two functions.
 */
static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
{
	rcu_read_lock();
	return rcu_dereference(md->map);
}

static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
{
	rcu_read_unlock();
}

/*
 * Open a table device so we can use it as a map destination.
 */
static int open_table_device(struct table_device *td, dev_t dev,
			     struct mapped_device *md)
{
	static char *_claim_ptr = "I belong to device-mapper";
	struct block_device *bdev;

	int r;

	BUG_ON(td->dm_dev.bdev);

	bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _claim_ptr);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	r = bd_link_disk_holder(bdev, dm_disk(md));
	if (r) {
		blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
		return r;
	}

	td->dm_dev.bdev = bdev;
	td->dm_dev.dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	return 0;
}

/*
 * Close a table device that we've been using.
 */
static void close_table_device(struct table_device *td, struct mapped_device *md)
{
	if (!td->dm_dev.bdev)
		return;

	bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
	blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
	put_dax(td->dm_dev.dax_dev);
	td->dm_dev.bdev = NULL;
	td->dm_dev.dax_dev = NULL;
}

static struct table_device *find_table_device(struct list_head *l, dev_t dev,
					      fmode_t mode)
{
	struct table_device *td;

	list_for_each_entry(td, l, list)
		if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
			return td;

	return NULL;
}

int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
			struct dm_dev **result)
{
	int r;
	struct table_device *td;

	mutex_lock(&md->table_devices_lock);
	td = find_table_device(&md->table_devices, dev, mode);
	if (!td) {
		td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
		if (!td) {
			mutex_unlock(&md->table_devices_lock);
			return -ENOMEM;
		}

		td->dm_dev.mode = mode;
		td->dm_dev.bdev = NULL;

		if ((r = open_table_device(td, dev, md))) {
			mutex_unlock(&md->table_devices_lock);
			kfree(td);
			return r;
		}

		format_dev_t(td->dm_dev.name, dev);

		refcount_set(&td->count, 1);
		list_add(&td->list, &md->table_devices);
	} else {
		refcount_inc(&td->count);
	}
	mutex_unlock(&md->table_devices_lock);

	*result = &td->dm_dev;
	return 0;
}
EXPORT_SYMBOL_GPL(dm_get_table_device);
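/*
 * Illustrative sketch (not part of dm core): dm_get_table_device() and
 * dm_put_table_device() below are a reference-counted pair; repeated
 * gets for the same dev_t/mode share one struct table_device. A caller
 * holding a mapped_device might do:
 *
 *	struct dm_dev *d;
 *
 *	if (!dm_get_table_device(md, dev, FMODE_READ | FMODE_WRITE, &d)) {
 *		// ... use d->bdev / d->dax_dev ...
 *		dm_put_table_device(md, d);
 *	}
 *
 * (Targets normally go through dm_get_device()/dm_put_device() from
 * dm-table.c, which wrap this interface.)
 */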
void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
{
	struct table_device *td = container_of(d, struct table_device, dm_dev);

	mutex_lock(&md->table_devices_lock);
	if (refcount_dec_and_test(&td->count)) {
		close_table_device(td, md);
		list_del(&td->list);
		kfree(td);
	}
	mutex_unlock(&md->table_devices_lock);
}
EXPORT_SYMBOL(dm_put_table_device);

static void free_table_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct table_device *td = list_entry(tmp, struct table_device, list);

		DMWARN("dm_destroy: %s still exists with %d references",
		       td->dm_dev.name, refcount_read(&td->count));
		kfree(td);
	}
}

/*
 * Get the geometry associated with a dm device.
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}
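/*
 * Worked example (illustrative only): dm_set_geometry() rejects a start
 * sector beyond the capacity implied by the geometry. With
 * cylinders = 1024, heads = 16 and sectors = 63, the implied size is
 * 1024 * 16 * 63 = 1032192 sectors, so geo->start = 2000000 fails with
 * -EINVAL while geo->start = 0 succeeds.
 */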
/*-----------------------------------------------------------------
 * CRUD START:
 * A more elegant solution is in the works that uses the queue
 * merge fn, unfortunately there are a couple of changes to
 * the block layer that I want to make for this. So in the
 * interests of getting something for people to use I give
 * you this clearly demarcated crap.
 *---------------------------------------------------------------*/

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, blk_status_t error)
{
	unsigned long flags;
	blk_status_t io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;

	/* Push-back supersedes any I/O errors */
	if (unlikely(error)) {
		spin_lock_irqsave(&io->endio_lock, flags);
		if (!(io->status == BLK_STS_DM_REQUEUE &&
		      __noflush_suspending(md)))
			io->status = error;
		spin_unlock_irqrestore(&io->endio_lock, flags);
	}

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->status == BLK_STS_DM_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md))
				bio_list_add_head(&md->deferred, io->bio);
			else
				/* noflush suspend was interrupted. */
				io->status = BLK_STS_IOERR;
			spin_unlock_irqrestore(&md->deferred_lock, flags);
		}

		io_error = io->status;
		bio = io->bio;
		end_io_acct(io);
		free_io(md, io);

		if (io_error == BLK_STS_DM_REQUEUE)
			return;

		if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
			/*
			 * Preflush done for flush with data, reissue
			 * without REQ_PREFLUSH.
			 */
			bio->bi_opf &= ~REQ_PREFLUSH;
			queue_io(md, bio);
		} else {
			/* done with normal IO or empty flush */
			bio->bi_status = io_error;
			bio_endio(bio);
		}
	}
}

void disable_write_same(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE SAME, disable it */
	limits->max_write_same_sectors = 0;
}

void disable_write_zeroes(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE ZEROES, disable it */
	limits->max_write_zeroes_sectors = 0;
}

static void clone_endio(struct bio *bio)
{
	blk_status_t error = bio->bi_status;
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (unlikely(error == BLK_STS_TARGET)) {
		if (bio_op(bio) == REQ_OP_WRITE_SAME &&
		    !bio->bi_disk->queue->limits.max_write_same_sectors)
			disable_write_same(md);
		if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
		    !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
			disable_write_zeroes(md);
	}

	if (endio) {
		int r = endio(tio->ti, bio, &error);
		switch (r) {
		case DM_ENDIO_REQUEUE:
			error = BLK_STS_DM_REQUEUE;
			/*FALLTHRU*/
		case DM_ENDIO_DONE:
			break;
		case DM_ENDIO_INCOMPLETE:
			/* The target will handle the io */
			return;
		default:
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	free_tio(tio);
	dec_pending(io, error);
}
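/*
 * Illustrative sketch (hypothetical target, not part of dm core): the
 * return values handled in clone_endio() above form the ->end_io
 * contract. A target that wants failed writes pushed back through the
 * requeue path could do:
 *
 *	static int my_end_io(struct dm_target *ti, struct bio *bio,
 *			     blk_status_t *error)
 *	{
 *		if (*error && bio_data_dir(bio) == WRITE)
 *			return DM_ENDIO_REQUEUE;	// requeue the I/O
 *		return DM_ENDIO_DONE;			// pass result through
 *	}
 */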
/*
 * Return maximum size of I/O possible at the supplied sector up to the current
 * target boundary.
 */
static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
{
	sector_t target_offset = dm_target_offset(ti, sector);

	return ti->len - target_offset;
}

static sector_t max_io_len(sector_t sector, struct dm_target *ti)
{
	sector_t len = max_io_len_target_boundary(sector, ti);
	sector_t offset, max_len;

	/*
	 * Does the target need to split even further?
	 */
	if (ti->max_io_len) {
		offset = dm_target_offset(ti, sector);
		if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
			max_len = sector_div(offset, ti->max_io_len);
		else
			max_len = offset & (ti->max_io_len - 1);
		max_len = ti->max_io_len - max_len;

		if (len > max_len)
			len = max_len;
	}

	return len;
}

int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
{
	if (len > UINT_MAX) {
		DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
		      (unsigned long long)len, UINT_MAX);
		ti->error = "Maximum size of target IO is too large";
		return -EINVAL;
	}

	ti->max_io_len = (uint32_t) len;

	return 0;
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
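/*
 * Illustrative sketch (hypothetical target ctr): a target whose chunks
 * are 'chunk_sectors' wide would bound its I/O like this, letting
 * max_io_len() above split bios on chunk boundaries:
 *
 *	r = dm_set_target_max_io_len(ti, chunk_sectors);
 *	if (r)
 *		return r;	// ti->error has already been set
 */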
static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
						sector_t sector, int *srcu_idx)
{
	struct dm_table *map;
	struct dm_target *ti;

	map = dm_get_live_table(md, srcu_idx);
	if (!map)
		return NULL;

	ti = dm_table_find_target(map, sector);
	if (!dm_target_is_valid(ti))
		return NULL;

	return ti;
}

static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
				 long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long len, ret = -EIO;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->direct_access)
		goto out;
	len = max_io_len(sector, ti) / PAGE_SECTORS;
	if (len < 1)
		goto out;
	nr_pages = min(len, nr_pages);
	ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);

out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
				    void *addr, size_t bytes, struct iov_iter *i)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long ret = 0;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->dax_copy_from_iter) {
		ret = copy_from_iter(addr, bytes, i);
		goto out;
	}
	ret = ti->type->dax_copy_from_iter(ti, pgoff, addr, bytes, i);
out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

/*
 * A target may call dm_accept_partial_bio only from the map routine. It is
 * allowed for all bio types except REQ_PREFLUSH.
 *
 * dm_accept_partial_bio informs the dm that the target only wants to process
 * additional n_sectors sectors of the bio and the rest of the data should be
 * sent in a next bio.
 *
 * A diagram that explains the arithmetic:
 * +--------------------+---------------+-------+
 * |         1          |       2       |   3   |
 * +--------------------+---------------+-------+
 *
 * <-------------- *tio->len_ptr --------------->
 *                      <------- bi_size ------->
 *                      <-- n_sectors -->
 *
 * Region 1 was already iterated over with bio_advance or similar function.
 *	(it may be empty if the target doesn't use bio_advance)
 * Region 2 is the remaining bio size that the target wants to process.
 *	(it may be empty if region 1 is non-empty, although there is no reason
 *	 to make it empty)
 * Region 3 must be sent to the target in the next bio.
 *
 * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
 * the partially processed part (the sum of regions 1+2) must be the same for all
 * copies of the bio.
 */
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
	BUG_ON(bio->bi_opf & REQ_PREFLUSH);
	BUG_ON(bi_size > *tio->len_ptr);
	BUG_ON(n_sectors > bi_size);
	*tio->len_ptr -= bi_size - n_sectors;
	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
}
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
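/*
 * Illustrative sketch of the dm_accept_partial_bio() pattern described
 * above: inside a (hypothetical) target map method that can only handle
 * 'max' more sectors, accept the first part and dm resubmits the rest
 * in a follow-up bio ('max' is a hypothetical per-target computation):
 *
 *	if (bio_sectors(bio) > max)
 *		dm_accept_partial_bio(bio, max);
 */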
/*
 * The zone descriptors obtained with a zone report indicate
 * zone positions within the target device. The zone descriptors
 * must be remapped to match their position within the dm device.
 * A target may call dm_remap_zone_report after completion of a
 * REQ_OP_ZONE_REPORT bio to remap the zone descriptors obtained
 * from the target device mapping to the dm device.
 */
void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
{
#ifdef CONFIG_BLK_DEV_ZONED
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	struct bio *report_bio = tio->io->bio;
	struct blk_zone_report_hdr *hdr = NULL;
	struct blk_zone *zone;
	unsigned int nr_rep = 0;
	unsigned int ofst;
	struct bio_vec bvec;
	struct bvec_iter iter;
	void *addr;

	if (bio->bi_status)
		return;

	/*
	 * Remap the start sector of the reported zones. For sequential zones,
	 * also remap the write pointer position.
	 */
	bio_for_each_segment(bvec, report_bio, iter) {
		addr = kmap_atomic(bvec.bv_page);

		/* Remember the report header in the first page */
		if (!hdr) {
			hdr = addr;
			ofst = sizeof(struct blk_zone_report_hdr);
		} else
			ofst = 0;

		/* Set zones start sector */
		while (hdr->nr_zones && ofst < bvec.bv_len) {
			zone = addr + ofst;
			if (zone->start >= start + ti->len) {
				hdr->nr_zones = 0;
				break;
			}
			zone->start = zone->start + ti->begin - start;
			if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL) {
				if (zone->cond == BLK_ZONE_COND_FULL)
					zone->wp = zone->start + zone->len;
				else if (zone->cond == BLK_ZONE_COND_EMPTY)
					zone->wp = zone->start;
				else
					zone->wp = zone->wp + ti->begin - start;
			}
			ofst += sizeof(struct blk_zone);
			hdr->nr_zones--;
			nr_rep++;
		}

		if (addr != hdr)
			kunmap_atomic(addr);

		if (!hdr->nr_zones)
			break;
	}

	if (hdr) {
		hdr->nr_zones = nr_rep;
		kunmap_atomic(hdr);
	}

	bio_advance(report_bio, report_bio->bi_iter.bi_size);

#else /* !CONFIG_BLK_DEV_ZONED */
	bio->bi_status = BLK_STS_NOTSUPP;
#endif
}
EXPORT_SYMBOL_GPL(dm_remap_zone_report);

/*
 * Flush current->bio_list when the target map method blocks.
 * This fixes deadlocks in snapshot and possibly in other targets.
 */
struct dm_offload {
	struct blk_plug plug;
	struct blk_plug_cb cb;
};

static void flush_current_bio_list(struct blk_plug_cb *cb, bool from_schedule)
{
	struct dm_offload *o = container_of(cb, struct dm_offload, cb);
	struct bio_list list;
	struct bio *bio;
	int i;

	INIT_LIST_HEAD(&o->cb.list);

	if (unlikely(!current->bio_list))
		return;

	for (i = 0; i < 2; i++) {
		list = current->bio_list[i];
		bio_list_init(&current->bio_list[i]);

		while ((bio = bio_list_pop(&list))) {
			struct bio_set *bs = bio->bi_pool;
			if (unlikely(!bs) || bs == fs_bio_set ||
			    !bs->rescue_workqueue) {
				bio_list_add(&current->bio_list[i], bio);
				continue;
			}

			spin_lock(&bs->rescue_lock);
			bio_list_add(&bs->rescue_list, bio);
			queue_work(bs->rescue_workqueue, &bs->rescue_work);
			spin_unlock(&bs->rescue_lock);
		}
	}
}

static void dm_offload_start(struct dm_offload *o)
{
	blk_start_plug(&o->plug);
	o->cb.callback = flush_current_bio_list;
	list_add(&o->cb.list, &current->plug->cb_list);
}

static void dm_offload_end(struct dm_offload *o)
{
	list_del(&o->cb.list);
	blk_finish_plug(&o->plug);
}

static void __map_bio(struct dm_target_io *tio)
{
	int r;
	sector_t sector;
	struct dm_offload o;
	struct bio *clone = &tio->clone;
	struct dm_target *ti = tio->ti;

	clone->bi_end_io = clone_endio;

	/*
	 * Map the clone. If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&tio->io->io_count);
	sector = clone->bi_iter.bi_sector;

	dm_offload_start(&o);
	r = ti->type->map(ti, clone);
	dm_offload_end(&o);

	switch (r) {
	case DM_MAPIO_SUBMITTED:
		break;
	case DM_MAPIO_REMAPPED:
		/* the bio has been remapped so dispatch it */
		trace_block_bio_remap(clone->bi_disk->queue, clone,
				      bio_dev(tio->io->bio), sector);
		generic_make_request(clone);
		break;
	case DM_MAPIO_KILL:
		dec_pending(tio->io, BLK_STS_IOERR);
		free_tio(tio);
		break;
	case DM_MAPIO_REQUEUE:
		dec_pending(tio->io, BLK_STS_DM_REQUEUE);
		free_tio(tio);
		break;
	default:
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}
}
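/*
 * Illustrative sketch (hypothetical target): the DM_MAPIO_* values
 * handled in __map_bio() above form the map-method contract. A simple
 * linear-style remap would be:
 *
 *	static int my_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		bio_set_dev(bio, my_dev(ti)->bdev);		// hypothetical helper
 *		bio->bi_iter.bi_sector = my_remap(ti, bio);	// hypothetical helper
 *		return DM_MAPIO_REMAPPED;	// dm core dispatches the bio
 *	}
 */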
struct clone_info {
	struct mapped_device *md;
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	unsigned sector_count;
};

static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
{
	bio->bi_iter.bi_sector = sector;
	bio->bi_iter.bi_size = to_bytes(len);
}

/*
 * Creates a bio that consists of range of complete bvecs.
 */
static int clone_bio(struct dm_target_io *tio, struct bio *bio,
		     sector_t sector, unsigned len)
{
	struct bio *clone = &tio->clone;

	__bio_clone_fast(clone, bio);

	if (unlikely(bio_integrity(bio) != NULL)) {
		int r;

		if (unlikely(!dm_target_has_integrity(tio->ti->type) &&
			     !dm_target_passes_integrity(tio->ti->type))) {
			DMWARN("%s: the target %s doesn't support integrity data.",
			       dm_device_name(tio->io->md),
			       tio->ti->type->name);
			return -EIO;
		}

		r = bio_integrity_clone(clone, bio, GFP_NOIO);
		if (r < 0)
			return r;
	}

	if (bio_op(bio) != REQ_OP_ZONE_REPORT)
		bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
	clone->bi_iter.bi_size = to_bytes(len);

	if (unlikely(bio_integrity(bio) != NULL))
		bio_integrity_trim(clone);

	return 0;
}

static struct dm_target_io *alloc_tio(struct clone_info *ci,
				      struct dm_target *ti,
				      unsigned target_bio_nr)
{
	struct dm_target_io *tio;
	struct bio *clone;

	clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs);
	tio = container_of(clone, struct dm_target_io, clone);

	tio->io = ci->io;
	tio->ti = ti;
	tio->target_bio_nr = target_bio_nr;

	return tio;
}

static void __clone_and_map_simple_bio(struct clone_info *ci,
				       struct dm_target *ti,
				       unsigned target_bio_nr, unsigned *len)
{
	struct dm_target_io *tio = alloc_tio(ci, ti, target_bio_nr);
	struct bio *clone = &tio->clone;

	tio->len_ptr = len;

	__bio_clone_fast(clone, ci->bio);
	if (len)
		bio_setup_sector(clone, ci->sector, *len);

	__map_bio(tio);
}

static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
				  unsigned num_bios, unsigned *len)
{
	unsigned target_bio_nr;

	for (target_bio_nr = 0; target_bio_nr < num_bios; target_bio_nr++)
		__clone_and_map_simple_bio(ci, ti, target_bio_nr, len);
}

static int __send_empty_flush(struct clone_info *ci)
{
	unsigned target_nr = 0;
	struct dm_target *ti;

	BUG_ON(bio_has_data(ci->bio));
	while ((ti = dm_table_get_target(ci->map, target_nr++)))
		__send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);

	return 0;
}

static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
				    sector_t sector, unsigned *len)
{
	struct bio *bio = ci->bio;
	struct dm_target_io *tio;
	unsigned target_bio_nr;
	unsigned num_target_bios = 1;
	int r = 0;
	/*
	 * Does the target want to receive duplicate copies of the bio?
	 */
	if (bio_data_dir(bio) == WRITE && ti->num_write_bios)
		num_target_bios = ti->num_write_bios(ti, bio);

	for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) {
		tio = alloc_tio(ci, ti, target_bio_nr);
		tio->len_ptr = len;
		r = clone_bio(tio, bio, sector, *len);
		if (r < 0) {
			free_tio(tio);
			break;
		}
		__map_bio(tio);
	}

	return r;
}

typedef unsigned (*get_num_bios_fn)(struct dm_target *ti);

static unsigned get_num_discard_bios(struct dm_target *ti)
{
	return ti->num_discard_bios;
}

static unsigned get_num_write_same_bios(struct dm_target *ti)
{
	return ti->num_write_same_bios;
}

static unsigned get_num_write_zeroes_bios(struct dm_target *ti)
{
	return ti->num_write_zeroes_bios;
}

typedef bool (*is_split_required_fn)(struct dm_target *ti);

static bool is_split_required_for_discard(struct dm_target *ti)
{
	return ti->split_discard_bios;
}

static int __send_changing_extent_only(struct clone_info *ci,
				       get_num_bios_fn get_num_bios,
				       is_split_required_fn is_split_required)
{
	struct dm_target *ti;
	unsigned len;
	unsigned num_bios;

	do {
		ti = dm_table_find_target(ci->map, ci->sector);
		if (!dm_target_is_valid(ti))
			return -EIO;

		/*
		 * Even though the device advertised support for this type of
		 * request, that does not mean every target supports it, and
		 * reconfiguration might also have changed that since the
		 * check was performed.
		 */
		num_bios = get_num_bios ? get_num_bios(ti) : 0;
		if (!num_bios)
			return -EOPNOTSUPP;

		if (is_split_required && !is_split_required(ti))
			len = min((sector_t)ci->sector_count,
				  max_io_len_target_boundary(ci->sector, ti));
		else
			len = min((sector_t)ci->sector_count,
				  max_io_len(ci->sector, ti));

		__send_duplicate_bios(ci, ti, num_bios, &len);

		ci->sector += len;
	} while (ci->sector_count -= len);

	return 0;
}

static int __send_discard(struct clone_info *ci)
{
	return __send_changing_extent_only(ci, get_num_discard_bios,
					   is_split_required_for_discard);
}

static int __send_write_same(struct clone_info *ci)
{
	return __send_changing_extent_only(ci, get_num_write_same_bios, NULL);
}

static int __send_write_zeroes(struct clone_info *ci)
{
	return __send_changing_extent_only(ci, get_num_write_zeroes_bios, NULL);
}
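/*
 * Illustrative sketch (hypothetical target ctr): the num_*_bios fields
 * read by the helpers above are what a target sets to advertise support
 * for these request types, e.g.:
 *
 *	ti->num_flush_bios = 1;
 *	ti->num_discard_bios = 1;
 *	ti->split_discard_bios = true;	// split discards at target boundaries
 */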
/*
 * Select the correct strategy for processing a non-flush bio.
 */
static int __split_and_process_non_flush(struct clone_info *ci)
{
	struct bio *bio = ci->bio;
	struct dm_target *ti;
	unsigned len;
	int r;

	if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
		return __send_discard(ci);
	else if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
		return __send_write_same(ci);
	else if (unlikely(bio_op(bio) == REQ_OP_WRITE_ZEROES))
		return __send_write_zeroes(ci);

	ti = dm_table_find_target(ci->map, ci->sector);
	if (!dm_target_is_valid(ti))
		return -EIO;

	if (bio_op(bio) == REQ_OP_ZONE_REPORT)
		len = ci->sector_count;
	else
		len = min_t(sector_t, max_io_len(ci->sector, ti),
			    ci->sector_count);

	r = __clone_and_map_data_bio(ci, ti, ci->sector, &len);
	if (r < 0)
		return r;

	ci->sector += len;
	ci->sector_count -= len;

	return 0;
}

/*
 * Entry point to split a bio into clones and submit them to the targets.
 */
static void __split_and_process_bio(struct mapped_device *md,
				    struct dm_table *map, struct bio *bio)
{
	struct clone_info ci;
	int error = 0;

	if (unlikely(!map)) {
		bio_io_error(bio);
		return;
	}

	ci.map = map;
	ci.md = md;
	ci.io = alloc_io(md);
	ci.io->status = 0;
	atomic_set(&ci.io->io_count, 1);
	ci.io->bio = bio;
	ci.io->md = md;
	spin_lock_init(&ci.io->endio_lock);
	ci.sector = bio->bi_iter.bi_sector;

	start_io_acct(ci.io);

	if (bio->bi_opf & REQ_PREFLUSH) {
		ci.bio = &ci.md->flush_bio;
		ci.sector_count = 0;
		error = __send_empty_flush(&ci);
		/* dec_pending submits any data associated with flush */
	} else if (bio_op(bio) == REQ_OP_ZONE_RESET) {
		ci.bio = bio;
		ci.sector_count = 0;
		error = __split_and_process_non_flush(&ci);
	} else {
		ci.bio = bio;
		ci.sector_count = bio_sectors(bio);
		while (ci.sector_count && !error)
			error = __split_and_process_non_flush(&ci);
	}

	/* drop the extra reference count */
	dec_pending(ci.io, errno_to_blk_status(error));
}
/*-----------------------------------------------------------------
 * CRUD END
 *---------------------------------------------------------------*/

/*
 * The request function: splits the incoming bio into clones and
 * remaps them via __split_and_process_bio.
 */
static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
{
	int rw = bio_data_dir(bio);
	struct mapped_device *md = q->queuedata;
	int srcu_idx;
	struct dm_table *map;

	map = dm_get_live_table(md, &srcu_idx);

	generic_start_io_acct(q, rw, bio_sectors(bio), &dm_disk(md)->part0);

	/* if we're suspended, we have to queue this io for later */
	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
		dm_put_live_table(md, srcu_idx);

		if (!(bio->bi_opf & REQ_RAHEAD))
			queue_io(md, bio);
		else
			bio_io_error(bio);
		return BLK_QC_T_NONE;
	}

	__split_and_process_bio(md, map, bio);
	dm_put_live_table(md, srcu_idx);
	return BLK_QC_T_NONE;
}

static int dm_any_congested(void *congested_data, int bdi_bits)
{
	int r = bdi_bits;
	struct mapped_device *md = congested_data;
	struct dm_table *map;

	if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		if (dm_request_based(md)) {
			/*
			 * With request-based DM we only need to check the
			 * top-level queue for congestion.
			 */
			r = md->queue->backing_dev_info->wb.state & bdi_bits;
		} else {
			map = dm_get_live_table_fast(md);
			if (map)
				r = dm_table_any_congested(map, bdi_bits);
			dm_put_live_table_fast(md);
		}
	}

	return r;
}

/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static void free_minor(int minor)
{
	spin_lock(&_minor_lock);
	idr_remove(&_minor_idr, minor);
	spin_unlock(&_minor_lock);
}

/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(int minor)
{
	int r;

	if (minor >= (1 << MINORBITS))
		return -EINVAL;

	idr_preload(GFP_KERNEL);
	spin_lock(&_minor_lock);

	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);

	spin_unlock(&_minor_lock);
	idr_preload_end();
	if (r < 0)
		return r == -ENOSPC ? -EBUSY : r;
	return 0;
}

static int next_free_minor(int *minor)
{
	int r;

	idr_preload(GFP_KERNEL);
	spin_lock(&_minor_lock);

	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);

	spin_unlock(&_minor_lock);
	idr_preload_end();
	if (r < 0)
		return r;
	*minor = r;
	return 0;
}

static const struct block_device_operations dm_blk_dops;
static const struct dax_operations dm_dax_ops;

static void dm_wq_work(struct work_struct *work);
void dm_init_md_queue(struct mapped_device *md)
{
	/*
	 * Request-based dm devices cannot be stacked on top of bio-based dm
	 * devices. The type of this dm device may not have been decided yet.
	 * The type is decided at the first table loading time.
	 * To prevent problematic device stacking, clear the queue flag
	 * for request stacking support until then.
	 *
	 * This queue is new, so no concurrency on the queue_flags.
	 */
	queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);

	/*
	 * Initialize data that will only be used by a non-blk-mq DM queue
	 * - must do so here (in alloc_dev callchain) before queue is used
	 */
	md->queue->queuedata = md;
	md->queue->backing_dev_info->congested_data = md;
}

void dm_init_normal_md_queue(struct mapped_device *md)
{
	md->use_blk_mq = false;
	dm_init_md_queue(md);

	/*
	 * Initialize aspects of queue that aren't relevant for blk-mq
	 */
	md->queue->backing_dev_info->congested_fn = dm_any_congested;
}

static void cleanup_mapped_device(struct mapped_device *md)
{
	if (md->wq)
		destroy_workqueue(md->wq);
	if (md->kworker_task)
		kthread_stop(md->kworker_task);
	mempool_destroy(md->io_pool);
	if (md->bs)
		bioset_free(md->bs);

	if (md->dax_dev) {
		kill_dax(md->dax_dev);
		put_dax(md->dax_dev);
		md->dax_dev = NULL;
	}

	if (md->disk) {
		spin_lock(&_minor_lock);
		md->disk->private_data = NULL;
		spin_unlock(&_minor_lock);
		del_gendisk(md->disk);
		put_disk(md->disk);
	}

	if (md->queue)
		blk_cleanup_queue(md->queue);

	cleanup_srcu_struct(&md->io_barrier);

	if (md->bdev) {
		bdput(md->bdev);
		md->bdev = NULL;
	}

	dm_mq_cleanup_mapped_device(md);
}

/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(int minor)
{
	int r, numa_node_id = dm_get_numa_node();
	struct dax_device *dax_dev;
	struct mapped_device *md;
	void *old_md;

	md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	if (!try_module_get(THIS_MODULE))
		goto bad_module_get;

	/* get a minor number for the dev */
	if (minor == DM_ANY_MINOR)
		r = next_free_minor(&minor);
	else
		r = specific_minor(minor);
	if (r < 0)
		goto bad_minor;

	r = init_srcu_struct(&md->io_barrier);
	if (r < 0)
		goto bad_io_barrier;

	md->numa_node_id = numa_node_id;
	md->use_blk_mq = dm_use_blk_mq_default();
	md->init_tio_pdu = false;
	md->type = DM_TYPE_NONE;
	mutex_init(&md->suspend_lock);
	mutex_init(&md->type_lock);
	mutex_init(&md->table_devices_lock);
	spin_lock_init(&md->deferred_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->open_count, 0);
	atomic_set(&md->event_nr, 0);
	atomic_set(&md->uevent_seq, 0);
	INIT_LIST_HEAD(&md->uevent_list);
	INIT_LIST_HEAD(&md->table_devices);
	spin_lock_init(&md->uevent_lock);

	md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id);
	if (!md->queue)
		goto bad;

	dm_init_md_queue(md);

	md->disk = alloc_disk_node(1, numa_node_id);
	if (!md->disk)
		goto bad;

	atomic_set(&md->pending[0], 0);
	atomic_set(&md->pending[1], 0);
	init_waitqueue_head(&md->wait);
	INIT_WORK(&md->work, dm_wq_work);
	init_waitqueue_head(&md->eventq);
	init_completion(&md->kobj_holder.completion);
	md->kworker_task = NULL;

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);
	dax_dev = alloc_dax(md, md->disk->disk_name, &dm_dax_ops);
	if (!dax_dev)
		goto bad;
	md->dax_dev = dax_dev;

	add_disk(md->disk);
	format_dev_t(md->name, MKDEV(_major, minor));

	md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
	if (!md->wq)
		goto bad;

	md->bdev = bdget_disk(md->disk, 0);
	if (!md->bdev)
		goto bad;

	bio_init(&md->flush_bio, NULL, 0);
	bio_set_dev(&md->flush_bio, md->bdev);
	md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;

	dm_stats_init(&md->stats);

	/* Populate the mapping, nobody knows we exist yet */
	spin_lock(&_minor_lock);
	old_md = idr_replace(&_minor_idr, md, minor);
	spin_unlock(&_minor_lock);

	BUG_ON(old_md != MINOR_ALLOCED);

	return md;

bad:
	cleanup_mapped_device(md);
bad_io_barrier:
	free_minor(minor);
bad_minor:
	module_put(THIS_MODULE);
bad_module_get:
	kvfree(md);
	return NULL;
}

static void unlock_fs(struct mapped_device *md);

static void free_dev(struct mapped_device *md)
{
	int minor = MINOR(disk_devt(md->disk));

	unlock_fs(md);

	cleanup_mapped_device(md);

	free_table_devices(&md->table_devices);
	dm_stats_cleanup(&md->stats);
	free_minor(minor);

	module_put(THIS_MODULE);
	kvfree(md);
}

static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
{
	struct dm_md_mempools *p = dm_table_get_md_mempools(t);

	if (md->bs) {
		/* The md already has necessary mempools. */
		if (dm_table_bio_based(t)) {
			/*
			 * Reload bioset because front_pad may have changed
			 * because a different table was loaded.
			 */
			bioset_free(md->bs);
			md->bs = p->bs;
			p->bs = NULL;
		}
		/*
		 * There's no need to reload with request-based dm
		 * because the size of front_pad doesn't change.
		 * Note for future: If you are to reload bioset,
		 * prep-ed requests in the queue may refer
		 * to bio from the old bioset, so you must walk
		 * through the queue to unprep.
		 */
		goto out;
	}

	BUG_ON(!p || md->io_pool || md->bs);

	md->io_pool = p->io_pool;
	p->io_pool = NULL;
	md->bs = p->bs;
	p->bs = NULL;

out:
	/* mempool bind completed, no longer need any mempools in the table */
	dm_table_free_md_mempools(t);
}

/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
{
	unsigned long flags;
	LIST_HEAD(uevents);
	struct mapped_device *md = (struct mapped_device *) context;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_splice_init(&md->uevent_list, &uevents);
	spin_unlock_irqrestore(&md->uevent_lock, flags);

	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);

	atomic_inc(&md->event_nr);
	wake_up(&md->eventq);
	dm_issue_global_event();
}

/*
 * Protected by md->suspend_lock obtained by dm_swap_table().
 */
static void __set_size(struct mapped_device *md, sector_t size)
{
	lockdep_assert_held(&md->suspend_lock);

	set_capacity(md->disk, size);

	i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
}
/*
 * Returns old map, which caller must destroy.
 */
static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
			       struct queue_limits *limits)
{
	struct dm_table *old_map;
	struct request_queue *q = md->queue;
	sector_t size;

	lockdep_assert_held(&md->suspend_lock);

	size = dm_table_get_size(t);

	/*
	 * Wipe any geometry if the size of the table changed.
	 */
	if (size != dm_get_size(md))
		memset(&md->geometry, 0, sizeof(md->geometry));

	__set_size(md, size);

	dm_table_event_callback(t, event_callback, md);

	/*
	 * The queue hasn't been stopped yet, if the old table type wasn't
	 * for request-based during suspension. So stop it to prevent
	 * I/O mapping before resume.
	 * This must be done before setting the queue restrictions,
	 * because request-based dm may be run just after the setting.
	 */
	if (dm_table_request_based(t)) {
		dm_stop_queue(q);
		/*
		 * Leverage the fact that request-based DM targets are
		 * immutable singletons and establish md->immutable_target
		 * - used to optimize both dm_request_fn and dm_mq_queue_rq
		 */
		md->immutable_target = dm_table_get_immutable_target(t);
	}

	__bind_mempools(md, t);

	old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
	rcu_assign_pointer(md->map, (void *)t);
	md->immutable_target_type = dm_table_get_immutable_target_type(t);

	dm_table_set_restrictions(t, q, limits);
	if (old_map)
		dm_sync_table(md);

	return old_map;
}

/*
 * Returns unbound table for the caller to free.
 */
static struct dm_table *__unbind(struct mapped_device *md)
{
	struct dm_table *map = rcu_dereference_protected(md->map, 1);

	if (!map)
		return NULL;

	dm_table_event_callback(map, NULL, NULL);
	RCU_INIT_POINTER(md->map, NULL);
	dm_sync_table(md);

	return map;
}

/*
 * Constructor for a new device.
 */
int dm_create(int minor, struct mapped_device **result)
{
	struct mapped_device *md;

	md = alloc_dev(minor);
	if (!md)
		return -ENXIO;

	dm_sysfs_init(md);

	*result = md;
	return 0;
}

/*
 * Functions to manage md->type.
 * All are required to hold md->type_lock.
 */
void dm_lock_md_type(struct mapped_device *md)
{
	mutex_lock(&md->type_lock);
}

void dm_unlock_md_type(struct mapped_device *md)
{
	mutex_unlock(&md->type_lock);
}

void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type)
{
	BUG_ON(!mutex_is_locked(&md->type_lock));
	md->type = type;
}

enum dm_queue_mode dm_get_md_type(struct mapped_device *md)
{
	return md->type;
}

struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
{
	return md->immutable_target_type;
}
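/*
 * Illustrative sketch: callers manipulate md->type only under the type
 * lock; the table-load path (see dm-ioctl.c) effectively does:
 *
 *	dm_lock_md_type(md);
 *	if (dm_get_md_type(md) == DM_TYPE_NONE)
 *		dm_set_md_type(md, dm_table_get_type(t));
 *	dm_unlock_md_type(md);
 */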
/*
 * The queue_limits are only valid as long as you have a reference
 * count on 'md'.
 */
struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
{
	BUG_ON(!atomic_read(&md->holders));
	return &md->queue->limits;
}
EXPORT_SYMBOL_GPL(dm_get_queue_limits);

/*
 * Setup the DM device's queue based on md's type
 */
int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
{
	int r;
	enum dm_queue_mode type = dm_get_md_type(md);

	switch (type) {
	case DM_TYPE_REQUEST_BASED:
		r = dm_old_init_request_queue(md, t);
		if (r) {
			DMERR("Cannot initialize queue for request-based mapped device");
			return r;
		}
		break;
	case DM_TYPE_MQ_REQUEST_BASED:
		r = dm_mq_init_request_queue(md, t);
		if (r) {
			DMERR("Cannot initialize queue for request-based dm-mq mapped device");
			return r;
		}
		break;
	case DM_TYPE_BIO_BASED:
	case DM_TYPE_DAX_BIO_BASED:
		dm_init_normal_md_queue(md);
		blk_queue_make_request(md->queue, dm_make_request);
		/*
		 * DM handles splitting bios as needed. Free the bio_split bioset
		 * since it won't be used (saves 1 process per bio-based DM device).
		 */
		bioset_free(md->queue->bio_split);
		md->queue->bio_split = NULL;

		if (type == DM_TYPE_DAX_BIO_BASED)
			queue_flag_set_unlocked(QUEUE_FLAG_DAX, md->queue);
		break;
	case DM_TYPE_NONE:
		WARN_ON_ONCE(true);
		break;
	}

	return 0;
}

struct mapped_device *dm_get_md(dev_t dev)
{
	struct mapped_device *md;
	unsigned minor = MINOR(dev);

	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
		return NULL;

	spin_lock(&_minor_lock);

	md = idr_find(&_minor_idr, minor);
	if (md) {
		if ((md == MINOR_ALLOCED ||
		     (MINOR(disk_devt(dm_disk(md))) != minor) ||
		     dm_deleting_md(md) ||
		     test_bit(DMF_FREEING, &md->flags))) {
			md = NULL;
			goto out;
		}
		dm_get(md);
	}

out:
	spin_unlock(&_minor_lock);

	return md;
}
EXPORT_SYMBOL_GPL(dm_get_md);

void *dm_get_mdptr(struct mapped_device *md)
{
	return md->interface_ptr;
}

void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
	md->interface_ptr = ptr;
}

void dm_get(struct mapped_device *md)
{
	atomic_inc(&md->holders);
	BUG_ON(test_bit(DMF_FREEING, &md->flags));
}

int dm_hold(struct mapped_device *md)
{
	spin_lock(&_minor_lock);
	if (test_bit(DMF_FREEING, &md->flags)) {
		spin_unlock(&_minor_lock);
		return -EBUSY;
	}
	dm_get(md);
	spin_unlock(&_minor_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(dm_hold);

const char *dm_device_name(struct mapped_device *md)
{
	return md->name;
}
EXPORT_SYMBOL_GPL(dm_device_name);
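/*
 * Illustrative sketch: dm_get_md() above returns with a reference held,
 * so every successful lookup must be paired with dm_put():
 *
 *	struct mapped_device *md = dm_get_md(dev);
 *
 *	if (md) {
 *		// ... use md ...
 *		dm_put(md);
 *	}
 */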
static void __dm_destroy(struct mapped_device *md, bool wait)
{
	struct request_queue *q = dm_get_md_queue(md);
	struct dm_table *map;
	int srcu_idx;

	might_sleep();

	spin_lock(&_minor_lock);
	idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
	set_bit(DMF_FREEING, &md->flags);
	spin_unlock(&_minor_lock);

	blk_set_queue_dying(q);

	if (dm_request_based(md) && md->kworker_task)
		kthread_flush_worker(&md->kworker);

	/*
	 * Take suspend_lock so that presuspend and postsuspend methods
	 * do not race with internal suspend.
	 */
	mutex_lock(&md->suspend_lock);
	map = dm_get_live_table(md, &srcu_idx);
	if (!dm_suspended_md(md)) {
		dm_table_presuspend_targets(map);
		dm_table_postsuspend_targets(map);
	}
	/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
	dm_put_live_table(md, srcu_idx);
	mutex_unlock(&md->suspend_lock);

	/*
	 * Rare, but there may be I/O requests still going to complete,
	 * for example. Wait for all references to disappear.
	 * No one should increment the reference count of the mapped_device,
	 * after the mapped_device state becomes DMF_FREEING.
	 */
	if (wait)
		while (atomic_read(&md->holders))
			msleep(1);
	else if (atomic_read(&md->holders))
		DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
		       dm_device_name(md), atomic_read(&md->holders));

	dm_sysfs_exit(md);
	dm_table_destroy(__unbind(md));
	free_dev(md);
}

void dm_destroy(struct mapped_device *md)
{
	__dm_destroy(md, true);
}

void dm_destroy_immediate(struct mapped_device *md)
{
	__dm_destroy(md, false);
}

void dm_put(struct mapped_device *md)
{
	atomic_dec(&md->holders);
}
EXPORT_SYMBOL_GPL(dm_put);

static int dm_wait_for_completion(struct mapped_device *md, long task_state)
{
	int r = 0;
	DEFINE_WAIT(wait);

	while (1) {
		prepare_to_wait(&md->wait, &wait, task_state);

		if (!md_in_flight(md))
			break;

		if (signal_pending_state(task_state, current)) {
			r = -EINTR;
			break;
		}

		io_schedule();
	}
	finish_wait(&md->wait, &wait);

	return r;
}

/*
 * Process the deferred bios
 */
static void dm_wq_work(struct work_struct *work)
{
	struct mapped_device *md = container_of(work, struct mapped_device,
						work);
	struct bio *c;
	int srcu_idx;
	struct dm_table *map;

	map = dm_get_live_table(md, &srcu_idx);

	while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		spin_lock_irq(&md->deferred_lock);
		c = bio_list_pop(&md->deferred);
		spin_unlock_irq(&md->deferred_lock);

		if (!c)
			break;

		if (dm_request_based(md))
			generic_make_request(c);
		else
			__split_and_process_bio(md, map, c);
	}

	dm_put_live_table(md, srcu_idx);
}

static void dm_queue_flush(struct mapped_device *md)
{
	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	smp_mb__after_atomic();
	queue_work(md->wq, &md->work);
}
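/*
 * Illustrative sketch: dm_swap_table() below requires a suspended
 * device and hands back the old table for the caller to destroy; the
 * ioctl resume path (see dm-ioctl.c) effectively does:
 *
 *	dm_suspend(md, suspend_flags);
 *	old_map = dm_swap_table(md, new_table);
 *	if (!IS_ERR(old_map))
 *		dm_table_destroy(old_map);
 *	dm_resume(md);
 */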
/*
 * Swap in a new table, returning the old one for the caller to destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
	struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
	struct queue_limits limits;
	int r;

	mutex_lock(&md->suspend_lock);

	/* device must be suspended */
	if (!dm_suspended_md(md))
		goto out;

	/*
	 * If the new table has no data devices, retain the existing limits.
	 * This helps multipath with queue_if_no_path if all paths disappear,
	 * then new I/O is queued based on these limits, and then some paths
	 * reappear.
	 */
	if (dm_table_has_no_data_devices(table)) {
		live_map = dm_get_live_table_fast(md);
		if (live_map)
			limits = md->queue->limits;
		dm_put_live_table_fast(md);
	}

	if (!live_map) {
		r = dm_calculate_queue_limits(table, &limits);
		if (r) {
			map = ERR_PTR(r);
			goto out;
		}
	}

	map = __bind(md, table, &limits);
	dm_issue_global_event();

out:
	mutex_unlock(&md->suspend_lock);
	return map;
}

/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int lock_fs(struct mapped_device *md)
{
	int r;

	WARN_ON(md->frozen_sb);

	md->frozen_sb = freeze_bdev(md->bdev);
	if (IS_ERR(md->frozen_sb)) {
		r = PTR_ERR(md->frozen_sb);
		md->frozen_sb = NULL;
		return r;
	}

	set_bit(DMF_FROZEN, &md->flags);

	return 0;
}

static void unlock_fs(struct mapped_device *md)
{
	if (!test_bit(DMF_FROZEN, &md->flags))
		return;

	thaw_bdev(md->bdev, md->frozen_sb);
	md->frozen_sb = NULL;
	clear_bit(DMF_FROZEN, &md->flags);
}
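/*
 * Illustrative note: lock_fs()/unlock_fs() above bracket the flush-style
 * quiesce in __dm_suspend() below.  DMF_FROZEN records whether a thaw is
 * owed, so unlock_fs() is safe to call on every abort/resume path even when
 * the freeze was skipped (a noflush suspend never takes the freeze):
 *
 *	r = lock_fs(md);	(sets DMF_FROZEN on success)
 *	...
 *	unlock_fs(md);		(no-op unless DMF_FROZEN is set)
 */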
/*
 * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG
 * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE
 * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY
 *
 * If __dm_suspend returns 0, the device is completely quiescent: there is
 * no request-processing activity, and all new requests are being added to
 * the md->deferred list.
 */
static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
			unsigned suspend_flags, long task_state,
			int dmf_suspended_flag)
{
	bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
	bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
	int r;

	lockdep_assert_held(&md->suspend_lock);

	/*
	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
	 * This flag is cleared before dm_suspend returns.
	 */
	if (noflush)
		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
	else
		pr_debug("%s: suspending with flush\n", dm_device_name(md));

	/*
	 * This gets reverted if there's an error later and the targets
	 * provide the .presuspend_undo hook.
	 */
	dm_table_presuspend_targets(map);

	/*
	 * Flush I/O to the device.
	 * Any I/O submitted after lock_fs() may not be flushed.
	 * noflush takes precedence over do_lockfs.
	 * (lock_fs() flushes I/Os and waits for them to complete.)
	 */
	if (!noflush && do_lockfs) {
		r = lock_fs(md);
		if (r) {
			dm_table_presuspend_undo_targets(map);
			return r;
		}
	}

	/*
	 * Here we must make sure that no processes are submitting requests
	 * to target drivers i.e. no one may be executing
	 * __split_and_process_bio.  This is called from dm_request and
	 * dm_wq_work.
	 *
	 * To get all processes out of __split_and_process_bio in dm_request,
	 * we take the write lock.  To prevent any process from reentering
	 * __split_and_process_bio from dm_request and quiesce the thread
	 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
	 * flush_workqueue(md->wq).
	 */
	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	if (map)
		synchronize_srcu(&md->io_barrier);

	/*
	 * Stop md->queue before flushing md->wq in case request-based
	 * dm defers requests to md->wq from md->queue.
	 */
	if (dm_request_based(md)) {
		dm_stop_queue(md->queue);
		if (md->kworker_task)
			kthread_flush_worker(&md->kworker);
	}

	flush_workqueue(md->wq);

	/*
	 * At this point no more requests are entering target request routines.
	 * We call dm_wait_for_completion to wait for all existing requests
	 * to finish.
	 */
	r = dm_wait_for_completion(md, task_state);
	if (!r)
		set_bit(dmf_suspended_flag, &md->flags);

	if (noflush)
		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
	if (map)
		synchronize_srcu(&md->io_barrier);

	/* were we interrupted ? */
	if (r < 0) {
		dm_queue_flush(md);

		if (dm_request_based(md))
			dm_start_queue(md->queue);

		unlock_fs(md);
		dm_table_presuspend_undo_targets(map);
		/* pushback list is already flushed, so skip flush */
	}

	return r;
}

/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 */
/*
 * Suspend mechanism in request-based dm.
 *
 * 1. Flush all I/Os by lock_fs() if needed.
 * 2. Stop dispatching any I/O by stopping the request_queue.
 * 3. Wait for all in-flight I/Os to be completed or requeued.
 *
 * To abort suspend, start the request_queue.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;
	int r = 0;

retry:
	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);

	if (dm_suspended_md(md)) {
		r = -EINVAL;
		goto out_unlock;
	}

	if (dm_suspended_internally_md(md)) {
		/* already internally suspended, wait for internal resume */
		mutex_unlock(&md->suspend_lock);
		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
		if (r)
			return r;
		goto retry;
	}

	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));

	r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);
	if (r)
		goto out_unlock;

	dm_table_postsuspend_targets(map);

out_unlock:
	mutex_unlock(&md->suspend_lock);
	return r;
}
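/*
 * Illustrative sketch: the two suspend flavours as a caller would request
 * them (return-code handling omitted).  A flush suspend that also freezes
 * a mounted filesystem first:
 *
 *	r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
 *
 * versus a multipath-style noflush suspend, which leaves unfinished I/O
 * queued for the next table (noflush takes precedence over lockfs):
 *
 *	r = dm_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
 */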
static int __dm_resume(struct mapped_device *md, struct dm_table *map)
{
	if (map) {
		int r = dm_table_resume_targets(map);
		if (r)
			return r;
	}

	dm_queue_flush(md);

	/*
	 * Flushing deferred I/Os must be done after targets are resumed
	 * so that mapping of targets can work correctly.
	 * Request-based dm is queueing the deferred I/Os in its request_queue.
	 */
	if (dm_request_based(md))
		dm_start_queue(md->queue);

	unlock_fs(md);

	return 0;
}

int dm_resume(struct mapped_device *md)
{
	int r;
	struct dm_table *map = NULL;

retry:
	r = -EINVAL;
	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);

	if (!dm_suspended_md(md))
		goto out;

	if (dm_suspended_internally_md(md)) {
		/* already internally suspended, wait for internal resume */
		mutex_unlock(&md->suspend_lock);
		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
		if (r)
			return r;
		goto retry;
	}

	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
	if (!map || !dm_table_get_size(map))
		goto out;

	r = __dm_resume(md, map);
	if (r)
		goto out;

	clear_bit(DMF_SUSPENDED, &md->flags);
out:
	mutex_unlock(&md->suspend_lock);

	return r;
}

/*
 * Internal suspend/resume works like userspace-driven suspend.  It waits
 * until all bios finish and prevents issuing new bios to the target drivers.
 * It may be used only from the kernel.
 */

static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;

	lockdep_assert_held(&md->suspend_lock);

	if (md->internal_suspend_count++)
		return; /* nested internal suspend */

	if (dm_suspended_md(md)) {
		set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
		return; /* nest suspend */
	}

	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));

	/*
	 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is
	 * supported.  Properly supporting a TASK_INTERRUPTIBLE internal suspend
	 * would require changing .presuspend to return an error -- avoid this
	 * until there is a need for more elaborate variants of internal suspend.
	 */
	(void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
			    DMF_SUSPENDED_INTERNALLY);

	dm_table_postsuspend_targets(map);
}

static void __dm_internal_resume(struct mapped_device *md)
{
	BUG_ON(!md->internal_suspend_count);

	if (--md->internal_suspend_count)
		return; /* resume from nested internal suspend */

	if (dm_suspended_md(md))
		goto done; /* resume from nested suspend */

	/*
	 * NOTE: existing callers don't need to call dm_table_resume_targets
	 * (which may fail -- so best to avoid it for now by passing NULL map)
	 */
	(void) __dm_resume(md, NULL);

done:
	clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
	smp_mb__after_atomic();
	wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY);
}

void dm_internal_suspend_noflush(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	__dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush);

void dm_internal_resume(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	__dm_internal_resume(md);
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_resume);
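/*
 * Illustrative sketch: internal suspends nest via internal_suspend_count,
 * so independent kernel users compose safely:
 *
 *	dm_internal_suspend_noflush(md);	(count 0 -> 1: device suspends)
 *	dm_internal_suspend_noflush(md);	(count 1 -> 2: no-op)
 *	dm_internal_resume(md);			(count 2 -> 1: no-op)
 *	dm_internal_resume(md);			(count 1 -> 0: device resumes)
 */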
/*
 * Fast variants of internal suspend/resume hold md->suspend_lock,
 * which prevents interaction with userspace-driven suspend.
 */

void dm_internal_suspend_fast(struct mapped_device *md)
{
	/* suspend_lock is held until the matching dm_internal_resume_fast() */
	mutex_lock(&md->suspend_lock);
	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
		return;

	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	synchronize_srcu(&md->io_barrier);
	flush_workqueue(md->wq);
	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);

void dm_internal_resume_fast(struct mapped_device *md)
{
	/* suspend_lock was taken by dm_internal_suspend_fast() */
	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
		goto done;

	dm_queue_flush(md);

done:
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_resume_fast);

/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
		      unsigned cookie)
{
	char udev_cookie[DM_COOKIE_LENGTH];
	char *envp[] = { udev_cookie, NULL };

	if (!cookie)
		return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);

	snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
		 DM_COOKIE_ENV_VAR_NAME, cookie);
	return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
				  action, envp);
}

uint32_t dm_next_uevent_seq(struct mapped_device *md)
{
	return atomic_add_return(1, &md->uevent_seq);
}

uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}

void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
{
	unsigned long flags;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_add(elist, &md->uevent_list);
	spin_unlock_irqrestore(&md->uevent_lock, flags);
}
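/*
 * Illustrative sketch: the event counter above supports edge-triggered
 * waiting; a caller snapshots the counter and later blocks until it moves
 * (dm_wait_event() returns -ERESTARTSYS if interrupted):
 *
 *	uint32_t nr = dm_get_event_nr(md);
 *	...
 *	if (dm_wait_event(md, nr))
 *		return -ERESTARTSYS;
 *
 * For dm_kobject_uevent(), a non-zero cookie of e.g. 42 adds the single
 * environment string "DM_COOKIE=42" to the emitted uevent.
 */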
/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}
EXPORT_SYMBOL_GPL(dm_disk);

struct kobject *dm_kobject(struct mapped_device *md)
{
	return &md->kobj_holder.kobj;
}

struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{
	struct mapped_device *md;

	md = container_of(kobj, struct mapped_device, kobj_holder.kobj);

	/*
	 * Take _minor_lock so the DMF_FREEING/DMF_DELETING checks cannot
	 * race with __dm_destroy() setting those bits after the test.
	 */
	spin_lock(&_minor_lock);
	if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}
	dm_get(md);
out:
	spin_unlock(&_minor_lock);

	return md;
}

int dm_suspended_md(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}

int dm_suspended_internally_md(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
}

int dm_test_deferred_remove_flag(struct mapped_device *md)
{
	return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
}

int dm_suspended(struct dm_target *ti)
{
	return dm_suspended_md(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_suspended);

int dm_noflush_suspending(struct dm_target *ti)
{
	return __noflush_suspending(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);

struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
					    unsigned integrity, unsigned per_io_data_size)
{
	struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
	unsigned int pool_size = 0;
	unsigned int front_pad;

	if (!pools)
		return NULL;

	switch (type) {
	case DM_TYPE_BIO_BASED:
	case DM_TYPE_DAX_BIO_BASED:
		pool_size = dm_get_reserved_bio_based_ios();
		front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) +
			offsetof(struct dm_target_io, clone);

		pools->io_pool = mempool_create_slab_pool(pool_size, _io_cache);
		if (!pools->io_pool)
			goto out;
		break;
	case DM_TYPE_REQUEST_BASED:
	case DM_TYPE_MQ_REQUEST_BASED:
		pool_size = dm_get_reserved_rq_based_ios();
		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
		/* per_io_data_size is used for blk-mq pdu at queue allocation */
		break;
	default:
		BUG();
	}

	pools->bs = bioset_create(pool_size, front_pad, BIOSET_NEED_RESCUER);
	if (!pools->bs)
		goto out;

	if (integrity && bioset_integrity_create(pools->bs, pool_size))
		goto out;

	return pools;

out:
	dm_free_md_mempools(pools);

	return NULL;
}

void dm_free_md_mempools(struct dm_md_mempools *pools)
{
	if (!pools)
		return;

	mempool_destroy(pools->io_pool);

	if (pools->bs)
		bioset_free(pools->bs);

	kfree(pools);
}
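/*
 * Illustrative arithmetic for the bio-based front_pad computed above
 * (numbers assumed, not taken from a real target): with per_io_data_size
 * = 20 and an 8-byte __alignof__(struct dm_target_io),
 *
 *	front_pad = roundup(20, 8) + offsetof(struct dm_target_io, clone)
 *		  = 24 + offsetof(struct dm_target_io, clone)
 *
 * so each bio allocated from pools->bs carries the dm_target_io and the
 * target's per-I/O data in the same allocation, in front of the bio itself.
 */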
struct dm_pr {
	u64	old_key;
	u64	new_key;
	u32	flags;
	bool	fail_early;
};

static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn,
		      void *data)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	struct dm_table *table;
	struct dm_target *ti;
	int ret = -ENOTTY, srcu_idx;

	table = dm_get_live_table(md, &srcu_idx);
	if (!table || !dm_table_get_size(table))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(table) != 1)
		goto out;
	ti = dm_table_get_target(table, 0);

	ret = -EINVAL;
	if (!ti->type->iterate_devices)
		goto out;

	ret = ti->type->iterate_devices(ti, fn, data);
out:
	dm_put_live_table(md, srcu_idx);
	return ret;
}

/*
 * For register / unregister we need to manually call out to every path.
 */
static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev,
			    sector_t start, sector_t len, void *data)
{
	struct dm_pr *pr = data;
	const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;

	if (!ops || !ops->pr_register)
		return -EOPNOTSUPP;
	return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags);
}

static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
			  u32 flags)
{
	struct dm_pr pr = {
		.old_key	= old_key,
		.new_key	= new_key,
		.flags		= flags,
		.fail_early	= true,
	};
	int ret;

	ret = dm_call_pr(bdev, __dm_pr_register, &pr);
	if (ret && new_key) {
		/* unregister all paths if we failed to register any path */
		pr.old_key = new_key;
		pr.new_key = 0;
		pr.flags = 0;
		pr.fail_early = false;
		dm_call_pr(bdev, __dm_pr_register, &pr);
	}

	return ret;
}

static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
			 u32 flags)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	fmode_t mode;
	int r;

	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
	if (r < 0)
		return r;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_reserve)
		r = ops->pr_reserve(bdev, key, type, flags);
	else
		r = -EOPNOTSUPP;

	bdput(bdev);
	return r;
}

static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	fmode_t mode;
	int r;

	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
	if (r < 0)
		return r;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_release)
		r = ops->pr_release(bdev, key, type);
	else
		r = -EOPNOTSUPP;

	bdput(bdev);
	return r;
}

static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
			 enum pr_type type, bool abort)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	fmode_t mode;
	int r;

	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
	if (r < 0)
		return r;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_preempt)
		r = ops->pr_preempt(bdev, old_key, new_key, type, abort);
	else
		r = -EOPNOTSUPP;

	bdput(bdev);
	return r;
}

static int dm_pr_clear(struct block_device *bdev, u64 key)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	fmode_t mode;
	int r;

	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
	if (r < 0)
		return r;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_clear)
		r = ops->pr_clear(bdev, key);
	else
		r = -EOPNOTSUPP;

	bdput(bdev);
	return r;
}

static const struct pr_ops dm_pr_ops = {
	.pr_register	= dm_pr_register,
	.pr_reserve	= dm_pr_reserve,
	.pr_release	= dm_pr_release,
	.pr_preempt	= dm_pr_preempt,
	.pr_clear	= dm_pr_clear,
};
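/*
 * Illustrative sketch (userspace view, assuming the standard persistent
 * reservation ioctls from <linux/pr.h>): dm_pr_ops above is wired into
 * dm_blk_dops below, so e.g.
 *
 *	struct pr_registration reg = { .new_key = key };
 *
 *	ioctl(fd, IOC_PR_REGISTER, &reg);
 *
 * on a dm device ends up in dm_pr_register(), which fans the registration
 * out to every underlying path via dm_call_pr().
 */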
static const struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
	.pr_ops = &dm_pr_ops,
	.owner = THIS_MODULE
};

static const struct dax_operations dm_dax_ops = {
	.direct_access = dm_dax_direct_access,
	.copy_from_iter = dm_dax_copy_from_iter,
};

/*
 * module hooks
 */
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");

module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");

module_param(dm_numa_node, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");

MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
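/*
 * Illustrative note: the writable parameters above can also be tuned at
 * runtime through sysfs (shell sketch, assuming the standard module
 * parameter location for dm_mod):
 *
 *	echo 32 > /sys/module/dm_mod/parameters/reserved_bio_based_ios
 */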