/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched/signal.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/uio.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/pr.h>

#define DM_MSG_PREFIX "core"

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_IDR(_minor_idr);

static DEFINE_SPINLOCK(_minor_lock);

static void do_deferred_remove(struct work_struct *w);

static DECLARE_WORK(deferred_remove_work, do_deferred_remove);

static struct workqueue_struct *deferred_remove_workqueue;

atomic_t dm_global_event_nr = ATOMIC_INIT(0);
DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);

/*
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	blk_status_t status;
	atomic_t io_count;
	struct bio *bio;
	unsigned long start_time;
	spinlock_t endio_lock;
	struct dm_stats_aux stats_aux;
};

#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7

#define DM_NUMA_NODE NUMA_NO_NODE
static int dm_numa_node = DM_NUMA_NODE;

/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
	mempool_t *io_pool;
	struct bio_set *bs;
};

struct table_device {
	struct list_head list;
	atomic_t count;
	struct dm_dev dm_dev;
};

static struct kmem_cache *_io_cache;
static struct kmem_cache *_rq_tio_cache;
static struct kmem_cache *_rq_cache;

/*
 * Bio-based DM's mempools' reserved IOs set by the user.
105 */ 106 #define RESERVED_BIO_BASED_IOS 16 107 static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS; 108 109 static int __dm_get_module_param_int(int *module_param, int min, int max) 110 { 111 int param = ACCESS_ONCE(*module_param); 112 int modified_param = 0; 113 bool modified = true; 114 115 if (param < min) 116 modified_param = min; 117 else if (param > max) 118 modified_param = max; 119 else 120 modified = false; 121 122 if (modified) { 123 (void)cmpxchg(module_param, param, modified_param); 124 param = modified_param; 125 } 126 127 return param; 128 } 129 130 unsigned __dm_get_module_param(unsigned *module_param, 131 unsigned def, unsigned max) 132 { 133 unsigned param = ACCESS_ONCE(*module_param); 134 unsigned modified_param = 0; 135 136 if (!param) 137 modified_param = def; 138 else if (param > max) 139 modified_param = max; 140 141 if (modified_param) { 142 (void)cmpxchg(module_param, param, modified_param); 143 param = modified_param; 144 } 145 146 return param; 147 } 148 149 unsigned dm_get_reserved_bio_based_ios(void) 150 { 151 return __dm_get_module_param(&reserved_bio_based_ios, 152 RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS); 153 } 154 EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios); 155 156 static unsigned dm_get_numa_node(void) 157 { 158 return __dm_get_module_param_int(&dm_numa_node, 159 DM_NUMA_NODE, num_online_nodes() - 1); 160 } 161 162 static int __init local_init(void) 163 { 164 int r = -ENOMEM; 165 166 /* allocate a slab for the dm_ios */ 167 _io_cache = KMEM_CACHE(dm_io, 0); 168 if (!_io_cache) 169 return r; 170 171 _rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0); 172 if (!_rq_tio_cache) 173 goto out_free_io_cache; 174 175 _rq_cache = kmem_cache_create("dm_old_clone_request", sizeof(struct request), 176 __alignof__(struct request), 0, NULL); 177 if (!_rq_cache) 178 goto out_free_rq_tio_cache; 179 180 r = dm_uevent_init(); 181 if (r) 182 goto out_free_rq_cache; 183 184 deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1); 185 if (!deferred_remove_workqueue) { 186 r = -ENOMEM; 187 goto out_uevent_exit; 188 } 189 190 _major = major; 191 r = register_blkdev(_major, _name); 192 if (r < 0) 193 goto out_free_workqueue; 194 195 if (!_major) 196 _major = r; 197 198 return 0; 199 200 out_free_workqueue: 201 destroy_workqueue(deferred_remove_workqueue); 202 out_uevent_exit: 203 dm_uevent_exit(); 204 out_free_rq_cache: 205 kmem_cache_destroy(_rq_cache); 206 out_free_rq_tio_cache: 207 kmem_cache_destroy(_rq_tio_cache); 208 out_free_io_cache: 209 kmem_cache_destroy(_io_cache); 210 211 return r; 212 } 213 214 static void local_exit(void) 215 { 216 flush_scheduled_work(); 217 destroy_workqueue(deferred_remove_workqueue); 218 219 kmem_cache_destroy(_rq_cache); 220 kmem_cache_destroy(_rq_tio_cache); 221 kmem_cache_destroy(_io_cache); 222 unregister_blkdev(_major, _name); 223 dm_uevent_exit(); 224 225 _major = 0; 226 227 DMINFO("cleaned up"); 228 } 229 230 static int (*_inits[])(void) __initdata = { 231 local_init, 232 dm_target_init, 233 dm_linear_init, 234 dm_stripe_init, 235 dm_io_init, 236 dm_kcopyd_init, 237 dm_interface_init, 238 dm_statistics_init, 239 }; 240 241 static void (*_exits[])(void) = { 242 local_exit, 243 dm_target_exit, 244 dm_linear_exit, 245 dm_stripe_exit, 246 dm_io_exit, 247 dm_kcopyd_exit, 248 dm_interface_exit, 249 dm_statistics_exit, 250 }; 251 252 static int __init dm_init(void) 253 { 254 const int count = ARRAY_SIZE(_inits); 255 256 int r, i; 257 258 for (i = 0; i < count; i++) { 259 r = _inits[i](); 260 if (r) 261 goto bad; 
262 } 263 264 return 0; 265 266 bad: 267 while (i--) 268 _exits[i](); 269 270 return r; 271 } 272 273 static void __exit dm_exit(void) 274 { 275 int i = ARRAY_SIZE(_exits); 276 277 while (i--) 278 _exits[i](); 279 280 /* 281 * Should be empty by this point. 282 */ 283 idr_destroy(&_minor_idr); 284 } 285 286 /* 287 * Block device functions 288 */ 289 int dm_deleting_md(struct mapped_device *md) 290 { 291 return test_bit(DMF_DELETING, &md->flags); 292 } 293 294 static int dm_blk_open(struct block_device *bdev, fmode_t mode) 295 { 296 struct mapped_device *md; 297 298 spin_lock(&_minor_lock); 299 300 md = bdev->bd_disk->private_data; 301 if (!md) 302 goto out; 303 304 if (test_bit(DMF_FREEING, &md->flags) || 305 dm_deleting_md(md)) { 306 md = NULL; 307 goto out; 308 } 309 310 dm_get(md); 311 atomic_inc(&md->open_count); 312 out: 313 spin_unlock(&_minor_lock); 314 315 return md ? 0 : -ENXIO; 316 } 317 318 static void dm_blk_close(struct gendisk *disk, fmode_t mode) 319 { 320 struct mapped_device *md; 321 322 spin_lock(&_minor_lock); 323 324 md = disk->private_data; 325 if (WARN_ON(!md)) 326 goto out; 327 328 if (atomic_dec_and_test(&md->open_count) && 329 (test_bit(DMF_DEFERRED_REMOVE, &md->flags))) 330 queue_work(deferred_remove_workqueue, &deferred_remove_work); 331 332 dm_put(md); 333 out: 334 spin_unlock(&_minor_lock); 335 } 336 337 int dm_open_count(struct mapped_device *md) 338 { 339 return atomic_read(&md->open_count); 340 } 341 342 /* 343 * Guarantees nothing is using the device before it's deleted. 344 */ 345 int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred) 346 { 347 int r = 0; 348 349 spin_lock(&_minor_lock); 350 351 if (dm_open_count(md)) { 352 r = -EBUSY; 353 if (mark_deferred) 354 set_bit(DMF_DEFERRED_REMOVE, &md->flags); 355 } else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags)) 356 r = -EEXIST; 357 else 358 set_bit(DMF_DELETING, &md->flags); 359 360 spin_unlock(&_minor_lock); 361 362 return r; 363 } 364 365 int dm_cancel_deferred_remove(struct mapped_device *md) 366 { 367 int r = 0; 368 369 spin_lock(&_minor_lock); 370 371 if (test_bit(DMF_DELETING, &md->flags)) 372 r = -EBUSY; 373 else 374 clear_bit(DMF_DEFERRED_REMOVE, &md->flags); 375 376 spin_unlock(&_minor_lock); 377 378 return r; 379 } 380 381 static void do_deferred_remove(struct work_struct *w) 382 { 383 dm_deferred_remove(); 384 } 385 386 sector_t dm_get_size(struct mapped_device *md) 387 { 388 return get_capacity(md->disk); 389 } 390 391 struct request_queue *dm_get_md_queue(struct mapped_device *md) 392 { 393 return md->queue; 394 } 395 396 struct dm_stats *dm_get_stats(struct mapped_device *md) 397 { 398 return &md->stats; 399 } 400 401 static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo) 402 { 403 struct mapped_device *md = bdev->bd_disk->private_data; 404 405 return dm_get_geometry(md, geo); 406 } 407 408 static int dm_grab_bdev_for_ioctl(struct mapped_device *md, 409 struct block_device **bdev, 410 fmode_t *mode) 411 { 412 struct dm_target *tgt; 413 struct dm_table *map; 414 int srcu_idx, r; 415 416 retry: 417 r = -ENOTTY; 418 map = dm_get_live_table(md, &srcu_idx); 419 if (!map || !dm_table_get_size(map)) 420 goto out; 421 422 /* We only support devices that have a single target */ 423 if (dm_table_get_num_targets(map) != 1) 424 goto out; 425 426 tgt = dm_table_get_target(map, 0); 427 if (!tgt->type->prepare_ioctl) 428 goto out; 429 430 if (dm_suspended_md(md)) { 431 r = -EAGAIN; 432 goto out; 433 } 434 435 r = 
tgt->type->prepare_ioctl(tgt, bdev, mode); 436 if (r < 0) 437 goto out; 438 439 bdgrab(*bdev); 440 dm_put_live_table(md, srcu_idx); 441 return r; 442 443 out: 444 dm_put_live_table(md, srcu_idx); 445 if (r == -ENOTCONN && !fatal_signal_pending(current)) { 446 msleep(10); 447 goto retry; 448 } 449 return r; 450 } 451 452 static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode, 453 unsigned int cmd, unsigned long arg) 454 { 455 struct mapped_device *md = bdev->bd_disk->private_data; 456 int r; 457 458 r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); 459 if (r < 0) 460 return r; 461 462 if (r > 0) { 463 /* 464 * Target determined this ioctl is being issued against a 465 * subset of the parent bdev; require extra privileges. 466 */ 467 if (!capable(CAP_SYS_RAWIO)) { 468 DMWARN_LIMIT( 469 "%s: sending ioctl %x to DM device without required privilege.", 470 current->comm, cmd); 471 r = -ENOIOCTLCMD; 472 goto out; 473 } 474 } 475 476 r = __blkdev_driver_ioctl(bdev, mode, cmd, arg); 477 out: 478 bdput(bdev); 479 return r; 480 } 481 482 static struct dm_io *alloc_io(struct mapped_device *md) 483 { 484 return mempool_alloc(md->io_pool, GFP_NOIO); 485 } 486 487 static void free_io(struct mapped_device *md, struct dm_io *io) 488 { 489 mempool_free(io, md->io_pool); 490 } 491 492 static void free_tio(struct dm_target_io *tio) 493 { 494 bio_put(&tio->clone); 495 } 496 497 int md_in_flight(struct mapped_device *md) 498 { 499 return atomic_read(&md->pending[READ]) + 500 atomic_read(&md->pending[WRITE]); 501 } 502 503 static void start_io_acct(struct dm_io *io) 504 { 505 struct mapped_device *md = io->md; 506 struct bio *bio = io->bio; 507 int cpu; 508 int rw = bio_data_dir(bio); 509 510 io->start_time = jiffies; 511 512 cpu = part_stat_lock(); 513 part_round_stats(md->queue, cpu, &dm_disk(md)->part0); 514 part_stat_unlock(); 515 atomic_set(&dm_disk(md)->part0.in_flight[rw], 516 atomic_inc_return(&md->pending[rw])); 517 518 if (unlikely(dm_stats_used(&md->stats))) 519 dm_stats_account_io(&md->stats, bio_data_dir(bio), 520 bio->bi_iter.bi_sector, bio_sectors(bio), 521 false, 0, &io->stats_aux); 522 } 523 524 static void end_io_acct(struct dm_io *io) 525 { 526 struct mapped_device *md = io->md; 527 struct bio *bio = io->bio; 528 unsigned long duration = jiffies - io->start_time; 529 int pending; 530 int rw = bio_data_dir(bio); 531 532 generic_end_io_acct(md->queue, rw, &dm_disk(md)->part0, io->start_time); 533 534 if (unlikely(dm_stats_used(&md->stats))) 535 dm_stats_account_io(&md->stats, bio_data_dir(bio), 536 bio->bi_iter.bi_sector, bio_sectors(bio), 537 true, duration, &io->stats_aux); 538 539 /* 540 * After this is decremented the bio must not be touched if it is 541 * a flush. 542 */ 543 pending = atomic_dec_return(&md->pending[rw]); 544 atomic_set(&dm_disk(md)->part0.in_flight[rw], pending); 545 pending += atomic_read(&md->pending[rw^0x1]); 546 547 /* nudge anyone waiting on suspend queue */ 548 if (!pending) 549 wake_up(&md->wait); 550 } 551 552 /* 553 * Add the bio to the list of deferred io. 554 */ 555 static void queue_io(struct mapped_device *md, struct bio *bio) 556 { 557 unsigned long flags; 558 559 spin_lock_irqsave(&md->deferred_lock, flags); 560 bio_list_add(&md->deferred, bio); 561 spin_unlock_irqrestore(&md->deferred_lock, flags); 562 queue_work(md->wq, &md->work); 563 } 564 565 /* 566 * Everyone (including functions in this file), should use this 567 * function to access the md->map field, and make sure they call 568 * dm_put_live_table() when finished. 
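 *
 * A minimal usage sketch (illustrative only; inspect_table() is a
 * placeholder, not a helper defined in this file):
 *
 *	int srcu_idx;
 *	struct dm_table *map;
 *
 *	map = dm_get_live_table(md, &srcu_idx);
 *	if (map)
 *		inspect_table(map);
 *	dm_put_live_table(md, srcu_idx);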
569 */ 570 struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier) 571 { 572 *srcu_idx = srcu_read_lock(&md->io_barrier); 573 574 return srcu_dereference(md->map, &md->io_barrier); 575 } 576 577 void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier) 578 { 579 srcu_read_unlock(&md->io_barrier, srcu_idx); 580 } 581 582 void dm_sync_table(struct mapped_device *md) 583 { 584 synchronize_srcu(&md->io_barrier); 585 synchronize_rcu_expedited(); 586 } 587 588 /* 589 * A fast alternative to dm_get_live_table/dm_put_live_table. 590 * The caller must not block between these two functions. 591 */ 592 static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU) 593 { 594 rcu_read_lock(); 595 return rcu_dereference(md->map); 596 } 597 598 static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU) 599 { 600 rcu_read_unlock(); 601 } 602 603 /* 604 * Open a table device so we can use it as a map destination. 605 */ 606 static int open_table_device(struct table_device *td, dev_t dev, 607 struct mapped_device *md) 608 { 609 static char *_claim_ptr = "I belong to device-mapper"; 610 struct block_device *bdev; 611 612 int r; 613 614 BUG_ON(td->dm_dev.bdev); 615 616 bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _claim_ptr); 617 if (IS_ERR(bdev)) 618 return PTR_ERR(bdev); 619 620 r = bd_link_disk_holder(bdev, dm_disk(md)); 621 if (r) { 622 blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL); 623 return r; 624 } 625 626 td->dm_dev.bdev = bdev; 627 td->dm_dev.dax_dev = dax_get_by_host(bdev->bd_disk->disk_name); 628 return 0; 629 } 630 631 /* 632 * Close a table device that we've been using. 633 */ 634 static void close_table_device(struct table_device *td, struct mapped_device *md) 635 { 636 if (!td->dm_dev.bdev) 637 return; 638 639 bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md)); 640 blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL); 641 put_dax(td->dm_dev.dax_dev); 642 td->dm_dev.bdev = NULL; 643 td->dm_dev.dax_dev = NULL; 644 } 645 646 static struct table_device *find_table_device(struct list_head *l, dev_t dev, 647 fmode_t mode) { 648 struct table_device *td; 649 650 list_for_each_entry(td, l, list) 651 if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode) 652 return td; 653 654 return NULL; 655 } 656 657 int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode, 658 struct dm_dev **result) { 659 int r; 660 struct table_device *td; 661 662 mutex_lock(&md->table_devices_lock); 663 td = find_table_device(&md->table_devices, dev, mode); 664 if (!td) { 665 td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id); 666 if (!td) { 667 mutex_unlock(&md->table_devices_lock); 668 return -ENOMEM; 669 } 670 671 td->dm_dev.mode = mode; 672 td->dm_dev.bdev = NULL; 673 674 if ((r = open_table_device(td, dev, md))) { 675 mutex_unlock(&md->table_devices_lock); 676 kfree(td); 677 return r; 678 } 679 680 format_dev_t(td->dm_dev.name, dev); 681 682 atomic_set(&td->count, 0); 683 list_add(&td->list, &md->table_devices); 684 } 685 atomic_inc(&td->count); 686 mutex_unlock(&md->table_devices_lock); 687 688 *result = &td->dm_dev; 689 return 0; 690 } 691 EXPORT_SYMBOL_GPL(dm_get_table_device); 692 693 void dm_put_table_device(struct mapped_device *md, struct dm_dev *d) 694 { 695 struct table_device *td = container_of(d, struct table_device, dm_dev); 696 697 mutex_lock(&md->table_devices_lock); 698 if (atomic_dec_and_test(&td->count)) { 699 
		close_table_device(td, md);
		list_del(&td->list);
		kfree(td);
	}
	mutex_unlock(&md->table_devices_lock);
}
EXPORT_SYMBOL(dm_put_table_device);

static void free_table_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct table_device *td = list_entry(tmp, struct table_device, list);

		DMWARN("dm_destroy: %s still exists with %d references",
		       td->dm_dev.name, atomic_read(&td->count));
		kfree(td);
	}
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

/*-----------------------------------------------------------------
 * CRUD START:
 * A more elegant solution is in the works that uses the queue
 * merge function; unfortunately there are a couple of changes to
 * the block layer that I want to make for this. So in the
 * interests of getting something for people to use I give
 * you this clearly demarcated crap.
 *---------------------------------------------------------------*/

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, blk_status_t error)
{
	unsigned long flags;
	blk_status_t io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;

	/* Push-back supersedes any I/O errors */
	if (unlikely(error)) {
		spin_lock_irqsave(&io->endio_lock, flags);
		if (!(io->status == BLK_STS_DM_REQUEUE &&
		      __noflush_suspending(md)))
			io->status = error;
		spin_unlock_irqrestore(&io->endio_lock, flags);
	}

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->status == BLK_STS_DM_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md))
				bio_list_add_head(&md->deferred, io->bio);
			else
				/* noflush suspend was interrupted. */
				io->status = BLK_STS_IOERR;
			spin_unlock_irqrestore(&md->deferred_lock, flags);
		}

		io_error = io->status;
		bio = io->bio;
		end_io_acct(io);
		free_io(md, io);

		if (io_error == BLK_STS_DM_REQUEUE)
			return;

		if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
			/*
			 * Preflush done for flush with data, reissue
			 * without REQ_PREFLUSH.
			 */
			bio->bi_opf &= ~REQ_PREFLUSH;
			queue_io(md, bio);
		} else {
			/* done with normal IO or empty flush */
			bio->bi_status = io_error;
			bio_endio(bio);
		}
	}
}

void disable_write_same(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE SAME, disable it */
	limits->max_write_same_sectors = 0;
}

void disable_write_zeroes(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE ZEROES, disable it */
	limits->max_write_zeroes_sectors = 0;
}

static void clone_endio(struct bio *bio)
{
	blk_status_t error = bio->bi_status;
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (unlikely(error == BLK_STS_TARGET)) {
		if (bio_op(bio) == REQ_OP_WRITE_SAME &&
		    !bio->bi_disk->queue->limits.max_write_same_sectors)
			disable_write_same(md);
		if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
		    !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
			disable_write_zeroes(md);
	}

	if (endio) {
		int r = endio(tio->ti, bio, &error);
		switch (r) {
		case DM_ENDIO_REQUEUE:
			error = BLK_STS_DM_REQUEUE;
			/*FALLTHRU*/
		case DM_ENDIO_DONE:
			break;
		case DM_ENDIO_INCOMPLETE:
			/* The target will handle the io */
			return;
		default:
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	free_tio(tio);
	dec_pending(io, error);
}

/*
 * Return maximum size of I/O possible at the supplied sector up to the current
 * target boundary.
 */
static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
{
	sector_t target_offset = dm_target_offset(ti, sector);

	return ti->len - target_offset;
}

static sector_t max_io_len(sector_t sector, struct dm_target *ti)
{
	sector_t len = max_io_len_target_boundary(sector, ti);
	sector_t offset, max_len;

	/*
	 * Does the target need to split even further?
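	 *
	 * The clone may not cross a ti->max_io_len boundary. As a worked
	 * example (numbers are illustrative only): with ti->max_io_len = 8
	 * and a target-relative offset of 3 sectors, at most
	 * 8 - (3 % 8) = 5 sectors can be issued before the next boundary.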
890 */ 891 if (ti->max_io_len) { 892 offset = dm_target_offset(ti, sector); 893 if (unlikely(ti->max_io_len & (ti->max_io_len - 1))) 894 max_len = sector_div(offset, ti->max_io_len); 895 else 896 max_len = offset & (ti->max_io_len - 1); 897 max_len = ti->max_io_len - max_len; 898 899 if (len > max_len) 900 len = max_len; 901 } 902 903 return len; 904 } 905 906 int dm_set_target_max_io_len(struct dm_target *ti, sector_t len) 907 { 908 if (len > UINT_MAX) { 909 DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)", 910 (unsigned long long)len, UINT_MAX); 911 ti->error = "Maximum size of target IO is too large"; 912 return -EINVAL; 913 } 914 915 ti->max_io_len = (uint32_t) len; 916 917 return 0; 918 } 919 EXPORT_SYMBOL_GPL(dm_set_target_max_io_len); 920 921 static struct dm_target *dm_dax_get_live_target(struct mapped_device *md, 922 sector_t sector, int *srcu_idx) 923 { 924 struct dm_table *map; 925 struct dm_target *ti; 926 927 map = dm_get_live_table(md, srcu_idx); 928 if (!map) 929 return NULL; 930 931 ti = dm_table_find_target(map, sector); 932 if (!dm_target_is_valid(ti)) 933 return NULL; 934 935 return ti; 936 } 937 938 static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, 939 long nr_pages, void **kaddr, pfn_t *pfn) 940 { 941 struct mapped_device *md = dax_get_private(dax_dev); 942 sector_t sector = pgoff * PAGE_SECTORS; 943 struct dm_target *ti; 944 long len, ret = -EIO; 945 int srcu_idx; 946 947 ti = dm_dax_get_live_target(md, sector, &srcu_idx); 948 949 if (!ti) 950 goto out; 951 if (!ti->type->direct_access) 952 goto out; 953 len = max_io_len(sector, ti) / PAGE_SECTORS; 954 if (len < 1) 955 goto out; 956 nr_pages = min(len, nr_pages); 957 if (ti->type->direct_access) 958 ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn); 959 960 out: 961 dm_put_live_table(md, srcu_idx); 962 963 return ret; 964 } 965 966 static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, 967 void *addr, size_t bytes, struct iov_iter *i) 968 { 969 struct mapped_device *md = dax_get_private(dax_dev); 970 sector_t sector = pgoff * PAGE_SECTORS; 971 struct dm_target *ti; 972 long ret = 0; 973 int srcu_idx; 974 975 ti = dm_dax_get_live_target(md, sector, &srcu_idx); 976 977 if (!ti) 978 goto out; 979 if (!ti->type->dax_copy_from_iter) { 980 ret = copy_from_iter(addr, bytes, i); 981 goto out; 982 } 983 ret = ti->type->dax_copy_from_iter(ti, pgoff, addr, bytes, i); 984 out: 985 dm_put_live_table(md, srcu_idx); 986 987 return ret; 988 } 989 990 /* 991 * A target may call dm_accept_partial_bio only from the map routine. It is 992 * allowed for all bio types except REQ_PREFLUSH. 993 * 994 * dm_accept_partial_bio informs the dm that the target only wants to process 995 * additional n_sectors sectors of the bio and the rest of the data should be 996 * sent in a next bio. 997 * 998 * A diagram that explains the arithmetics: 999 * +--------------------+---------------+-------+ 1000 * | 1 | 2 | 3 | 1001 * +--------------------+---------------+-------+ 1002 * 1003 * <-------------- *tio->len_ptr ---------------> 1004 * <------- bi_size -------> 1005 * <-- n_sectors --> 1006 * 1007 * Region 1 was already iterated over with bio_advance or similar function. 1008 * (it may be empty if the target doesn't use bio_advance) 1009 * Region 2 is the remaining bio size that the target wants to process. 
1010 * (it may be empty if region 1 is non-empty, although there is no reason 1011 * to make it empty) 1012 * The target requires that region 3 is to be sent in the next bio. 1013 * 1014 * If the target wants to receive multiple copies of the bio (via num_*bios, etc), 1015 * the partially processed part (the sum of regions 1+2) must be the same for all 1016 * copies of the bio. 1017 */ 1018 void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors) 1019 { 1020 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); 1021 unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT; 1022 BUG_ON(bio->bi_opf & REQ_PREFLUSH); 1023 BUG_ON(bi_size > *tio->len_ptr); 1024 BUG_ON(n_sectors > bi_size); 1025 *tio->len_ptr -= bi_size - n_sectors; 1026 bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT; 1027 } 1028 EXPORT_SYMBOL_GPL(dm_accept_partial_bio); 1029 1030 /* 1031 * The zone descriptors obtained with a zone report indicate 1032 * zone positions within the target device. The zone descriptors 1033 * must be remapped to match their position within the dm device. 1034 * A target may call dm_remap_zone_report after completion of a 1035 * REQ_OP_ZONE_REPORT bio to remap the zone descriptors obtained 1036 * from the target device mapping to the dm device. 1037 */ 1038 void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start) 1039 { 1040 #ifdef CONFIG_BLK_DEV_ZONED 1041 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); 1042 struct bio *report_bio = tio->io->bio; 1043 struct blk_zone_report_hdr *hdr = NULL; 1044 struct blk_zone *zone; 1045 unsigned int nr_rep = 0; 1046 unsigned int ofst; 1047 struct bio_vec bvec; 1048 struct bvec_iter iter; 1049 void *addr; 1050 1051 if (bio->bi_status) 1052 return; 1053 1054 /* 1055 * Remap the start sector of the reported zones. For sequential zones, 1056 * also remap the write pointer position. 1057 */ 1058 bio_for_each_segment(bvec, report_bio, iter) { 1059 addr = kmap_atomic(bvec.bv_page); 1060 1061 /* Remember the report header in the first page */ 1062 if (!hdr) { 1063 hdr = addr; 1064 ofst = sizeof(struct blk_zone_report_hdr); 1065 } else 1066 ofst = 0; 1067 1068 /* Set zones start sector */ 1069 while (hdr->nr_zones && ofst < bvec.bv_len) { 1070 zone = addr + ofst; 1071 if (zone->start >= start + ti->len) { 1072 hdr->nr_zones = 0; 1073 break; 1074 } 1075 zone->start = zone->start + ti->begin - start; 1076 if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL) { 1077 if (zone->cond == BLK_ZONE_COND_FULL) 1078 zone->wp = zone->start + zone->len; 1079 else if (zone->cond == BLK_ZONE_COND_EMPTY) 1080 zone->wp = zone->start; 1081 else 1082 zone->wp = zone->wp + ti->begin - start; 1083 } 1084 ofst += sizeof(struct blk_zone); 1085 hdr->nr_zones--; 1086 nr_rep++; 1087 } 1088 1089 if (addr != hdr) 1090 kunmap_atomic(addr); 1091 1092 if (!hdr->nr_zones) 1093 break; 1094 } 1095 1096 if (hdr) { 1097 hdr->nr_zones = nr_rep; 1098 kunmap_atomic(hdr); 1099 } 1100 1101 bio_advance(report_bio, report_bio->bi_iter.bi_size); 1102 1103 #else /* !CONFIG_BLK_DEV_ZONED */ 1104 bio->bi_status = BLK_STS_NOTSUPP; 1105 #endif 1106 } 1107 EXPORT_SYMBOL_GPL(dm_remap_zone_report); 1108 1109 /* 1110 * Flush current->bio_list when the target map method blocks. 1111 * This fixes deadlocks in snapshot and possibly in other targets. 
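 *
 * The pattern used around a ->map call (see __map_bio() below) is,
 * in sketch form:
 *
 *	struct dm_offload o;
 *
 *	dm_offload_start(&o);
 *	r = ti->type->map(ti, clone);
 *	dm_offload_end(&o);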
 */
struct dm_offload {
	struct blk_plug plug;
	struct blk_plug_cb cb;
};

static void flush_current_bio_list(struct blk_plug_cb *cb, bool from_schedule)
{
	struct dm_offload *o = container_of(cb, struct dm_offload, cb);
	struct bio_list list;
	struct bio *bio;
	int i;

	INIT_LIST_HEAD(&o->cb.list);

	if (unlikely(!current->bio_list))
		return;

	for (i = 0; i < 2; i++) {
		list = current->bio_list[i];
		bio_list_init(&current->bio_list[i]);

		while ((bio = bio_list_pop(&list))) {
			struct bio_set *bs = bio->bi_pool;
			if (unlikely(!bs) || bs == fs_bio_set ||
			    !bs->rescue_workqueue) {
				bio_list_add(&current->bio_list[i], bio);
				continue;
			}

			spin_lock(&bs->rescue_lock);
			bio_list_add(&bs->rescue_list, bio);
			queue_work(bs->rescue_workqueue, &bs->rescue_work);
			spin_unlock(&bs->rescue_lock);
		}
	}
}

static void dm_offload_start(struct dm_offload *o)
{
	blk_start_plug(&o->plug);
	o->cb.callback = flush_current_bio_list;
	list_add(&o->cb.list, &current->plug->cb_list);
}

static void dm_offload_end(struct dm_offload *o)
{
	list_del(&o->cb.list);
	blk_finish_plug(&o->plug);
}

static void __map_bio(struct dm_target_io *tio)
{
	int r;
	sector_t sector;
	struct dm_offload o;
	struct bio *clone = &tio->clone;
	struct dm_target *ti = tio->ti;

	clone->bi_end_io = clone_endio;

	/*
	 * Map the clone. If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&tio->io->io_count);
	sector = clone->bi_iter.bi_sector;

	dm_offload_start(&o);
	r = ti->type->map(ti, clone);
	dm_offload_end(&o);

	switch (r) {
	case DM_MAPIO_SUBMITTED:
		break;
	case DM_MAPIO_REMAPPED:
		/* the bio has been remapped so dispatch it */
		trace_block_bio_remap(clone->bi_disk->queue, clone,
				      bio_dev(tio->io->bio), sector);
		generic_make_request(clone);
		break;
	case DM_MAPIO_KILL:
		dec_pending(tio->io, BLK_STS_IOERR);
		free_tio(tio);
		break;
	case DM_MAPIO_REQUEUE:
		dec_pending(tio->io, BLK_STS_DM_REQUEUE);
		free_tio(tio);
		break;
	default:
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}
}

struct clone_info {
	struct mapped_device *md;
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	unsigned sector_count;
};

static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
{
	bio->bi_iter.bi_sector = sector;
	bio->bi_iter.bi_size = to_bytes(len);
}

/*
 * Creates a bio that consists of range of complete bvecs.
1225 */ 1226 static int clone_bio(struct dm_target_io *tio, struct bio *bio, 1227 sector_t sector, unsigned len) 1228 { 1229 struct bio *clone = &tio->clone; 1230 1231 __bio_clone_fast(clone, bio); 1232 1233 if (unlikely(bio_integrity(bio) != NULL)) { 1234 int r; 1235 1236 if (unlikely(!dm_target_has_integrity(tio->ti->type) && 1237 !dm_target_passes_integrity(tio->ti->type))) { 1238 DMWARN("%s: the target %s doesn't support integrity data.", 1239 dm_device_name(tio->io->md), 1240 tio->ti->type->name); 1241 return -EIO; 1242 } 1243 1244 r = bio_integrity_clone(clone, bio, GFP_NOIO); 1245 if (r < 0) 1246 return r; 1247 } 1248 1249 if (bio_op(bio) != REQ_OP_ZONE_REPORT) 1250 bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector)); 1251 clone->bi_iter.bi_size = to_bytes(len); 1252 1253 if (unlikely(bio_integrity(bio) != NULL)) 1254 bio_integrity_trim(clone); 1255 1256 return 0; 1257 } 1258 1259 static struct dm_target_io *alloc_tio(struct clone_info *ci, 1260 struct dm_target *ti, 1261 unsigned target_bio_nr) 1262 { 1263 struct dm_target_io *tio; 1264 struct bio *clone; 1265 1266 clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs); 1267 tio = container_of(clone, struct dm_target_io, clone); 1268 1269 tio->io = ci->io; 1270 tio->ti = ti; 1271 tio->target_bio_nr = target_bio_nr; 1272 1273 return tio; 1274 } 1275 1276 static void __clone_and_map_simple_bio(struct clone_info *ci, 1277 struct dm_target *ti, 1278 unsigned target_bio_nr, unsigned *len) 1279 { 1280 struct dm_target_io *tio = alloc_tio(ci, ti, target_bio_nr); 1281 struct bio *clone = &tio->clone; 1282 1283 tio->len_ptr = len; 1284 1285 __bio_clone_fast(clone, ci->bio); 1286 if (len) 1287 bio_setup_sector(clone, ci->sector, *len); 1288 1289 __map_bio(tio); 1290 } 1291 1292 static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti, 1293 unsigned num_bios, unsigned *len) 1294 { 1295 unsigned target_bio_nr; 1296 1297 for (target_bio_nr = 0; target_bio_nr < num_bios; target_bio_nr++) 1298 __clone_and_map_simple_bio(ci, ti, target_bio_nr, len); 1299 } 1300 1301 static int __send_empty_flush(struct clone_info *ci) 1302 { 1303 unsigned target_nr = 0; 1304 struct dm_target *ti; 1305 1306 BUG_ON(bio_has_data(ci->bio)); 1307 while ((ti = dm_table_get_target(ci->map, target_nr++))) 1308 __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL); 1309 1310 return 0; 1311 } 1312 1313 static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti, 1314 sector_t sector, unsigned *len) 1315 { 1316 struct bio *bio = ci->bio; 1317 struct dm_target_io *tio; 1318 unsigned target_bio_nr; 1319 unsigned num_target_bios = 1; 1320 int r = 0; 1321 1322 /* 1323 * Does the target want to receive duplicate copies of the bio? 
1324 */ 1325 if (bio_data_dir(bio) == WRITE && ti->num_write_bios) 1326 num_target_bios = ti->num_write_bios(ti, bio); 1327 1328 for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) { 1329 tio = alloc_tio(ci, ti, target_bio_nr); 1330 tio->len_ptr = len; 1331 r = clone_bio(tio, bio, sector, *len); 1332 if (r < 0) { 1333 free_tio(tio); 1334 break; 1335 } 1336 __map_bio(tio); 1337 } 1338 1339 return r; 1340 } 1341 1342 typedef unsigned (*get_num_bios_fn)(struct dm_target *ti); 1343 1344 static unsigned get_num_discard_bios(struct dm_target *ti) 1345 { 1346 return ti->num_discard_bios; 1347 } 1348 1349 static unsigned get_num_write_same_bios(struct dm_target *ti) 1350 { 1351 return ti->num_write_same_bios; 1352 } 1353 1354 static unsigned get_num_write_zeroes_bios(struct dm_target *ti) 1355 { 1356 return ti->num_write_zeroes_bios; 1357 } 1358 1359 typedef bool (*is_split_required_fn)(struct dm_target *ti); 1360 1361 static bool is_split_required_for_discard(struct dm_target *ti) 1362 { 1363 return ti->split_discard_bios; 1364 } 1365 1366 static int __send_changing_extent_only(struct clone_info *ci, 1367 get_num_bios_fn get_num_bios, 1368 is_split_required_fn is_split_required) 1369 { 1370 struct dm_target *ti; 1371 unsigned len; 1372 unsigned num_bios; 1373 1374 do { 1375 ti = dm_table_find_target(ci->map, ci->sector); 1376 if (!dm_target_is_valid(ti)) 1377 return -EIO; 1378 1379 /* 1380 * Even though the device advertised support for this type of 1381 * request, that does not mean every target supports it, and 1382 * reconfiguration might also have changed that since the 1383 * check was performed. 1384 */ 1385 num_bios = get_num_bios ? get_num_bios(ti) : 0; 1386 if (!num_bios) 1387 return -EOPNOTSUPP; 1388 1389 if (is_split_required && !is_split_required(ti)) 1390 len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti)); 1391 else 1392 len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti)); 1393 1394 __send_duplicate_bios(ci, ti, num_bios, &len); 1395 1396 ci->sector += len; 1397 } while (ci->sector_count -= len); 1398 1399 return 0; 1400 } 1401 1402 static int __send_discard(struct clone_info *ci) 1403 { 1404 return __send_changing_extent_only(ci, get_num_discard_bios, 1405 is_split_required_for_discard); 1406 } 1407 1408 static int __send_write_same(struct clone_info *ci) 1409 { 1410 return __send_changing_extent_only(ci, get_num_write_same_bios, NULL); 1411 } 1412 1413 static int __send_write_zeroes(struct clone_info *ci) 1414 { 1415 return __send_changing_extent_only(ci, get_num_write_zeroes_bios, NULL); 1416 } 1417 1418 /* 1419 * Select the correct strategy for processing a non-flush bio. 
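 *
 * In outline (the function body below is the authoritative logic):
 *
 *	REQ_OP_DISCARD       -> __send_discard()
 *	REQ_OP_WRITE_SAME    -> __send_write_same()
 *	REQ_OP_WRITE_ZEROES  -> __send_write_zeroes()
 *	anything else        -> __clone_and_map_data_bio() on the target
 *	                        that covers ci->sector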
 */
static int __split_and_process_non_flush(struct clone_info *ci)
{
	struct bio *bio = ci->bio;
	struct dm_target *ti;
	unsigned len;
	int r;

	if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
		return __send_discard(ci);
	else if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
		return __send_write_same(ci);
	else if (unlikely(bio_op(bio) == REQ_OP_WRITE_ZEROES))
		return __send_write_zeroes(ci);

	ti = dm_table_find_target(ci->map, ci->sector);
	if (!dm_target_is_valid(ti))
		return -EIO;

	if (bio_op(bio) == REQ_OP_ZONE_REPORT)
		len = ci->sector_count;
	else
		len = min_t(sector_t, max_io_len(ci->sector, ti),
			    ci->sector_count);

	r = __clone_and_map_data_bio(ci, ti, ci->sector, &len);
	if (r < 0)
		return r;

	ci->sector += len;
	ci->sector_count -= len;

	return 0;
}

/*
 * Entry point to split a bio into clones and submit them to the targets.
 */
static void __split_and_process_bio(struct mapped_device *md,
				    struct dm_table *map, struct bio *bio)
{
	struct clone_info ci;
	int error = 0;

	if (unlikely(!map)) {
		bio_io_error(bio);
		return;
	}

	ci.map = map;
	ci.md = md;
	ci.io = alloc_io(md);
	ci.io->status = 0;
	atomic_set(&ci.io->io_count, 1);
	ci.io->bio = bio;
	ci.io->md = md;
	spin_lock_init(&ci.io->endio_lock);
	ci.sector = bio->bi_iter.bi_sector;

	start_io_acct(ci.io);

	if (bio->bi_opf & REQ_PREFLUSH) {
		ci.bio = &ci.md->flush_bio;
		ci.sector_count = 0;
		error = __send_empty_flush(&ci);
		/* dec_pending submits any data associated with flush */
	} else if (bio_op(bio) == REQ_OP_ZONE_RESET) {
		ci.bio = bio;
		ci.sector_count = 0;
		error = __split_and_process_non_flush(&ci);
	} else {
		ci.bio = bio;
		ci.sector_count = bio_sectors(bio);
		while (ci.sector_count && !error)
			error = __split_and_process_non_flush(&ci);
	}

	/* drop the extra reference count */
	dec_pending(ci.io, errno_to_blk_status(error));
}
/*-----------------------------------------------------------------
 * CRUD END
 *---------------------------------------------------------------*/

/*
 * The request function that just remaps the bio built up by
 * dm_merge_bvec.
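 *
 * Bio submission flow for bio-based devices, in sketch form:
 *
 *	dm_make_request()
 *	  __split_and_process_bio()
 *	    __split_and_process_non_flush() / __send_empty_flush()
 *	      __clone_and_map_data_bio() / __send_duplicate_bios()
 *	        __map_bio() -> ti->type->map()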
1507 */ 1508 static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio) 1509 { 1510 int rw = bio_data_dir(bio); 1511 struct mapped_device *md = q->queuedata; 1512 int srcu_idx; 1513 struct dm_table *map; 1514 1515 map = dm_get_live_table(md, &srcu_idx); 1516 1517 generic_start_io_acct(q, rw, bio_sectors(bio), &dm_disk(md)->part0); 1518 1519 /* if we're suspended, we have to queue this io for later */ 1520 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { 1521 dm_put_live_table(md, srcu_idx); 1522 1523 if (!(bio->bi_opf & REQ_RAHEAD)) 1524 queue_io(md, bio); 1525 else 1526 bio_io_error(bio); 1527 return BLK_QC_T_NONE; 1528 } 1529 1530 __split_and_process_bio(md, map, bio); 1531 dm_put_live_table(md, srcu_idx); 1532 return BLK_QC_T_NONE; 1533 } 1534 1535 static int dm_any_congested(void *congested_data, int bdi_bits) 1536 { 1537 int r = bdi_bits; 1538 struct mapped_device *md = congested_data; 1539 struct dm_table *map; 1540 1541 if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 1542 if (dm_request_based(md)) { 1543 /* 1544 * With request-based DM we only need to check the 1545 * top-level queue for congestion. 1546 */ 1547 r = md->queue->backing_dev_info->wb.state & bdi_bits; 1548 } else { 1549 map = dm_get_live_table_fast(md); 1550 if (map) 1551 r = dm_table_any_congested(map, bdi_bits); 1552 dm_put_live_table_fast(md); 1553 } 1554 } 1555 1556 return r; 1557 } 1558 1559 /*----------------------------------------------------------------- 1560 * An IDR is used to keep track of allocated minor numbers. 1561 *---------------------------------------------------------------*/ 1562 static void free_minor(int minor) 1563 { 1564 spin_lock(&_minor_lock); 1565 idr_remove(&_minor_idr, minor); 1566 spin_unlock(&_minor_lock); 1567 } 1568 1569 /* 1570 * See if the device with a specific minor # is free. 1571 */ 1572 static int specific_minor(int minor) 1573 { 1574 int r; 1575 1576 if (minor >= (1 << MINORBITS)) 1577 return -EINVAL; 1578 1579 idr_preload(GFP_KERNEL); 1580 spin_lock(&_minor_lock); 1581 1582 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT); 1583 1584 spin_unlock(&_minor_lock); 1585 idr_preload_end(); 1586 if (r < 0) 1587 return r == -ENOSPC ? -EBUSY : r; 1588 return 0; 1589 } 1590 1591 static int next_free_minor(int *minor) 1592 { 1593 int r; 1594 1595 idr_preload(GFP_KERNEL); 1596 spin_lock(&_minor_lock); 1597 1598 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT); 1599 1600 spin_unlock(&_minor_lock); 1601 idr_preload_end(); 1602 if (r < 0) 1603 return r; 1604 *minor = r; 1605 return 0; 1606 } 1607 1608 static const struct block_device_operations dm_blk_dops; 1609 static const struct dax_operations dm_dax_ops; 1610 1611 static void dm_wq_work(struct work_struct *work); 1612 1613 void dm_init_md_queue(struct mapped_device *md) 1614 { 1615 /* 1616 * Request-based dm devices cannot be stacked on top of bio-based dm 1617 * devices. The type of this dm device may not have been decided yet. 1618 * The type is decided at the first table loading time. 1619 * To prevent problematic device stacking, clear the queue flag 1620 * for request stacking support until then. 1621 * 1622 * This queue is new, so no concurrency on the queue_flags. 
1623 */ 1624 queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue); 1625 1626 /* 1627 * Initialize data that will only be used by a non-blk-mq DM queue 1628 * - must do so here (in alloc_dev callchain) before queue is used 1629 */ 1630 md->queue->queuedata = md; 1631 md->queue->backing_dev_info->congested_data = md; 1632 } 1633 1634 void dm_init_normal_md_queue(struct mapped_device *md) 1635 { 1636 md->use_blk_mq = false; 1637 dm_init_md_queue(md); 1638 1639 /* 1640 * Initialize aspects of queue that aren't relevant for blk-mq 1641 */ 1642 md->queue->backing_dev_info->congested_fn = dm_any_congested; 1643 } 1644 1645 static void cleanup_mapped_device(struct mapped_device *md) 1646 { 1647 if (md->wq) 1648 destroy_workqueue(md->wq); 1649 if (md->kworker_task) 1650 kthread_stop(md->kworker_task); 1651 mempool_destroy(md->io_pool); 1652 if (md->bs) 1653 bioset_free(md->bs); 1654 1655 if (md->dax_dev) { 1656 kill_dax(md->dax_dev); 1657 put_dax(md->dax_dev); 1658 md->dax_dev = NULL; 1659 } 1660 1661 if (md->disk) { 1662 spin_lock(&_minor_lock); 1663 md->disk->private_data = NULL; 1664 spin_unlock(&_minor_lock); 1665 del_gendisk(md->disk); 1666 put_disk(md->disk); 1667 } 1668 1669 if (md->queue) 1670 blk_cleanup_queue(md->queue); 1671 1672 cleanup_srcu_struct(&md->io_barrier); 1673 1674 if (md->bdev) { 1675 bdput(md->bdev); 1676 md->bdev = NULL; 1677 } 1678 1679 dm_mq_cleanup_mapped_device(md); 1680 } 1681 1682 /* 1683 * Allocate and initialise a blank device with a given minor. 1684 */ 1685 static struct mapped_device *alloc_dev(int minor) 1686 { 1687 int r, numa_node_id = dm_get_numa_node(); 1688 struct dax_device *dax_dev; 1689 struct mapped_device *md; 1690 void *old_md; 1691 1692 md = kzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id); 1693 if (!md) { 1694 DMWARN("unable to allocate device, out of memory."); 1695 return NULL; 1696 } 1697 1698 if (!try_module_get(THIS_MODULE)) 1699 goto bad_module_get; 1700 1701 /* get a minor number for the dev */ 1702 if (minor == DM_ANY_MINOR) 1703 r = next_free_minor(&minor); 1704 else 1705 r = specific_minor(minor); 1706 if (r < 0) 1707 goto bad_minor; 1708 1709 r = init_srcu_struct(&md->io_barrier); 1710 if (r < 0) 1711 goto bad_io_barrier; 1712 1713 md->numa_node_id = numa_node_id; 1714 md->use_blk_mq = dm_use_blk_mq_default(); 1715 md->init_tio_pdu = false; 1716 md->type = DM_TYPE_NONE; 1717 mutex_init(&md->suspend_lock); 1718 mutex_init(&md->type_lock); 1719 mutex_init(&md->table_devices_lock); 1720 spin_lock_init(&md->deferred_lock); 1721 atomic_set(&md->holders, 1); 1722 atomic_set(&md->open_count, 0); 1723 atomic_set(&md->event_nr, 0); 1724 atomic_set(&md->uevent_seq, 0); 1725 INIT_LIST_HEAD(&md->uevent_list); 1726 INIT_LIST_HEAD(&md->table_devices); 1727 spin_lock_init(&md->uevent_lock); 1728 1729 md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id); 1730 if (!md->queue) 1731 goto bad; 1732 1733 dm_init_md_queue(md); 1734 1735 md->disk = alloc_disk_node(1, numa_node_id); 1736 if (!md->disk) 1737 goto bad; 1738 1739 atomic_set(&md->pending[0], 0); 1740 atomic_set(&md->pending[1], 0); 1741 init_waitqueue_head(&md->wait); 1742 INIT_WORK(&md->work, dm_wq_work); 1743 init_waitqueue_head(&md->eventq); 1744 init_completion(&md->kobj_holder.completion); 1745 md->kworker_task = NULL; 1746 1747 md->disk->major = _major; 1748 md->disk->first_minor = minor; 1749 md->disk->fops = &dm_blk_dops; 1750 md->disk->queue = md->queue; 1751 md->disk->private_data = md; 1752 sprintf(md->disk->disk_name, "dm-%d", minor); 1753 1754 dax_dev = alloc_dax(md, 
md->disk->disk_name, &dm_dax_ops); 1755 if (!dax_dev) 1756 goto bad; 1757 md->dax_dev = dax_dev; 1758 1759 add_disk(md->disk); 1760 format_dev_t(md->name, MKDEV(_major, minor)); 1761 1762 md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0); 1763 if (!md->wq) 1764 goto bad; 1765 1766 md->bdev = bdget_disk(md->disk, 0); 1767 if (!md->bdev) 1768 goto bad; 1769 1770 bio_init(&md->flush_bio, NULL, 0); 1771 bio_set_dev(&md->flush_bio, md->bdev); 1772 md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC; 1773 1774 dm_stats_init(&md->stats); 1775 1776 /* Populate the mapping, nobody knows we exist yet */ 1777 spin_lock(&_minor_lock); 1778 old_md = idr_replace(&_minor_idr, md, minor); 1779 spin_unlock(&_minor_lock); 1780 1781 BUG_ON(old_md != MINOR_ALLOCED); 1782 1783 return md; 1784 1785 bad: 1786 cleanup_mapped_device(md); 1787 bad_io_barrier: 1788 free_minor(minor); 1789 bad_minor: 1790 module_put(THIS_MODULE); 1791 bad_module_get: 1792 kfree(md); 1793 return NULL; 1794 } 1795 1796 static void unlock_fs(struct mapped_device *md); 1797 1798 static void free_dev(struct mapped_device *md) 1799 { 1800 int minor = MINOR(disk_devt(md->disk)); 1801 1802 unlock_fs(md); 1803 1804 cleanup_mapped_device(md); 1805 1806 free_table_devices(&md->table_devices); 1807 dm_stats_cleanup(&md->stats); 1808 free_minor(minor); 1809 1810 module_put(THIS_MODULE); 1811 kfree(md); 1812 } 1813 1814 static void __bind_mempools(struct mapped_device *md, struct dm_table *t) 1815 { 1816 struct dm_md_mempools *p = dm_table_get_md_mempools(t); 1817 1818 if (md->bs) { 1819 /* The md already has necessary mempools. */ 1820 if (dm_table_bio_based(t)) { 1821 /* 1822 * Reload bioset because front_pad may have changed 1823 * because a different table was loaded. 1824 */ 1825 bioset_free(md->bs); 1826 md->bs = p->bs; 1827 p->bs = NULL; 1828 } 1829 /* 1830 * There's no need to reload with request-based dm 1831 * because the size of front_pad doesn't change. 1832 * Note for future: If you are to reload bioset, 1833 * prep-ed requests in the queue may refer 1834 * to bio from the old bioset, so you must walk 1835 * through the queue to unprep. 1836 */ 1837 goto out; 1838 } 1839 1840 BUG_ON(!p || md->io_pool || md->bs); 1841 1842 md->io_pool = p->io_pool; 1843 p->io_pool = NULL; 1844 md->bs = p->bs; 1845 p->bs = NULL; 1846 1847 out: 1848 /* mempool bind completed, no longer need any mempools in the table */ 1849 dm_table_free_md_mempools(t); 1850 } 1851 1852 /* 1853 * Bind a table to the device. 1854 */ 1855 static void event_callback(void *context) 1856 { 1857 unsigned long flags; 1858 LIST_HEAD(uevents); 1859 struct mapped_device *md = (struct mapped_device *) context; 1860 1861 spin_lock_irqsave(&md->uevent_lock, flags); 1862 list_splice_init(&md->uevent_list, &uevents); 1863 spin_unlock_irqrestore(&md->uevent_lock, flags); 1864 1865 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); 1866 1867 atomic_inc(&md->event_nr); 1868 atomic_inc(&dm_global_event_nr); 1869 wake_up(&md->eventq); 1870 wake_up(&dm_global_eventq); 1871 } 1872 1873 /* 1874 * Protected by md->suspend_lock obtained by dm_swap_table(). 1875 */ 1876 static void __set_size(struct mapped_device *md, sector_t size) 1877 { 1878 lockdep_assert_held(&md->suspend_lock); 1879 1880 set_capacity(md->disk, size); 1881 1882 i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT); 1883 } 1884 1885 /* 1886 * Returns old map, which caller must destroy. 
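 *
 * A caller sketch (illustrative; the old table must eventually be
 * released with dm_table_destroy()):
 *
 *	old_map = __bind(md, table, &limits);
 *	if (old_map)
 *		dm_table_destroy(old_map);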
1887 */ 1888 static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, 1889 struct queue_limits *limits) 1890 { 1891 struct dm_table *old_map; 1892 struct request_queue *q = md->queue; 1893 sector_t size; 1894 1895 lockdep_assert_held(&md->suspend_lock); 1896 1897 size = dm_table_get_size(t); 1898 1899 /* 1900 * Wipe any geometry if the size of the table changed. 1901 */ 1902 if (size != dm_get_size(md)) 1903 memset(&md->geometry, 0, sizeof(md->geometry)); 1904 1905 __set_size(md, size); 1906 1907 dm_table_event_callback(t, event_callback, md); 1908 1909 /* 1910 * The queue hasn't been stopped yet, if the old table type wasn't 1911 * for request-based during suspension. So stop it to prevent 1912 * I/O mapping before resume. 1913 * This must be done before setting the queue restrictions, 1914 * because request-based dm may be run just after the setting. 1915 */ 1916 if (dm_table_request_based(t)) { 1917 dm_stop_queue(q); 1918 /* 1919 * Leverage the fact that request-based DM targets are 1920 * immutable singletons and establish md->immutable_target 1921 * - used to optimize both dm_request_fn and dm_mq_queue_rq 1922 */ 1923 md->immutable_target = dm_table_get_immutable_target(t); 1924 } 1925 1926 __bind_mempools(md, t); 1927 1928 old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 1929 rcu_assign_pointer(md->map, (void *)t); 1930 md->immutable_target_type = dm_table_get_immutable_target_type(t); 1931 1932 dm_table_set_restrictions(t, q, limits); 1933 if (old_map) 1934 dm_sync_table(md); 1935 1936 return old_map; 1937 } 1938 1939 /* 1940 * Returns unbound table for the caller to free. 1941 */ 1942 static struct dm_table *__unbind(struct mapped_device *md) 1943 { 1944 struct dm_table *map = rcu_dereference_protected(md->map, 1); 1945 1946 if (!map) 1947 return NULL; 1948 1949 dm_table_event_callback(map, NULL, NULL); 1950 RCU_INIT_POINTER(md->map, NULL); 1951 dm_sync_table(md); 1952 1953 return map; 1954 } 1955 1956 /* 1957 * Constructor for a new device. 1958 */ 1959 int dm_create(int minor, struct mapped_device **result) 1960 { 1961 struct mapped_device *md; 1962 1963 md = alloc_dev(minor); 1964 if (!md) 1965 return -ENXIO; 1966 1967 dm_sysfs_init(md); 1968 1969 *result = md; 1970 return 0; 1971 } 1972 1973 /* 1974 * Functions to manage md->type. 1975 * All are required to hold md->type_lock. 1976 */ 1977 void dm_lock_md_type(struct mapped_device *md) 1978 { 1979 mutex_lock(&md->type_lock); 1980 } 1981 1982 void dm_unlock_md_type(struct mapped_device *md) 1983 { 1984 mutex_unlock(&md->type_lock); 1985 } 1986 1987 void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type) 1988 { 1989 BUG_ON(!mutex_is_locked(&md->type_lock)); 1990 md->type = type; 1991 } 1992 1993 enum dm_queue_mode dm_get_md_type(struct mapped_device *md) 1994 { 1995 return md->type; 1996 } 1997 1998 struct target_type *dm_get_immutable_target_type(struct mapped_device *md) 1999 { 2000 return md->immutable_target_type; 2001 } 2002 2003 /* 2004 * The queue_limits are only valid as long as you have a reference 2005 * count on 'md'. 
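 *
 * Typical use, in sketch form (this is what disable_write_same()
 * above does):
 *
 *	struct queue_limits *limits = dm_get_queue_limits(md);
 *
 *	limits->max_write_same_sectors = 0;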
2006 */ 2007 struct queue_limits *dm_get_queue_limits(struct mapped_device *md) 2008 { 2009 BUG_ON(!atomic_read(&md->holders)); 2010 return &md->queue->limits; 2011 } 2012 EXPORT_SYMBOL_GPL(dm_get_queue_limits); 2013 2014 /* 2015 * Setup the DM device's queue based on md's type 2016 */ 2017 int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) 2018 { 2019 int r; 2020 enum dm_queue_mode type = dm_get_md_type(md); 2021 2022 switch (type) { 2023 case DM_TYPE_REQUEST_BASED: 2024 r = dm_old_init_request_queue(md, t); 2025 if (r) { 2026 DMERR("Cannot initialize queue for request-based mapped device"); 2027 return r; 2028 } 2029 break; 2030 case DM_TYPE_MQ_REQUEST_BASED: 2031 r = dm_mq_init_request_queue(md, t); 2032 if (r) { 2033 DMERR("Cannot initialize queue for request-based dm-mq mapped device"); 2034 return r; 2035 } 2036 break; 2037 case DM_TYPE_BIO_BASED: 2038 case DM_TYPE_DAX_BIO_BASED: 2039 dm_init_normal_md_queue(md); 2040 blk_queue_make_request(md->queue, dm_make_request); 2041 /* 2042 * DM handles splitting bios as needed. Free the bio_split bioset 2043 * since it won't be used (saves 1 process per bio-based DM device). 2044 */ 2045 bioset_free(md->queue->bio_split); 2046 md->queue->bio_split = NULL; 2047 2048 if (type == DM_TYPE_DAX_BIO_BASED) 2049 queue_flag_set_unlocked(QUEUE_FLAG_DAX, md->queue); 2050 break; 2051 case DM_TYPE_NONE: 2052 WARN_ON_ONCE(true); 2053 break; 2054 } 2055 2056 return 0; 2057 } 2058 2059 struct mapped_device *dm_get_md(dev_t dev) 2060 { 2061 struct mapped_device *md; 2062 unsigned minor = MINOR(dev); 2063 2064 if (MAJOR(dev) != _major || minor >= (1 << MINORBITS)) 2065 return NULL; 2066 2067 spin_lock(&_minor_lock); 2068 2069 md = idr_find(&_minor_idr, minor); 2070 if (md) { 2071 if ((md == MINOR_ALLOCED || 2072 (MINOR(disk_devt(dm_disk(md))) != minor) || 2073 dm_deleting_md(md) || 2074 test_bit(DMF_FREEING, &md->flags))) { 2075 md = NULL; 2076 goto out; 2077 } 2078 dm_get(md); 2079 } 2080 2081 out: 2082 spin_unlock(&_minor_lock); 2083 2084 return md; 2085 } 2086 EXPORT_SYMBOL_GPL(dm_get_md); 2087 2088 void *dm_get_mdptr(struct mapped_device *md) 2089 { 2090 return md->interface_ptr; 2091 } 2092 2093 void dm_set_mdptr(struct mapped_device *md, void *ptr) 2094 { 2095 md->interface_ptr = ptr; 2096 } 2097 2098 void dm_get(struct mapped_device *md) 2099 { 2100 atomic_inc(&md->holders); 2101 BUG_ON(test_bit(DMF_FREEING, &md->flags)); 2102 } 2103 2104 int dm_hold(struct mapped_device *md) 2105 { 2106 spin_lock(&_minor_lock); 2107 if (test_bit(DMF_FREEING, &md->flags)) { 2108 spin_unlock(&_minor_lock); 2109 return -EBUSY; 2110 } 2111 dm_get(md); 2112 spin_unlock(&_minor_lock); 2113 return 0; 2114 } 2115 EXPORT_SYMBOL_GPL(dm_hold); 2116 2117 const char *dm_device_name(struct mapped_device *md) 2118 { 2119 return md->name; 2120 } 2121 EXPORT_SYMBOL_GPL(dm_device_name); 2122 2123 static void __dm_destroy(struct mapped_device *md, bool wait) 2124 { 2125 struct request_queue *q = dm_get_md_queue(md); 2126 struct dm_table *map; 2127 int srcu_idx; 2128 2129 might_sleep(); 2130 2131 spin_lock(&_minor_lock); 2132 idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); 2133 set_bit(DMF_FREEING, &md->flags); 2134 spin_unlock(&_minor_lock); 2135 2136 blk_set_queue_dying(q); 2137 2138 if (dm_request_based(md) && md->kworker_task) 2139 kthread_flush_worker(&md->kworker); 2140 2141 /* 2142 * Take suspend_lock so that presuspend and postsuspend methods 2143 * do not race with internal suspend. 
2144 */ 2145 mutex_lock(&md->suspend_lock); 2146 map = dm_get_live_table(md, &srcu_idx); 2147 if (!dm_suspended_md(md)) { 2148 dm_table_presuspend_targets(map); 2149 dm_table_postsuspend_targets(map); 2150 } 2151 /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ 2152 dm_put_live_table(md, srcu_idx); 2153 mutex_unlock(&md->suspend_lock); 2154 2155 /* 2156 * Rare, but there may be I/O requests still going to complete, 2157 * for example. Wait for all references to disappear. 2158 * No one should increment the reference count of the mapped_device, 2159 * after the mapped_device state becomes DMF_FREEING. 2160 */ 2161 if (wait) 2162 while (atomic_read(&md->holders)) 2163 msleep(1); 2164 else if (atomic_read(&md->holders)) 2165 DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)", 2166 dm_device_name(md), atomic_read(&md->holders)); 2167 2168 dm_sysfs_exit(md); 2169 dm_table_destroy(__unbind(md)); 2170 free_dev(md); 2171 } 2172 2173 void dm_destroy(struct mapped_device *md) 2174 { 2175 __dm_destroy(md, true); 2176 } 2177 2178 void dm_destroy_immediate(struct mapped_device *md) 2179 { 2180 __dm_destroy(md, false); 2181 } 2182 2183 void dm_put(struct mapped_device *md) 2184 { 2185 atomic_dec(&md->holders); 2186 } 2187 EXPORT_SYMBOL_GPL(dm_put); 2188 2189 static int dm_wait_for_completion(struct mapped_device *md, long task_state) 2190 { 2191 int r = 0; 2192 DEFINE_WAIT(wait); 2193 2194 while (1) { 2195 prepare_to_wait(&md->wait, &wait, task_state); 2196 2197 if (!md_in_flight(md)) 2198 break; 2199 2200 if (signal_pending_state(task_state, current)) { 2201 r = -EINTR; 2202 break; 2203 } 2204 2205 io_schedule(); 2206 } 2207 finish_wait(&md->wait, &wait); 2208 2209 return r; 2210 } 2211 2212 /* 2213 * Process the deferred bios 2214 */ 2215 static void dm_wq_work(struct work_struct *work) 2216 { 2217 struct mapped_device *md = container_of(work, struct mapped_device, 2218 work); 2219 struct bio *c; 2220 int srcu_idx; 2221 struct dm_table *map; 2222 2223 map = dm_get_live_table(md, &srcu_idx); 2224 2225 while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 2226 spin_lock_irq(&md->deferred_lock); 2227 c = bio_list_pop(&md->deferred); 2228 spin_unlock_irq(&md->deferred_lock); 2229 2230 if (!c) 2231 break; 2232 2233 if (dm_request_based(md)) 2234 generic_make_request(c); 2235 else 2236 __split_and_process_bio(md, map, c); 2237 } 2238 2239 dm_put_live_table(md, srcu_idx); 2240 } 2241 2242 static void dm_queue_flush(struct mapped_device *md) 2243 { 2244 clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 2245 smp_mb__after_atomic(); 2246 queue_work(md->wq, &md->work); 2247 } 2248 2249 /* 2250 * Swap in a new table, returning the old one for the caller to destroy. 2251 */ 2252 struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) 2253 { 2254 struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL); 2255 struct queue_limits limits; 2256 int r; 2257 2258 mutex_lock(&md->suspend_lock); 2259 2260 /* device must be suspended */ 2261 if (!dm_suspended_md(md)) 2262 goto out; 2263 2264 /* 2265 * If the new table has no data devices, retain the existing limits. 2266 * This helps multipath with queue_if_no_path if all paths disappear, 2267 * then new I/O is queued based on these limits, and then some paths 2268 * reappear. 
2269 */ 2270 if (dm_table_has_no_data_devices(table)) { 2271 live_map = dm_get_live_table_fast(md); 2272 if (live_map) 2273 limits = md->queue->limits; 2274 dm_put_live_table_fast(md); 2275 } 2276 2277 if (!live_map) { 2278 r = dm_calculate_queue_limits(table, &limits); 2279 if (r) { 2280 map = ERR_PTR(r); 2281 goto out; 2282 } 2283 } 2284 2285 map = __bind(md, table, &limits); 2286 2287 out: 2288 mutex_unlock(&md->suspend_lock); 2289 return map; 2290 } 2291 2292 /* 2293 * Functions to lock and unlock any filesystem running on the 2294 * device. 2295 */ 2296 static int lock_fs(struct mapped_device *md) 2297 { 2298 int r; 2299 2300 WARN_ON(md->frozen_sb); 2301 2302 md->frozen_sb = freeze_bdev(md->bdev); 2303 if (IS_ERR(md->frozen_sb)) { 2304 r = PTR_ERR(md->frozen_sb); 2305 md->frozen_sb = NULL; 2306 return r; 2307 } 2308 2309 set_bit(DMF_FROZEN, &md->flags); 2310 2311 return 0; 2312 } 2313 2314 static void unlock_fs(struct mapped_device *md) 2315 { 2316 if (!test_bit(DMF_FROZEN, &md->flags)) 2317 return; 2318 2319 thaw_bdev(md->bdev, md->frozen_sb); 2320 md->frozen_sb = NULL; 2321 clear_bit(DMF_FROZEN, &md->flags); 2322 } 2323 2324 /* 2325 * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG 2326 * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE 2327 * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY 2328 * 2329 * If __dm_suspend returns 0, the device is completely quiescent 2330 * now. There is no request-processing activity. All new requests 2331 * are being added to md->deferred list. 2332 */ 2333 static int __dm_suspend(struct mapped_device *md, struct dm_table *map, 2334 unsigned suspend_flags, long task_state, 2335 int dmf_suspended_flag) 2336 { 2337 bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG; 2338 bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG; 2339 int r; 2340 2341 lockdep_assert_held(&md->suspend_lock); 2342 2343 /* 2344 * DMF_NOFLUSH_SUSPENDING must be set before presuspend. 2345 * This flag is cleared before dm_suspend returns. 2346 */ 2347 if (noflush) 2348 set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 2349 else 2350 pr_debug("%s: suspending with flush\n", dm_device_name(md)); 2351 2352 /* 2353 * This gets reverted if there's an error later and the targets 2354 * provide the .presuspend_undo hook. 2355 */ 2356 dm_table_presuspend_targets(map); 2357 2358 /* 2359 * Flush I/O to the device. 2360 * Any I/O submitted after lock_fs() may not be flushed. 2361 * noflush takes precedence over do_lockfs. 2362 * (lock_fs() flushes I/Os and waits for them to complete.) 2363 */ 2364 if (!noflush && do_lockfs) { 2365 r = lock_fs(md); 2366 if (r) { 2367 dm_table_presuspend_undo_targets(map); 2368 return r; 2369 } 2370 } 2371 2372 /* 2373 * Here we must make sure that no processes are submitting requests 2374 * to target drivers i.e. no one may be executing 2375 * __split_and_process_bio. This is called from dm_request and 2376 * dm_wq_work. 2377 * 2378 * To get all processes out of __split_and_process_bio in dm_request, 2379 * we take the write lock. To prevent any process from reentering 2380 * __split_and_process_bio from dm_request and quiesce the thread 2381 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call 2382 * flush_workqueue(md->wq). 2383 */ 2384 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 2385 if (map) 2386 synchronize_srcu(&md->io_barrier); 2387 2388 /* 2389 * Stop md->queue before flushing md->wq in case request-based 2390 * dm defers requests to md->wq from md->queue.
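 * In other words, stop the source of new deferred work before flushing
 * the workqueue, otherwise work could be queued after the flush and be
 * missed.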
2391 */ 2392 if (dm_request_based(md)) { 2393 dm_stop_queue(md->queue); 2394 if (md->kworker_task) 2395 kthread_flush_worker(&md->kworker); 2396 } 2397 2398 flush_workqueue(md->wq); 2399 2400 /* 2401 * At this point no more requests are entering target request routines. 2402 * We call dm_wait_for_completion to wait for all existing requests 2403 * to finish. 2404 */ 2405 r = dm_wait_for_completion(md, task_state); 2406 if (!r) 2407 set_bit(dmf_suspended_flag, &md->flags); 2408 2409 if (noflush) 2410 clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 2411 if (map) 2412 synchronize_srcu(&md->io_barrier); 2413 2414 /* were we interrupted ? */ 2415 if (r < 0) { 2416 dm_queue_flush(md); 2417 2418 if (dm_request_based(md)) 2419 dm_start_queue(md->queue); 2420 2421 unlock_fs(md); 2422 dm_table_presuspend_undo_targets(map); 2423 /* pushback list is already flushed, so skip flush */ 2424 } 2425 2426 return r; 2427 } 2428 2429 /* 2430 * We need to be able to change a mapping table under a mounted 2431 * filesystem. For example we might want to move some data in 2432 * the background. Before the table can be swapped with 2433 * dm_bind_table, dm_suspend must be called to flush any in 2434 * flight bios and ensure that any further io gets deferred. 2435 */ 2436 /* 2437 * Suspend mechanism in request-based dm. 2438 * 2439 * 1. Flush all I/Os by lock_fs() if needed. 2440 * 2. Stop dispatching any I/O by stopping the request_queue. 2441 * 3. Wait for all in-flight I/Os to be completed or requeued. 2442 * 2443 * To abort suspend, start the request_queue. 2444 */ 2445 int dm_suspend(struct mapped_device *md, unsigned suspend_flags) 2446 { 2447 struct dm_table *map = NULL; 2448 int r = 0; 2449 2450 retry: 2451 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); 2452 2453 if (dm_suspended_md(md)) { 2454 r = -EINVAL; 2455 goto out_unlock; 2456 } 2457 2458 if (dm_suspended_internally_md(md)) { 2459 /* already internally suspended, wait for internal resume */ 2460 mutex_unlock(&md->suspend_lock); 2461 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); 2462 if (r) 2463 return r; 2464 goto retry; 2465 } 2466 2467 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2468 2469 r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED); 2470 if (r) 2471 goto out_unlock; 2472 2473 dm_table_postsuspend_targets(map); 2474 2475 out_unlock: 2476 mutex_unlock(&md->suspend_lock); 2477 return r; 2478 } 2479 2480 static int __dm_resume(struct mapped_device *md, struct dm_table *map) 2481 { 2482 if (map) { 2483 int r = dm_table_resume_targets(map); 2484 if (r) 2485 return r; 2486 } 2487 2488 dm_queue_flush(md); 2489 2490 /* 2491 * Flushing deferred I/Os must be done after targets are resumed 2492 * so that mapping of targets can work correctly. 2493 * Request-based dm is queueing the deferred I/Os in its request_queue. 
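 * Bio-based dm instead holds them on md->deferred; dm_queue_flush() above
 * has already kicked md->wq, so dm_wq_work() will resubmit them.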
2494 */ 2495 if (dm_request_based(md)) 2496 dm_start_queue(md->queue); 2497 2498 unlock_fs(md); 2499 2500 return 0; 2501 } 2502 2503 int dm_resume(struct mapped_device *md) 2504 { 2505 int r; 2506 struct dm_table *map = NULL; 2507 2508 retry: 2509 r = -EINVAL; 2510 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); 2511 2512 if (!dm_suspended_md(md)) 2513 goto out; 2514 2515 if (dm_suspended_internally_md(md)) { 2516 /* already internally suspended, wait for internal resume */ 2517 mutex_unlock(&md->suspend_lock); 2518 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); 2519 if (r) 2520 return r; 2521 goto retry; 2522 } 2523 2524 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2525 if (!map || !dm_table_get_size(map)) 2526 goto out; 2527 2528 r = __dm_resume(md, map); 2529 if (r) 2530 goto out; 2531 2532 clear_bit(DMF_SUSPENDED, &md->flags); 2533 out: 2534 mutex_unlock(&md->suspend_lock); 2535 2536 return r; 2537 } 2538 2539 /* 2540 * Internal suspend/resume works like userspace-driven suspend. It waits 2541 * until all bios finish and prevents issuing new bios to the target drivers. 2542 * It may be used only from the kernel. 2543 */ 2544 2545 static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags) 2546 { 2547 struct dm_table *map = NULL; 2548 2549 lockdep_assert_held(&md->suspend_lock); 2550 2551 if (md->internal_suspend_count++) 2552 return; /* nested internal suspend */ 2553 2554 if (dm_suspended_md(md)) { 2555 set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 2556 return; /* nest suspend */ 2557 } 2558 2559 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2560 2561 /* 2562 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is 2563 * supported. Properly supporting a TASK_INTERRUPTIBLE internal suspend 2564 * would require changing .presuspend to return an error -- avoid this 2565 * until there is a need for more elaborate variants of internal suspend. 2566 */ 2567 (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE, 2568 DMF_SUSPENDED_INTERNALLY); 2569 2570 dm_table_postsuspend_targets(map); 2571 } 2572 2573 static void __dm_internal_resume(struct mapped_device *md) 2574 { 2575 BUG_ON(!md->internal_suspend_count); 2576 2577 if (--md->internal_suspend_count) 2578 return; /* resume from nested internal suspend */ 2579 2580 if (dm_suspended_md(md)) 2581 goto done; /* resume from nested suspend */ 2582 2583 /* 2584 * NOTE: existing callers don't need to call dm_table_resume_targets 2585 * (which may fail -- so best to avoid it for now by passing NULL map) 2586 */ 2587 (void) __dm_resume(md, NULL); 2588 2589 done: 2590 clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 2591 smp_mb__after_atomic(); 2592 wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY); 2593 } 2594 2595 void dm_internal_suspend_noflush(struct mapped_device *md) 2596 { 2597 mutex_lock(&md->suspend_lock); 2598 __dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG); 2599 mutex_unlock(&md->suspend_lock); 2600 } 2601 EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush); 2602 2603 void dm_internal_resume(struct mapped_device *md) 2604 { 2605 mutex_lock(&md->suspend_lock); 2606 __dm_internal_resume(md); 2607 mutex_unlock(&md->suspend_lock); 2608 } 2609 EXPORT_SYMBOL_GPL(dm_internal_resume); 2610 2611 /* 2612 * Fast variants of internal suspend/resume hold md->suspend_lock, 2613 * which prevents interaction with userspace-driven suspend. 
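 * dm_internal_suspend_fast() takes the lock and returns with it held; it
 * is released again only by dm_internal_resume_fast(), so the two must be
 * called as a strict pair.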
2614 */ 2615 2616 void dm_internal_suspend_fast(struct mapped_device *md) 2617 { 2618 mutex_lock(&md->suspend_lock); 2619 if (dm_suspended_md(md) || dm_suspended_internally_md(md)) 2620 return; 2621 2622 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 2623 synchronize_srcu(&md->io_barrier); 2624 flush_workqueue(md->wq); 2625 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); 2626 } 2627 EXPORT_SYMBOL_GPL(dm_internal_suspend_fast); 2628 2629 void dm_internal_resume_fast(struct mapped_device *md) 2630 { 2631 if (dm_suspended_md(md) || dm_suspended_internally_md(md)) 2632 goto done; 2633 2634 dm_queue_flush(md); 2635 2636 done: 2637 mutex_unlock(&md->suspend_lock); 2638 } 2639 EXPORT_SYMBOL_GPL(dm_internal_resume_fast); 2640 2641 /*----------------------------------------------------------------- 2642 * Event notification. 2643 *---------------------------------------------------------------*/ 2644 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, 2645 unsigned cookie) 2646 { 2647 char udev_cookie[DM_COOKIE_LENGTH]; 2648 char *envp[] = { udev_cookie, NULL }; 2649 2650 if (!cookie) 2651 return kobject_uevent(&disk_to_dev(md->disk)->kobj, action); 2652 else { 2653 snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u", 2654 DM_COOKIE_ENV_VAR_NAME, cookie); 2655 return kobject_uevent_env(&disk_to_dev(md->disk)->kobj, 2656 action, envp); 2657 } 2658 } 2659 2660 uint32_t dm_next_uevent_seq(struct mapped_device *md) 2661 { 2662 return atomic_add_return(1, &md->uevent_seq); 2663 } 2664 2665 uint32_t dm_get_event_nr(struct mapped_device *md) 2666 { 2667 return atomic_read(&md->event_nr); 2668 } 2669 2670 int dm_wait_event(struct mapped_device *md, int event_nr) 2671 { 2672 return wait_event_interruptible(md->eventq, 2673 (event_nr != atomic_read(&md->event_nr))); 2674 } 2675 2676 void dm_uevent_add(struct mapped_device *md, struct list_head *elist) 2677 { 2678 unsigned long flags; 2679 2680 spin_lock_irqsave(&md->uevent_lock, flags); 2681 list_add(elist, &md->uevent_list); 2682 spin_unlock_irqrestore(&md->uevent_lock, flags); 2683 } 2684 2685 /* 2686 * The gendisk is only valid as long as you have a reference 2687 * count on 'md'. 
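 * A caller therefore pins the device first, for example (illustrative
 * sketch only):
 *
 *	struct mapped_device *md = dm_get_md(dev);
 *
 *	if (md) {
 *		struct gendisk *disk = dm_disk(md);
 *		...
 *		dm_put(md);
 *	}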
2688 */ 2689 struct gendisk *dm_disk(struct mapped_device *md) 2690 { 2691 return md->disk; 2692 } 2693 EXPORT_SYMBOL_GPL(dm_disk); 2694 2695 struct kobject *dm_kobject(struct mapped_device *md) 2696 { 2697 return &md->kobj_holder.kobj; 2698 } 2699 2700 struct mapped_device *dm_get_from_kobject(struct kobject *kobj) 2701 { 2702 struct mapped_device *md; 2703 2704 md = container_of(kobj, struct mapped_device, kobj_holder.kobj); 2705 2706 if (test_bit(DMF_FREEING, &md->flags) || 2707 dm_deleting_md(md)) 2708 return NULL; 2709 2710 dm_get(md); 2711 return md; 2712 } 2713 2714 int dm_suspended_md(struct mapped_device *md) 2715 { 2716 return test_bit(DMF_SUSPENDED, &md->flags); 2717 } 2718 2719 int dm_suspended_internally_md(struct mapped_device *md) 2720 { 2721 return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 2722 } 2723 2724 int dm_test_deferred_remove_flag(struct mapped_device *md) 2725 { 2726 return test_bit(DMF_DEFERRED_REMOVE, &md->flags); 2727 } 2728 2729 int dm_suspended(struct dm_target *ti) 2730 { 2731 return dm_suspended_md(dm_table_get_md(ti->table)); 2732 } 2733 EXPORT_SYMBOL_GPL(dm_suspended); 2734 2735 int dm_noflush_suspending(struct dm_target *ti) 2736 { 2737 return __noflush_suspending(dm_table_get_md(ti->table)); 2738 } 2739 EXPORT_SYMBOL_GPL(dm_noflush_suspending); 2740 2741 struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type, 2742 unsigned integrity, unsigned per_io_data_size) 2743 { 2744 struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id); 2745 unsigned int pool_size = 0; 2746 unsigned int front_pad; 2747 2748 if (!pools) 2749 return NULL; 2750 2751 switch (type) { 2752 case DM_TYPE_BIO_BASED: 2753 case DM_TYPE_DAX_BIO_BASED: 2754 pool_size = dm_get_reserved_bio_based_ios(); 2755 front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone); 2756 2757 pools->io_pool = mempool_create_slab_pool(pool_size, _io_cache); 2758 if (!pools->io_pool) 2759 goto out; 2760 break; 2761 case DM_TYPE_REQUEST_BASED: 2762 case DM_TYPE_MQ_REQUEST_BASED: 2763 pool_size = dm_get_reserved_rq_based_ios(); 2764 front_pad = offsetof(struct dm_rq_clone_bio_info, clone); 2765 /* per_io_data_size is used for blk-mq pdu at queue allocation */ 2766 break; 2767 default: 2768 BUG(); 2769 } 2770 2771 pools->bs = bioset_create(pool_size, front_pad, BIOSET_NEED_RESCUER); 2772 if (!pools->bs) 2773 goto out; 2774 2775 if (integrity && bioset_integrity_create(pools->bs, pool_size)) 2776 goto out; 2777 2778 return pools; 2779 2780 out: 2781 dm_free_md_mempools(pools); 2782 2783 return NULL; 2784 } 2785 2786 void dm_free_md_mempools(struct dm_md_mempools *pools) 2787 { 2788 if (!pools) 2789 return; 2790 2791 mempool_destroy(pools->io_pool); 2792 2793 if (pools->bs) 2794 bioset_free(pools->bs); 2795 2796 kfree(pools); 2797 } 2798 2799 struct dm_pr { 2800 u64 old_key; 2801 u64 new_key; 2802 u32 flags; 2803 bool fail_early; 2804 }; 2805 2806 static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn, 2807 void *data) 2808 { 2809 struct mapped_device *md = bdev->bd_disk->private_data; 2810 struct dm_table *table; 2811 struct dm_target *ti; 2812 int ret = -ENOTTY, srcu_idx; 2813 2814 table = dm_get_live_table(md, &srcu_idx); 2815 if (!table || !dm_table_get_size(table)) 2816 goto out; 2817 2818 /* We only support devices that have a single target */ 2819 if (dm_table_get_num_targets(table) != 1) 2820 goto out; 2821 ti = dm_table_get_target(table, 0); 2822 2823 ret 
= -EINVAL; 2824 if (!ti->type->iterate_devices) 2825 goto out; 2826 2827 ret = ti->type->iterate_devices(ti, fn, data); 2828 out: 2829 dm_put_live_table(md, srcu_idx); 2830 return ret; 2831 } 2832 2833 /* 2834 * For register / unregister we need to manually call out to every path. 2835 */ 2836 static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev, 2837 sector_t start, sector_t len, void *data) 2838 { 2839 struct dm_pr *pr = data; 2840 const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; 2841 2842 if (!ops || !ops->pr_register) 2843 return -EOPNOTSUPP; 2844 return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags); 2845 } 2846 2847 static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key, 2848 u32 flags) 2849 { 2850 struct dm_pr pr = { 2851 .old_key = old_key, 2852 .new_key = new_key, 2853 .flags = flags, 2854 .fail_early = true, 2855 }; 2856 int ret; 2857 2858 ret = dm_call_pr(bdev, __dm_pr_register, &pr); 2859 if (ret && new_key) { 2860 /* unregister all paths if we failed to register any path */ 2861 pr.old_key = new_key; 2862 pr.new_key = 0; 2863 pr.flags = 0; 2864 pr.fail_early = false; 2865 dm_call_pr(bdev, __dm_pr_register, &pr); 2866 } 2867 2868 return ret; 2869 } 2870 2871 static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type, 2872 u32 flags) 2873 { 2874 struct mapped_device *md = bdev->bd_disk->private_data; 2875 const struct pr_ops *ops; 2876 fmode_t mode; 2877 int r; 2878 2879 r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); 2880 if (r < 0) 2881 return r; 2882 2883 ops = bdev->bd_disk->fops->pr_ops; 2884 if (ops && ops->pr_reserve) 2885 r = ops->pr_reserve(bdev, key, type, flags); 2886 else 2887 r = -EOPNOTSUPP; 2888 2889 bdput(bdev); 2890 return r; 2891 } 2892 2893 static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type) 2894 { 2895 struct mapped_device *md = bdev->bd_disk->private_data; 2896 const struct pr_ops *ops; 2897 fmode_t mode; 2898 int r; 2899 2900 r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); 2901 if (r < 0) 2902 return r; 2903 2904 ops = bdev->bd_disk->fops->pr_ops; 2905 if (ops && ops->pr_release) 2906 r = ops->pr_release(bdev, key, type); 2907 else 2908 r = -EOPNOTSUPP; 2909 2910 bdput(bdev); 2911 return r; 2912 } 2913 2914 static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key, 2915 enum pr_type type, bool abort) 2916 { 2917 struct mapped_device *md = bdev->bd_disk->private_data; 2918 const struct pr_ops *ops; 2919 fmode_t mode; 2920 int r; 2921 2922 r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); 2923 if (r < 0) 2924 return r; 2925 2926 ops = bdev->bd_disk->fops->pr_ops; 2927 if (ops && ops->pr_preempt) 2928 r = ops->pr_preempt(bdev, old_key, new_key, type, abort); 2929 else 2930 r = -EOPNOTSUPP; 2931 2932 bdput(bdev); 2933 return r; 2934 } 2935 2936 static int dm_pr_clear(struct block_device *bdev, u64 key) 2937 { 2938 struct mapped_device *md = bdev->bd_disk->private_data; 2939 const struct pr_ops *ops; 2940 fmode_t mode; 2941 int r; 2942 2943 r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); 2944 if (r < 0) 2945 return r; 2946 2947 ops = bdev->bd_disk->fops->pr_ops; 2948 if (ops && ops->pr_clear) 2949 r = ops->pr_clear(bdev, key); 2950 else 2951 r = -EOPNOTSUPP; 2952 2953 bdput(bdev); 2954 return r; 2955 } 2956 2957 static const struct pr_ops dm_pr_ops = { 2958 .pr_register = dm_pr_register, 2959 .pr_reserve = dm_pr_reserve, 2960 .pr_release = dm_pr_release, 2961 .pr_preempt = dm_pr_preempt, 2962 .pr_clear = dm_pr_clear, 2963 }; 2964 2965 
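/*
 * dm_pr_ops is hooked up below through dm_blk_dops.pr_ops, so persistent
 * reservation ioctls issued against a mapped device reach the handlers
 * above.
 */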
static const struct block_device_operations dm_blk_dops = { 2966 .open = dm_blk_open, 2967 .release = dm_blk_close, 2968 .ioctl = dm_blk_ioctl, 2969 .getgeo = dm_blk_getgeo, 2970 .pr_ops = &dm_pr_ops, 2971 .owner = THIS_MODULE 2972 }; 2973 2974 static const struct dax_operations dm_dax_ops = { 2975 .direct_access = dm_dax_direct_access, 2976 .copy_from_iter = dm_dax_copy_from_iter, 2977 }; 2978 2979 /* 2980 * module hooks 2981 */ 2982 module_init(dm_init); 2983 module_exit(dm_exit); 2984 2985 module_param(major, uint, 0); 2986 MODULE_PARM_DESC(major, "The major number of the device mapper"); 2987 2988 module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR); 2989 MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools"); 2990 2991 module_param(dm_numa_node, int, S_IRUGO | S_IWUSR); 2992 MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations"); 2993 2994 MODULE_DESCRIPTION(DM_NAME " driver"); 2995 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); 2996 MODULE_LICENSE("GPL"); 2997
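
/*
 * Illustrative usage sketch only: a table reload driven from the dm-ioctl
 * layer follows roughly this pattern, built from the helpers above:
 *
 *	r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
 *	...
 *	old_map = dm_swap_table(md, new_table);
 *	if (!IS_ERR_OR_NULL(old_map))
 *		dm_table_destroy(old_map);
 *	r = dm_resume(md);
 *
 * dm_swap_table() refuses to operate on a device that is not suspended,
 * which is why every reload is bracketed by a suspend/resume pair.
 *
 * The module parameters declared above can be given at load time, e.g.
 * "modprobe dm_mod reserved_bio_based_ios=32 dm_numa_node=0" (assuming
 * device-mapper is built as the dm_mod module); the S_IRUGO | S_IWUSR
 * parameters can also be changed later via /sys/module/dm_mod/parameters/.
 */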