1 /* 2 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited. 3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. 4 * 5 * This file is released under the GPL. 6 */ 7 8 #include "dm-core.h" 9 #include "dm-rq.h" 10 #include "dm-uevent.h" 11 12 #include <linux/init.h> 13 #include <linux/module.h> 14 #include <linux/mutex.h> 15 #include <linux/sched/mm.h> 16 #include <linux/sched/signal.h> 17 #include <linux/blkpg.h> 18 #include <linux/bio.h> 19 #include <linux/mempool.h> 20 #include <linux/dax.h> 21 #include <linux/slab.h> 22 #include <linux/idr.h> 23 #include <linux/uio.h> 24 #include <linux/hdreg.h> 25 #include <linux/delay.h> 26 #include <linux/wait.h> 27 #include <linux/pr.h> 28 #include <linux/refcount.h> 29 #include <linux/part_stat.h> 30 #include <linux/blk-crypto.h> 31 32 #define DM_MSG_PREFIX "core" 33 34 /* 35 * Cookies are numeric values sent with CHANGE and REMOVE 36 * uevents while resuming, removing or renaming the device. 37 */ 38 #define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE" 39 #define DM_COOKIE_LENGTH 24 40 41 static const char *_name = DM_NAME; 42 43 static unsigned int major = 0; 44 static unsigned int _major = 0; 45 46 static DEFINE_IDR(_minor_idr); 47 48 static DEFINE_SPINLOCK(_minor_lock); 49 50 static void do_deferred_remove(struct work_struct *w); 51 52 static DECLARE_WORK(deferred_remove_work, do_deferred_remove); 53 54 static struct workqueue_struct *deferred_remove_workqueue; 55 56 atomic_t dm_global_event_nr = ATOMIC_INIT(0); 57 DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq); 58 59 void dm_issue_global_event(void) 60 { 61 atomic_inc(&dm_global_event_nr); 62 wake_up(&dm_global_eventq); 63 } 64 65 /* 66 * One of these is allocated (on-stack) per original bio. 67 */ 68 struct clone_info { 69 struct dm_table *map; 70 struct bio *bio; 71 struct dm_io *io; 72 sector_t sector; 73 unsigned sector_count; 74 }; 75 76 /* 77 * One of these is allocated per clone bio. 78 */ 79 #define DM_TIO_MAGIC 7282014 80 struct dm_target_io { 81 unsigned magic; 82 struct dm_io *io; 83 struct dm_target *ti; 84 unsigned target_bio_nr; 85 unsigned *len_ptr; 86 bool inside_dm_io; 87 struct bio clone; 88 }; 89 90 /* 91 * One of these is allocated per original bio. 92 * It contains the first clone used for that original. 
93 */ 94 #define DM_IO_MAGIC 5191977 95 struct dm_io { 96 unsigned magic; 97 struct mapped_device *md; 98 blk_status_t status; 99 atomic_t io_count; 100 struct bio *orig_bio; 101 unsigned long start_time; 102 spinlock_t endio_lock; 103 struct dm_stats_aux stats_aux; 104 /* last member of dm_target_io is 'struct bio' */ 105 struct dm_target_io tio; 106 }; 107 108 void *dm_per_bio_data(struct bio *bio, size_t data_size) 109 { 110 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); 111 if (!tio->inside_dm_io) 112 return (char *)bio - offsetof(struct dm_target_io, clone) - data_size; 113 return (char *)bio - offsetof(struct dm_target_io, clone) - offsetof(struct dm_io, tio) - data_size; 114 } 115 EXPORT_SYMBOL_GPL(dm_per_bio_data); 116 117 struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size) 118 { 119 struct dm_io *io = (struct dm_io *)((char *)data + data_size); 120 if (io->magic == DM_IO_MAGIC) 121 return (struct bio *)((char *)io + offsetof(struct dm_io, tio) + offsetof(struct dm_target_io, clone)); 122 BUG_ON(io->magic != DM_TIO_MAGIC); 123 return (struct bio *)((char *)io + offsetof(struct dm_target_io, clone)); 124 } 125 EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data); 126 127 unsigned dm_bio_get_target_bio_nr(const struct bio *bio) 128 { 129 return container_of(bio, struct dm_target_io, clone)->target_bio_nr; 130 } 131 EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr); 132 133 #define MINOR_ALLOCED ((void *)-1) 134 135 /* 136 * Bits for the md->flags field. 137 */ 138 #define DMF_BLOCK_IO_FOR_SUSPEND 0 139 #define DMF_SUSPENDED 1 140 #define DMF_FROZEN 2 141 #define DMF_FREEING 3 142 #define DMF_DELETING 4 143 #define DMF_NOFLUSH_SUSPENDING 5 144 #define DMF_DEFERRED_REMOVE 6 145 #define DMF_SUSPENDED_INTERNALLY 7 146 #define DMF_POST_SUSPENDING 8 147 148 #define DM_NUMA_NODE NUMA_NO_NODE 149 static int dm_numa_node = DM_NUMA_NODE; 150 151 /* 152 * For mempools pre-allocation at the table loading time. 153 */ 154 struct dm_md_mempools { 155 struct bio_set bs; 156 struct bio_set io_bs; 157 }; 158 159 struct table_device { 160 struct list_head list; 161 refcount_t count; 162 struct dm_dev dm_dev; 163 }; 164 165 /* 166 * Bio-based DM's mempools' reserved IOs set by the user. 
167 */ 168 #define RESERVED_BIO_BASED_IOS 16 169 static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS; 170 171 static int __dm_get_module_param_int(int *module_param, int min, int max) 172 { 173 int param = READ_ONCE(*module_param); 174 int modified_param = 0; 175 bool modified = true; 176 177 if (param < min) 178 modified_param = min; 179 else if (param > max) 180 modified_param = max; 181 else 182 modified = false; 183 184 if (modified) { 185 (void)cmpxchg(module_param, param, modified_param); 186 param = modified_param; 187 } 188 189 return param; 190 } 191 192 unsigned __dm_get_module_param(unsigned *module_param, 193 unsigned def, unsigned max) 194 { 195 unsigned param = READ_ONCE(*module_param); 196 unsigned modified_param = 0; 197 198 if (!param) 199 modified_param = def; 200 else if (param > max) 201 modified_param = max; 202 203 if (modified_param) { 204 (void)cmpxchg(module_param, param, modified_param); 205 param = modified_param; 206 } 207 208 return param; 209 } 210 211 unsigned dm_get_reserved_bio_based_ios(void) 212 { 213 return __dm_get_module_param(&reserved_bio_based_ios, 214 RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS); 215 } 216 EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios); 217 218 static unsigned dm_get_numa_node(void) 219 { 220 return __dm_get_module_param_int(&dm_numa_node, 221 DM_NUMA_NODE, num_online_nodes() - 1); 222 } 223 224 static int __init local_init(void) 225 { 226 int r; 227 228 r = dm_uevent_init(); 229 if (r) 230 return r; 231 232 deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1); 233 if (!deferred_remove_workqueue) { 234 r = -ENOMEM; 235 goto out_uevent_exit; 236 } 237 238 _major = major; 239 r = register_blkdev(_major, _name); 240 if (r < 0) 241 goto out_free_workqueue; 242 243 if (!_major) 244 _major = r; 245 246 return 0; 247 248 out_free_workqueue: 249 destroy_workqueue(deferred_remove_workqueue); 250 out_uevent_exit: 251 dm_uevent_exit(); 252 253 return r; 254 } 255 256 static void local_exit(void) 257 { 258 flush_scheduled_work(); 259 destroy_workqueue(deferred_remove_workqueue); 260 261 unregister_blkdev(_major, _name); 262 dm_uevent_exit(); 263 264 _major = 0; 265 266 DMINFO("cleaned up"); 267 } 268 269 static int (*_inits[])(void) __initdata = { 270 local_init, 271 dm_target_init, 272 dm_linear_init, 273 dm_stripe_init, 274 dm_io_init, 275 dm_kcopyd_init, 276 dm_interface_init, 277 dm_statistics_init, 278 }; 279 280 static void (*_exits[])(void) = { 281 local_exit, 282 dm_target_exit, 283 dm_linear_exit, 284 dm_stripe_exit, 285 dm_io_exit, 286 dm_kcopyd_exit, 287 dm_interface_exit, 288 dm_statistics_exit, 289 }; 290 291 static int __init dm_init(void) 292 { 293 const int count = ARRAY_SIZE(_inits); 294 295 int r, i; 296 297 for (i = 0; i < count; i++) { 298 r = _inits[i](); 299 if (r) 300 goto bad; 301 } 302 303 return 0; 304 305 bad: 306 while (i--) 307 _exits[i](); 308 309 return r; 310 } 311 312 static void __exit dm_exit(void) 313 { 314 int i = ARRAY_SIZE(_exits); 315 316 while (i--) 317 _exits[i](); 318 319 /* 320 * Should be empty by this point. 
321 */ 322 idr_destroy(&_minor_idr); 323 } 324 325 /* 326 * Block device functions 327 */ 328 int dm_deleting_md(struct mapped_device *md) 329 { 330 return test_bit(DMF_DELETING, &md->flags); 331 } 332 333 static int dm_blk_open(struct block_device *bdev, fmode_t mode) 334 { 335 struct mapped_device *md; 336 337 spin_lock(&_minor_lock); 338 339 md = bdev->bd_disk->private_data; 340 if (!md) 341 goto out; 342 343 if (test_bit(DMF_FREEING, &md->flags) || 344 dm_deleting_md(md)) { 345 md = NULL; 346 goto out; 347 } 348 349 dm_get(md); 350 atomic_inc(&md->open_count); 351 out: 352 spin_unlock(&_minor_lock); 353 354 return md ? 0 : -ENXIO; 355 } 356 357 static void dm_blk_close(struct gendisk *disk, fmode_t mode) 358 { 359 struct mapped_device *md; 360 361 spin_lock(&_minor_lock); 362 363 md = disk->private_data; 364 if (WARN_ON(!md)) 365 goto out; 366 367 if (atomic_dec_and_test(&md->open_count) && 368 (test_bit(DMF_DEFERRED_REMOVE, &md->flags))) 369 queue_work(deferred_remove_workqueue, &deferred_remove_work); 370 371 dm_put(md); 372 out: 373 spin_unlock(&_minor_lock); 374 } 375 376 int dm_open_count(struct mapped_device *md) 377 { 378 return atomic_read(&md->open_count); 379 } 380 381 /* 382 * Guarantees nothing is using the device before it's deleted. 383 */ 384 int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred) 385 { 386 int r = 0; 387 388 spin_lock(&_minor_lock); 389 390 if (dm_open_count(md)) { 391 r = -EBUSY; 392 if (mark_deferred) 393 set_bit(DMF_DEFERRED_REMOVE, &md->flags); 394 } else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags)) 395 r = -EEXIST; 396 else 397 set_bit(DMF_DELETING, &md->flags); 398 399 spin_unlock(&_minor_lock); 400 401 return r; 402 } 403 404 int dm_cancel_deferred_remove(struct mapped_device *md) 405 { 406 int r = 0; 407 408 spin_lock(&_minor_lock); 409 410 if (test_bit(DMF_DELETING, &md->flags)) 411 r = -EBUSY; 412 else 413 clear_bit(DMF_DEFERRED_REMOVE, &md->flags); 414 415 spin_unlock(&_minor_lock); 416 417 return r; 418 } 419 420 static void do_deferred_remove(struct work_struct *w) 421 { 422 dm_deferred_remove(); 423 } 424 425 sector_t dm_get_size(struct mapped_device *md) 426 { 427 return get_capacity(md->disk); 428 } 429 430 struct request_queue *dm_get_md_queue(struct mapped_device *md) 431 { 432 return md->queue; 433 } 434 435 struct dm_stats *dm_get_stats(struct mapped_device *md) 436 { 437 return &md->stats; 438 } 439 440 static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo) 441 { 442 struct mapped_device *md = bdev->bd_disk->private_data; 443 444 return dm_get_geometry(md, geo); 445 } 446 447 #ifdef CONFIG_BLK_DEV_ZONED 448 int dm_report_zones_cb(struct blk_zone *zone, unsigned int idx, void *data) 449 { 450 struct dm_report_zones_args *args = data; 451 sector_t sector_diff = args->tgt->begin - args->start; 452 453 /* 454 * Ignore zones beyond the target range. 455 */ 456 if (zone->start >= args->start + args->tgt->len) 457 return 0; 458 459 /* 460 * Remap the start sector and write pointer position of the zone 461 * to match its position in the target range. 
462 */ 463 zone->start += sector_diff; 464 if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL) { 465 if (zone->cond == BLK_ZONE_COND_FULL) 466 zone->wp = zone->start + zone->len; 467 else if (zone->cond == BLK_ZONE_COND_EMPTY) 468 zone->wp = zone->start; 469 else 470 zone->wp += sector_diff; 471 } 472 473 args->next_sector = zone->start + zone->len; 474 return args->orig_cb(zone, args->zone_idx++, args->orig_data); 475 } 476 EXPORT_SYMBOL_GPL(dm_report_zones_cb); 477 478 static int dm_blk_report_zones(struct gendisk *disk, sector_t sector, 479 unsigned int nr_zones, report_zones_cb cb, void *data) 480 { 481 struct mapped_device *md = disk->private_data; 482 struct dm_table *map; 483 int srcu_idx, ret; 484 struct dm_report_zones_args args = { 485 .next_sector = sector, 486 .orig_data = data, 487 .orig_cb = cb, 488 }; 489 490 if (dm_suspended_md(md)) 491 return -EAGAIN; 492 493 map = dm_get_live_table(md, &srcu_idx); 494 if (!map) 495 return -EIO; 496 497 do { 498 struct dm_target *tgt; 499 500 tgt = dm_table_find_target(map, args.next_sector); 501 if (WARN_ON_ONCE(!tgt->type->report_zones)) { 502 ret = -EIO; 503 goto out; 504 } 505 506 args.tgt = tgt; 507 ret = tgt->type->report_zones(tgt, &args, 508 nr_zones - args.zone_idx); 509 if (ret < 0) 510 goto out; 511 } while (args.zone_idx < nr_zones && 512 args.next_sector < get_capacity(disk)); 513 514 ret = args.zone_idx; 515 out: 516 dm_put_live_table(md, srcu_idx); 517 return ret; 518 } 519 #else 520 #define dm_blk_report_zones NULL 521 #endif /* CONFIG_BLK_DEV_ZONED */ 522 523 static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx, 524 struct block_device **bdev) 525 __acquires(md->io_barrier) 526 { 527 struct dm_target *tgt; 528 struct dm_table *map; 529 int r; 530 531 retry: 532 r = -ENOTTY; 533 map = dm_get_live_table(md, srcu_idx); 534 if (!map || !dm_table_get_size(map)) 535 return r; 536 537 /* We only support devices that have a single target */ 538 if (dm_table_get_num_targets(map) != 1) 539 return r; 540 541 tgt = dm_table_get_target(map, 0); 542 if (!tgt->type->prepare_ioctl) 543 return r; 544 545 if (dm_suspended_md(md)) 546 return -EAGAIN; 547 548 r = tgt->type->prepare_ioctl(tgt, bdev); 549 if (r == -ENOTCONN && !fatal_signal_pending(current)) { 550 dm_put_live_table(md, *srcu_idx); 551 msleep(10); 552 goto retry; 553 } 554 555 return r; 556 } 557 558 static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx) 559 __releases(md->io_barrier) 560 { 561 dm_put_live_table(md, srcu_idx); 562 } 563 564 static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode, 565 unsigned int cmd, unsigned long arg) 566 { 567 struct mapped_device *md = bdev->bd_disk->private_data; 568 int r, srcu_idx; 569 570 r = dm_prepare_ioctl(md, &srcu_idx, &bdev); 571 if (r < 0) 572 goto out; 573 574 if (r > 0) { 575 /* 576 * Target determined this ioctl is being issued against a 577 * subset of the parent bdev; require extra privileges. 
578 */ 579 if (!capable(CAP_SYS_RAWIO)) { 580 DMWARN_LIMIT( 581 "%s: sending ioctl %x to DM device without required privilege.", 582 current->comm, cmd); 583 r = -ENOIOCTLCMD; 584 goto out; 585 } 586 } 587 588 r = __blkdev_driver_ioctl(bdev, mode, cmd, arg); 589 out: 590 dm_unprepare_ioctl(md, srcu_idx); 591 return r; 592 } 593 594 static void start_io_acct(struct dm_io *io); 595 596 static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio) 597 { 598 struct dm_io *io; 599 struct dm_target_io *tio; 600 struct bio *clone; 601 602 clone = bio_alloc_bioset(GFP_NOIO, 0, &md->io_bs); 603 if (!clone) 604 return NULL; 605 606 tio = container_of(clone, struct dm_target_io, clone); 607 tio->inside_dm_io = true; 608 tio->io = NULL; 609 610 io = container_of(tio, struct dm_io, tio); 611 io->magic = DM_IO_MAGIC; 612 io->status = 0; 613 atomic_set(&io->io_count, 1); 614 io->orig_bio = bio; 615 io->md = md; 616 spin_lock_init(&io->endio_lock); 617 618 start_io_acct(io); 619 620 return io; 621 } 622 623 static void free_io(struct mapped_device *md, struct dm_io *io) 624 { 625 bio_put(&io->tio.clone); 626 } 627 628 static struct dm_target_io *alloc_tio(struct clone_info *ci, struct dm_target *ti, 629 unsigned target_bio_nr, gfp_t gfp_mask) 630 { 631 struct dm_target_io *tio; 632 633 if (!ci->io->tio.io) { 634 /* the dm_target_io embedded in ci->io is available */ 635 tio = &ci->io->tio; 636 } else { 637 struct bio *clone = bio_alloc_bioset(gfp_mask, 0, &ci->io->md->bs); 638 if (!clone) 639 return NULL; 640 641 tio = container_of(clone, struct dm_target_io, clone); 642 tio->inside_dm_io = false; 643 } 644 645 tio->magic = DM_TIO_MAGIC; 646 tio->io = ci->io; 647 tio->ti = ti; 648 tio->target_bio_nr = target_bio_nr; 649 650 return tio; 651 } 652 653 static void free_tio(struct dm_target_io *tio) 654 { 655 if (tio->inside_dm_io) 656 return; 657 bio_put(&tio->clone); 658 } 659 660 u64 dm_start_time_ns_from_clone(struct bio *bio) 661 { 662 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); 663 struct dm_io *io = tio->io; 664 665 return jiffies_to_nsecs(io->start_time); 666 } 667 EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone); 668 669 static void start_io_acct(struct dm_io *io) 670 { 671 struct mapped_device *md = io->md; 672 struct bio *bio = io->orig_bio; 673 674 io->start_time = bio_start_io_acct(bio); 675 if (unlikely(dm_stats_used(&md->stats))) 676 dm_stats_account_io(&md->stats, bio_data_dir(bio), 677 bio->bi_iter.bi_sector, bio_sectors(bio), 678 false, 0, &io->stats_aux); 679 } 680 681 static void end_io_acct(struct dm_io *io) 682 { 683 struct mapped_device *md = io->md; 684 struct bio *bio = io->orig_bio; 685 unsigned long duration = jiffies - io->start_time; 686 687 bio_end_io_acct(bio, io->start_time); 688 689 if (unlikely(dm_stats_used(&md->stats))) 690 dm_stats_account_io(&md->stats, bio_data_dir(bio), 691 bio->bi_iter.bi_sector, bio_sectors(bio), 692 true, duration, &io->stats_aux); 693 694 /* nudge anyone waiting on suspend queue */ 695 if (unlikely(wq_has_sleeper(&md->wait))) 696 wake_up(&md->wait); 697 } 698 699 /* 700 * Add the bio to the list of deferred io. 
701 */ 702 static void queue_io(struct mapped_device *md, struct bio *bio) 703 { 704 unsigned long flags; 705 706 spin_lock_irqsave(&md->deferred_lock, flags); 707 bio_list_add(&md->deferred, bio); 708 spin_unlock_irqrestore(&md->deferred_lock, flags); 709 queue_work(md->wq, &md->work); 710 } 711 712 /* 713 * Everyone (including functions in this file), should use this 714 * function to access the md->map field, and make sure they call 715 * dm_put_live_table() when finished. 716 */ 717 struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier) 718 { 719 *srcu_idx = srcu_read_lock(&md->io_barrier); 720 721 return srcu_dereference(md->map, &md->io_barrier); 722 } 723 724 void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier) 725 { 726 srcu_read_unlock(&md->io_barrier, srcu_idx); 727 } 728 729 void dm_sync_table(struct mapped_device *md) 730 { 731 synchronize_srcu(&md->io_barrier); 732 synchronize_rcu_expedited(); 733 } 734 735 /* 736 * A fast alternative to dm_get_live_table/dm_put_live_table. 737 * The caller must not block between these two functions. 738 */ 739 static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU) 740 { 741 rcu_read_lock(); 742 return rcu_dereference(md->map); 743 } 744 745 static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU) 746 { 747 rcu_read_unlock(); 748 } 749 750 static char *_dm_claim_ptr = "I belong to device-mapper"; 751 752 /* 753 * Open a table device so we can use it as a map destination. 754 */ 755 static int open_table_device(struct table_device *td, dev_t dev, 756 struct mapped_device *md) 757 { 758 struct block_device *bdev; 759 760 int r; 761 762 BUG_ON(td->dm_dev.bdev); 763 764 bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _dm_claim_ptr); 765 if (IS_ERR(bdev)) 766 return PTR_ERR(bdev); 767 768 r = bd_link_disk_holder(bdev, dm_disk(md)); 769 if (r) { 770 blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL); 771 return r; 772 } 773 774 td->dm_dev.bdev = bdev; 775 td->dm_dev.dax_dev = dax_get_by_host(bdev->bd_disk->disk_name); 776 return 0; 777 } 778 779 /* 780 * Close a table device that we've been using. 
781 */ 782 static void close_table_device(struct table_device *td, struct mapped_device *md) 783 { 784 if (!td->dm_dev.bdev) 785 return; 786 787 bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md)); 788 blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL); 789 put_dax(td->dm_dev.dax_dev); 790 td->dm_dev.bdev = NULL; 791 td->dm_dev.dax_dev = NULL; 792 } 793 794 static struct table_device *find_table_device(struct list_head *l, dev_t dev, 795 fmode_t mode) 796 { 797 struct table_device *td; 798 799 list_for_each_entry(td, l, list) 800 if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode) 801 return td; 802 803 return NULL; 804 } 805 806 int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode, 807 struct dm_dev **result) 808 { 809 int r; 810 struct table_device *td; 811 812 mutex_lock(&md->table_devices_lock); 813 td = find_table_device(&md->table_devices, dev, mode); 814 if (!td) { 815 td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id); 816 if (!td) { 817 mutex_unlock(&md->table_devices_lock); 818 return -ENOMEM; 819 } 820 821 td->dm_dev.mode = mode; 822 td->dm_dev.bdev = NULL; 823 824 if ((r = open_table_device(td, dev, md))) { 825 mutex_unlock(&md->table_devices_lock); 826 kfree(td); 827 return r; 828 } 829 830 format_dev_t(td->dm_dev.name, dev); 831 832 refcount_set(&td->count, 1); 833 list_add(&td->list, &md->table_devices); 834 } else { 835 refcount_inc(&td->count); 836 } 837 mutex_unlock(&md->table_devices_lock); 838 839 *result = &td->dm_dev; 840 return 0; 841 } 842 EXPORT_SYMBOL_GPL(dm_get_table_device); 843 844 void dm_put_table_device(struct mapped_device *md, struct dm_dev *d) 845 { 846 struct table_device *td = container_of(d, struct table_device, dm_dev); 847 848 mutex_lock(&md->table_devices_lock); 849 if (refcount_dec_and_test(&td->count)) { 850 close_table_device(td, md); 851 list_del(&td->list); 852 kfree(td); 853 } 854 mutex_unlock(&md->table_devices_lock); 855 } 856 EXPORT_SYMBOL(dm_put_table_device); 857 858 static void free_table_devices(struct list_head *devices) 859 { 860 struct list_head *tmp, *next; 861 862 list_for_each_safe(tmp, next, devices) { 863 struct table_device *td = list_entry(tmp, struct table_device, list); 864 865 DMWARN("dm_destroy: %s still exists with %d references", 866 td->dm_dev.name, refcount_read(&td->count)); 867 kfree(td); 868 } 869 } 870 871 /* 872 * Get the geometry associated with a dm device 873 */ 874 int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo) 875 { 876 *geo = md->geometry; 877 878 return 0; 879 } 880 881 /* 882 * Set the geometry of a device. 883 */ 884 int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo) 885 { 886 sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors; 887 888 if (geo->start > sz) { 889 DMWARN("Start sector is beyond the geometry limits."); 890 return -EINVAL; 891 } 892 893 md->geometry = *geo; 894 895 return 0; 896 } 897 898 static int __noflush_suspending(struct mapped_device *md) 899 { 900 return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 901 } 902 903 /* 904 * Decrements the number of outstanding ios that a bio has been 905 * cloned into, completing the original io if necc. 
906 */ 907 static void dec_pending(struct dm_io *io, blk_status_t error) 908 { 909 unsigned long flags; 910 blk_status_t io_error; 911 struct bio *bio; 912 struct mapped_device *md = io->md; 913 914 /* Push-back supersedes any I/O errors */ 915 if (unlikely(error)) { 916 spin_lock_irqsave(&io->endio_lock, flags); 917 if (!(io->status == BLK_STS_DM_REQUEUE && __noflush_suspending(md))) 918 io->status = error; 919 spin_unlock_irqrestore(&io->endio_lock, flags); 920 } 921 922 if (atomic_dec_and_test(&io->io_count)) { 923 if (io->status == BLK_STS_DM_REQUEUE) { 924 /* 925 * Target requested pushing back the I/O. 926 */ 927 spin_lock_irqsave(&md->deferred_lock, flags); 928 if (__noflush_suspending(md)) 929 /* NOTE early return due to BLK_STS_DM_REQUEUE below */ 930 bio_list_add_head(&md->deferred, io->orig_bio); 931 else 932 /* noflush suspend was interrupted. */ 933 io->status = BLK_STS_IOERR; 934 spin_unlock_irqrestore(&md->deferred_lock, flags); 935 } 936 937 io_error = io->status; 938 bio = io->orig_bio; 939 end_io_acct(io); 940 free_io(md, io); 941 942 if (io_error == BLK_STS_DM_REQUEUE) 943 return; 944 945 if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) { 946 /* 947 * Preflush done for flush with data, reissue 948 * without REQ_PREFLUSH. 949 */ 950 bio->bi_opf &= ~REQ_PREFLUSH; 951 queue_io(md, bio); 952 } else { 953 /* done with normal IO or empty flush */ 954 if (io_error) 955 bio->bi_status = io_error; 956 bio_endio(bio); 957 } 958 } 959 } 960 961 void disable_discard(struct mapped_device *md) 962 { 963 struct queue_limits *limits = dm_get_queue_limits(md); 964 965 /* device doesn't really support DISCARD, disable it */ 966 limits->max_discard_sectors = 0; 967 blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue); 968 } 969 970 void disable_write_same(struct mapped_device *md) 971 { 972 struct queue_limits *limits = dm_get_queue_limits(md); 973 974 /* device doesn't really support WRITE SAME, disable it */ 975 limits->max_write_same_sectors = 0; 976 } 977 978 void disable_write_zeroes(struct mapped_device *md) 979 { 980 struct queue_limits *limits = dm_get_queue_limits(md); 981 982 /* device doesn't really support WRITE ZEROES, disable it */ 983 limits->max_write_zeroes_sectors = 0; 984 } 985 986 static void clone_endio(struct bio *bio) 987 { 988 blk_status_t error = bio->bi_status; 989 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); 990 struct dm_io *io = tio->io; 991 struct mapped_device *md = tio->io->md; 992 dm_endio_fn endio = tio->ti->type->end_io; 993 struct bio *orig_bio = io->orig_bio; 994 995 if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) { 996 if (bio_op(bio) == REQ_OP_DISCARD && 997 !bio->bi_disk->queue->limits.max_discard_sectors) 998 disable_discard(md); 999 else if (bio_op(bio) == REQ_OP_WRITE_SAME && 1000 !bio->bi_disk->queue->limits.max_write_same_sectors) 1001 disable_write_same(md); 1002 else if (bio_op(bio) == REQ_OP_WRITE_ZEROES && 1003 !bio->bi_disk->queue->limits.max_write_zeroes_sectors) 1004 disable_write_zeroes(md); 1005 } 1006 1007 /* 1008 * For zone-append bios get offset in zone of the written 1009 * sector and add that to the original bio sector pos. 
1010 */ 1011 if (bio_op(orig_bio) == REQ_OP_ZONE_APPEND) { 1012 sector_t written_sector = bio->bi_iter.bi_sector; 1013 struct request_queue *q = orig_bio->bi_disk->queue; 1014 u64 mask = (u64)blk_queue_zone_sectors(q) - 1; 1015 1016 orig_bio->bi_iter.bi_sector += written_sector & mask; 1017 } 1018 1019 if (endio) { 1020 int r = endio(tio->ti, bio, &error); 1021 switch (r) { 1022 case DM_ENDIO_REQUEUE: 1023 error = BLK_STS_DM_REQUEUE; 1024 fallthrough; 1025 case DM_ENDIO_DONE: 1026 break; 1027 case DM_ENDIO_INCOMPLETE: 1028 /* The target will handle the io */ 1029 return; 1030 default: 1031 DMWARN("unimplemented target endio return value: %d", r); 1032 BUG(); 1033 } 1034 } 1035 1036 free_tio(tio); 1037 dec_pending(io, error); 1038 } 1039 1040 /* 1041 * Return maximum size of I/O possible at the supplied sector up to the current 1042 * target boundary. 1043 */ 1044 static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti) 1045 { 1046 sector_t target_offset = dm_target_offset(ti, sector); 1047 1048 return ti->len - target_offset; 1049 } 1050 1051 static sector_t max_io_len(sector_t sector, struct dm_target *ti) 1052 { 1053 sector_t len = max_io_len_target_boundary(sector, ti); 1054 sector_t offset, max_len; 1055 1056 /* 1057 * Does the target need to split even further? 1058 */ 1059 if (ti->max_io_len) { 1060 offset = dm_target_offset(ti, sector); 1061 if (unlikely(ti->max_io_len & (ti->max_io_len - 1))) 1062 max_len = sector_div(offset, ti->max_io_len); 1063 else 1064 max_len = offset & (ti->max_io_len - 1); 1065 max_len = ti->max_io_len - max_len; 1066 1067 if (len > max_len) 1068 len = max_len; 1069 } 1070 1071 return len; 1072 } 1073 1074 int dm_set_target_max_io_len(struct dm_target *ti, sector_t len) 1075 { 1076 if (len > UINT_MAX) { 1077 DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)", 1078 (unsigned long long)len, UINT_MAX); 1079 ti->error = "Maximum size of target IO is too large"; 1080 return -EINVAL; 1081 } 1082 1083 ti->max_io_len = (uint32_t) len; 1084 1085 return 0; 1086 } 1087 EXPORT_SYMBOL_GPL(dm_set_target_max_io_len); 1088 1089 static struct dm_target *dm_dax_get_live_target(struct mapped_device *md, 1090 sector_t sector, int *srcu_idx) 1091 __acquires(md->io_barrier) 1092 { 1093 struct dm_table *map; 1094 struct dm_target *ti; 1095 1096 map = dm_get_live_table(md, srcu_idx); 1097 if (!map) 1098 return NULL; 1099 1100 ti = dm_table_find_target(map, sector); 1101 if (!ti) 1102 return NULL; 1103 1104 return ti; 1105 } 1106 1107 static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, 1108 long nr_pages, void **kaddr, pfn_t *pfn) 1109 { 1110 struct mapped_device *md = dax_get_private(dax_dev); 1111 sector_t sector = pgoff * PAGE_SECTORS; 1112 struct dm_target *ti; 1113 long len, ret = -EIO; 1114 int srcu_idx; 1115 1116 ti = dm_dax_get_live_target(md, sector, &srcu_idx); 1117 1118 if (!ti) 1119 goto out; 1120 if (!ti->type->direct_access) 1121 goto out; 1122 len = max_io_len(sector, ti) / PAGE_SECTORS; 1123 if (len < 1) 1124 goto out; 1125 nr_pages = min(len, nr_pages); 1126 ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn); 1127 1128 out: 1129 dm_put_live_table(md, srcu_idx); 1130 1131 return ret; 1132 } 1133 1134 static bool dm_dax_supported(struct dax_device *dax_dev, struct block_device *bdev, 1135 int blocksize, sector_t start, sector_t len) 1136 { 1137 struct mapped_device *md = dax_get_private(dax_dev); 1138 struct dm_table *map; 1139 bool ret = false; 1140 int srcu_idx; 1141 1142 map = 
dm_get_live_table(md, &srcu_idx); 1143 if (!map) 1144 goto out; 1145 1146 ret = dm_table_supports_dax(map, device_supports_dax, &blocksize); 1147 1148 out: 1149 dm_put_live_table(md, srcu_idx); 1150 1151 return ret; 1152 } 1153 1154 static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, 1155 void *addr, size_t bytes, struct iov_iter *i) 1156 { 1157 struct mapped_device *md = dax_get_private(dax_dev); 1158 sector_t sector = pgoff * PAGE_SECTORS; 1159 struct dm_target *ti; 1160 long ret = 0; 1161 int srcu_idx; 1162 1163 ti = dm_dax_get_live_target(md, sector, &srcu_idx); 1164 1165 if (!ti) 1166 goto out; 1167 if (!ti->type->dax_copy_from_iter) { 1168 ret = copy_from_iter(addr, bytes, i); 1169 goto out; 1170 } 1171 ret = ti->type->dax_copy_from_iter(ti, pgoff, addr, bytes, i); 1172 out: 1173 dm_put_live_table(md, srcu_idx); 1174 1175 return ret; 1176 } 1177 1178 static size_t dm_dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, 1179 void *addr, size_t bytes, struct iov_iter *i) 1180 { 1181 struct mapped_device *md = dax_get_private(dax_dev); 1182 sector_t sector = pgoff * PAGE_SECTORS; 1183 struct dm_target *ti; 1184 long ret = 0; 1185 int srcu_idx; 1186 1187 ti = dm_dax_get_live_target(md, sector, &srcu_idx); 1188 1189 if (!ti) 1190 goto out; 1191 if (!ti->type->dax_copy_to_iter) { 1192 ret = copy_to_iter(addr, bytes, i); 1193 goto out; 1194 } 1195 ret = ti->type->dax_copy_to_iter(ti, pgoff, addr, bytes, i); 1196 out: 1197 dm_put_live_table(md, srcu_idx); 1198 1199 return ret; 1200 } 1201 1202 static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff, 1203 size_t nr_pages) 1204 { 1205 struct mapped_device *md = dax_get_private(dax_dev); 1206 sector_t sector = pgoff * PAGE_SECTORS; 1207 struct dm_target *ti; 1208 int ret = -EIO; 1209 int srcu_idx; 1210 1211 ti = dm_dax_get_live_target(md, sector, &srcu_idx); 1212 1213 if (!ti) 1214 goto out; 1215 if (WARN_ON(!ti->type->dax_zero_page_range)) { 1216 /* 1217 * ->zero_page_range() is mandatory dax operation. If we are 1218 * here, something is wrong. 1219 */ 1220 dm_put_live_table(md, srcu_idx); 1221 goto out; 1222 } 1223 ret = ti->type->dax_zero_page_range(ti, pgoff, nr_pages); 1224 1225 out: 1226 dm_put_live_table(md, srcu_idx); 1227 1228 return ret; 1229 } 1230 1231 /* 1232 * A target may call dm_accept_partial_bio only from the map routine. It is 1233 * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_RESET, 1234 * REQ_OP_ZONE_OPEN, REQ_OP_ZONE_CLOSE and REQ_OP_ZONE_FINISH. 1235 * 1236 * dm_accept_partial_bio informs the dm that the target only wants to process 1237 * additional n_sectors sectors of the bio and the rest of the data should be 1238 * sent in a next bio. 1239 * 1240 * A diagram that explains the arithmetics: 1241 * +--------------------+---------------+-------+ 1242 * | 1 | 2 | 3 | 1243 * +--------------------+---------------+-------+ 1244 * 1245 * <-------------- *tio->len_ptr ---------------> 1246 * <------- bi_size -------> 1247 * <-- n_sectors --> 1248 * 1249 * Region 1 was already iterated over with bio_advance or similar function. 1250 * (it may be empty if the target doesn't use bio_advance) 1251 * Region 2 is the remaining bio size that the target wants to process. 1252 * (it may be empty if region 1 is non-empty, although there is no reason 1253 * to make it empty) 1254 * The target requires that region 3 is to be sent in the next bio. 
1255 * 1256 * If the target wants to receive multiple copies of the bio (via num_*bios, etc), 1257 * the partially processed part (the sum of regions 1+2) must be the same for all 1258 * copies of the bio. 1259 */ 1260 void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors) 1261 { 1262 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); 1263 unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT; 1264 BUG_ON(bio->bi_opf & REQ_PREFLUSH); 1265 BUG_ON(bi_size > *tio->len_ptr); 1266 BUG_ON(n_sectors > bi_size); 1267 *tio->len_ptr -= bi_size - n_sectors; 1268 bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT; 1269 } 1270 EXPORT_SYMBOL_GPL(dm_accept_partial_bio); 1271 1272 static blk_qc_t __map_bio(struct dm_target_io *tio) 1273 { 1274 int r; 1275 sector_t sector; 1276 struct bio *clone = &tio->clone; 1277 struct dm_io *io = tio->io; 1278 struct dm_target *ti = tio->ti; 1279 blk_qc_t ret = BLK_QC_T_NONE; 1280 1281 clone->bi_end_io = clone_endio; 1282 1283 /* 1284 * Map the clone. If r == 0 we don't need to do 1285 * anything, the target has assumed ownership of 1286 * this io. 1287 */ 1288 atomic_inc(&io->io_count); 1289 sector = clone->bi_iter.bi_sector; 1290 1291 r = ti->type->map(ti, clone); 1292 switch (r) { 1293 case DM_MAPIO_SUBMITTED: 1294 break; 1295 case DM_MAPIO_REMAPPED: 1296 /* the bio has been remapped so dispatch it */ 1297 trace_block_bio_remap(clone->bi_disk->queue, clone, 1298 bio_dev(io->orig_bio), sector); 1299 ret = submit_bio_noacct(clone); 1300 break; 1301 case DM_MAPIO_KILL: 1302 free_tio(tio); 1303 dec_pending(io, BLK_STS_IOERR); 1304 break; 1305 case DM_MAPIO_REQUEUE: 1306 free_tio(tio); 1307 dec_pending(io, BLK_STS_DM_REQUEUE); 1308 break; 1309 default: 1310 DMWARN("unimplemented target map return value: %d", r); 1311 BUG(); 1312 } 1313 1314 return ret; 1315 } 1316 1317 static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len) 1318 { 1319 bio->bi_iter.bi_sector = sector; 1320 bio->bi_iter.bi_size = to_bytes(len); 1321 } 1322 1323 /* 1324 * Creates a bio that consists of range of complete bvecs. 
1325 */ 1326 static int clone_bio(struct dm_target_io *tio, struct bio *bio, 1327 sector_t sector, unsigned len) 1328 { 1329 struct bio *clone = &tio->clone; 1330 1331 __bio_clone_fast(clone, bio); 1332 1333 bio_crypt_clone(clone, bio, GFP_NOIO); 1334 1335 if (bio_integrity(bio)) { 1336 int r; 1337 1338 if (unlikely(!dm_target_has_integrity(tio->ti->type) && 1339 !dm_target_passes_integrity(tio->ti->type))) { 1340 DMWARN("%s: the target %s doesn't support integrity data.", 1341 dm_device_name(tio->io->md), 1342 tio->ti->type->name); 1343 return -EIO; 1344 } 1345 1346 r = bio_integrity_clone(clone, bio, GFP_NOIO); 1347 if (r < 0) 1348 return r; 1349 } 1350 1351 bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector)); 1352 clone->bi_iter.bi_size = to_bytes(len); 1353 1354 if (bio_integrity(bio)) 1355 bio_integrity_trim(clone); 1356 1357 return 0; 1358 } 1359 1360 static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci, 1361 struct dm_target *ti, unsigned num_bios) 1362 { 1363 struct dm_target_io *tio; 1364 int try; 1365 1366 if (!num_bios) 1367 return; 1368 1369 if (num_bios == 1) { 1370 tio = alloc_tio(ci, ti, 0, GFP_NOIO); 1371 bio_list_add(blist, &tio->clone); 1372 return; 1373 } 1374 1375 for (try = 0; try < 2; try++) { 1376 int bio_nr; 1377 struct bio *bio; 1378 1379 if (try) 1380 mutex_lock(&ci->io->md->table_devices_lock); 1381 for (bio_nr = 0; bio_nr < num_bios; bio_nr++) { 1382 tio = alloc_tio(ci, ti, bio_nr, try ? GFP_NOIO : GFP_NOWAIT); 1383 if (!tio) 1384 break; 1385 1386 bio_list_add(blist, &tio->clone); 1387 } 1388 if (try) 1389 mutex_unlock(&ci->io->md->table_devices_lock); 1390 if (bio_nr == num_bios) 1391 return; 1392 1393 while ((bio = bio_list_pop(blist))) { 1394 tio = container_of(bio, struct dm_target_io, clone); 1395 free_tio(tio); 1396 } 1397 } 1398 } 1399 1400 static blk_qc_t __clone_and_map_simple_bio(struct clone_info *ci, 1401 struct dm_target_io *tio, unsigned *len) 1402 { 1403 struct bio *clone = &tio->clone; 1404 1405 tio->len_ptr = len; 1406 1407 __bio_clone_fast(clone, ci->bio); 1408 if (len) 1409 bio_setup_sector(clone, ci->sector, *len); 1410 1411 return __map_bio(tio); 1412 } 1413 1414 static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti, 1415 unsigned num_bios, unsigned *len) 1416 { 1417 struct bio_list blist = BIO_EMPTY_LIST; 1418 struct bio *bio; 1419 struct dm_target_io *tio; 1420 1421 alloc_multiple_bios(&blist, ci, ti, num_bios); 1422 1423 while ((bio = bio_list_pop(&blist))) { 1424 tio = container_of(bio, struct dm_target_io, clone); 1425 (void) __clone_and_map_simple_bio(ci, tio, len); 1426 } 1427 } 1428 1429 static int __send_empty_flush(struct clone_info *ci) 1430 { 1431 unsigned target_nr = 0; 1432 struct dm_target *ti; 1433 1434 /* 1435 * Empty flush uses a statically initialized bio, as the base for 1436 * cloning. However, blkg association requires that a bdev is 1437 * associated with a gendisk, which doesn't happen until the bdev is 1438 * opened. So, blkg association is done at issue time of the flush 1439 * rather than when the device is created in alloc_dev(). 
1440 */ 1441 bio_set_dev(ci->bio, ci->io->md->bdev); 1442 1443 BUG_ON(bio_has_data(ci->bio)); 1444 while ((ti = dm_table_get_target(ci->map, target_nr++))) 1445 __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL); 1446 return 0; 1447 } 1448 1449 static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti, 1450 sector_t sector, unsigned *len) 1451 { 1452 struct bio *bio = ci->bio; 1453 struct dm_target_io *tio; 1454 int r; 1455 1456 tio = alloc_tio(ci, ti, 0, GFP_NOIO); 1457 tio->len_ptr = len; 1458 r = clone_bio(tio, bio, sector, *len); 1459 if (r < 0) { 1460 free_tio(tio); 1461 return r; 1462 } 1463 (void) __map_bio(tio); 1464 1465 return 0; 1466 } 1467 1468 typedef unsigned (*get_num_bios_fn)(struct dm_target *ti); 1469 1470 static unsigned get_num_discard_bios(struct dm_target *ti) 1471 { 1472 return ti->num_discard_bios; 1473 } 1474 1475 static unsigned get_num_secure_erase_bios(struct dm_target *ti) 1476 { 1477 return ti->num_secure_erase_bios; 1478 } 1479 1480 static unsigned get_num_write_same_bios(struct dm_target *ti) 1481 { 1482 return ti->num_write_same_bios; 1483 } 1484 1485 static unsigned get_num_write_zeroes_bios(struct dm_target *ti) 1486 { 1487 return ti->num_write_zeroes_bios; 1488 } 1489 1490 static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti, 1491 unsigned num_bios) 1492 { 1493 unsigned len; 1494 1495 /* 1496 * Even though the device advertised support for this type of 1497 * request, that does not mean every target supports it, and 1498 * reconfiguration might also have changed that since the 1499 * check was performed. 1500 */ 1501 if (!num_bios) 1502 return -EOPNOTSUPP; 1503 1504 len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti)); 1505 1506 __send_duplicate_bios(ci, ti, num_bios, &len); 1507 1508 ci->sector += len; 1509 ci->sector_count -= len; 1510 1511 return 0; 1512 } 1513 1514 static int __send_discard(struct clone_info *ci, struct dm_target *ti) 1515 { 1516 return __send_changing_extent_only(ci, ti, get_num_discard_bios(ti)); 1517 } 1518 1519 static int __send_secure_erase(struct clone_info *ci, struct dm_target *ti) 1520 { 1521 return __send_changing_extent_only(ci, ti, get_num_secure_erase_bios(ti)); 1522 } 1523 1524 static int __send_write_same(struct clone_info *ci, struct dm_target *ti) 1525 { 1526 return __send_changing_extent_only(ci, ti, get_num_write_same_bios(ti)); 1527 } 1528 1529 static int __send_write_zeroes(struct clone_info *ci, struct dm_target *ti) 1530 { 1531 return __send_changing_extent_only(ci, ti, get_num_write_zeroes_bios(ti)); 1532 } 1533 1534 static bool is_abnormal_io(struct bio *bio) 1535 { 1536 bool r = false; 1537 1538 switch (bio_op(bio)) { 1539 case REQ_OP_DISCARD: 1540 case REQ_OP_SECURE_ERASE: 1541 case REQ_OP_WRITE_SAME: 1542 case REQ_OP_WRITE_ZEROES: 1543 r = true; 1544 break; 1545 } 1546 1547 return r; 1548 } 1549 1550 static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti, 1551 int *result) 1552 { 1553 struct bio *bio = ci->bio; 1554 1555 if (bio_op(bio) == REQ_OP_DISCARD) 1556 *result = __send_discard(ci, ti); 1557 else if (bio_op(bio) == REQ_OP_SECURE_ERASE) 1558 *result = __send_secure_erase(ci, ti); 1559 else if (bio_op(bio) == REQ_OP_WRITE_SAME) 1560 *result = __send_write_same(ci, ti); 1561 else if (bio_op(bio) == REQ_OP_WRITE_ZEROES) 1562 *result = __send_write_zeroes(ci, ti); 1563 else 1564 return false; 1565 1566 return true; 1567 } 1568 1569 /* 1570 * Select the correct strategy for processing a non-flush 
bio. 1571 */ 1572 static int __split_and_process_non_flush(struct clone_info *ci) 1573 { 1574 struct dm_target *ti; 1575 unsigned len; 1576 int r; 1577 1578 ti = dm_table_find_target(ci->map, ci->sector); 1579 if (!ti) 1580 return -EIO; 1581 1582 if (__process_abnormal_io(ci, ti, &r)) 1583 return r; 1584 1585 len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count); 1586 1587 r = __clone_and_map_data_bio(ci, ti, ci->sector, &len); 1588 if (r < 0) 1589 return r; 1590 1591 ci->sector += len; 1592 ci->sector_count -= len; 1593 1594 return 0; 1595 } 1596 1597 static void init_clone_info(struct clone_info *ci, struct mapped_device *md, 1598 struct dm_table *map, struct bio *bio) 1599 { 1600 ci->map = map; 1601 ci->io = alloc_io(md, bio); 1602 ci->sector = bio->bi_iter.bi_sector; 1603 } 1604 1605 #define __dm_part_stat_sub(part, field, subnd) \ 1606 (part_stat_get(part, field) -= (subnd)) 1607 1608 /* 1609 * Entry point to split a bio into clones and submit them to the targets. 1610 */ 1611 static blk_qc_t __split_and_process_bio(struct mapped_device *md, 1612 struct dm_table *map, struct bio *bio) 1613 { 1614 struct clone_info ci; 1615 blk_qc_t ret = BLK_QC_T_NONE; 1616 int error = 0; 1617 1618 init_clone_info(&ci, md, map, bio); 1619 1620 if (bio->bi_opf & REQ_PREFLUSH) { 1621 struct bio flush_bio; 1622 1623 /* 1624 * Use an on-stack bio for this, it's safe since we don't 1625 * need to reference it after submit. It's just used as 1626 * the basis for the clone(s). 1627 */ 1628 bio_init(&flush_bio, NULL, 0); 1629 flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC; 1630 ci.bio = &flush_bio; 1631 ci.sector_count = 0; 1632 error = __send_empty_flush(&ci); 1633 bio_uninit(ci.bio); 1634 /* dec_pending submits any data associated with flush */ 1635 } else if (op_is_zone_mgmt(bio_op(bio))) { 1636 ci.bio = bio; 1637 ci.sector_count = 0; 1638 error = __split_and_process_non_flush(&ci); 1639 } else { 1640 ci.bio = bio; 1641 ci.sector_count = bio_sectors(bio); 1642 while (ci.sector_count && !error) { 1643 error = __split_and_process_non_flush(&ci); 1644 if (current->bio_list && ci.sector_count && !error) { 1645 /* 1646 * Remainder must be passed to submit_bio_noacct() 1647 * so that it gets handled *after* bios already submitted 1648 * have been completely processed. 1649 * We take a clone of the original to store in 1650 * ci.io->orig_bio to be used by end_io_acct() and 1651 * for dec_pending to use for completion handling. 1652 */ 1653 struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count, 1654 GFP_NOIO, &md->queue->bio_split); 1655 ci.io->orig_bio = b; 1656 1657 /* 1658 * Adjust IO stats for each split, otherwise upon queue 1659 * reentry there will be redundant IO accounting. 1660 * NOTE: this is a stop-gap fix, a proper fix involves 1661 * significant refactoring of DM core's bio splitting 1662 * (by eliminating DM's splitting and just using bio_split) 1663 */ 1664 part_stat_lock(); 1665 __dm_part_stat_sub(&dm_disk(md)->part0, 1666 sectors[op_stat_group(bio_op(bio))], ci.sector_count); 1667 part_stat_unlock(); 1668 1669 bio_chain(b, bio); 1670 trace_block_split(md->queue, b, bio->bi_iter.bi_sector); 1671 ret = submit_bio_noacct(bio); 1672 break; 1673 } 1674 } 1675 } 1676 1677 /* drop the extra reference count */ 1678 dec_pending(ci.io, errno_to_blk_status(error)); 1679 return ret; 1680 } 1681 1682 /* 1683 * Optimized variant of __split_and_process_bio that leverages the 1684 * fact that targets that use it do _not_ have a need to split bios. 
1685 */ 1686 static blk_qc_t __process_bio(struct mapped_device *md, struct dm_table *map, 1687 struct bio *bio, struct dm_target *ti) 1688 { 1689 struct clone_info ci; 1690 blk_qc_t ret = BLK_QC_T_NONE; 1691 int error = 0; 1692 1693 init_clone_info(&ci, md, map, bio); 1694 1695 if (bio->bi_opf & REQ_PREFLUSH) { 1696 struct bio flush_bio; 1697 1698 /* 1699 * Use an on-stack bio for this, it's safe since we don't 1700 * need to reference it after submit. It's just used as 1701 * the basis for the clone(s). 1702 */ 1703 bio_init(&flush_bio, NULL, 0); 1704 flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC; 1705 ci.bio = &flush_bio; 1706 ci.sector_count = 0; 1707 error = __send_empty_flush(&ci); 1708 bio_uninit(ci.bio); 1709 /* dec_pending submits any data associated with flush */ 1710 } else { 1711 struct dm_target_io *tio; 1712 1713 ci.bio = bio; 1714 ci.sector_count = bio_sectors(bio); 1715 if (__process_abnormal_io(&ci, ti, &error)) 1716 goto out; 1717 1718 tio = alloc_tio(&ci, ti, 0, GFP_NOIO); 1719 ret = __clone_and_map_simple_bio(&ci, tio, NULL); 1720 } 1721 out: 1722 /* drop the extra reference count */ 1723 dec_pending(ci.io, errno_to_blk_status(error)); 1724 return ret; 1725 } 1726 1727 static void dm_queue_split(struct mapped_device *md, struct dm_target *ti, struct bio **bio) 1728 { 1729 unsigned len, sector_count; 1730 1731 sector_count = bio_sectors(*bio); 1732 len = min_t(sector_t, max_io_len((*bio)->bi_iter.bi_sector, ti), sector_count); 1733 1734 if (sector_count > len) { 1735 struct bio *split = bio_split(*bio, len, GFP_NOIO, &md->queue->bio_split); 1736 1737 bio_chain(split, *bio); 1738 trace_block_split(md->queue, split, (*bio)->bi_iter.bi_sector); 1739 submit_bio_noacct(*bio); 1740 *bio = split; 1741 } 1742 } 1743 1744 static blk_qc_t dm_process_bio(struct mapped_device *md, 1745 struct dm_table *map, struct bio *bio) 1746 { 1747 blk_qc_t ret = BLK_QC_T_NONE; 1748 struct dm_target *ti = md->immutable_target; 1749 1750 if (unlikely(!map)) { 1751 bio_io_error(bio); 1752 return ret; 1753 } 1754 1755 if (!ti) { 1756 ti = dm_table_find_target(map, bio->bi_iter.bi_sector); 1757 if (unlikely(!ti)) { 1758 bio_io_error(bio); 1759 return ret; 1760 } 1761 } 1762 1763 /* 1764 * If in ->queue_bio we need to use blk_queue_split(), otherwise 1765 * queue_limits for abnormal requests (e.g. discard, writesame, etc) 1766 * won't be imposed. 1767 */ 1768 if (current->bio_list) { 1769 if (is_abnormal_io(bio)) 1770 blk_queue_split(&bio); 1771 else 1772 dm_queue_split(md, ti, &bio); 1773 } 1774 1775 if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED) 1776 return __process_bio(md, map, bio, ti); 1777 else 1778 return __split_and_process_bio(md, map, bio); 1779 } 1780 1781 static blk_qc_t dm_submit_bio(struct bio *bio) 1782 { 1783 struct mapped_device *md = bio->bi_disk->private_data; 1784 blk_qc_t ret = BLK_QC_T_NONE; 1785 int srcu_idx; 1786 struct dm_table *map; 1787 1788 if (dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) { 1789 /* 1790 * We are called with a live reference on q_usage_counter, but 1791 * that one will be released as soon as we return. Grab an 1792 * extra one as blk_mq_submit_bio expects to be able to consume 1793 * a reference (which lives until the request is freed in case a 1794 * request is allocated). 
1795 */ 1796 percpu_ref_get(&bio->bi_disk->queue->q_usage_counter); 1797 return blk_mq_submit_bio(bio); 1798 } 1799 1800 map = dm_get_live_table(md, &srcu_idx); 1801 1802 /* if we're suspended, we have to queue this io for later */ 1803 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { 1804 dm_put_live_table(md, srcu_idx); 1805 1806 if (!(bio->bi_opf & REQ_RAHEAD)) 1807 queue_io(md, bio); 1808 else 1809 bio_io_error(bio); 1810 return ret; 1811 } 1812 1813 ret = dm_process_bio(md, map, bio); 1814 1815 dm_put_live_table(md, srcu_idx); 1816 return ret; 1817 } 1818 1819 /*----------------------------------------------------------------- 1820 * An IDR is used to keep track of allocated minor numbers. 1821 *---------------------------------------------------------------*/ 1822 static void free_minor(int minor) 1823 { 1824 spin_lock(&_minor_lock); 1825 idr_remove(&_minor_idr, minor); 1826 spin_unlock(&_minor_lock); 1827 } 1828 1829 /* 1830 * See if the device with a specific minor # is free. 1831 */ 1832 static int specific_minor(int minor) 1833 { 1834 int r; 1835 1836 if (minor >= (1 << MINORBITS)) 1837 return -EINVAL; 1838 1839 idr_preload(GFP_KERNEL); 1840 spin_lock(&_minor_lock); 1841 1842 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT); 1843 1844 spin_unlock(&_minor_lock); 1845 idr_preload_end(); 1846 if (r < 0) 1847 return r == -ENOSPC ? -EBUSY : r; 1848 return 0; 1849 } 1850 1851 static int next_free_minor(int *minor) 1852 { 1853 int r; 1854 1855 idr_preload(GFP_KERNEL); 1856 spin_lock(&_minor_lock); 1857 1858 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT); 1859 1860 spin_unlock(&_minor_lock); 1861 idr_preload_end(); 1862 if (r < 0) 1863 return r; 1864 *minor = r; 1865 return 0; 1866 } 1867 1868 static const struct block_device_operations dm_blk_dops; 1869 static const struct dax_operations dm_dax_ops; 1870 1871 static void dm_wq_work(struct work_struct *work); 1872 1873 static void cleanup_mapped_device(struct mapped_device *md) 1874 { 1875 if (md->wq) 1876 destroy_workqueue(md->wq); 1877 bioset_exit(&md->bs); 1878 bioset_exit(&md->io_bs); 1879 1880 if (md->dax_dev) { 1881 kill_dax(md->dax_dev); 1882 put_dax(md->dax_dev); 1883 md->dax_dev = NULL; 1884 } 1885 1886 if (md->disk) { 1887 spin_lock(&_minor_lock); 1888 md->disk->private_data = NULL; 1889 spin_unlock(&_minor_lock); 1890 del_gendisk(md->disk); 1891 put_disk(md->disk); 1892 } 1893 1894 if (md->queue) 1895 blk_cleanup_queue(md->queue); 1896 1897 cleanup_srcu_struct(&md->io_barrier); 1898 1899 if (md->bdev) { 1900 bdput(md->bdev); 1901 md->bdev = NULL; 1902 } 1903 1904 mutex_destroy(&md->suspend_lock); 1905 mutex_destroy(&md->type_lock); 1906 mutex_destroy(&md->table_devices_lock); 1907 1908 dm_mq_cleanup_mapped_device(md); 1909 } 1910 1911 /* 1912 * Allocate and initialise a blank device with a given minor. 
1913 */ 1914 static struct mapped_device *alloc_dev(int minor) 1915 { 1916 int r, numa_node_id = dm_get_numa_node(); 1917 struct mapped_device *md; 1918 void *old_md; 1919 1920 md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id); 1921 if (!md) { 1922 DMWARN("unable to allocate device, out of memory."); 1923 return NULL; 1924 } 1925 1926 if (!try_module_get(THIS_MODULE)) 1927 goto bad_module_get; 1928 1929 /* get a minor number for the dev */ 1930 if (minor == DM_ANY_MINOR) 1931 r = next_free_minor(&minor); 1932 else 1933 r = specific_minor(minor); 1934 if (r < 0) 1935 goto bad_minor; 1936 1937 r = init_srcu_struct(&md->io_barrier); 1938 if (r < 0) 1939 goto bad_io_barrier; 1940 1941 md->numa_node_id = numa_node_id; 1942 md->init_tio_pdu = false; 1943 md->type = DM_TYPE_NONE; 1944 mutex_init(&md->suspend_lock); 1945 mutex_init(&md->type_lock); 1946 mutex_init(&md->table_devices_lock); 1947 spin_lock_init(&md->deferred_lock); 1948 atomic_set(&md->holders, 1); 1949 atomic_set(&md->open_count, 0); 1950 atomic_set(&md->event_nr, 0); 1951 atomic_set(&md->uevent_seq, 0); 1952 INIT_LIST_HEAD(&md->uevent_list); 1953 INIT_LIST_HEAD(&md->table_devices); 1954 spin_lock_init(&md->uevent_lock); 1955 1956 /* 1957 * default to bio-based until DM table is loaded and md->type 1958 * established. If request-based table is loaded: blk-mq will 1959 * override accordingly. 1960 */ 1961 md->queue = blk_alloc_queue(numa_node_id); 1962 if (!md->queue) 1963 goto bad; 1964 1965 md->disk = alloc_disk_node(1, md->numa_node_id); 1966 if (!md->disk) 1967 goto bad; 1968 1969 init_waitqueue_head(&md->wait); 1970 INIT_WORK(&md->work, dm_wq_work); 1971 init_waitqueue_head(&md->eventq); 1972 init_completion(&md->kobj_holder.completion); 1973 1974 md->disk->major = _major; 1975 md->disk->first_minor = minor; 1976 md->disk->fops = &dm_blk_dops; 1977 md->disk->queue = md->queue; 1978 md->disk->private_data = md; 1979 sprintf(md->disk->disk_name, "dm-%d", minor); 1980 1981 if (IS_ENABLED(CONFIG_DAX_DRIVER)) { 1982 md->dax_dev = alloc_dax(md, md->disk->disk_name, 1983 &dm_dax_ops, 0); 1984 if (IS_ERR(md->dax_dev)) 1985 goto bad; 1986 } 1987 1988 add_disk_no_queue_reg(md->disk); 1989 format_dev_t(md->name, MKDEV(_major, minor)); 1990 1991 md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0); 1992 if (!md->wq) 1993 goto bad; 1994 1995 md->bdev = bdget_disk(md->disk, 0); 1996 if (!md->bdev) 1997 goto bad; 1998 1999 dm_stats_init(&md->stats); 2000 2001 /* Populate the mapping, nobody knows we exist yet */ 2002 spin_lock(&_minor_lock); 2003 old_md = idr_replace(&_minor_idr, md, minor); 2004 spin_unlock(&_minor_lock); 2005 2006 BUG_ON(old_md != MINOR_ALLOCED); 2007 2008 return md; 2009 2010 bad: 2011 cleanup_mapped_device(md); 2012 bad_io_barrier: 2013 free_minor(minor); 2014 bad_minor: 2015 module_put(THIS_MODULE); 2016 bad_module_get: 2017 kvfree(md); 2018 return NULL; 2019 } 2020 2021 static void unlock_fs(struct mapped_device *md); 2022 2023 static void free_dev(struct mapped_device *md) 2024 { 2025 int minor = MINOR(disk_devt(md->disk)); 2026 2027 unlock_fs(md); 2028 2029 cleanup_mapped_device(md); 2030 2031 free_table_devices(&md->table_devices); 2032 dm_stats_cleanup(&md->stats); 2033 free_minor(minor); 2034 2035 module_put(THIS_MODULE); 2036 kvfree(md); 2037 } 2038 2039 static int __bind_mempools(struct mapped_device *md, struct dm_table *t) 2040 { 2041 struct dm_md_mempools *p = dm_table_get_md_mempools(t); 2042 int ret = 0; 2043 2044 if (dm_table_bio_based(t)) { 2045 /* 2046 * The md may already have mempools that 
need changing. 2047 * If so, reload bioset because front_pad may have changed 2048 * because a different table was loaded. 2049 */ 2050 bioset_exit(&md->bs); 2051 bioset_exit(&md->io_bs); 2052 2053 } else if (bioset_initialized(&md->bs)) { 2054 /* 2055 * There's no need to reload with request-based dm 2056 * because the size of front_pad doesn't change. 2057 * Note for future: If you are to reload bioset, 2058 * prep-ed requests in the queue may refer 2059 * to bio from the old bioset, so you must walk 2060 * through the queue to unprep. 2061 */ 2062 goto out; 2063 } 2064 2065 BUG_ON(!p || 2066 bioset_initialized(&md->bs) || 2067 bioset_initialized(&md->io_bs)); 2068 2069 ret = bioset_init_from_src(&md->bs, &p->bs); 2070 if (ret) 2071 goto out; 2072 ret = bioset_init_from_src(&md->io_bs, &p->io_bs); 2073 if (ret) 2074 bioset_exit(&md->bs); 2075 out: 2076 /* mempool bind completed, no longer need any mempools in the table */ 2077 dm_table_free_md_mempools(t); 2078 return ret; 2079 } 2080 2081 /* 2082 * Bind a table to the device. 2083 */ 2084 static void event_callback(void *context) 2085 { 2086 unsigned long flags; 2087 LIST_HEAD(uevents); 2088 struct mapped_device *md = (struct mapped_device *) context; 2089 2090 spin_lock_irqsave(&md->uevent_lock, flags); 2091 list_splice_init(&md->uevent_list, &uevents); 2092 spin_unlock_irqrestore(&md->uevent_lock, flags); 2093 2094 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); 2095 2096 atomic_inc(&md->event_nr); 2097 wake_up(&md->eventq); 2098 dm_issue_global_event(); 2099 } 2100 2101 /* 2102 * Protected by md->suspend_lock obtained by dm_swap_table(). 2103 */ 2104 static void __set_size(struct mapped_device *md, sector_t size) 2105 { 2106 lockdep_assert_held(&md->suspend_lock); 2107 2108 set_capacity(md->disk, size); 2109 2110 i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT); 2111 } 2112 2113 /* 2114 * Returns old map, which caller must destroy. 2115 */ 2116 static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, 2117 struct queue_limits *limits) 2118 { 2119 struct dm_table *old_map; 2120 struct request_queue *q = md->queue; 2121 bool request_based = dm_table_request_based(t); 2122 sector_t size; 2123 int ret; 2124 2125 lockdep_assert_held(&md->suspend_lock); 2126 2127 size = dm_table_get_size(t); 2128 2129 /* 2130 * Wipe any geometry if the size of the table changed. 2131 */ 2132 if (size != dm_get_size(md)) 2133 memset(&md->geometry, 0, sizeof(md->geometry)); 2134 2135 __set_size(md, size); 2136 2137 dm_table_event_callback(t, event_callback, md); 2138 2139 /* 2140 * The queue hasn't been stopped yet, if the old table type wasn't 2141 * for request-based during suspension. So stop it to prevent 2142 * I/O mapping before resume. 2143 * This must be done before setting the queue restrictions, 2144 * because request-based dm may be run just after the setting. 2145 */ 2146 if (request_based) 2147 dm_stop_queue(q); 2148 2149 if (request_based || md->type == DM_TYPE_NVME_BIO_BASED) { 2150 /* 2151 * Leverage the fact that request-based DM targets and 2152 * NVMe bio based targets are immutable singletons 2153 * - used to optimize both dm_request_fn and dm_mq_queue_rq; 2154 * and __process_bio. 
2155 */ 2156 md->immutable_target = dm_table_get_immutable_target(t); 2157 } 2158 2159 ret = __bind_mempools(md, t); 2160 if (ret) { 2161 old_map = ERR_PTR(ret); 2162 goto out; 2163 } 2164 2165 old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2166 rcu_assign_pointer(md->map, (void *)t); 2167 md->immutable_target_type = dm_table_get_immutable_target_type(t); 2168 2169 dm_table_set_restrictions(t, q, limits); 2170 if (old_map) 2171 dm_sync_table(md); 2172 2173 out: 2174 return old_map; 2175 } 2176 2177 /* 2178 * Returns unbound table for the caller to free. 2179 */ 2180 static struct dm_table *__unbind(struct mapped_device *md) 2181 { 2182 struct dm_table *map = rcu_dereference_protected(md->map, 1); 2183 2184 if (!map) 2185 return NULL; 2186 2187 dm_table_event_callback(map, NULL, NULL); 2188 RCU_INIT_POINTER(md->map, NULL); 2189 dm_sync_table(md); 2190 2191 return map; 2192 } 2193 2194 /* 2195 * Constructor for a new device. 2196 */ 2197 int dm_create(int minor, struct mapped_device **result) 2198 { 2199 int r; 2200 struct mapped_device *md; 2201 2202 md = alloc_dev(minor); 2203 if (!md) 2204 return -ENXIO; 2205 2206 r = dm_sysfs_init(md); 2207 if (r) { 2208 free_dev(md); 2209 return r; 2210 } 2211 2212 *result = md; 2213 return 0; 2214 } 2215 2216 /* 2217 * Functions to manage md->type. 2218 * All are required to hold md->type_lock. 2219 */ 2220 void dm_lock_md_type(struct mapped_device *md) 2221 { 2222 mutex_lock(&md->type_lock); 2223 } 2224 2225 void dm_unlock_md_type(struct mapped_device *md) 2226 { 2227 mutex_unlock(&md->type_lock); 2228 } 2229 2230 void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type) 2231 { 2232 BUG_ON(!mutex_is_locked(&md->type_lock)); 2233 md->type = type; 2234 } 2235 2236 enum dm_queue_mode dm_get_md_type(struct mapped_device *md) 2237 { 2238 return md->type; 2239 } 2240 2241 struct target_type *dm_get_immutable_target_type(struct mapped_device *md) 2242 { 2243 return md->immutable_target_type; 2244 } 2245 2246 /* 2247 * The queue_limits are only valid as long as you have a reference 2248 * count on 'md'. 
2249 */ 2250 struct queue_limits *dm_get_queue_limits(struct mapped_device *md) 2251 { 2252 BUG_ON(!atomic_read(&md->holders)); 2253 return &md->queue->limits; 2254 } 2255 EXPORT_SYMBOL_GPL(dm_get_queue_limits); 2256 2257 /* 2258 * Setup the DM device's queue based on md's type 2259 */ 2260 int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) 2261 { 2262 int r; 2263 struct queue_limits limits; 2264 enum dm_queue_mode type = dm_get_md_type(md); 2265 2266 switch (type) { 2267 case DM_TYPE_REQUEST_BASED: 2268 r = dm_mq_init_request_queue(md, t); 2269 if (r) { 2270 DMERR("Cannot initialize queue for request-based dm-mq mapped device"); 2271 return r; 2272 } 2273 break; 2274 case DM_TYPE_BIO_BASED: 2275 case DM_TYPE_DAX_BIO_BASED: 2276 case DM_TYPE_NVME_BIO_BASED: 2277 break; 2278 case DM_TYPE_NONE: 2279 WARN_ON_ONCE(true); 2280 break; 2281 } 2282 2283 r = dm_calculate_queue_limits(t, &limits); 2284 if (r) { 2285 DMERR("Cannot calculate initial queue limits"); 2286 return r; 2287 } 2288 dm_table_set_restrictions(t, md->queue, &limits); 2289 blk_register_queue(md->disk); 2290 2291 return 0; 2292 } 2293 2294 struct mapped_device *dm_get_md(dev_t dev) 2295 { 2296 struct mapped_device *md; 2297 unsigned minor = MINOR(dev); 2298 2299 if (MAJOR(dev) != _major || minor >= (1 << MINORBITS)) 2300 return NULL; 2301 2302 spin_lock(&_minor_lock); 2303 2304 md = idr_find(&_minor_idr, minor); 2305 if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) || 2306 test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { 2307 md = NULL; 2308 goto out; 2309 } 2310 dm_get(md); 2311 out: 2312 spin_unlock(&_minor_lock); 2313 2314 return md; 2315 } 2316 EXPORT_SYMBOL_GPL(dm_get_md); 2317 2318 void *dm_get_mdptr(struct mapped_device *md) 2319 { 2320 return md->interface_ptr; 2321 } 2322 2323 void dm_set_mdptr(struct mapped_device *md, void *ptr) 2324 { 2325 md->interface_ptr = ptr; 2326 } 2327 2328 void dm_get(struct mapped_device *md) 2329 { 2330 atomic_inc(&md->holders); 2331 BUG_ON(test_bit(DMF_FREEING, &md->flags)); 2332 } 2333 2334 int dm_hold(struct mapped_device *md) 2335 { 2336 spin_lock(&_minor_lock); 2337 if (test_bit(DMF_FREEING, &md->flags)) { 2338 spin_unlock(&_minor_lock); 2339 return -EBUSY; 2340 } 2341 dm_get(md); 2342 spin_unlock(&_minor_lock); 2343 return 0; 2344 } 2345 EXPORT_SYMBOL_GPL(dm_hold); 2346 2347 const char *dm_device_name(struct mapped_device *md) 2348 { 2349 return md->name; 2350 } 2351 EXPORT_SYMBOL_GPL(dm_device_name); 2352 2353 static void __dm_destroy(struct mapped_device *md, bool wait) 2354 { 2355 struct dm_table *map; 2356 int srcu_idx; 2357 2358 might_sleep(); 2359 2360 spin_lock(&_minor_lock); 2361 idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); 2362 set_bit(DMF_FREEING, &md->flags); 2363 spin_unlock(&_minor_lock); 2364 2365 blk_set_queue_dying(md->queue); 2366 2367 /* 2368 * Take suspend_lock so that presuspend and postsuspend methods 2369 * do not race with internal suspend. 
2370 */ 2371 mutex_lock(&md->suspend_lock); 2372 map = dm_get_live_table(md, &srcu_idx); 2373 if (!dm_suspended_md(md)) { 2374 dm_table_presuspend_targets(map); 2375 set_bit(DMF_SUSPENDED, &md->flags); 2376 set_bit(DMF_POST_SUSPENDING, &md->flags); 2377 dm_table_postsuspend_targets(map); 2378 } 2379 /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ 2380 dm_put_live_table(md, srcu_idx); 2381 mutex_unlock(&md->suspend_lock); 2382 2383 /* 2384 * Rare, but there may be I/O requests still going to complete, 2385 * for example. Wait for all references to disappear. 2386 * No one should increment the reference count of the mapped_device, 2387 * after the mapped_device state becomes DMF_FREEING. 2388 */ 2389 if (wait) 2390 while (atomic_read(&md->holders)) 2391 msleep(1); 2392 else if (atomic_read(&md->holders)) 2393 DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)", 2394 dm_device_name(md), atomic_read(&md->holders)); 2395 2396 dm_sysfs_exit(md); 2397 dm_table_destroy(__unbind(md)); 2398 free_dev(md); 2399 } 2400 2401 void dm_destroy(struct mapped_device *md) 2402 { 2403 __dm_destroy(md, true); 2404 } 2405 2406 void dm_destroy_immediate(struct mapped_device *md) 2407 { 2408 __dm_destroy(md, false); 2409 } 2410 2411 void dm_put(struct mapped_device *md) 2412 { 2413 atomic_dec(&md->holders); 2414 } 2415 EXPORT_SYMBOL_GPL(dm_put); 2416 2417 static bool md_in_flight_bios(struct mapped_device *md) 2418 { 2419 int cpu; 2420 struct hd_struct *part = &dm_disk(md)->part0; 2421 long sum = 0; 2422 2423 for_each_possible_cpu(cpu) { 2424 sum += part_stat_local_read_cpu(part, in_flight[0], cpu); 2425 sum += part_stat_local_read_cpu(part, in_flight[1], cpu); 2426 } 2427 2428 return sum != 0; 2429 } 2430 2431 static int dm_wait_for_bios_completion(struct mapped_device *md, long task_state) 2432 { 2433 int r = 0; 2434 DEFINE_WAIT(wait); 2435 2436 while (true) { 2437 prepare_to_wait(&md->wait, &wait, task_state); 2438 2439 if (!md_in_flight_bios(md)) 2440 break; 2441 2442 if (signal_pending_state(task_state, current)) { 2443 r = -EINTR; 2444 break; 2445 } 2446 2447 io_schedule(); 2448 } 2449 finish_wait(&md->wait, &wait); 2450 2451 return r; 2452 } 2453 2454 static int dm_wait_for_completion(struct mapped_device *md, long task_state) 2455 { 2456 int r = 0; 2457 2458 if (!queue_is_mq(md->queue)) 2459 return dm_wait_for_bios_completion(md, task_state); 2460 2461 while (true) { 2462 if (!blk_mq_queue_inflight(md->queue)) 2463 break; 2464 2465 if (signal_pending_state(task_state, current)) { 2466 r = -EINTR; 2467 break; 2468 } 2469 2470 msleep(5); 2471 } 2472 2473 return r; 2474 } 2475 2476 /* 2477 * Process the deferred bios 2478 */ 2479 static void dm_wq_work(struct work_struct *work) 2480 { 2481 struct mapped_device *md = container_of(work, struct mapped_device, 2482 work); 2483 struct bio *c; 2484 int srcu_idx; 2485 struct dm_table *map; 2486 2487 map = dm_get_live_table(md, &srcu_idx); 2488 2489 while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 2490 spin_lock_irq(&md->deferred_lock); 2491 c = bio_list_pop(&md->deferred); 2492 spin_unlock_irq(&md->deferred_lock); 2493 2494 if (!c) 2495 break; 2496 2497 if (dm_request_based(md)) 2498 (void) submit_bio_noacct(c); 2499 else 2500 (void) dm_process_bio(md, map, c); 2501 } 2502 2503 dm_put_live_table(md, srcu_idx); 2504 } 2505 2506 static void dm_queue_flush(struct mapped_device *md) 2507 { 2508 clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 2509 smp_mb__after_atomic(); 2510 queue_work(md->wq, &md->work); 2511 } 
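/*
 * Illustrative sketch (not part of the original file): how an in-kernel
 * caller might pin a mapped_device by dev_t before inspecting it.  The
 * holder reference taken by dm_get_md() is what __dm_destroy() waits on
 * in its "while (atomic_read(&md->holders))" loop, so dm_put() must be
 * called once the caller is done.  example_inspect_dm_device() is a
 * hypothetical helper, not a dm core API.
 *
 *	static int example_inspect_dm_device(dev_t dev)
 *	{
 *		struct mapped_device *md = dm_get_md(dev);
 *
 *		if (!md)
 *			return -ENODEV;		// not a live dm device
 *
 *		pr_info("inspecting %s\n", dm_device_name(md));
 *		dm_put(md);			// drop the holder reference
 *		return 0;
 *	}
 */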
2512 2513 /* 2514 * Swap in a new table, returning the old one for the caller to destroy. 2515 */ 2516 struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) 2517 { 2518 struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL); 2519 struct queue_limits limits; 2520 int r; 2521 2522 mutex_lock(&md->suspend_lock); 2523 2524 /* device must be suspended */ 2525 if (!dm_suspended_md(md)) 2526 goto out; 2527 2528 /* 2529 * If the new table has no data devices, retain the existing limits. 2530 * This helps multipath with queue_if_no_path if all paths disappear, 2531 * then new I/O is queued based on these limits, and then some paths 2532 * reappear. 2533 */ 2534 if (dm_table_has_no_data_devices(table)) { 2535 live_map = dm_get_live_table_fast(md); 2536 if (live_map) 2537 limits = md->queue->limits; 2538 dm_put_live_table_fast(md); 2539 } 2540 2541 if (!live_map) { 2542 r = dm_calculate_queue_limits(table, &limits); 2543 if (r) { 2544 map = ERR_PTR(r); 2545 goto out; 2546 } 2547 } 2548 2549 map = __bind(md, table, &limits); 2550 dm_issue_global_event(); 2551 2552 out: 2553 mutex_unlock(&md->suspend_lock); 2554 return map; 2555 } 2556 2557 /* 2558 * Functions to lock and unlock any filesystem running on the 2559 * device. 2560 */ 2561 static int lock_fs(struct mapped_device *md) 2562 { 2563 int r; 2564 2565 WARN_ON(md->frozen_sb); 2566 2567 md->frozen_sb = freeze_bdev(md->bdev); 2568 if (IS_ERR(md->frozen_sb)) { 2569 r = PTR_ERR(md->frozen_sb); 2570 md->frozen_sb = NULL; 2571 return r; 2572 } 2573 2574 set_bit(DMF_FROZEN, &md->flags); 2575 2576 return 0; 2577 } 2578 2579 static void unlock_fs(struct mapped_device *md) 2580 { 2581 if (!test_bit(DMF_FROZEN, &md->flags)) 2582 return; 2583 2584 thaw_bdev(md->bdev, md->frozen_sb); 2585 md->frozen_sb = NULL; 2586 clear_bit(DMF_FROZEN, &md->flags); 2587 } 2588 2589 /* 2590 * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG 2591 * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE 2592 * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY 2593 * 2594 * If __dm_suspend returns 0, the device is completely quiescent 2595 * now. There is no request-processing activity. All new requests 2596 * are being added to md->deferred list. 2597 */ 2598 static int __dm_suspend(struct mapped_device *md, struct dm_table *map, 2599 unsigned suspend_flags, long task_state, 2600 int dmf_suspended_flag) 2601 { 2602 bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG; 2603 bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG; 2604 int r; 2605 2606 lockdep_assert_held(&md->suspend_lock); 2607 2608 /* 2609 * DMF_NOFLUSH_SUSPENDING must be set before presuspend. 2610 * This flag is cleared before dm_suspend returns. 2611 */ 2612 if (noflush) 2613 set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 2614 else 2615 DMDEBUG("%s: suspending with flush", dm_device_name(md)); 2616 2617 /* 2618 * This gets reverted if there's an error later and the targets 2619 * provide the .presuspend_undo hook. 2620 */ 2621 dm_table_presuspend_targets(map); 2622 2623 /* 2624 * Flush I/O to the device. 2625 * Any I/O submitted after lock_fs() may not be flushed. 2626 * noflush takes precedence over do_lockfs. 2627 * (lock_fs() flushes I/Os and waits for them to complete.) 2628 */ 2629 if (!noflush && do_lockfs) { 2630 r = lock_fs(md); 2631 if (r) { 2632 dm_table_presuspend_undo_targets(map); 2633 return r; 2634 } 2635 } 2636 2637 /* 2638 * Here we must make sure that no processes are submitting requests 2639 * to target drivers i.e. 
no one may be executing
2640  * __split_and_process_bio. This is called from dm_submit_bio and
2641  * dm_wq_work.
2642  *
2643  * To get all processes out of __split_and_process_bio in dm_submit_bio,
2644  * we take the write lock. To prevent any process from reentering
2645  * __split_and_process_bio from dm_submit_bio and quiesce the thread
2646  * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
2647  * flush_workqueue(md->wq).
2648  */
2649 	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2650 	if (map)
2651 		synchronize_srcu(&md->io_barrier);
2652
2653 	/*
2654 	 * Stop md->queue before flushing md->wq in case request-based
2655 	 * dm defers requests to md->wq from md->queue.
2656 	 */
2657 	if (dm_request_based(md))
2658 		dm_stop_queue(md->queue);
2659
2660 	flush_workqueue(md->wq);
2661
2662 	/*
2663 	 * At this point no more requests are entering target request routines.
2664 	 * We call dm_wait_for_completion to wait for all existing requests
2665 	 * to finish.
2666 	 */
2667 	r = dm_wait_for_completion(md, task_state);
2668 	if (!r)
2669 		set_bit(dmf_suspended_flag, &md->flags);
2670
2671 	if (noflush)
2672 		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2673 	if (map)
2674 		synchronize_srcu(&md->io_barrier);
2675
2676 	/* were we interrupted ? */
2677 	if (r < 0) {
2678 		dm_queue_flush(md);
2679
2680 		if (dm_request_based(md))
2681 			dm_start_queue(md->queue);
2682
2683 		unlock_fs(md);
2684 		dm_table_presuspend_undo_targets(map);
2685 		/* pushback list is already flushed, so skip flush */
2686 	}
2687
2688 	return r;
2689 }
2690
2691 /*
2692  * We need to be able to change a mapping table under a mounted
2693  * filesystem. For example we might want to move some data in
2694  * the background. Before the table can be swapped with
2695  * dm_bind_table, dm_suspend must be called to flush any in
2696  * flight bios and ensure that any further io gets deferred.
2697  */
2698 /*
2699  * Suspend mechanism in request-based dm.
2700  *
2701  * 1. Flush all I/Os by lock_fs() if needed.
2702  * 2. Stop dispatching any I/O by stopping the request_queue.
2703  * 3. Wait for all in-flight I/Os to be completed or requeued.
2704  *
2705  * To abort suspend, start the request_queue.
2706  */
2707 int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
2708 {
2709 	struct dm_table *map = NULL;
2710 	int r = 0;
2711
2712 retry:
2713 	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
2714
2715 	if (dm_suspended_md(md)) {
2716 		r = -EINVAL;
2717 		goto out_unlock;
2718 	}
2719
2720 	if (dm_suspended_internally_md(md)) {
2721 		/* already internally suspended, wait for internal resume */
2722 		mutex_unlock(&md->suspend_lock);
2723 		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
2724 		if (r)
2725 			return r;
2726 		goto retry;
2727 	}
2728
2729 	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2730
2731 	r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);
2732 	if (r)
2733 		goto out_unlock;
2734
2735 	set_bit(DMF_POST_SUSPENDING, &md->flags);
2736 	dm_table_postsuspend_targets(map);
2737 	clear_bit(DMF_POST_SUSPENDING, &md->flags);
2738
2739 out_unlock:
2740 	mutex_unlock(&md->suspend_lock);
2741 	return r;
2742 }
2743
2744 static int __dm_resume(struct mapped_device *md, struct dm_table *map)
2745 {
2746 	if (map) {
2747 		int r = dm_table_resume_targets(map);
2748 		if (r)
2749 			return r;
2750 	}
2751
2752 	dm_queue_flush(md);
2753
2754 	/*
2755 	 * Flushing deferred I/Os must be done after targets are resumed
2756 	 * so that mapping of targets can work correctly.
2757 * Request-based dm is queueing the deferred I/Os in its request_queue. 2758 */ 2759 if (dm_request_based(md)) 2760 dm_start_queue(md->queue); 2761 2762 unlock_fs(md); 2763 2764 return 0; 2765 } 2766 2767 int dm_resume(struct mapped_device *md) 2768 { 2769 int r; 2770 struct dm_table *map = NULL; 2771 2772 retry: 2773 r = -EINVAL; 2774 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); 2775 2776 if (!dm_suspended_md(md)) 2777 goto out; 2778 2779 if (dm_suspended_internally_md(md)) { 2780 /* already internally suspended, wait for internal resume */ 2781 mutex_unlock(&md->suspend_lock); 2782 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); 2783 if (r) 2784 return r; 2785 goto retry; 2786 } 2787 2788 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2789 if (!map || !dm_table_get_size(map)) 2790 goto out; 2791 2792 r = __dm_resume(md, map); 2793 if (r) 2794 goto out; 2795 2796 clear_bit(DMF_SUSPENDED, &md->flags); 2797 out: 2798 mutex_unlock(&md->suspend_lock); 2799 2800 return r; 2801 } 2802 2803 /* 2804 * Internal suspend/resume works like userspace-driven suspend. It waits 2805 * until all bios finish and prevents issuing new bios to the target drivers. 2806 * It may be used only from the kernel. 2807 */ 2808 2809 static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags) 2810 { 2811 struct dm_table *map = NULL; 2812 2813 lockdep_assert_held(&md->suspend_lock); 2814 2815 if (md->internal_suspend_count++) 2816 return; /* nested internal suspend */ 2817 2818 if (dm_suspended_md(md)) { 2819 set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 2820 return; /* nest suspend */ 2821 } 2822 2823 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2824 2825 /* 2826 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is 2827 * supported. Properly supporting a TASK_INTERRUPTIBLE internal suspend 2828 * would require changing .presuspend to return an error -- avoid this 2829 * until there is a need for more elaborate variants of internal suspend. 
2830 */ 2831 (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE, 2832 DMF_SUSPENDED_INTERNALLY); 2833 2834 set_bit(DMF_POST_SUSPENDING, &md->flags); 2835 dm_table_postsuspend_targets(map); 2836 clear_bit(DMF_POST_SUSPENDING, &md->flags); 2837 } 2838 2839 static void __dm_internal_resume(struct mapped_device *md) 2840 { 2841 BUG_ON(!md->internal_suspend_count); 2842 2843 if (--md->internal_suspend_count) 2844 return; /* resume from nested internal suspend */ 2845 2846 if (dm_suspended_md(md)) 2847 goto done; /* resume from nested suspend */ 2848 2849 /* 2850 * NOTE: existing callers don't need to call dm_table_resume_targets 2851 * (which may fail -- so best to avoid it for now by passing NULL map) 2852 */ 2853 (void) __dm_resume(md, NULL); 2854 2855 done: 2856 clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 2857 smp_mb__after_atomic(); 2858 wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY); 2859 } 2860 2861 void dm_internal_suspend_noflush(struct mapped_device *md) 2862 { 2863 mutex_lock(&md->suspend_lock); 2864 __dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG); 2865 mutex_unlock(&md->suspend_lock); 2866 } 2867 EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush); 2868 2869 void dm_internal_resume(struct mapped_device *md) 2870 { 2871 mutex_lock(&md->suspend_lock); 2872 __dm_internal_resume(md); 2873 mutex_unlock(&md->suspend_lock); 2874 } 2875 EXPORT_SYMBOL_GPL(dm_internal_resume); 2876 2877 /* 2878 * Fast variants of internal suspend/resume hold md->suspend_lock, 2879 * which prevents interaction with userspace-driven suspend. 2880 */ 2881 2882 void dm_internal_suspend_fast(struct mapped_device *md) 2883 { 2884 mutex_lock(&md->suspend_lock); 2885 if (dm_suspended_md(md) || dm_suspended_internally_md(md)) 2886 return; 2887 2888 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 2889 synchronize_srcu(&md->io_barrier); 2890 flush_workqueue(md->wq); 2891 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); 2892 } 2893 EXPORT_SYMBOL_GPL(dm_internal_suspend_fast); 2894 2895 void dm_internal_resume_fast(struct mapped_device *md) 2896 { 2897 if (dm_suspended_md(md) || dm_suspended_internally_md(md)) 2898 goto done; 2899 2900 dm_queue_flush(md); 2901 2902 done: 2903 mutex_unlock(&md->suspend_lock); 2904 } 2905 EXPORT_SYMBOL_GPL(dm_internal_resume_fast); 2906 2907 /*----------------------------------------------------------------- 2908 * Event notification. 
2909 *---------------------------------------------------------------*/ 2910 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, 2911 unsigned cookie) 2912 { 2913 int r; 2914 unsigned noio_flag; 2915 char udev_cookie[DM_COOKIE_LENGTH]; 2916 char *envp[] = { udev_cookie, NULL }; 2917 2918 noio_flag = memalloc_noio_save(); 2919 2920 if (!cookie) 2921 r = kobject_uevent(&disk_to_dev(md->disk)->kobj, action); 2922 else { 2923 snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u", 2924 DM_COOKIE_ENV_VAR_NAME, cookie); 2925 r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj, 2926 action, envp); 2927 } 2928 2929 memalloc_noio_restore(noio_flag); 2930 2931 return r; 2932 } 2933 2934 uint32_t dm_next_uevent_seq(struct mapped_device *md) 2935 { 2936 return atomic_add_return(1, &md->uevent_seq); 2937 } 2938 2939 uint32_t dm_get_event_nr(struct mapped_device *md) 2940 { 2941 return atomic_read(&md->event_nr); 2942 } 2943 2944 int dm_wait_event(struct mapped_device *md, int event_nr) 2945 { 2946 return wait_event_interruptible(md->eventq, 2947 (event_nr != atomic_read(&md->event_nr))); 2948 } 2949 2950 void dm_uevent_add(struct mapped_device *md, struct list_head *elist) 2951 { 2952 unsigned long flags; 2953 2954 spin_lock_irqsave(&md->uevent_lock, flags); 2955 list_add(elist, &md->uevent_list); 2956 spin_unlock_irqrestore(&md->uevent_lock, flags); 2957 } 2958 2959 /* 2960 * The gendisk is only valid as long as you have a reference 2961 * count on 'md'. 2962 */ 2963 struct gendisk *dm_disk(struct mapped_device *md) 2964 { 2965 return md->disk; 2966 } 2967 EXPORT_SYMBOL_GPL(dm_disk); 2968 2969 struct kobject *dm_kobject(struct mapped_device *md) 2970 { 2971 return &md->kobj_holder.kobj; 2972 } 2973 2974 struct mapped_device *dm_get_from_kobject(struct kobject *kobj) 2975 { 2976 struct mapped_device *md; 2977 2978 md = container_of(kobj, struct mapped_device, kobj_holder.kobj); 2979 2980 spin_lock(&_minor_lock); 2981 if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { 2982 md = NULL; 2983 goto out; 2984 } 2985 dm_get(md); 2986 out: 2987 spin_unlock(&_minor_lock); 2988 2989 return md; 2990 } 2991 2992 int dm_suspended_md(struct mapped_device *md) 2993 { 2994 return test_bit(DMF_SUSPENDED, &md->flags); 2995 } 2996 2997 static int dm_post_suspending_md(struct mapped_device *md) 2998 { 2999 return test_bit(DMF_POST_SUSPENDING, &md->flags); 3000 } 3001 3002 int dm_suspended_internally_md(struct mapped_device *md) 3003 { 3004 return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 3005 } 3006 3007 int dm_test_deferred_remove_flag(struct mapped_device *md) 3008 { 3009 return test_bit(DMF_DEFERRED_REMOVE, &md->flags); 3010 } 3011 3012 int dm_suspended(struct dm_target *ti) 3013 { 3014 return dm_suspended_md(dm_table_get_md(ti->table)); 3015 } 3016 EXPORT_SYMBOL_GPL(dm_suspended); 3017 3018 int dm_post_suspending(struct dm_target *ti) 3019 { 3020 return dm_post_suspending_md(dm_table_get_md(ti->table)); 3021 } 3022 EXPORT_SYMBOL_GPL(dm_post_suspending); 3023 3024 int dm_noflush_suspending(struct dm_target *ti) 3025 { 3026 return __noflush_suspending(dm_table_get_md(ti->table)); 3027 } 3028 EXPORT_SYMBOL_GPL(dm_noflush_suspending); 3029 3030 struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type, 3031 unsigned integrity, unsigned per_io_data_size, 3032 unsigned min_pool_size) 3033 { 3034 struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id); 3035 unsigned int pool_size = 0; 3036 unsigned int front_pad, 
io_front_pad; 3037 int ret; 3038 3039 if (!pools) 3040 return NULL; 3041 3042 switch (type) { 3043 case DM_TYPE_BIO_BASED: 3044 case DM_TYPE_DAX_BIO_BASED: 3045 case DM_TYPE_NVME_BIO_BASED: 3046 pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size); 3047 front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone); 3048 io_front_pad = roundup(front_pad, __alignof__(struct dm_io)) + offsetof(struct dm_io, tio); 3049 ret = bioset_init(&pools->io_bs, pool_size, io_front_pad, 0); 3050 if (ret) 3051 goto out; 3052 if (integrity && bioset_integrity_create(&pools->io_bs, pool_size)) 3053 goto out; 3054 break; 3055 case DM_TYPE_REQUEST_BASED: 3056 pool_size = max(dm_get_reserved_rq_based_ios(), min_pool_size); 3057 front_pad = offsetof(struct dm_rq_clone_bio_info, clone); 3058 /* per_io_data_size is used for blk-mq pdu at queue allocation */ 3059 break; 3060 default: 3061 BUG(); 3062 } 3063 3064 ret = bioset_init(&pools->bs, pool_size, front_pad, 0); 3065 if (ret) 3066 goto out; 3067 3068 if (integrity && bioset_integrity_create(&pools->bs, pool_size)) 3069 goto out; 3070 3071 return pools; 3072 3073 out: 3074 dm_free_md_mempools(pools); 3075 3076 return NULL; 3077 } 3078 3079 void dm_free_md_mempools(struct dm_md_mempools *pools) 3080 { 3081 if (!pools) 3082 return; 3083 3084 bioset_exit(&pools->bs); 3085 bioset_exit(&pools->io_bs); 3086 3087 kfree(pools); 3088 } 3089 3090 struct dm_pr { 3091 u64 old_key; 3092 u64 new_key; 3093 u32 flags; 3094 bool fail_early; 3095 }; 3096 3097 static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn, 3098 void *data) 3099 { 3100 struct mapped_device *md = bdev->bd_disk->private_data; 3101 struct dm_table *table; 3102 struct dm_target *ti; 3103 int ret = -ENOTTY, srcu_idx; 3104 3105 table = dm_get_live_table(md, &srcu_idx); 3106 if (!table || !dm_table_get_size(table)) 3107 goto out; 3108 3109 /* We only support devices that have a single target */ 3110 if (dm_table_get_num_targets(table) != 1) 3111 goto out; 3112 ti = dm_table_get_target(table, 0); 3113 3114 ret = -EINVAL; 3115 if (!ti->type->iterate_devices) 3116 goto out; 3117 3118 ret = ti->type->iterate_devices(ti, fn, data); 3119 out: 3120 dm_put_live_table(md, srcu_idx); 3121 return ret; 3122 } 3123 3124 /* 3125 * For register / unregister we need to manually call out to every path. 
3126 */ 3127 static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev, 3128 sector_t start, sector_t len, void *data) 3129 { 3130 struct dm_pr *pr = data; 3131 const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; 3132 3133 if (!ops || !ops->pr_register) 3134 return -EOPNOTSUPP; 3135 return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags); 3136 } 3137 3138 static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key, 3139 u32 flags) 3140 { 3141 struct dm_pr pr = { 3142 .old_key = old_key, 3143 .new_key = new_key, 3144 .flags = flags, 3145 .fail_early = true, 3146 }; 3147 int ret; 3148 3149 ret = dm_call_pr(bdev, __dm_pr_register, &pr); 3150 if (ret && new_key) { 3151 /* unregister all paths if we failed to register any path */ 3152 pr.old_key = new_key; 3153 pr.new_key = 0; 3154 pr.flags = 0; 3155 pr.fail_early = false; 3156 dm_call_pr(bdev, __dm_pr_register, &pr); 3157 } 3158 3159 return ret; 3160 } 3161 3162 static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type, 3163 u32 flags) 3164 { 3165 struct mapped_device *md = bdev->bd_disk->private_data; 3166 const struct pr_ops *ops; 3167 int r, srcu_idx; 3168 3169 r = dm_prepare_ioctl(md, &srcu_idx, &bdev); 3170 if (r < 0) 3171 goto out; 3172 3173 ops = bdev->bd_disk->fops->pr_ops; 3174 if (ops && ops->pr_reserve) 3175 r = ops->pr_reserve(bdev, key, type, flags); 3176 else 3177 r = -EOPNOTSUPP; 3178 out: 3179 dm_unprepare_ioctl(md, srcu_idx); 3180 return r; 3181 } 3182 3183 static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type) 3184 { 3185 struct mapped_device *md = bdev->bd_disk->private_data; 3186 const struct pr_ops *ops; 3187 int r, srcu_idx; 3188 3189 r = dm_prepare_ioctl(md, &srcu_idx, &bdev); 3190 if (r < 0) 3191 goto out; 3192 3193 ops = bdev->bd_disk->fops->pr_ops; 3194 if (ops && ops->pr_release) 3195 r = ops->pr_release(bdev, key, type); 3196 else 3197 r = -EOPNOTSUPP; 3198 out: 3199 dm_unprepare_ioctl(md, srcu_idx); 3200 return r; 3201 } 3202 3203 static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key, 3204 enum pr_type type, bool abort) 3205 { 3206 struct mapped_device *md = bdev->bd_disk->private_data; 3207 const struct pr_ops *ops; 3208 int r, srcu_idx; 3209 3210 r = dm_prepare_ioctl(md, &srcu_idx, &bdev); 3211 if (r < 0) 3212 goto out; 3213 3214 ops = bdev->bd_disk->fops->pr_ops; 3215 if (ops && ops->pr_preempt) 3216 r = ops->pr_preempt(bdev, old_key, new_key, type, abort); 3217 else 3218 r = -EOPNOTSUPP; 3219 out: 3220 dm_unprepare_ioctl(md, srcu_idx); 3221 return r; 3222 } 3223 3224 static int dm_pr_clear(struct block_device *bdev, u64 key) 3225 { 3226 struct mapped_device *md = bdev->bd_disk->private_data; 3227 const struct pr_ops *ops; 3228 int r, srcu_idx; 3229 3230 r = dm_prepare_ioctl(md, &srcu_idx, &bdev); 3231 if (r < 0) 3232 goto out; 3233 3234 ops = bdev->bd_disk->fops->pr_ops; 3235 if (ops && ops->pr_clear) 3236 r = ops->pr_clear(bdev, key); 3237 else 3238 r = -EOPNOTSUPP; 3239 out: 3240 dm_unprepare_ioctl(md, srcu_idx); 3241 return r; 3242 } 3243 3244 static const struct pr_ops dm_pr_ops = { 3245 .pr_register = dm_pr_register, 3246 .pr_reserve = dm_pr_reserve, 3247 .pr_release = dm_pr_release, 3248 .pr_preempt = dm_pr_preempt, 3249 .pr_clear = dm_pr_clear, 3250 }; 3251 3252 static const struct block_device_operations dm_blk_dops = { 3253 .submit_bio = dm_submit_bio, 3254 .open = dm_blk_open, 3255 .release = dm_blk_close, 3256 .ioctl = dm_blk_ioctl, 3257 .getgeo = dm_blk_getgeo, 3258 
.report_zones = dm_blk_report_zones, 3259 .pr_ops = &dm_pr_ops, 3260 .owner = THIS_MODULE 3261 }; 3262 3263 static const struct dax_operations dm_dax_ops = { 3264 .direct_access = dm_dax_direct_access, 3265 .dax_supported = dm_dax_supported, 3266 .copy_from_iter = dm_dax_copy_from_iter, 3267 .copy_to_iter = dm_dax_copy_to_iter, 3268 .zero_page_range = dm_dax_zero_page_range, 3269 }; 3270 3271 /* 3272 * module hooks 3273 */ 3274 module_init(dm_init); 3275 module_exit(dm_exit); 3276 3277 module_param(major, uint, 0); 3278 MODULE_PARM_DESC(major, "The major number of the device mapper"); 3279 3280 module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR); 3281 MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools"); 3282 3283 module_param(dm_numa_node, int, S_IRUGO | S_IWUSR); 3284 MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations"); 3285 3286 MODULE_DESCRIPTION(DM_NAME " driver"); 3287 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); 3288 MODULE_LICENSE("GPL"); 3289
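/*
 * Illustrative sketch (not part of the original file): how a bio-based
 * target's ->end_io method might consult dm_noflush_suspending() when an
 * I/O error is seen, asking the core to requeue the bio rather than fail
 * it while a noflush suspend is in progress (the pattern used by
 * path-queueing targets such as multipath).  example_end_io() and its
 * simplified error test are hypothetical.
 *
 *	static int example_end_io(struct dm_target *ti, struct bio *bio,
 *				  blk_status_t *error)
 *	{
 *		if (*error && dm_noflush_suspending(ti)) {
 *			// bio is pushed back and reissued on resume
 *			return DM_ENDIO_REQUEUE;
 *		}
 *		return DM_ENDIO_DONE;
 *	}
 */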
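/*
 * Illustrative sketch (not part of the original file): the pr_ops table
 * above is reached through the generic persistent-reservation ioctls, so
 * a registration issued against a dm device fans out to every underlying
 * path via dm_call_pr()/__dm_pr_register().  Hypothetical userspace
 * snippet (dm_dev_fd is assumed to be an open fd on the dm device node):
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/pr.h>
 *
 *	struct pr_registration reg = {
 *		.old_key = 0,
 *		.new_key = 0x1234abcd,	// key registered on each path
 *		.flags	 = 0,
 *	};
 *	int ret = ioctl(dm_dev_fd, IOC_PR_REGISTER, &reg);
 */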