/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched/signal.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/uio.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/pr.h>
#include <linux/refcount.h>

#define DM_MSG_PREFIX "core"

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_IDR(_minor_idr);

static DEFINE_SPINLOCK(_minor_lock);

static void do_deferred_remove(struct work_struct *w);

static DECLARE_WORK(deferred_remove_work, do_deferred_remove);

static struct workqueue_struct *deferred_remove_workqueue;

atomic_t dm_global_event_nr = ATOMIC_INIT(0);
DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);

void dm_issue_global_event(void)
{
	atomic_inc(&dm_global_event_nr);
	wake_up(&dm_global_eventq);
}

/*
 * One of these is allocated (on-stack) per original bio.
 */
struct clone_info {
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	unsigned sector_count;
};

/*
 * One of these is allocated per clone bio.
 */
#define DM_TIO_MAGIC 7282014
struct dm_target_io {
	unsigned magic;
	struct dm_io *io;
	struct dm_target *ti;
	unsigned target_bio_nr;
	unsigned *len_ptr;
	bool inside_dm_io;
	struct bio clone;
};

/*
 * One of these is allocated per original bio.
 * It contains the first clone used for that original.
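 *
 * Note: per-bio data requested by a target via per_io_data_size is carved
 * out of the bioset front_pad directly in front of this structure (or in
 * front of the dm_target_io for additional clones), which is why
 * dm_per_bio_data() below can locate it by pointer arithmetic relative to
 * the embedded clone bio.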
 */
#define DM_IO_MAGIC 5191977
struct dm_io {
	unsigned magic;
	struct mapped_device *md;
	blk_status_t status;
	atomic_t io_count;
	struct bio *orig_bio;
	unsigned long start_time;
	spinlock_t endio_lock;
	struct dm_stats_aux stats_aux;
	/* last member of dm_target_io is 'struct bio' */
	struct dm_target_io tio;
};

void *dm_per_bio_data(struct bio *bio, size_t data_size)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	if (!tio->inside_dm_io)
		return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
	return (char *)bio - offsetof(struct dm_target_io, clone) - offsetof(struct dm_io, tio) - data_size;
}
EXPORT_SYMBOL_GPL(dm_per_bio_data);

struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
{
	struct dm_io *io = (struct dm_io *)((char *)data + data_size);
	if (io->magic == DM_IO_MAGIC)
		return (struct bio *)((char *)io + offsetof(struct dm_io, tio) + offsetof(struct dm_target_io, clone));
	BUG_ON(io->magic != DM_TIO_MAGIC);
	return (struct bio *)((char *)io + offsetof(struct dm_target_io, clone));
}
EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data);

unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
{
	return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
}
EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);

#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7

#define DM_NUMA_NODE NUMA_NO_NODE
static int dm_numa_node = DM_NUMA_NODE;

/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
	struct bio_set bs;
	struct bio_set io_bs;
};

struct table_device {
	struct list_head list;
	refcount_t count;
	struct dm_dev dm_dev;
};

/*
 * Bio-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_BIO_BASED_IOS		16
static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;

static int __dm_get_module_param_int(int *module_param, int min, int max)
{
	int param = READ_ONCE(*module_param);
	int modified_param = 0;
	bool modified = true;

	if (param < min)
		modified_param = min;
	else if (param > max)
		modified_param = max;
	else
		modified = false;

	if (modified) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned __dm_get_module_param(unsigned *module_param,
			       unsigned def, unsigned max)
{
	unsigned param = READ_ONCE(*module_param);
	unsigned modified_param = 0;

	if (!param)
		modified_param = def;
	else if (param > max)
		modified_param = max;

	if (modified_param) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned dm_get_reserved_bio_based_ios(void)
{
	return __dm_get_module_param(&reserved_bio_based_ios,
				     RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);

static unsigned dm_get_numa_node(void)
{
	return __dm_get_module_param_int(&dm_numa_node,
					 DM_NUMA_NODE, num_online_nodes() - 1);
}

static int __init local_init(void)
{
	int r;

	r = dm_uevent_init();
	if (r)
		return r;

	deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
	if (!deferred_remove_workqueue) {
		r = -ENOMEM;
		goto out_uevent_exit;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_free_workqueue;

	if (!_major)
		_major = r;

	return 0;

out_free_workqueue:
	destroy_workqueue(deferred_remove_workqueue);
out_uevent_exit:
	dm_uevent_exit();

	return r;
}

static void local_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(deferred_remove_workqueue);

	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_io_init,
	dm_kcopyd_init,
	dm_interface_init,
	dm_statistics_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_io_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
	dm_statistics_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();

	/*
	 * Should be empty by this point.
	 */
	idr_destroy(&_minor_idr);
}

/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{
	return test_bit(DMF_DELETING, &md->flags);
}

static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);
out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}

static void dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = disk->private_data;
	if (WARN_ON(!md))
		goto out;

	if (atomic_dec_and_test(&md->open_count) &&
	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
		queue_work(deferred_remove_workqueue, &deferred_remove_work);

	dm_put(md);
out:
	spin_unlock(&_minor_lock);
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md)) {
		r = -EBUSY;
		if (mark_deferred)
			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
		r = -EEXIST;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

int dm_cancel_deferred_remove(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (test_bit(DMF_DELETING, &md->flags))
		r = -EBUSY;
	else
		clear_bit(DMF_DEFERRED_REMOVE, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static void do_deferred_remove(struct work_struct *w)
{
	dm_deferred_remove();
}

sector_t dm_get_size(struct mapped_device *md)
{
	return get_capacity(md->disk);
}

struct request_queue *dm_get_md_queue(struct mapped_device *md)
{
	return md->queue;
}

struct dm_stats *dm_get_stats(struct mapped_device *md)
{
	return &md->stats;
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

#ifdef CONFIG_BLK_DEV_ZONED
int dm_report_zones_cb(struct blk_zone *zone, unsigned int idx, void *data)
{
	struct dm_report_zones_args *args = data;
	sector_t sector_diff = args->tgt->begin - args->start;

	/*
	 * Ignore zones beyond the target range.
	 */
	if (zone->start >= args->start + args->tgt->len)
		return 0;

	/*
	 * Remap the start sector and write pointer position of the zone
	 * to match its position in the target range.
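	 *
	 * For example, a zone reported at sector S of the underlying device
	 * ends up at S + (args->tgt->begin - args->start) in the DM device.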
	 */
	zone->start += sector_diff;
	if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL) {
		if (zone->cond == BLK_ZONE_COND_FULL)
			zone->wp = zone->start + zone->len;
		else if (zone->cond == BLK_ZONE_COND_EMPTY)
			zone->wp = zone->start;
		else
			zone->wp += sector_diff;
	}

	args->next_sector = zone->start + zone->len;
	return args->orig_cb(zone, args->zone_idx++, args->orig_data);
}
EXPORT_SYMBOL_GPL(dm_report_zones_cb);

static int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
			       unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct mapped_device *md = disk->private_data;
	struct dm_table *map;
	int srcu_idx, ret;
	struct dm_report_zones_args args = {
		.next_sector = sector,
		.orig_data = data,
		.orig_cb = cb,
	};

	if (dm_suspended_md(md))
		return -EAGAIN;

	map = dm_get_live_table(md, &srcu_idx);
	if (!map)
		return -EIO;

	do {
		struct dm_target *tgt;

		tgt = dm_table_find_target(map, args.next_sector);
		if (WARN_ON_ONCE(!tgt->type->report_zones)) {
			ret = -EIO;
			goto out;
		}

		args.tgt = tgt;
		ret = tgt->type->report_zones(tgt, &args, nr_zones);
		if (ret < 0)
			goto out;
	} while (args.zone_idx < nr_zones &&
		 args.next_sector < get_capacity(disk));

	ret = args.zone_idx;
out:
	dm_put_live_table(md, srcu_idx);
	return ret;
}
#else
#define dm_blk_report_zones	NULL
#endif /* CONFIG_BLK_DEV_ZONED */

static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
			    struct block_device **bdev)
	__acquires(md->io_barrier)
{
	struct dm_target *tgt;
	struct dm_table *map;
	int r;

retry:
	r = -ENOTTY;
	map = dm_get_live_table(md, srcu_idx);
	if (!map || !dm_table_get_size(map))
		return r;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		return r;

	tgt = dm_table_get_target(map, 0);
	if (!tgt->type->prepare_ioctl)
		return r;

	if (dm_suspended_md(md))
		return -EAGAIN;

	r = tgt->type->prepare_ioctl(tgt, bdev);
	if (r == -ENOTCONN && !fatal_signal_pending(current)) {
		dm_put_live_table(md, *srcu_idx);
		msleep(10);
		goto retry;
	}

	return r;
}

static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
	__releases(md->io_barrier)
{
	dm_put_live_table(md, srcu_idx);
}

static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	if (r > 0) {
		/*
		 * Target determined this ioctl is being issued against a
		 * subset of the parent bdev; require extra privileges.
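		 * (A positive return from prepare_ioctl means the target only
		 * maps part of the underlying device, so forwarding the ioctl
		 * could affect sectors outside the mapped range; hence the
		 * CAP_SYS_RAWIO check below.)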
		 */
		if (!capable(CAP_SYS_RAWIO)) {
			DMWARN_LIMIT(
				"%s: sending ioctl %x to DM device without required privilege.",
				current->comm, cmd);
			r = -ENOIOCTLCMD;
			goto out;
		}
	}

	r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}

static void start_io_acct(struct dm_io *io);

static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
{
	struct dm_io *io;
	struct dm_target_io *tio;
	struct bio *clone;

	clone = bio_alloc_bioset(GFP_NOIO, 0, &md->io_bs);
	if (!clone)
		return NULL;

	tio = container_of(clone, struct dm_target_io, clone);
	tio->inside_dm_io = true;
	tio->io = NULL;

	io = container_of(tio, struct dm_io, tio);
	io->magic = DM_IO_MAGIC;
	io->status = 0;
	atomic_set(&io->io_count, 1);
	io->orig_bio = bio;
	io->md = md;
	spin_lock_init(&io->endio_lock);

	start_io_acct(io);

	return io;
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	bio_put(&io->tio.clone);
}

static struct dm_target_io *alloc_tio(struct clone_info *ci, struct dm_target *ti,
				      unsigned target_bio_nr, gfp_t gfp_mask)
{
	struct dm_target_io *tio;

	if (!ci->io->tio.io) {
		/* the dm_target_io embedded in ci->io is available */
		tio = &ci->io->tio;
	} else {
		struct bio *clone = bio_alloc_bioset(gfp_mask, 0, &ci->io->md->bs);
		if (!clone)
			return NULL;

		tio = container_of(clone, struct dm_target_io, clone);
		tio->inside_dm_io = false;
	}

	tio->magic = DM_TIO_MAGIC;
	tio->io = ci->io;
	tio->ti = ti;
	tio->target_bio_nr = target_bio_nr;

	return tio;
}

static void free_tio(struct dm_target_io *tio)
{
	if (tio->inside_dm_io)
		return;
	bio_put(&tio->clone);
}

static bool md_in_flight_bios(struct mapped_device *md)
{
	int cpu;
	struct hd_struct *part = &dm_disk(md)->part0;
	long sum = 0;

	for_each_possible_cpu(cpu) {
		sum += part_stat_local_read_cpu(part, in_flight[0], cpu);
		sum += part_stat_local_read_cpu(part, in_flight[1], cpu);
	}

	return sum != 0;
}

static bool md_in_flight(struct mapped_device *md)
{
	if (queue_is_mq(md->queue))
		return blk_mq_queue_inflight(md->queue);
	else
		return md_in_flight_bios(md);
}

static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->orig_bio;

	io->start_time = jiffies;

	generic_start_io_acct(md->queue, bio_op(bio), bio_sectors(bio),
			      &dm_disk(md)->part0);

	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio_data_dir(bio),
				    bio->bi_iter.bi_sector, bio_sectors(bio),
				    false, 0, &io->stats_aux);
}

static void end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->orig_bio;
	unsigned long duration = jiffies - io->start_time;

	generic_end_io_acct(md->queue, bio_op(bio), &dm_disk(md)->part0,
			    io->start_time);

	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio_data_dir(bio),
				    bio->bi_iter.bi_sector, bio_sectors(bio),
				    true, duration, &io->stats_aux);

	/* nudge anyone waiting on suspend queue */
	if (unlikely(wq_has_sleeper(&md->wait)))
		wake_up(&md->wait);
}

/*
 * Add the bio to the list of deferred io.
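 * The deferred list is drained later by dm_wq_work() (the handler for
 * md->work queued on md->wq below), e.g. once a suspended device is resumed.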
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&md->deferred_lock, flags);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irqrestore(&md->deferred_lock, flags);
	queue_work(md->wq, &md->work);
}

/*
 * Everyone (including functions in this file), should use this
 * function to access the md->map field, and make sure they call
 * dm_put_live_table() when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
{
	*srcu_idx = srcu_read_lock(&md->io_barrier);

	return srcu_dereference(md->map, &md->io_barrier);
}

void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
{
	srcu_read_unlock(&md->io_barrier, srcu_idx);
}

void dm_sync_table(struct mapped_device *md)
{
	synchronize_srcu(&md->io_barrier);
	synchronize_rcu_expedited();
}

/*
 * A fast alternative to dm_get_live_table/dm_put_live_table.
 * The caller must not block between these two functions.
 */
static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
{
	rcu_read_lock();
	return rcu_dereference(md->map);
}

static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
{
	rcu_read_unlock();
}

static char *_dm_claim_ptr = "I belong to device-mapper";

/*
 * Open a table device so we can use it as a map destination.
 */
static int open_table_device(struct table_device *td, dev_t dev,
			     struct mapped_device *md)
{
	struct block_device *bdev;

	int r;

	BUG_ON(td->dm_dev.bdev);

	bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _dm_claim_ptr);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	r = bd_link_disk_holder(bdev, dm_disk(md));
	if (r) {
		blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
		return r;
	}

	td->dm_dev.bdev = bdev;
	td->dm_dev.dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	return 0;
}

/*
 * Close a table device that we've been using.
 */
static void close_table_device(struct table_device *td, struct mapped_device *md)
{
	if (!td->dm_dev.bdev)
		return;

	bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
	blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
	put_dax(td->dm_dev.dax_dev);
	td->dm_dev.bdev = NULL;
	td->dm_dev.dax_dev = NULL;
}

static struct table_device *find_table_device(struct list_head *l, dev_t dev,
					      fmode_t mode)
{
	struct table_device *td;

	list_for_each_entry(td, l, list)
		if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
			return td;

	return NULL;
}

int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
			struct dm_dev **result)
{
	int r;
	struct table_device *td;

	mutex_lock(&md->table_devices_lock);
	td = find_table_device(&md->table_devices, dev, mode);
	if (!td) {
		td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
		if (!td) {
			mutex_unlock(&md->table_devices_lock);
			return -ENOMEM;
		}

		td->dm_dev.mode = mode;
		td->dm_dev.bdev = NULL;

		if ((r = open_table_device(td, dev, md))) {
			mutex_unlock(&md->table_devices_lock);
			kfree(td);
			return r;
		}

		format_dev_t(td->dm_dev.name, dev);

		refcount_set(&td->count, 1);
		list_add(&td->list, &md->table_devices);
	} else {
		refcount_inc(&td->count);
	}
	mutex_unlock(&md->table_devices_lock);

	*result = &td->dm_dev;
	return 0;
}
EXPORT_SYMBOL_GPL(dm_get_table_device);

void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
{
	struct table_device *td = container_of(d, struct table_device, dm_dev);

	mutex_lock(&md->table_devices_lock);
	if (refcount_dec_and_test(&td->count)) {
		close_table_device(td, md);
		list_del(&td->list);
		kfree(td);
	}
	mutex_unlock(&md->table_devices_lock);
}
EXPORT_SYMBOL(dm_put_table_device);

static void free_table_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct table_device *td = list_entry(tmp, struct table_device, list);

		DMWARN("dm_destroy: %s still exists with %d references",
		       td->dm_dev.name, refcount_read(&td->count));
		kfree(td);
	}
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
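 *
 * If a target returned BLK_STS_DM_REQUEUE while a noflush suspend is in
 * progress, the original bio is pushed back onto md->deferred instead of
 * being completed, so it can be resubmitted after resume.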
 */
static void dec_pending(struct dm_io *io, blk_status_t error)
{
	unsigned long flags;
	blk_status_t io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;

	/* Push-back supersedes any I/O errors */
	if (unlikely(error)) {
		spin_lock_irqsave(&io->endio_lock, flags);
		if (!(io->status == BLK_STS_DM_REQUEUE && __noflush_suspending(md)))
			io->status = error;
		spin_unlock_irqrestore(&io->endio_lock, flags);
	}

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->status == BLK_STS_DM_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md))
				/* NOTE early return due to BLK_STS_DM_REQUEUE below */
				bio_list_add_head(&md->deferred, io->orig_bio);
			else
				/* noflush suspend was interrupted. */
				io->status = BLK_STS_IOERR;
			spin_unlock_irqrestore(&md->deferred_lock, flags);
		}

		io_error = io->status;
		bio = io->orig_bio;
		end_io_acct(io);
		free_io(md, io);

		if (io_error == BLK_STS_DM_REQUEUE)
			return;

		if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
			/*
			 * Preflush done for flush with data, reissue
			 * without REQ_PREFLUSH.
			 */
			bio->bi_opf &= ~REQ_PREFLUSH;
			queue_io(md, bio);
		} else {
			/* done with normal IO or empty flush */
			if (io_error)
				bio->bi_status = io_error;
			bio_endio(bio);
		}
	}
}

void disable_discard(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support DISCARD, disable it */
	limits->max_discard_sectors = 0;
	blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue);
}

void disable_write_same(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE SAME, disable it */
	limits->max_write_same_sectors = 0;
}

void disable_write_zeroes(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE ZEROES, disable it */
	limits->max_write_zeroes_sectors = 0;
}

static void clone_endio(struct bio *bio)
{
	blk_status_t error = bio->bi_status;
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
		if (bio_op(bio) == REQ_OP_DISCARD &&
		    !bio->bi_disk->queue->limits.max_discard_sectors)
			disable_discard(md);
		else if (bio_op(bio) == REQ_OP_WRITE_SAME &&
			 !bio->bi_disk->queue->limits.max_write_same_sectors)
			disable_write_same(md);
		else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
			 !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
			disable_write_zeroes(md);
	}

	if (endio) {
		int r = endio(tio->ti, bio, &error);
		switch (r) {
		case DM_ENDIO_REQUEUE:
			error = BLK_STS_DM_REQUEUE;
			/*FALLTHRU*/
		case DM_ENDIO_DONE:
			break;
		case DM_ENDIO_INCOMPLETE:
			/* The target will handle the io */
			return;
		default:
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	free_tio(tio);
	dec_pending(io, error);
}

/*
 * Return maximum size of I/O possible at the supplied sector up to the current
 * target boundary.
 */
static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
{
	sector_t target_offset = dm_target_offset(ti, sector);

	return ti->len - target_offset;
}

static sector_t max_io_len(sector_t sector, struct dm_target *ti)
{
	sector_t len = max_io_len_target_boundary(sector, ti);
	sector_t offset, max_len;

	/*
	 * Does the target need to split even further?
	 */
	if (ti->max_io_len) {
		offset = dm_target_offset(ti, sector);
		if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
			max_len = sector_div(offset, ti->max_io_len);
		else
			max_len = offset & (ti->max_io_len - 1);
		max_len = ti->max_io_len - max_len;

		if (len > max_len)
			len = max_len;
	}

	return len;
}

int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
{
	if (len > UINT_MAX) {
		DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
		      (unsigned long long)len, UINT_MAX);
		ti->error = "Maximum size of target IO is too large";
		return -EINVAL;
	}

	ti->max_io_len = (uint32_t) len;

	return 0;
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);

static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
						sector_t sector, int *srcu_idx)
	__acquires(md->io_barrier)
{
	struct dm_table *map;
	struct dm_target *ti;

	map = dm_get_live_table(md, srcu_idx);
	if (!map)
		return NULL;

	ti = dm_table_find_target(map, sector);
	if (!ti)
		return NULL;

	return ti;
}

static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
				 long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long len, ret = -EIO;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->direct_access)
		goto out;
	len = max_io_len(sector, ti) / PAGE_SECTORS;
	if (len < 1)
		goto out;
	nr_pages = min(len, nr_pages);
	ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);

out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

static bool dm_dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
			     int blocksize, sector_t start, sector_t len)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	struct dm_table *map;
	int srcu_idx;
	bool ret;

	map = dm_get_live_table(md, &srcu_idx);
	if (!map)
		return false;

	ret = dm_table_supports_dax(map, device_supports_dax, &blocksize);

	dm_put_live_table(md, srcu_idx);

	return ret;
}

static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
				    void *addr, size_t bytes, struct iov_iter *i)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long ret = 0;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->dax_copy_from_iter) {
		ret = copy_from_iter(addr, bytes, i);
		goto out;
	}
	ret = ti->type->dax_copy_from_iter(ti, pgoff, addr, bytes, i);
out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

static size_t dm_dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
				  void *addr, size_t bytes, struct iov_iter *i)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long ret = 0;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->dax_copy_to_iter) {
		ret = copy_to_iter(addr, bytes, i);
		goto out;
	}
	ret = ti->type->dax_copy_to_iter(ti, pgoff, addr, bytes, i);
out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

/*
 * A target may call dm_accept_partial_bio only from the map routine. It is
 * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_RESET,
 * REQ_OP_ZONE_OPEN, REQ_OP_ZONE_CLOSE and REQ_OP_ZONE_FINISH.
 *
 * dm_accept_partial_bio informs the dm that the target only wants to process
 * additional n_sectors sectors of the bio and the rest of the data should be
 * sent in a next bio.
 *
 * A diagram that explains the arithmetic:
 * +--------------------+---------------+-------+
 * |         1          |       2       |   3   |
 * +--------------------+---------------+-------+
 *
 * <-------------- *tio->len_ptr --------------->
 *                      <------- bi_size ------->
 *                      <-- n_sectors -->
 *
 * Region 1 was already iterated over with bio_advance or similar function.
 *	(it may be empty if the target doesn't use bio_advance)
 * Region 2 is the remaining bio size that the target wants to process.
 *	(it may be empty if region 1 is non-empty, although there is no reason
 *	 to make it empty)
 * The target requires that region 3 is to be sent in the next bio.
 *
 * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
 * the partially processed part (the sum of regions 1+2) must be the same for all
 * copies of the bio.
 */
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
	BUG_ON(bio->bi_opf & REQ_PREFLUSH);
	BUG_ON(bi_size > *tio->len_ptr);
	BUG_ON(n_sectors > bi_size);
	*tio->len_ptr -= bi_size - n_sectors;
	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
}
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);

static blk_qc_t __map_bio(struct dm_target_io *tio)
{
	int r;
	sector_t sector;
	struct bio *clone = &tio->clone;
	struct dm_io *io = tio->io;
	struct mapped_device *md = io->md;
	struct dm_target *ti = tio->ti;
	blk_qc_t ret = BLK_QC_T_NONE;

	clone->bi_end_io = clone_endio;

	/*
	 * Map the clone. If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
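	 * Otherwise the target either remapped the clone (in which case it is
	 * submitted to the underlying device here), or asked for the I/O to be
	 * killed or requeued; see the switch on the DM_MAPIO_* return values
	 * below.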
	 */
	atomic_inc(&io->io_count);
	sector = clone->bi_iter.bi_sector;

	r = ti->type->map(ti, clone);
	switch (r) {
	case DM_MAPIO_SUBMITTED:
		break;
	case DM_MAPIO_REMAPPED:
		/* the bio has been remapped so dispatch it */
		trace_block_bio_remap(clone->bi_disk->queue, clone,
				      bio_dev(io->orig_bio), sector);
		if (md->type == DM_TYPE_NVME_BIO_BASED)
			ret = direct_make_request(clone);
		else
			ret = generic_make_request(clone);
		break;
	case DM_MAPIO_KILL:
		free_tio(tio);
		dec_pending(io, BLK_STS_IOERR);
		break;
	case DM_MAPIO_REQUEUE:
		free_tio(tio);
		dec_pending(io, BLK_STS_DM_REQUEUE);
		break;
	default:
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}

	return ret;
}

static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
{
	bio->bi_iter.bi_sector = sector;
	bio->bi_iter.bi_size = to_bytes(len);
}

/*
 * Creates a bio that consists of range of complete bvecs.
 */
static int clone_bio(struct dm_target_io *tio, struct bio *bio,
		     sector_t sector, unsigned len)
{
	struct bio *clone = &tio->clone;

	__bio_clone_fast(clone, bio);

	if (bio_integrity(bio)) {
		int r;

		if (unlikely(!dm_target_has_integrity(tio->ti->type) &&
			     !dm_target_passes_integrity(tio->ti->type))) {
			DMWARN("%s: the target %s doesn't support integrity data.",
				dm_device_name(tio->io->md),
				tio->ti->type->name);
			return -EIO;
		}

		r = bio_integrity_clone(clone, bio, GFP_NOIO);
		if (r < 0)
			return r;
	}

	bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
	clone->bi_iter.bi_size = to_bytes(len);

	if (bio_integrity(bio))
		bio_integrity_trim(clone);

	return 0;
}

static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
				struct dm_target *ti, unsigned num_bios)
{
	struct dm_target_io *tio;
	int try;

	if (!num_bios)
		return;

	if (num_bios == 1) {
		tio = alloc_tio(ci, ti, 0, GFP_NOIO);
		bio_list_add(blist, &tio->clone);
		return;
	}

	for (try = 0; try < 2; try++) {
		int bio_nr;
		struct bio *bio;

		if (try)
			mutex_lock(&ci->io->md->table_devices_lock);
		for (bio_nr = 0; bio_nr < num_bios; bio_nr++) {
			tio = alloc_tio(ci, ti, bio_nr, try ? GFP_NOIO : GFP_NOWAIT);
			if (!tio)
				break;

			bio_list_add(blist, &tio->clone);
		}
		if (try)
			mutex_unlock(&ci->io->md->table_devices_lock);
		if (bio_nr == num_bios)
			return;

		while ((bio = bio_list_pop(blist))) {
			tio = container_of(bio, struct dm_target_io, clone);
			free_tio(tio);
		}
	}
}

static blk_qc_t __clone_and_map_simple_bio(struct clone_info *ci,
					   struct dm_target_io *tio, unsigned *len)
{
	struct bio *clone = &tio->clone;

	tio->len_ptr = len;

	__bio_clone_fast(clone, ci->bio);
	if (len)
		bio_setup_sector(clone, ci->sector, *len);

	return __map_bio(tio);
}

static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
				  unsigned num_bios, unsigned *len)
{
	struct bio_list blist = BIO_EMPTY_LIST;
	struct bio *bio;
	struct dm_target_io *tio;

	alloc_multiple_bios(&blist, ci, ti, num_bios);

	while ((bio = bio_list_pop(&blist))) {
		tio = container_of(bio, struct dm_target_io, clone);
		(void) __clone_and_map_simple_bio(ci, tio, len);
	}
}

static int __send_empty_flush(struct clone_info *ci)
{
	unsigned target_nr = 0;
	struct dm_target *ti;

	/*
	 * Empty flush uses a statically initialized bio, as the base for
	 * cloning. However, blkg association requires that a bdev is
	 * associated with a gendisk, which doesn't happen until the bdev is
	 * opened. So, blkg association is done at issue time of the flush
	 * rather than when the device is created in alloc_dev().
	 */
	bio_set_dev(ci->bio, ci->io->md->bdev);

	BUG_ON(bio_has_data(ci->bio));
	while ((ti = dm_table_get_target(ci->map, target_nr++)))
		__send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);

	bio_disassociate_blkg(ci->bio);

	return 0;
}

static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
				    sector_t sector, unsigned *len)
{
	struct bio *bio = ci->bio;
	struct dm_target_io *tio;
	int r;

	tio = alloc_tio(ci, ti, 0, GFP_NOIO);
	tio->len_ptr = len;
	r = clone_bio(tio, bio, sector, *len);
	if (r < 0) {
		free_tio(tio);
		return r;
	}
	(void) __map_bio(tio);

	return 0;
}

typedef unsigned (*get_num_bios_fn)(struct dm_target *ti);

static unsigned get_num_discard_bios(struct dm_target *ti)
{
	return ti->num_discard_bios;
}

static unsigned get_num_secure_erase_bios(struct dm_target *ti)
{
	return ti->num_secure_erase_bios;
}

static unsigned get_num_write_same_bios(struct dm_target *ti)
{
	return ti->num_write_same_bios;
}

static unsigned get_num_write_zeroes_bios(struct dm_target *ti)
{
	return ti->num_write_zeroes_bios;
}

static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
				       unsigned num_bios)
{
	unsigned len;

	/*
	 * Even though the device advertised support for this type of
	 * request, that does not mean every target supports it, and
	 * reconfiguration might also have changed that since the
	 * check was performed.
	 */
	if (!num_bios)
		return -EOPNOTSUPP;

	len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti));

	__send_duplicate_bios(ci, ti, num_bios, &len);

	ci->sector += len;
	ci->sector_count -= len;

	return 0;
}

static int __send_discard(struct clone_info *ci, struct dm_target *ti)
{
	return __send_changing_extent_only(ci, ti, get_num_discard_bios(ti));
}

static int __send_secure_erase(struct clone_info *ci, struct dm_target *ti)
{
	return __send_changing_extent_only(ci, ti, get_num_secure_erase_bios(ti));
}

static int __send_write_same(struct clone_info *ci, struct dm_target *ti)
{
	return __send_changing_extent_only(ci, ti, get_num_write_same_bios(ti));
}

static int __send_write_zeroes(struct clone_info *ci, struct dm_target *ti)
{
	return __send_changing_extent_only(ci, ti, get_num_write_zeroes_bios(ti));
}

static bool is_abnormal_io(struct bio *bio)
{
	bool r = false;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_SAME:
	case REQ_OP_WRITE_ZEROES:
		r = true;
		break;
	}

	return r;
}

static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
				  int *result)
{
	struct bio *bio = ci->bio;

	if (bio_op(bio) == REQ_OP_DISCARD)
		*result = __send_discard(ci, ti);
	else if (bio_op(bio) == REQ_OP_SECURE_ERASE)
		*result = __send_secure_erase(ci, ti);
	else if (bio_op(bio) == REQ_OP_WRITE_SAME)
		*result = __send_write_same(ci, ti);
	else if (bio_op(bio) == REQ_OP_WRITE_ZEROES)
		*result = __send_write_zeroes(ci, ti);
	else
		return false;

	return true;
}

/*
 * Select the correct strategy for processing a non-flush bio.
 */
static int __split_and_process_non_flush(struct clone_info *ci)
{
	struct dm_target *ti;
	unsigned len;
	int r;

	ti = dm_table_find_target(ci->map, ci->sector);
	if (!ti)
		return -EIO;

	if (__process_abnormal_io(ci, ti, &r))
		return r;

	len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);

	r = __clone_and_map_data_bio(ci, ti, ci->sector, &len);
	if (r < 0)
		return r;

	ci->sector += len;
	ci->sector_count -= len;

	return 0;
}

static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
			    struct dm_table *map, struct bio *bio)
{
	ci->map = map;
	ci->io = alloc_io(md, bio);
	ci->sector = bio->bi_iter.bi_sector;
}

#define __dm_part_stat_sub(part, field, subnd) \
	(part_stat_get(part, field) -= (subnd))

/*
 * Entry point to split a bio into clones and submit them to the targets.
 */
static blk_qc_t __split_and_process_bio(struct mapped_device *md,
					struct dm_table *map, struct bio *bio)
{
	struct clone_info ci;
	blk_qc_t ret = BLK_QC_T_NONE;
	int error = 0;

	init_clone_info(&ci, md, map, bio);

	if (bio->bi_opf & REQ_PREFLUSH) {
		struct bio flush_bio;

		/*
		 * Use an on-stack bio for this, it's safe since we don't
		 * need to reference it after submit. It's just used as
		 * the basis for the clone(s).
		 */
		bio_init(&flush_bio, NULL, 0);
		flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
		ci.bio = &flush_bio;
		ci.sector_count = 0;
		error = __send_empty_flush(&ci);
		/* dec_pending submits any data associated with flush */
	} else if (op_is_zone_mgmt(bio_op(bio))) {
		ci.bio = bio;
		ci.sector_count = 0;
		error = __split_and_process_non_flush(&ci);
	} else {
		ci.bio = bio;
		ci.sector_count = bio_sectors(bio);
		while (ci.sector_count && !error) {
			error = __split_and_process_non_flush(&ci);
			if (current->bio_list && ci.sector_count && !error) {
				/*
				 * Remainder must be passed to generic_make_request()
				 * so that it gets handled *after* bios already submitted
				 * have been completely processed.
				 * We take a clone of the original to store in
				 * ci.io->orig_bio to be used by end_io_acct() and
				 * for dec_pending to use for completion handling.
				 */
				struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count,
							  GFP_NOIO, &md->queue->bio_split);
				ci.io->orig_bio = b;

				/*
				 * Adjust IO stats for each split, otherwise upon queue
				 * reentry there will be redundant IO accounting.
				 * NOTE: this is a stop-gap fix, a proper fix involves
				 * significant refactoring of DM core's bio splitting
				 * (by eliminating DM's splitting and just using bio_split)
				 */
				part_stat_lock();
				__dm_part_stat_sub(&dm_disk(md)->part0,
						   sectors[op_stat_group(bio_op(bio))], ci.sector_count);
				part_stat_unlock();

				bio_chain(b, bio);
				trace_block_split(md->queue, b, bio->bi_iter.bi_sector);
				ret = generic_make_request(bio);
				break;
			}
		}
	}

	/* drop the extra reference count */
	dec_pending(ci.io, errno_to_blk_status(error));
	return ret;
}

/*
 * Optimized variant of __split_and_process_bio that leverages the
 * fact that targets that use it do _not_ have a need to split bios.
 */
static blk_qc_t __process_bio(struct mapped_device *md, struct dm_table *map,
			      struct bio *bio, struct dm_target *ti)
{
	struct clone_info ci;
	blk_qc_t ret = BLK_QC_T_NONE;
	int error = 0;

	init_clone_info(&ci, md, map, bio);

	if (bio->bi_opf & REQ_PREFLUSH) {
		struct bio flush_bio;

		/*
		 * Use an on-stack bio for this, it's safe since we don't
		 * need to reference it after submit. It's just used as
		 * the basis for the clone(s).
		 */
		bio_init(&flush_bio, NULL, 0);
		flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
		ci.bio = &flush_bio;
		ci.sector_count = 0;
		error = __send_empty_flush(&ci);
		/* dec_pending submits any data associated with flush */
	} else {
		struct dm_target_io *tio;

		ci.bio = bio;
		ci.sector_count = bio_sectors(bio);
		if (__process_abnormal_io(&ci, ti, &error))
			goto out;

		tio = alloc_tio(&ci, ti, 0, GFP_NOIO);
		ret = __clone_and_map_simple_bio(&ci, tio, NULL);
	}
out:
	/* drop the extra reference count */
	dec_pending(ci.io, errno_to_blk_status(error));
	return ret;
}

static void dm_queue_split(struct mapped_device *md, struct dm_target *ti, struct bio **bio)
{
	unsigned len, sector_count;

	sector_count = bio_sectors(*bio);
	len = min_t(sector_t, max_io_len((*bio)->bi_iter.bi_sector, ti), sector_count);

	if (sector_count > len) {
		struct bio *split = bio_split(*bio, len, GFP_NOIO, &md->queue->bio_split);

		bio_chain(split, *bio);
		trace_block_split(md->queue, split, (*bio)->bi_iter.bi_sector);
		generic_make_request(*bio);
		*bio = split;
	}
}

static blk_qc_t dm_process_bio(struct mapped_device *md,
			       struct dm_table *map, struct bio *bio)
{
	blk_qc_t ret = BLK_QC_T_NONE;
	struct dm_target *ti = md->immutable_target;

	if (unlikely(!map)) {
		bio_io_error(bio);
		return ret;
	}

	if (!ti) {
		ti = dm_table_find_target(map, bio->bi_iter.bi_sector);
		if (unlikely(!ti)) {
			bio_io_error(bio);
			return ret;
		}
	}

	/*
	 * If in ->make_request_fn we need to use blk_queue_split(), otherwise
	 * queue_limits for abnormal requests (e.g. discard, writesame, etc)
	 * won't be imposed.
	 */
	if (current->bio_list) {
		blk_queue_split(md->queue, &bio);
		if (!is_abnormal_io(bio))
			dm_queue_split(md, ti, &bio);
	}

	if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
		return __process_bio(md, map, bio, ti);
	else
		return __split_and_process_bio(md, map, bio);
}

static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
{
	struct mapped_device *md = q->queuedata;
	blk_qc_t ret = BLK_QC_T_NONE;
	int srcu_idx;
	struct dm_table *map;

	map = dm_get_live_table(md, &srcu_idx);

	/* if we're suspended, we have to queue this io for later */
	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
		dm_put_live_table(md, srcu_idx);

		if (!(bio->bi_opf & REQ_RAHEAD))
			queue_io(md, bio);
		else
			bio_io_error(bio);
		return ret;
	}

	ret = dm_process_bio(md, map, bio);

	dm_put_live_table(md, srcu_idx);
	return ret;
}

static int dm_any_congested(void *congested_data, int bdi_bits)
{
	int r = bdi_bits;
	struct mapped_device *md = congested_data;
	struct dm_table *map;

	if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		if (dm_request_based(md)) {
			/*
			 * With request-based DM we only need to check the
			 * top-level queue for congestion.
			 */
			r = md->queue->backing_dev_info->wb.state & bdi_bits;
		} else {
			map = dm_get_live_table_fast(md);
			if (map)
				r = dm_table_any_congested(map, bdi_bits);
			dm_put_live_table_fast(md);
		}
	}

	return r;
}

/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static void free_minor(int minor)
{
	spin_lock(&_minor_lock);
	idr_remove(&_minor_idr, minor);
	spin_unlock(&_minor_lock);
}

/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(int minor)
{
	int r;

	if (minor >= (1 << MINORBITS))
		return -EINVAL;

	idr_preload(GFP_KERNEL);
	spin_lock(&_minor_lock);

	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);

	spin_unlock(&_minor_lock);
	idr_preload_end();
	if (r < 0)
		return r == -ENOSPC ? -EBUSY : r;
	return 0;
}

static int next_free_minor(int *minor)
{
	int r;

	idr_preload(GFP_KERNEL);
	spin_lock(&_minor_lock);

	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);

	spin_unlock(&_minor_lock);
	idr_preload_end();
	if (r < 0)
		return r;
	*minor = r;
	return 0;
}

static const struct block_device_operations dm_blk_dops;
static const struct dax_operations dm_dax_ops;

static void dm_wq_work(struct work_struct *work);

static void dm_init_normal_md_queue(struct mapped_device *md)
{
	/*
	 * Initialize aspects of queue that aren't relevant for blk-mq
	 */
	md->queue->backing_dev_info->congested_fn = dm_any_congested;
}

static void cleanup_mapped_device(struct mapped_device *md)
{
	if (md->wq)
		destroy_workqueue(md->wq);
	bioset_exit(&md->bs);
	bioset_exit(&md->io_bs);

	if (md->dax_dev) {
		kill_dax(md->dax_dev);
		put_dax(md->dax_dev);
		md->dax_dev = NULL;
	}

	if (md->disk) {
		spin_lock(&_minor_lock);
		md->disk->private_data = NULL;
		spin_unlock(&_minor_lock);
		del_gendisk(md->disk);
		put_disk(md->disk);
	}

	if (md->queue)
		blk_cleanup_queue(md->queue);

	cleanup_srcu_struct(&md->io_barrier);

	if (md->bdev) {
		bdput(md->bdev);
		md->bdev = NULL;
	}

	mutex_destroy(&md->suspend_lock);
	mutex_destroy(&md->type_lock);
	mutex_destroy(&md->table_devices_lock);

	dm_mq_cleanup_mapped_device(md);
}

/*
 * Allocate and initialise a blank device with a given minor.
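 *
 * The minor is first reserved in the IDR with the MINOR_ALLOCED placeholder;
 * only once the device is fully constructed is the placeholder replaced with
 * the real mapped_device pointer (see the idr_replace() at the end), so
 * concurrent lookups such as dm_get_md() never see a half-initialised md.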
 */
static struct mapped_device *alloc_dev(int minor)
{
	int r, numa_node_id = dm_get_numa_node();
	struct mapped_device *md;
	void *old_md;

	md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	if (!try_module_get(THIS_MODULE))
		goto bad_module_get;

	/* get a minor number for the dev */
	if (minor == DM_ANY_MINOR)
		r = next_free_minor(&minor);
	else
		r = specific_minor(minor);
	if (r < 0)
		goto bad_minor;

	r = init_srcu_struct(&md->io_barrier);
	if (r < 0)
		goto bad_io_barrier;

	md->numa_node_id = numa_node_id;
	md->init_tio_pdu = false;
	md->type = DM_TYPE_NONE;
	mutex_init(&md->suspend_lock);
	mutex_init(&md->type_lock);
	mutex_init(&md->table_devices_lock);
	spin_lock_init(&md->deferred_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->open_count, 0);
	atomic_set(&md->event_nr, 0);
	atomic_set(&md->uevent_seq, 0);
	INIT_LIST_HEAD(&md->uevent_list);
	INIT_LIST_HEAD(&md->table_devices);
	spin_lock_init(&md->uevent_lock);

	md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id);
	if (!md->queue)
		goto bad;
	md->queue->queuedata = md;
	md->queue->backing_dev_info->congested_data = md;

	md->disk = alloc_disk_node(1, md->numa_node_id);
	if (!md->disk)
		goto bad;

	init_waitqueue_head(&md->wait);
	INIT_WORK(&md->work, dm_wq_work);
	init_waitqueue_head(&md->eventq);
	init_completion(&md->kobj_holder.completion);

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);

	if (IS_ENABLED(CONFIG_DAX_DRIVER)) {
		md->dax_dev = alloc_dax(md, md->disk->disk_name,
					&dm_dax_ops, 0);
		if (!md->dax_dev)
			goto bad;
	}

	add_disk_no_queue_reg(md->disk);
	format_dev_t(md->name, MKDEV(_major, minor));

	md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
	if (!md->wq)
		goto bad;

	md->bdev = bdget_disk(md->disk, 0);
	if (!md->bdev)
		goto bad;

	dm_stats_init(&md->stats);

	/* Populate the mapping, nobody knows we exist yet */
	spin_lock(&_minor_lock);
	old_md = idr_replace(&_minor_idr, md, minor);
	spin_unlock(&_minor_lock);

	BUG_ON(old_md != MINOR_ALLOCED);

	return md;

bad:
	cleanup_mapped_device(md);
bad_io_barrier:
	free_minor(minor);
bad_minor:
	module_put(THIS_MODULE);
bad_module_get:
	kvfree(md);
	return NULL;
}

static void unlock_fs(struct mapped_device *md);

static void free_dev(struct mapped_device *md)
{
	int minor = MINOR(disk_devt(md->disk));

	unlock_fs(md);

	cleanup_mapped_device(md);

	free_table_devices(&md->table_devices);
	dm_stats_cleanup(&md->stats);
	free_minor(minor);

	module_put(THIS_MODULE);
	kvfree(md);
}

static int __bind_mempools(struct mapped_device *md, struct dm_table *t)
{
	struct dm_md_mempools *p = dm_table_get_md_mempools(t);
	int ret = 0;

	if (dm_table_bio_based(t)) {
		/*
		 * The md may already have mempools that need changing.
		 * If so, reload bioset because front_pad may have changed
		 * because a different table was loaded.
		 */
		bioset_exit(&md->bs);
		bioset_exit(&md->io_bs);

	} else if (bioset_initialized(&md->bs)) {
		/*
		 * There's no need to reload with request-based dm
		 * because the size of front_pad doesn't change.
		 * Note for future: If you are to reload bioset,
		 * prep-ed requests in the queue may refer
		 * to bio from the old bioset, so you must walk
		 * through the queue to unprep.
		 */
		goto out;
	}

	BUG_ON(!p ||
	       bioset_initialized(&md->bs) ||
	       bioset_initialized(&md->io_bs));

	ret = bioset_init_from_src(&md->bs, &p->bs);
	if (ret)
		goto out;
	ret = bioset_init_from_src(&md->io_bs, &p->io_bs);
	if (ret)
		bioset_exit(&md->bs);
out:
	/* mempool bind completed, no longer need any mempools in the table */
	dm_table_free_md_mempools(t);
	return ret;
}

/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
{
	unsigned long flags;
	LIST_HEAD(uevents);
	struct mapped_device *md = (struct mapped_device *) context;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_splice_init(&md->uevent_list, &uevents);
	spin_unlock_irqrestore(&md->uevent_lock, flags);

	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);

	atomic_inc(&md->event_nr);
	wake_up(&md->eventq);
	dm_issue_global_event();
}

/*
 * Protected by md->suspend_lock obtained by dm_swap_table().
 */
static void __set_size(struct mapped_device *md, sector_t size)
{
	lockdep_assert_held(&md->suspend_lock);

	set_capacity(md->disk, size);

	i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
}

/*
 * Returns old map, which caller must destroy.
 */
static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
			       struct queue_limits *limits)
{
	struct dm_table *old_map;
	struct request_queue *q = md->queue;
	bool request_based = dm_table_request_based(t);
	sector_t size;
	int ret;

	lockdep_assert_held(&md->suspend_lock);

	size = dm_table_get_size(t);

	/*
	 * Wipe any geometry if the size of the table changed.
	 */
	if (size != dm_get_size(md))
		memset(&md->geometry, 0, sizeof(md->geometry));

	__set_size(md, size);

	dm_table_event_callback(t, event_callback, md);

	/*
	 * The queue hasn't been stopped yet, if the old table type wasn't
	 * for request-based during suspension. So stop it to prevent
	 * I/O mapping before resume.
	 * This must be done before setting the queue restrictions,
	 * because request-based dm may be run just after the setting.
	 */
	if (request_based)
		dm_stop_queue(q);

	if (request_based || md->type == DM_TYPE_NVME_BIO_BASED) {
		/*
		 * Leverage the fact that request-based DM targets and
		 * NVMe bio based targets are immutable singletons
		 * - used to optimize both dm_request_fn and dm_mq_queue_rq;
		 *   and __process_bio.
2144 */ 2145 md->immutable_target = dm_table_get_immutable_target(t); 2146 } 2147 2148 ret = __bind_mempools(md, t); 2149 if (ret) { 2150 old_map = ERR_PTR(ret); 2151 goto out; 2152 } 2153 2154 old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2155 rcu_assign_pointer(md->map, (void *)t); 2156 md->immutable_target_type = dm_table_get_immutable_target_type(t); 2157 2158 dm_table_set_restrictions(t, q, limits); 2159 if (old_map) 2160 dm_sync_table(md); 2161 2162 out: 2163 return old_map; 2164 } 2165 2166 /* 2167 * Returns unbound table for the caller to free. 2168 */ 2169 static struct dm_table *__unbind(struct mapped_device *md) 2170 { 2171 struct dm_table *map = rcu_dereference_protected(md->map, 1); 2172 2173 if (!map) 2174 return NULL; 2175 2176 dm_table_event_callback(map, NULL, NULL); 2177 RCU_INIT_POINTER(md->map, NULL); 2178 dm_sync_table(md); 2179 2180 return map; 2181 } 2182 2183 /* 2184 * Constructor for a new device. 2185 */ 2186 int dm_create(int minor, struct mapped_device **result) 2187 { 2188 int r; 2189 struct mapped_device *md; 2190 2191 md = alloc_dev(minor); 2192 if (!md) 2193 return -ENXIO; 2194 2195 r = dm_sysfs_init(md); 2196 if (r) { 2197 free_dev(md); 2198 return r; 2199 } 2200 2201 *result = md; 2202 return 0; 2203 } 2204 2205 /* 2206 * Functions to manage md->type. 2207 * All are required to hold md->type_lock. 2208 */ 2209 void dm_lock_md_type(struct mapped_device *md) 2210 { 2211 mutex_lock(&md->type_lock); 2212 } 2213 2214 void dm_unlock_md_type(struct mapped_device *md) 2215 { 2216 mutex_unlock(&md->type_lock); 2217 } 2218 2219 void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type) 2220 { 2221 BUG_ON(!mutex_is_locked(&md->type_lock)); 2222 md->type = type; 2223 } 2224 2225 enum dm_queue_mode dm_get_md_type(struct mapped_device *md) 2226 { 2227 return md->type; 2228 } 2229 2230 struct target_type *dm_get_immutable_target_type(struct mapped_device *md) 2231 { 2232 return md->immutable_target_type; 2233 } 2234 2235 /* 2236 * The queue_limits are only valid as long as you have a reference 2237 * count on 'md'. 
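/*
 * Editor's sketch (not driver code): the pairing expected around dm_create()
 * and dm_destroy().  The real caller is the dm-ioctl layer; the function name
 * here is made up and table loading is elided.  dm_create() hands back a
 * device holding one reference (md->holders == 1), and dm_destroy() waits for
 * all holders to drop, so the creator must dm_put() its reference first.
 */
static int __maybe_unused example_create_destroy(void)
{
	struct mapped_device *md;
	int r;

	r = dm_create(DM_ANY_MINOR, &md);
	if (r)
		return r;

	/* ... bind a table with dm_swap_table() and use the device ... */

	dm_put(md);
	dm_destroy(md);
	return 0;
}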
2238 */ 2239 struct queue_limits *dm_get_queue_limits(struct mapped_device *md) 2240 { 2241 BUG_ON(!atomic_read(&md->holders)); 2242 return &md->queue->limits; 2243 } 2244 EXPORT_SYMBOL_GPL(dm_get_queue_limits); 2245 2246 /* 2247 * Setup the DM device's queue based on md's type 2248 */ 2249 int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) 2250 { 2251 int r; 2252 struct queue_limits limits; 2253 enum dm_queue_mode type = dm_get_md_type(md); 2254 2255 switch (type) { 2256 case DM_TYPE_REQUEST_BASED: 2257 r = dm_mq_init_request_queue(md, t); 2258 if (r) { 2259 DMERR("Cannot initialize queue for request-based dm-mq mapped device"); 2260 return r; 2261 } 2262 break; 2263 case DM_TYPE_BIO_BASED: 2264 case DM_TYPE_DAX_BIO_BASED: 2265 case DM_TYPE_NVME_BIO_BASED: 2266 dm_init_normal_md_queue(md); 2267 blk_queue_make_request(md->queue, dm_make_request); 2268 break; 2269 case DM_TYPE_NONE: 2270 WARN_ON_ONCE(true); 2271 break; 2272 } 2273 2274 r = dm_calculate_queue_limits(t, &limits); 2275 if (r) { 2276 DMERR("Cannot calculate initial queue limits"); 2277 return r; 2278 } 2279 dm_table_set_restrictions(t, md->queue, &limits); 2280 blk_register_queue(md->disk); 2281 2282 return 0; 2283 } 2284 2285 struct mapped_device *dm_get_md(dev_t dev) 2286 { 2287 struct mapped_device *md; 2288 unsigned minor = MINOR(dev); 2289 2290 if (MAJOR(dev) != _major || minor >= (1 << MINORBITS)) 2291 return NULL; 2292 2293 spin_lock(&_minor_lock); 2294 2295 md = idr_find(&_minor_idr, minor); 2296 if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) || 2297 test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { 2298 md = NULL; 2299 goto out; 2300 } 2301 dm_get(md); 2302 out: 2303 spin_unlock(&_minor_lock); 2304 2305 return md; 2306 } 2307 EXPORT_SYMBOL_GPL(dm_get_md); 2308 2309 void *dm_get_mdptr(struct mapped_device *md) 2310 { 2311 return md->interface_ptr; 2312 } 2313 2314 void dm_set_mdptr(struct mapped_device *md, void *ptr) 2315 { 2316 md->interface_ptr = ptr; 2317 } 2318 2319 void dm_get(struct mapped_device *md) 2320 { 2321 atomic_inc(&md->holders); 2322 BUG_ON(test_bit(DMF_FREEING, &md->flags)); 2323 } 2324 2325 int dm_hold(struct mapped_device *md) 2326 { 2327 spin_lock(&_minor_lock); 2328 if (test_bit(DMF_FREEING, &md->flags)) { 2329 spin_unlock(&_minor_lock); 2330 return -EBUSY; 2331 } 2332 dm_get(md); 2333 spin_unlock(&_minor_lock); 2334 return 0; 2335 } 2336 EXPORT_SYMBOL_GPL(dm_hold); 2337 2338 const char *dm_device_name(struct mapped_device *md) 2339 { 2340 return md->name; 2341 } 2342 EXPORT_SYMBOL_GPL(dm_device_name); 2343 2344 static void __dm_destroy(struct mapped_device *md, bool wait) 2345 { 2346 struct dm_table *map; 2347 int srcu_idx; 2348 2349 might_sleep(); 2350 2351 spin_lock(&_minor_lock); 2352 idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); 2353 set_bit(DMF_FREEING, &md->flags); 2354 spin_unlock(&_minor_lock); 2355 2356 blk_set_queue_dying(md->queue); 2357 2358 /* 2359 * Take suspend_lock so that presuspend and postsuspend methods 2360 * do not race with internal suspend. 
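/*
 * Editor's sketch (illustration only): looking up a live mapped device by
 * dev_t.  dm_get_md() filters out devices that are mid-free or being deleted
 * and returns with a holder reference the caller must drop via dm_put().
 * example_report() is a made-up name.
 */
static void __maybe_unused example_report(dev_t dev)
{
	struct mapped_device *md = dm_get_md(dev);

	if (!md)
		return;

	DMINFO("device %s is %ssuspended", dm_device_name(md),
	       dm_suspended_md(md) ? "" : "not ");
	dm_put(md);
}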
2361 */ 2362 mutex_lock(&md->suspend_lock); 2363 map = dm_get_live_table(md, &srcu_idx); 2364 if (!dm_suspended_md(md)) { 2365 dm_table_presuspend_targets(map); 2366 dm_table_postsuspend_targets(map); 2367 } 2368 /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ 2369 dm_put_live_table(md, srcu_idx); 2370 mutex_unlock(&md->suspend_lock); 2371 2372 /* 2373 * Rare, but there may be I/O requests still going to complete, 2374 * for example. Wait for all references to disappear. 2375 * No one should increment the reference count of the mapped_device, 2376 * after the mapped_device state becomes DMF_FREEING. 2377 */ 2378 if (wait) 2379 while (atomic_read(&md->holders)) 2380 msleep(1); 2381 else if (atomic_read(&md->holders)) 2382 DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)", 2383 dm_device_name(md), atomic_read(&md->holders)); 2384 2385 dm_sysfs_exit(md); 2386 dm_table_destroy(__unbind(md)); 2387 free_dev(md); 2388 } 2389 2390 void dm_destroy(struct mapped_device *md) 2391 { 2392 __dm_destroy(md, true); 2393 } 2394 2395 void dm_destroy_immediate(struct mapped_device *md) 2396 { 2397 __dm_destroy(md, false); 2398 } 2399 2400 void dm_put(struct mapped_device *md) 2401 { 2402 atomic_dec(&md->holders); 2403 } 2404 EXPORT_SYMBOL_GPL(dm_put); 2405 2406 static int dm_wait_for_completion(struct mapped_device *md, long task_state) 2407 { 2408 int r = 0; 2409 DEFINE_WAIT(wait); 2410 2411 while (1) { 2412 prepare_to_wait(&md->wait, &wait, task_state); 2413 2414 if (!md_in_flight(md)) 2415 break; 2416 2417 if (signal_pending_state(task_state, current)) { 2418 r = -EINTR; 2419 break; 2420 } 2421 2422 io_schedule(); 2423 } 2424 finish_wait(&md->wait, &wait); 2425 2426 return r; 2427 } 2428 2429 /* 2430 * Process the deferred bios 2431 */ 2432 static void dm_wq_work(struct work_struct *work) 2433 { 2434 struct mapped_device *md = container_of(work, struct mapped_device, 2435 work); 2436 struct bio *c; 2437 int srcu_idx; 2438 struct dm_table *map; 2439 2440 map = dm_get_live_table(md, &srcu_idx); 2441 2442 while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 2443 spin_lock_irq(&md->deferred_lock); 2444 c = bio_list_pop(&md->deferred); 2445 spin_unlock_irq(&md->deferred_lock); 2446 2447 if (!c) 2448 break; 2449 2450 if (dm_request_based(md)) 2451 (void) generic_make_request(c); 2452 else 2453 (void) dm_process_bio(md, map, c); 2454 } 2455 2456 dm_put_live_table(md, srcu_idx); 2457 } 2458 2459 static void dm_queue_flush(struct mapped_device *md) 2460 { 2461 clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 2462 smp_mb__after_atomic(); 2463 queue_work(md->wq, &md->work); 2464 } 2465 2466 /* 2467 * Swap in a new table, returning the old one for the caller to destroy. 2468 */ 2469 struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) 2470 { 2471 struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL); 2472 struct queue_limits limits; 2473 int r; 2474 2475 mutex_lock(&md->suspend_lock); 2476 2477 /* device must be suspended */ 2478 if (!dm_suspended_md(md)) 2479 goto out; 2480 2481 /* 2482 * If the new table has no data devices, retain the existing limits. 2483 * This helps multipath with queue_if_no_path if all paths disappear, 2484 * then new I/O is queued based on these limits, and then some paths 2485 * reappear. 
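/*
 * Editor's sketch (not driver code): how a caller swaps in a new table.  The
 * device must already be suspended; dm_swap_table() hands back the old map
 * (possibly NULL, or an ERR_PTR on failure) and the caller is responsible for
 * destroying it.  The function name is made up; the ioctl resume path is the
 * real user.
 */
static int __maybe_unused example_replace_table(struct mapped_device *md,
						struct dm_table *new_map)
{
	struct dm_table *old_map = dm_swap_table(md, new_map);

	if (IS_ERR(old_map))
		return PTR_ERR(old_map);
	if (old_map)
		dm_table_destroy(old_map);
	return 0;
}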
2486 */ 2487 if (dm_table_has_no_data_devices(table)) { 2488 live_map = dm_get_live_table_fast(md); 2489 if (live_map) 2490 limits = md->queue->limits; 2491 dm_put_live_table_fast(md); 2492 } 2493 2494 if (!live_map) { 2495 r = dm_calculate_queue_limits(table, &limits); 2496 if (r) { 2497 map = ERR_PTR(r); 2498 goto out; 2499 } 2500 } 2501 2502 map = __bind(md, table, &limits); 2503 dm_issue_global_event(); 2504 2505 out: 2506 mutex_unlock(&md->suspend_lock); 2507 return map; 2508 } 2509 2510 /* 2511 * Functions to lock and unlock any filesystem running on the 2512 * device. 2513 */ 2514 static int lock_fs(struct mapped_device *md) 2515 { 2516 int r; 2517 2518 WARN_ON(md->frozen_sb); 2519 2520 md->frozen_sb = freeze_bdev(md->bdev); 2521 if (IS_ERR(md->frozen_sb)) { 2522 r = PTR_ERR(md->frozen_sb); 2523 md->frozen_sb = NULL; 2524 return r; 2525 } 2526 2527 set_bit(DMF_FROZEN, &md->flags); 2528 2529 return 0; 2530 } 2531 2532 static void unlock_fs(struct mapped_device *md) 2533 { 2534 if (!test_bit(DMF_FROZEN, &md->flags)) 2535 return; 2536 2537 thaw_bdev(md->bdev, md->frozen_sb); 2538 md->frozen_sb = NULL; 2539 clear_bit(DMF_FROZEN, &md->flags); 2540 } 2541 2542 /* 2543 * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG 2544 * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE 2545 * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY 2546 * 2547 * If __dm_suspend returns 0, the device is completely quiescent 2548 * now. There is no request-processing activity. All new requests 2549 * are being added to md->deferred list. 2550 */ 2551 static int __dm_suspend(struct mapped_device *md, struct dm_table *map, 2552 unsigned suspend_flags, long task_state, 2553 int dmf_suspended_flag) 2554 { 2555 bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG; 2556 bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG; 2557 int r; 2558 2559 lockdep_assert_held(&md->suspend_lock); 2560 2561 /* 2562 * DMF_NOFLUSH_SUSPENDING must be set before presuspend. 2563 * This flag is cleared before dm_suspend returns. 2564 */ 2565 if (noflush) 2566 set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 2567 else 2568 pr_debug("%s: suspending with flush\n", dm_device_name(md)); 2569 2570 /* 2571 * This gets reverted if there's an error later and the targets 2572 * provide the .presuspend_undo hook. 2573 */ 2574 dm_table_presuspend_targets(map); 2575 2576 /* 2577 * Flush I/O to the device. 2578 * Any I/O submitted after lock_fs() may not be flushed. 2579 * noflush takes precedence over do_lockfs. 2580 * (lock_fs() flushes I/Os and waits for them to complete.) 2581 */ 2582 if (!noflush && do_lockfs) { 2583 r = lock_fs(md); 2584 if (r) { 2585 dm_table_presuspend_undo_targets(map); 2586 return r; 2587 } 2588 } 2589 2590 /* 2591 * Here we must make sure that no processes are submitting requests 2592 * to target drivers i.e. no one may be executing 2593 * __split_and_process_bio. This is called from dm_request and 2594 * dm_wq_work. 2595 * 2596 * To get all processes out of __split_and_process_bio in dm_request, 2597 * we take the write lock. To prevent any process from reentering 2598 * __split_and_process_bio from dm_request and quiesce the thread 2599 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call 2600 * flush_workqueue(md->wq). 2601 */ 2602 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 2603 if (map) 2604 synchronize_srcu(&md->io_barrier); 2605 2606 /* 2607 * Stop md->queue before flushing md->wq in case request-based 2608 * dm defers requests to md->wq from md->queue.
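/*
 * Editor's sketch (illustration only): the freeze/thaw pairing that
 * lock_fs()/unlock_fs() wrap.  freeze_bdev() returns the frozen superblock
 * (or an ERR_PTR), and that same pointer is handed back to thaw_bdev(), which
 * is exactly what md->frozen_sb is kept around for.  The name below is made up.
 */
static int __maybe_unused example_freeze_thaw(struct block_device *bdev)
{
	struct super_block *sb = freeze_bdev(bdev);

	if (IS_ERR(sb))
		return PTR_ERR(sb);

	/* ... any filesystem on the device is now quiesced ... */

	return thaw_bdev(bdev, sb);
}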
2609 */ 2610 if (dm_request_based(md)) 2611 dm_stop_queue(md->queue); 2612 2613 flush_workqueue(md->wq); 2614 2615 /* 2616 * At this point no more requests are entering target request routines. 2617 * We call dm_wait_for_completion to wait for all existing requests 2618 * to finish. 2619 */ 2620 r = dm_wait_for_completion(md, task_state); 2621 if (!r) 2622 set_bit(dmf_suspended_flag, &md->flags); 2623 2624 if (noflush) 2625 clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 2626 if (map) 2627 synchronize_srcu(&md->io_barrier); 2628 2629 /* were we interrupted ? */ 2630 if (r < 0) { 2631 dm_queue_flush(md); 2632 2633 if (dm_request_based(md)) 2634 dm_start_queue(md->queue); 2635 2636 unlock_fs(md); 2637 dm_table_presuspend_undo_targets(map); 2638 /* pushback list is already flushed, so skip flush */ 2639 } 2640 2641 return r; 2642 } 2643 2644 /* 2645 * We need to be able to change a mapping table under a mounted 2646 * filesystem. For example we might want to move some data in 2647 * the background. Before the table can be swapped with 2648 * dm_bind_table, dm_suspend must be called to flush any in 2649 * flight bios and ensure that any further io gets deferred. 2650 */ 2651 /* 2652 * Suspend mechanism in request-based dm. 2653 * 2654 * 1. Flush all I/Os by lock_fs() if needed. 2655 * 2. Stop dispatching any I/O by stopping the request_queue. 2656 * 3. Wait for all in-flight I/Os to be completed or requeued. 2657 * 2658 * To abort suspend, start the request_queue. 2659 */ 2660 int dm_suspend(struct mapped_device *md, unsigned suspend_flags) 2661 { 2662 struct dm_table *map = NULL; 2663 int r = 0; 2664 2665 retry: 2666 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); 2667 2668 if (dm_suspended_md(md)) { 2669 r = -EINVAL; 2670 goto out_unlock; 2671 } 2672 2673 if (dm_suspended_internally_md(md)) { 2674 /* already internally suspended, wait for internal resume */ 2675 mutex_unlock(&md->suspend_lock); 2676 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); 2677 if (r) 2678 return r; 2679 goto retry; 2680 } 2681 2682 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2683 2684 r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED); 2685 if (r) 2686 goto out_unlock; 2687 2688 dm_table_postsuspend_targets(map); 2689 2690 out_unlock: 2691 mutex_unlock(&md->suspend_lock); 2692 return r; 2693 } 2694 2695 static int __dm_resume(struct mapped_device *md, struct dm_table *map) 2696 { 2697 if (map) { 2698 int r = dm_table_resume_targets(map); 2699 if (r) 2700 return r; 2701 } 2702 2703 dm_queue_flush(md); 2704 2705 /* 2706 * Flushing deferred I/Os must be done after targets are resumed 2707 * so that mapping of targets can work correctly. 2708 * Request-based dm is queueing the deferred I/Os in its request_queue. 
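/*
 * Editor's sketch (not driver code): the userspace-driven suspend/resume
 * bracket as the dm-ioctl layer drives it.  DM_SUSPEND_LOCKFS_FLAG asks
 * lock_fs() to freeze any filesystem first; a noflush suspend would pass
 * DM_SUSPEND_NOFLUSH_FLAG instead, which takes precedence.  The function name
 * is made up and the work done while suspended is elided.
 */
static int __maybe_unused example_suspend_bracket(struct mapped_device *md)
{
	int r;

	r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
	if (r)		/* -EINVAL if already suspended, -EINTR on signal */
		return r;

	/* ... swap tables, take a snapshot, etc., while I/O is quiesced ... */

	return dm_resume(md);
}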
2709 */ 2710 if (dm_request_based(md)) 2711 dm_start_queue(md->queue); 2712 2713 unlock_fs(md); 2714 2715 return 0; 2716 } 2717 2718 int dm_resume(struct mapped_device *md) 2719 { 2720 int r; 2721 struct dm_table *map = NULL; 2722 2723 retry: 2724 r = -EINVAL; 2725 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); 2726 2727 if (!dm_suspended_md(md)) 2728 goto out; 2729 2730 if (dm_suspended_internally_md(md)) { 2731 /* already internally suspended, wait for internal resume */ 2732 mutex_unlock(&md->suspend_lock); 2733 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); 2734 if (r) 2735 return r; 2736 goto retry; 2737 } 2738 2739 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2740 if (!map || !dm_table_get_size(map)) 2741 goto out; 2742 2743 r = __dm_resume(md, map); 2744 if (r) 2745 goto out; 2746 2747 clear_bit(DMF_SUSPENDED, &md->flags); 2748 out: 2749 mutex_unlock(&md->suspend_lock); 2750 2751 return r; 2752 } 2753 2754 /* 2755 * Internal suspend/resume works like userspace-driven suspend. It waits 2756 * until all bios finish and prevents issuing new bios to the target drivers. 2757 * It may be used only from the kernel. 2758 */ 2759 2760 static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags) 2761 { 2762 struct dm_table *map = NULL; 2763 2764 lockdep_assert_held(&md->suspend_lock); 2765 2766 if (md->internal_suspend_count++) 2767 return; /* nested internal suspend */ 2768 2769 if (dm_suspended_md(md)) { 2770 set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 2771 return; /* nest suspend */ 2772 } 2773 2774 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2775 2776 /* 2777 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is 2778 * supported. Properly supporting a TASK_INTERRUPTIBLE internal suspend 2779 * would require changing .presuspend to return an error -- avoid this 2780 * until there is a need for more elaborate variants of internal suspend. 2781 */ 2782 (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE, 2783 DMF_SUSPENDED_INTERNALLY); 2784 2785 dm_table_postsuspend_targets(map); 2786 } 2787 2788 static void __dm_internal_resume(struct mapped_device *md) 2789 { 2790 BUG_ON(!md->internal_suspend_count); 2791 2792 if (--md->internal_suspend_count) 2793 return; /* resume from nested internal suspend */ 2794 2795 if (dm_suspended_md(md)) 2796 goto done; /* resume from nested suspend */ 2797 2798 /* 2799 * NOTE: existing callers don't need to call dm_table_resume_targets 2800 * (which may fail -- so best to avoid it for now by passing NULL map) 2801 */ 2802 (void) __dm_resume(md, NULL); 2803 2804 done: 2805 clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 2806 smp_mb__after_atomic(); 2807 wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY); 2808 } 2809 2810 void dm_internal_suspend_noflush(struct mapped_device *md) 2811 { 2812 mutex_lock(&md->suspend_lock); 2813 __dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG); 2814 mutex_unlock(&md->suspend_lock); 2815 } 2816 EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush); 2817 2818 void dm_internal_resume(struct mapped_device *md) 2819 { 2820 mutex_lock(&md->suspend_lock); 2821 __dm_internal_resume(md); 2822 mutex_unlock(&md->suspend_lock); 2823 } 2824 EXPORT_SYMBOL_GPL(dm_internal_resume); 2825 2826 /* 2827 * Fast variants of internal suspend/resume hold md->suspend_lock, 2828 * which prevents interaction with userspace-driven suspend. 
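/*
 * Editor's sketch (illustration only): how another kernel component brackets
 * work with a nested-safe internal suspend.  dm_internal_suspend_noflush() and
 * dm_internal_resume() are the exported entry points used elsewhere in the
 * device-mapper stack; the function below is made up.
 */
static void __maybe_unused example_internal_bracket(struct mapped_device *md)
{
	dm_internal_suspend_noflush(md);	/* waits for in-flight bios */

	/* ... operate while no new bios reach the targets ... */

	dm_internal_resume(md);
}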
2829 */ 2830 2831 void dm_internal_suspend_fast(struct mapped_device *md) 2832 { 2833 mutex_lock(&md->suspend_lock); 2834 if (dm_suspended_md(md) || dm_suspended_internally_md(md)) 2835 return; 2836 2837 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 2838 synchronize_srcu(&md->io_barrier); 2839 flush_workqueue(md->wq); 2840 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); 2841 } 2842 EXPORT_SYMBOL_GPL(dm_internal_suspend_fast); 2843 2844 void dm_internal_resume_fast(struct mapped_device *md) 2845 { 2846 if (dm_suspended_md(md) || dm_suspended_internally_md(md)) 2847 goto done; 2848 2849 dm_queue_flush(md); 2850 2851 done: 2852 mutex_unlock(&md->suspend_lock); 2853 } 2854 EXPORT_SYMBOL_GPL(dm_internal_resume_fast); 2855 2856 /*----------------------------------------------------------------- 2857 * Event notification. 2858 *---------------------------------------------------------------*/ 2859 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, 2860 unsigned cookie) 2861 { 2862 char udev_cookie[DM_COOKIE_LENGTH]; 2863 char *envp[] = { udev_cookie, NULL }; 2864 2865 if (!cookie) 2866 return kobject_uevent(&disk_to_dev(md->disk)->kobj, action); 2867 else { 2868 snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u", 2869 DM_COOKIE_ENV_VAR_NAME, cookie); 2870 return kobject_uevent_env(&disk_to_dev(md->disk)->kobj, 2871 action, envp); 2872 } 2873 } 2874 2875 uint32_t dm_next_uevent_seq(struct mapped_device *md) 2876 { 2877 return atomic_add_return(1, &md->uevent_seq); 2878 } 2879 2880 uint32_t dm_get_event_nr(struct mapped_device *md) 2881 { 2882 return atomic_read(&md->event_nr); 2883 } 2884 2885 int dm_wait_event(struct mapped_device *md, int event_nr) 2886 { 2887 return wait_event_interruptible(md->eventq, 2888 (event_nr != atomic_read(&md->event_nr))); 2889 } 2890 2891 void dm_uevent_add(struct mapped_device *md, struct list_head *elist) 2892 { 2893 unsigned long flags; 2894 2895 spin_lock_irqsave(&md->uevent_lock, flags); 2896 list_add(elist, &md->uevent_list); 2897 spin_unlock_irqrestore(&md->uevent_lock, flags); 2898 } 2899 2900 /* 2901 * The gendisk is only valid as long as you have a reference 2902 * count on 'md'. 
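/*
 * Editor's sketch (not driver code): the event polling protocol built on
 * dm_get_event_nr() and dm_wait_event().  Sample the counter, act on what has
 * been seen, then sleep until the counter moves past that value; this mirrors
 * what the DM_DEV_WAIT ioctl does for userspace.  The function name is made up.
 */
static int __maybe_unused example_wait_for_event(struct mapped_device *md)
{
	uint32_t seen = dm_get_event_nr(md);

	/* ... report status as of 'seen' ... */

	return dm_wait_event(md, seen);	/* -ERESTARTSYS if interrupted */
}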
2903 */ 2904 struct gendisk *dm_disk(struct mapped_device *md) 2905 { 2906 return md->disk; 2907 } 2908 EXPORT_SYMBOL_GPL(dm_disk); 2909 2910 struct kobject *dm_kobject(struct mapped_device *md) 2911 { 2912 return &md->kobj_holder.kobj; 2913 } 2914 2915 struct mapped_device *dm_get_from_kobject(struct kobject *kobj) 2916 { 2917 struct mapped_device *md; 2918 2919 md = container_of(kobj, struct mapped_device, kobj_holder.kobj); 2920 2921 spin_lock(&_minor_lock); 2922 if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { 2923 md = NULL; 2924 goto out; 2925 } 2926 dm_get(md); 2927 out: 2928 spin_unlock(&_minor_lock); 2929 2930 return md; 2931 } 2932 2933 int dm_suspended_md(struct mapped_device *md) 2934 { 2935 return test_bit(DMF_SUSPENDED, &md->flags); 2936 } 2937 2938 int dm_suspended_internally_md(struct mapped_device *md) 2939 { 2940 return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 2941 } 2942 2943 int dm_test_deferred_remove_flag(struct mapped_device *md) 2944 { 2945 return test_bit(DMF_DEFERRED_REMOVE, &md->flags); 2946 } 2947 2948 int dm_suspended(struct dm_target *ti) 2949 { 2950 return dm_suspended_md(dm_table_get_md(ti->table)); 2951 } 2952 EXPORT_SYMBOL_GPL(dm_suspended); 2953 2954 int dm_noflush_suspending(struct dm_target *ti) 2955 { 2956 return __noflush_suspending(dm_table_get_md(ti->table)); 2957 } 2958 EXPORT_SYMBOL_GPL(dm_noflush_suspending); 2959 2960 struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type, 2961 unsigned integrity, unsigned per_io_data_size, 2962 unsigned min_pool_size) 2963 { 2964 struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id); 2965 unsigned int pool_size = 0; 2966 unsigned int front_pad, io_front_pad; 2967 int ret; 2968 2969 if (!pools) 2970 return NULL; 2971 2972 switch (type) { 2973 case DM_TYPE_BIO_BASED: 2974 case DM_TYPE_DAX_BIO_BASED: 2975 case DM_TYPE_NVME_BIO_BASED: 2976 pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size); 2977 front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone); 2978 io_front_pad = roundup(front_pad, __alignof__(struct dm_io)) + offsetof(struct dm_io, tio); 2979 ret = bioset_init(&pools->io_bs, pool_size, io_front_pad, 0); 2980 if (ret) 2981 goto out; 2982 if (integrity && bioset_integrity_create(&pools->io_bs, pool_size)) 2983 goto out; 2984 break; 2985 case DM_TYPE_REQUEST_BASED: 2986 pool_size = max(dm_get_reserved_rq_based_ios(), min_pool_size); 2987 front_pad = offsetof(struct dm_rq_clone_bio_info, clone); 2988 /* per_io_data_size is used for blk-mq pdu at queue allocation */ 2989 break; 2990 default: 2991 BUG(); 2992 } 2993 2994 ret = bioset_init(&pools->bs, pool_size, front_pad, 0); 2995 if (ret) 2996 goto out; 2997 2998 if (integrity && bioset_integrity_create(&pools->bs, pool_size)) 2999 goto out; 3000 3001 return pools; 3002 3003 out: 3004 dm_free_md_mempools(pools); 3005 3006 return NULL; 3007 } 3008 3009 void dm_free_md_mempools(struct dm_md_mempools *pools) 3010 { 3011 if (!pools) 3012 return; 3013 3014 bioset_exit(&pools->bs); 3015 bioset_exit(&pools->io_bs); 3016 3017 kfree(pools); 3018 } 3019 3020 struct dm_pr { 3021 u64 old_key; 3022 u64 new_key; 3023 u32 flags; 3024 bool fail_early; 3025 }; 3026 3027 static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn, 3028 void *data) 3029 { 3030 struct mapped_device *md = bdev->bd_disk->private_data; 3031 struct dm_table *table; 3032 struct dm_target *ti; 3033 int ret = -ENOTTY, 
srcu_idx; 3034 3035 table = dm_get_live_table(md, &srcu_idx); 3036 if (!table || !dm_table_get_size(table)) 3037 goto out; 3038 3039 /* We only support devices that have a single target */ 3040 if (dm_table_get_num_targets(table) != 1) 3041 goto out; 3042 ti = dm_table_get_target(table, 0); 3043 3044 ret = -EINVAL; 3045 if (!ti->type->iterate_devices) 3046 goto out; 3047 3048 ret = ti->type->iterate_devices(ti, fn, data); 3049 out: 3050 dm_put_live_table(md, srcu_idx); 3051 return ret; 3052 } 3053 3054 /* 3055 * For register / unregister we need to manually call out to every path. 3056 */ 3057 static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev, 3058 sector_t start, sector_t len, void *data) 3059 { 3060 struct dm_pr *pr = data; 3061 const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; 3062 3063 if (!ops || !ops->pr_register) 3064 return -EOPNOTSUPP; 3065 return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags); 3066 } 3067 3068 static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key, 3069 u32 flags) 3070 { 3071 struct dm_pr pr = { 3072 .old_key = old_key, 3073 .new_key = new_key, 3074 .flags = flags, 3075 .fail_early = true, 3076 }; 3077 int ret; 3078 3079 ret = dm_call_pr(bdev, __dm_pr_register, &pr); 3080 if (ret && new_key) { 3081 /* unregister all paths if we failed to register any path */ 3082 pr.old_key = new_key; 3083 pr.new_key = 0; 3084 pr.flags = 0; 3085 pr.fail_early = false; 3086 dm_call_pr(bdev, __dm_pr_register, &pr); 3087 } 3088 3089 return ret; 3090 } 3091 3092 static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type, 3093 u32 flags) 3094 { 3095 struct mapped_device *md = bdev->bd_disk->private_data; 3096 const struct pr_ops *ops; 3097 int r, srcu_idx; 3098 3099 r = dm_prepare_ioctl(md, &srcu_idx, &bdev); 3100 if (r < 0) 3101 goto out; 3102 3103 ops = bdev->bd_disk->fops->pr_ops; 3104 if (ops && ops->pr_reserve) 3105 r = ops->pr_reserve(bdev, key, type, flags); 3106 else 3107 r = -EOPNOTSUPP; 3108 out: 3109 dm_unprepare_ioctl(md, srcu_idx); 3110 return r; 3111 } 3112 3113 static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type) 3114 { 3115 struct mapped_device *md = bdev->bd_disk->private_data; 3116 const struct pr_ops *ops; 3117 int r, srcu_idx; 3118 3119 r = dm_prepare_ioctl(md, &srcu_idx, &bdev); 3120 if (r < 0) 3121 goto out; 3122 3123 ops = bdev->bd_disk->fops->pr_ops; 3124 if (ops && ops->pr_release) 3125 r = ops->pr_release(bdev, key, type); 3126 else 3127 r = -EOPNOTSUPP; 3128 out: 3129 dm_unprepare_ioctl(md, srcu_idx); 3130 return r; 3131 } 3132 3133 static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key, 3134 enum pr_type type, bool abort) 3135 { 3136 struct mapped_device *md = bdev->bd_disk->private_data; 3137 const struct pr_ops *ops; 3138 int r, srcu_idx; 3139 3140 r = dm_prepare_ioctl(md, &srcu_idx, &bdev); 3141 if (r < 0) 3142 goto out; 3143 3144 ops = bdev->bd_disk->fops->pr_ops; 3145 if (ops && ops->pr_preempt) 3146 r = ops->pr_preempt(bdev, old_key, new_key, type, abort); 3147 else 3148 r = -EOPNOTSUPP; 3149 out: 3150 dm_unprepare_ioctl(md, srcu_idx); 3151 return r; 3152 } 3153 3154 static int dm_pr_clear(struct block_device *bdev, u64 key) 3155 { 3156 struct mapped_device *md = bdev->bd_disk->private_data; 3157 const struct pr_ops *ops; 3158 int r, srcu_idx; 3159 3160 r = dm_prepare_ioctl(md, &srcu_idx, &bdev); 3161 if (r < 0) 3162 goto out; 3163 3164 ops = bdev->bd_disk->fops->pr_ops; 3165 if (ops && ops->pr_clear) 3166 r = 
ops->pr_clear(bdev, key); 3167 else 3168 r = -EOPNOTSUPP; 3169 out: 3170 dm_unprepare_ioctl(md, srcu_idx); 3171 return r; 3172 } 3173 3174 static const struct pr_ops dm_pr_ops = { 3175 .pr_register = dm_pr_register, 3176 .pr_reserve = dm_pr_reserve, 3177 .pr_release = dm_pr_release, 3178 .pr_preempt = dm_pr_preempt, 3179 .pr_clear = dm_pr_clear, 3180 }; 3181 3182 static const struct block_device_operations dm_blk_dops = { 3183 .open = dm_blk_open, 3184 .release = dm_blk_close, 3185 .ioctl = dm_blk_ioctl, 3186 .getgeo = dm_blk_getgeo, 3187 .report_zones = dm_blk_report_zones, 3188 .pr_ops = &dm_pr_ops, 3189 .owner = THIS_MODULE 3190 }; 3191 3192 static const struct dax_operations dm_dax_ops = { 3193 .direct_access = dm_dax_direct_access, 3194 .dax_supported = dm_dax_supported, 3195 .copy_from_iter = dm_dax_copy_from_iter, 3196 .copy_to_iter = dm_dax_copy_to_iter, 3197 }; 3198 3199 /* 3200 * module hooks 3201 */ 3202 module_init(dm_init); 3203 module_exit(dm_exit); 3204 3205 module_param(major, uint, 0); 3206 MODULE_PARM_DESC(major, "The major number of the device mapper"); 3207 3208 module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR); 3209 MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools"); 3210 3211 module_param(dm_numa_node, int, S_IRUGO | S_IWUSR); 3212 MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations"); 3213 3214 MODULE_DESCRIPTION(DM_NAME " driver"); 3215 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); 3216 MODULE_LICENSE("GPL"); 3217
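/*
 * Editor's sketch (illustration only), relating to the persistent reservation
 * plumbing above: the shape of an iterate_devices_callout_fn as dm_call_pr()
 * uses it.  The single target's ->iterate_devices method invokes the callback
 * once per underlying device (e.g. once per multipath path) and typically
 * stops early on a non-zero return, which is why dm_pr_register() can fall
 * back to unregistering every path after a partial failure.  The callback
 * below is made up and only counts devices.
 */
static int __maybe_unused example_count_devices(struct dm_target *ti,
						struct dm_dev *dev,
						sector_t start, sector_t len,
						void *data)
{
	unsigned int *count = data;

	(*count)++;
	return 0;
}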