/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/uio.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/pr.h>
#include <linux/refcount.h>
#include <linux/part_stat.h>
#include <linux/blk-crypto.h>
#include <linux/keyslot-manager.h>

#define DM_MSG_PREFIX "core"

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_IDR(_minor_idr);

static DEFINE_SPINLOCK(_minor_lock);

static void do_deferred_remove(struct work_struct *w);

static DECLARE_WORK(deferred_remove_work, do_deferred_remove);

static struct workqueue_struct *deferred_remove_workqueue;

atomic_t dm_global_event_nr = ATOMIC_INIT(0);
DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);

void dm_issue_global_event(void)
{
	atomic_inc(&dm_global_event_nr);
	wake_up(&dm_global_eventq);
}

/*
 * One of these is allocated (on-stack) per original bio.
 */
struct clone_info {
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	unsigned sector_count;
};

/*
 * One of these is allocated per clone bio.
 */
#define DM_TIO_MAGIC 7282014
struct dm_target_io {
	unsigned magic;
	struct dm_io *io;
	struct dm_target *ti;
	unsigned target_bio_nr;
	unsigned *len_ptr;
	bool inside_dm_io;
	struct bio clone;
};

/*
 * One of these is allocated per original bio.
 * It contains the first clone used for that original.
 */
#define DM_IO_MAGIC 5191977
struct dm_io {
	unsigned magic;
	struct mapped_device *md;
	blk_status_t status;
	atomic_t io_count;
	struct bio *orig_bio;
	unsigned long start_time;
	spinlock_t endio_lock;
	struct dm_stats_aux stats_aux;
	/* last member of dm_target_io is 'struct bio' */
	struct dm_target_io tio;
};

#define DM_TARGET_IO_BIO_OFFSET (offsetof(struct dm_target_io, clone))
#define DM_IO_BIO_OFFSET \
	(offsetof(struct dm_target_io, clone) + offsetof(struct dm_io, tio))

void *dm_per_bio_data(struct bio *bio, size_t data_size)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	if (!tio->inside_dm_io)
		return (char *)bio - DM_TARGET_IO_BIO_OFFSET - data_size;
	return (char *)bio - DM_IO_BIO_OFFSET - data_size;
}
EXPORT_SYMBOL_GPL(dm_per_bio_data);

struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
{
	struct dm_io *io = (struct dm_io *)((char *)data + data_size);
	if (io->magic == DM_IO_MAGIC)
		return (struct bio *)((char *)io + DM_IO_BIO_OFFSET);
	BUG_ON(io->magic != DM_TIO_MAGIC);
	return (struct bio *)((char *)io + DM_TARGET_IO_BIO_OFFSET);
}
EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data);

unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
{
	return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
}
EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);
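
/*
 * Illustrative sketch (not part of this file's code): how a hypothetical
 * bio-based target might use the per-bio-data accessors above.  The
 * "example_*" names and the context layout are assumptions made up for
 * this sketch; only ti->per_io_data_size, dm_per_bio_data() and the
 * DM_MAPIO_* return values are the real interfaces.
 *
 *	struct example_per_bio_data {
 *		sector_t original_sector;
 *	};
 *
 *	static int example_ctr(struct dm_target *ti, unsigned argc, char **argv)
 *	{
 *		ti->per_io_data_size = sizeof(struct example_per_bio_data);
 *		return 0;
 *	}
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		struct example_per_bio_data *pb =
 *			dm_per_bio_data(bio, sizeof(struct example_per_bio_data));
 *
 *		pb->original_sector = bio->bi_iter.bi_sector;
 *		// ... remap the bio as the target sees fit ...
 *		return DM_MAPIO_REMAPPED;
 *	}
 */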

#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7
#define DMF_POST_SUSPENDING 8

#define DM_NUMA_NODE NUMA_NO_NODE
static int dm_numa_node = DM_NUMA_NODE;

#define DEFAULT_SWAP_BIOS	(8 * 1048576 / PAGE_SIZE)
static int swap_bios = DEFAULT_SWAP_BIOS;
static int get_swap_bios(void)
{
	int latch = READ_ONCE(swap_bios);
	if (unlikely(latch <= 0))
		latch = DEFAULT_SWAP_BIOS;
	return latch;
}

/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
	struct bio_set bs;
	struct bio_set io_bs;
};

struct table_device {
	struct list_head list;
	refcount_t count;
	struct dm_dev dm_dev;
};

/*
 * Bio-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_BIO_BASED_IOS		16
static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;

static int __dm_get_module_param_int(int *module_param, int min, int max)
{
	int param = READ_ONCE(*module_param);
	int modified_param = 0;
	bool modified = true;

	if (param < min)
		modified_param = min;
	else if (param > max)
		modified_param = max;
	else
		modified = false;

	if (modified) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned __dm_get_module_param(unsigned *module_param,
			       unsigned def, unsigned max)
{
	unsigned param = READ_ONCE(*module_param);
	unsigned modified_param = 0;

	if (!param)
		modified_param = def;
	else if (param > max)
		modified_param = max;

	if (modified_param) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned dm_get_reserved_bio_based_ios(void)
{
	return __dm_get_module_param(&reserved_bio_based_ios,
				     RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);

static unsigned dm_get_numa_node(void)
{
	return __dm_get_module_param_int(&dm_numa_node,
					 DM_NUMA_NODE, num_online_nodes() - 1);
}

static int __init local_init(void)
{
	int r;

	r = dm_uevent_init();
	if (r)
		return r;

	deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
	if (!deferred_remove_workqueue) {
		r = -ENOMEM;
		goto out_uevent_exit;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_free_workqueue;

	if (!_major)
		_major = r;

	return 0;

out_free_workqueue:
	destroy_workqueue(deferred_remove_workqueue);
out_uevent_exit:
	dm_uevent_exit();

	return r;
}

static void local_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(deferred_remove_workqueue);

	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_io_init,
	dm_kcopyd_init,
	dm_interface_init,
	dm_statistics_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_io_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
	dm_statistics_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();

	/*
	 * Should be empty by this point.
	 */
	idr_destroy(&_minor_idr);
}

/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{
	return test_bit(DMF_DELETING, &md->flags);
}

static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);
out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}

static void dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = disk->private_data;
	if (WARN_ON(!md))
		goto out;

	if (atomic_dec_and_test(&md->open_count) &&
	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
		queue_work(deferred_remove_workqueue, &deferred_remove_work);

	dm_put(md);
out:
	spin_unlock(&_minor_lock);
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md)) {
		r = -EBUSY;
		if (mark_deferred)
			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
		r = -EEXIST;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

int dm_cancel_deferred_remove(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (test_bit(DMF_DELETING, &md->flags))
		r = -EBUSY;
	else
		clear_bit(DMF_DEFERRED_REMOVE, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static void do_deferred_remove(struct work_struct *w)
{
	dm_deferred_remove();
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

#ifdef CONFIG_BLK_DEV_ZONED
int dm_report_zones_cb(struct blk_zone *zone, unsigned int idx, void *data)
{
	struct dm_report_zones_args *args = data;
	sector_t sector_diff = args->tgt->begin - args->start;

	/*
	 * Ignore zones beyond the target range.
	 */
	if (zone->start >= args->start + args->tgt->len)
		return 0;

	/*
	 * Remap the start sector and write pointer position of the zone
	 * to match its position in the target range.
	 */
	zone->start += sector_diff;
	if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL) {
		if (zone->cond == BLK_ZONE_COND_FULL)
			zone->wp = zone->start + zone->len;
		else if (zone->cond == BLK_ZONE_COND_EMPTY)
			zone->wp = zone->start;
		else
			zone->wp += sector_diff;
	}

	args->next_sector = zone->start + zone->len;
	return args->orig_cb(zone, args->zone_idx++, args->orig_data);
}
EXPORT_SYMBOL_GPL(dm_report_zones_cb);

static int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
			       unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct mapped_device *md = disk->private_data;
	struct dm_table *map;
	int srcu_idx, ret;
	struct dm_report_zones_args args = {
		.next_sector = sector,
		.orig_data = data,
		.orig_cb = cb,
	};

	if (dm_suspended_md(md))
		return -EAGAIN;

	map = dm_get_live_table(md, &srcu_idx);
	if (!map) {
		ret = -EIO;
		goto out;
	}

	do {
		struct dm_target *tgt;

		tgt = dm_table_find_target(map, args.next_sector);
		if (WARN_ON_ONCE(!tgt->type->report_zones)) {
			ret = -EIO;
			goto out;
		}

		args.tgt = tgt;
		ret = tgt->type->report_zones(tgt, &args,
					      nr_zones - args.zone_idx);
		if (ret < 0)
			goto out;
	} while (args.zone_idx < nr_zones &&
		 args.next_sector < get_capacity(disk));

	ret = args.zone_idx;
out:
	dm_put_live_table(md, srcu_idx);
	return ret;
}
#else
#define dm_blk_report_zones	NULL
#endif /* CONFIG_BLK_DEV_ZONED */

static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
			    struct block_device **bdev)
{
	struct dm_target *tgt;
	struct dm_table *map;
	int r;

retry:
	r = -ENOTTY;
	map = dm_get_live_table(md, srcu_idx);
	if (!map || !dm_table_get_size(map))
		return r;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		return r;

	tgt = dm_table_get_target(map, 0);
	if (!tgt->type->prepare_ioctl)
		return r;

	if (dm_suspended_md(md))
		return -EAGAIN;

	r = tgt->type->prepare_ioctl(tgt, bdev);
	if (r == -ENOTCONN && !fatal_signal_pending(current)) {
		dm_put_live_table(md, *srcu_idx);
		msleep(10);
		goto retry;
	}

	return r;
}

static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
{
	dm_put_live_table(md, srcu_idx);
}

static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	if (r > 0) {
		/*
		 * Target determined this ioctl is being issued against a
		 * subset of the parent bdev; require extra privileges.
		 */
		if (!capable(CAP_SYS_RAWIO)) {
			DMDEBUG_LIMIT(
				"%s: sending ioctl %x to DM device without required privilege.",
				current->comm, cmd);
			r = -ENOIOCTLCMD;
			goto out;
		}
	}

	if (!bdev->bd_disk->fops->ioctl)
		r = -ENOTTY;
	else
		r = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}

u64 dm_start_time_ns_from_clone(struct bio *bio)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	struct dm_io *io = tio->io;

	return jiffies_to_nsecs(io->start_time);
}
EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone);

static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->orig_bio;

	io->start_time = bio_start_io_acct(bio);
	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio_data_dir(bio),
				    bio->bi_iter.bi_sector, bio_sectors(bio),
				    false, 0, &io->stats_aux);
}

static void end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->orig_bio;
	unsigned long duration = jiffies - io->start_time;

	bio_end_io_acct(bio, io->start_time);

	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio_data_dir(bio),
				    bio->bi_iter.bi_sector, bio_sectors(bio),
				    true, duration, &io->stats_aux);

	/* nudge anyone waiting on suspend queue */
	if (unlikely(wq_has_sleeper(&md->wait)))
		wake_up(&md->wait);
}

static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
{
	struct dm_io *io;
	struct dm_target_io *tio;
	struct bio *clone;

	clone = bio_alloc_bioset(GFP_NOIO, 0, &md->io_bs);
	if (!clone)
		return NULL;

	tio = container_of(clone, struct dm_target_io, clone);
	tio->inside_dm_io = true;
	tio->io = NULL;

	io = container_of(tio, struct dm_io, tio);
	io->magic = DM_IO_MAGIC;
	io->status = 0;
	atomic_set(&io->io_count, 1);
	io->orig_bio = bio;
	io->md = md;
	spin_lock_init(&io->endio_lock);

	start_io_acct(io);

	return io;
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	bio_put(&io->tio.clone);
}

static struct dm_target_io *alloc_tio(struct clone_info *ci, struct dm_target *ti,
				      unsigned target_bio_nr, gfp_t gfp_mask)
{
	struct dm_target_io *tio;

	if (!ci->io->tio.io) {
		/* the dm_target_io embedded in ci->io is available */
		tio = &ci->io->tio;
	} else {
		struct bio *clone = bio_alloc_bioset(gfp_mask, 0, &ci->io->md->bs);
		if (!clone)
			return NULL;

		tio = container_of(clone, struct dm_target_io, clone);
		tio->inside_dm_io = false;
	}

	tio->magic = DM_TIO_MAGIC;
	tio->io = ci->io;
	tio->ti = ti;
	tio->target_bio_nr = target_bio_nr;

	return tio;
}

static void free_tio(struct dm_target_io *tio)
{
	if (tio->inside_dm_io)
		return;
	bio_put(&tio->clone);
}

/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&md->deferred_lock, flags);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irqrestore(&md->deferred_lock, flags);
	queue_work(md->wq, &md->work);
}

/*
 * Everyone (including functions in this file), should use this
 * function to access the md->map field, and make sure they call
 * dm_put_live_table() when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
{
	*srcu_idx = srcu_read_lock(&md->io_barrier);

	return srcu_dereference(md->map, &md->io_barrier);
}

void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
{
	srcu_read_unlock(&md->io_barrier, srcu_idx);
}

void dm_sync_table(struct mapped_device *md)
{
	synchronize_srcu(&md->io_barrier);
	synchronize_rcu_expedited();
}

/*
 * A fast alternative to dm_get_live_table/dm_put_live_table.
 * The caller must not block between these two functions.
 */
static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
{
	rcu_read_lock();
	return rcu_dereference(md->map);
}

static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
{
	rcu_read_unlock();
}

static char *_dm_claim_ptr = "I belong to device-mapper";

/*
 * Open a table device so we can use it as a map destination.
 */
static int open_table_device(struct table_device *td, dev_t dev,
			     struct mapped_device *md)
{
	struct block_device *bdev;

	int r;

	BUG_ON(td->dm_dev.bdev);

	bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _dm_claim_ptr);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	r = bd_link_disk_holder(bdev, dm_disk(md));
	if (r) {
		blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
		return r;
	}

	td->dm_dev.bdev = bdev;
	td->dm_dev.dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	return 0;
}
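
/*
 * Illustrative sketch (not part of this file's code): the
 * dm_get_live_table() / dm_put_live_table() pattern documented above,
 * as a hypothetical helper.  The SRCU index returned by
 * dm_get_live_table() must be handed back to dm_put_live_table(), and
 * the table must not be dereferenced after that.
 *
 *	static void example_walk_live_table(struct mapped_device *md)
 *	{
 *		struct dm_table *map;
 *		int srcu_idx;
 *
 *		map = dm_get_live_table(md, &srcu_idx);
 *		if (map)
 *			DMDEBUG("live table has %u targets",
 *				dm_table_get_num_targets(map));
 *		dm_put_live_table(md, srcu_idx);
 *	}
 */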

/*
 * Close a table device that we've been using.
 */
static void close_table_device(struct table_device *td, struct mapped_device *md)
{
	if (!td->dm_dev.bdev)
		return;

	bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
	blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
	put_dax(td->dm_dev.dax_dev);
	td->dm_dev.bdev = NULL;
	td->dm_dev.dax_dev = NULL;
}

static struct table_device *find_table_device(struct list_head *l, dev_t dev,
					      fmode_t mode)
{
	struct table_device *td;

	list_for_each_entry(td, l, list)
		if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
			return td;

	return NULL;
}

int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
			struct dm_dev **result)
{
	int r;
	struct table_device *td;

	mutex_lock(&md->table_devices_lock);
	td = find_table_device(&md->table_devices, dev, mode);
	if (!td) {
		td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
		if (!td) {
			mutex_unlock(&md->table_devices_lock);
			return -ENOMEM;
		}

		td->dm_dev.mode = mode;
		td->dm_dev.bdev = NULL;

		if ((r = open_table_device(td, dev, md))) {
			mutex_unlock(&md->table_devices_lock);
			kfree(td);
			return r;
		}

		format_dev_t(td->dm_dev.name, dev);

		refcount_set(&td->count, 1);
		list_add(&td->list, &md->table_devices);
	} else {
		refcount_inc(&td->count);
	}
	mutex_unlock(&md->table_devices_lock);

	*result = &td->dm_dev;
	return 0;
}
EXPORT_SYMBOL_GPL(dm_get_table_device);

void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
{
	struct table_device *td = container_of(d, struct table_device, dm_dev);

	mutex_lock(&md->table_devices_lock);
	if (refcount_dec_and_test(&td->count)) {
		close_table_device(td, md);
		list_del(&td->list);
		kfree(td);
	}
	mutex_unlock(&md->table_devices_lock);
}
EXPORT_SYMBOL(dm_put_table_device);

static void free_table_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct table_device *td = list_entry(tmp, struct table_device, list);

		DMWARN("dm_destroy: %s still exists with %d references",
		       td->dm_dev.name, refcount_read(&td->count));
		kfree(td);
	}
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, blk_status_t error)
{
	unsigned long flags;
	blk_status_t io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;

	/* Push-back supersedes any I/O errors */
	if (unlikely(error)) {
		spin_lock_irqsave(&io->endio_lock, flags);
		if (!(io->status == BLK_STS_DM_REQUEUE && __noflush_suspending(md)))
			io->status = error;
		spin_unlock_irqrestore(&io->endio_lock, flags);
	}

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->status == BLK_STS_DM_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md))
				/* NOTE early return due to BLK_STS_DM_REQUEUE below */
				bio_list_add_head(&md->deferred, io->orig_bio);
			else
				/* noflush suspend was interrupted. */
				io->status = BLK_STS_IOERR;
			spin_unlock_irqrestore(&md->deferred_lock, flags);
		}

		io_error = io->status;
		bio = io->orig_bio;
		end_io_acct(io);
		free_io(md, io);

		if (io_error == BLK_STS_DM_REQUEUE)
			return;

		if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
			/*
			 * Preflush done for flush with data, reissue
			 * without REQ_PREFLUSH.
			 */
			bio->bi_opf &= ~REQ_PREFLUSH;
			queue_io(md, bio);
		} else {
			/* done with normal IO or empty flush */
			if (io_error)
				bio->bi_status = io_error;
			bio_endio(bio);
		}
	}
}

void disable_discard(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support DISCARD, disable it */
	limits->max_discard_sectors = 0;
	blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue);
}

void disable_write_same(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE SAME, disable it */
	limits->max_write_same_sectors = 0;
}

void disable_write_zeroes(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE ZEROES, disable it */
	limits->max_write_zeroes_sectors = 0;
}

static bool swap_bios_limit(struct dm_target *ti, struct bio *bio)
{
	return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios);
}

static void clone_endio(struct bio *bio)
{
	blk_status_t error = bio->bi_status;
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;
	struct bio *orig_bio = io->orig_bio;
	struct request_queue *q = bio->bi_bdev->bd_disk->queue;

	if (unlikely(error == BLK_STS_TARGET)) {
		if (bio_op(bio) == REQ_OP_DISCARD &&
		    !q->limits.max_discard_sectors)
			disable_discard(md);
		else if (bio_op(bio) == REQ_OP_WRITE_SAME &&
			 !q->limits.max_write_same_sectors)
			disable_write_same(md);
		else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
			 !q->limits.max_write_zeroes_sectors)
			disable_write_zeroes(md);
	}

	/*
	 * For zone-append bios get offset in zone of the written
	 * sector and add that to the original bio sector pos.
	 */
	if (bio_op(orig_bio) == REQ_OP_ZONE_APPEND) {
		sector_t written_sector = bio->bi_iter.bi_sector;
		struct request_queue *q = orig_bio->bi_bdev->bd_disk->queue;
		u64 mask = (u64)blk_queue_zone_sectors(q) - 1;

		orig_bio->bi_iter.bi_sector += written_sector & mask;
	}

	if (endio) {
		int r = endio(tio->ti, bio, &error);
		switch (r) {
		case DM_ENDIO_REQUEUE:
			error = BLK_STS_DM_REQUEUE;
			fallthrough;
		case DM_ENDIO_DONE:
			break;
		case DM_ENDIO_INCOMPLETE:
			/* The target will handle the io */
			return;
		default:
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	if (unlikely(swap_bios_limit(tio->ti, bio))) {
		struct mapped_device *md = io->md;
		up(&md->swap_bios_semaphore);
	}

	free_tio(tio);
	dec_pending(io, error);
}

/*
 * Return maximum size of I/O possible at the supplied sector up to the current
 * target boundary.
 */
static inline sector_t max_io_len_target_boundary(struct dm_target *ti,
						  sector_t target_offset)
{
	return ti->len - target_offset;
}

static sector_t max_io_len(struct dm_target *ti, sector_t sector)
{
	sector_t target_offset = dm_target_offset(ti, sector);
	sector_t len = max_io_len_target_boundary(ti, target_offset);
	sector_t max_len;

	/*
	 * Does the target need to split IO even further?
	 * - varied (per target) IO splitting is a tenet of DM; this
	 *   explains why stacked chunk_sectors based splitting via
	 *   blk_max_size_offset() isn't possible here. So pass in
	 *   ti->max_io_len to override stacked chunk_sectors.
	 */
	if (ti->max_io_len) {
		max_len = blk_max_size_offset(ti->table->md->queue,
					      target_offset, ti->max_io_len);
		if (len > max_len)
			len = max_len;
	}

	return len;
}

int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
{
	if (len > UINT_MAX) {
		DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
		      (unsigned long long)len, UINT_MAX);
		ti->error = "Maximum size of target IO is too large";
		return -EINVAL;
	}

	ti->max_io_len = (uint32_t) len;

	return 0;
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);

static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
						sector_t sector, int *srcu_idx)
	__acquires(md->io_barrier)
{
	struct dm_table *map;
	struct dm_target *ti;

	map = dm_get_live_table(md, srcu_idx);
	if (!map)
		return NULL;

	ti = dm_table_find_target(map, sector);
	if (!ti)
		return NULL;

	return ti;
}

static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
				 long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long len, ret = -EIO;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->direct_access)
		goto out;
	len = max_io_len(ti, sector) / PAGE_SECTORS;
	if (len < 1)
		goto out;
	nr_pages = min(len, nr_pages);
	ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);

out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

static bool dm_dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
			     int blocksize, sector_t start, sector_t len)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	struct dm_table *map;
	bool ret = false;
	int srcu_idx;

	map = dm_get_live_table(md, &srcu_idx);
	if (!map)
		goto out;

	ret = dm_table_supports_dax(map, device_not_dax_capable, &blocksize);

out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
				    void *addr, size_t bytes, struct iov_iter *i)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long ret = 0;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->dax_copy_from_iter) {
		ret = copy_from_iter(addr, bytes, i);
		goto out;
	}
	ret = ti->type->dax_copy_from_iter(ti, pgoff, addr, bytes, i);
out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

static size_t dm_dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
				  void *addr, size_t bytes, struct iov_iter *i)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long ret = 0;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->dax_copy_to_iter) {
		ret = copy_to_iter(addr, bytes, i);
		goto out;
	}
	ret = ti->type->dax_copy_to_iter(ti, pgoff, addr, bytes, i);
out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
				  size_t nr_pages)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	int ret = -EIO;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (WARN_ON(!ti->type->dax_zero_page_range)) {
		/*
		 * ->zero_page_range() is mandatory dax operation. If we are
		 * here, something is wrong.
		 */
		goto out;
	}
	ret = ti->type->dax_zero_page_range(ti, pgoff, nr_pages);
out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

/*
 * A target may call dm_accept_partial_bio only from the map routine. It is
 * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_RESET,
 * REQ_OP_ZONE_OPEN, REQ_OP_ZONE_CLOSE and REQ_OP_ZONE_FINISH.
 *
 * dm_accept_partial_bio informs the dm that the target only wants to process
 * additional n_sectors sectors of the bio and the rest of the data should be
 * sent in the next bio.
 *
 * A diagram that explains the arithmetic:
 * +--------------------+---------------+-------+
 * |         1          |       2       |   3   |
 * +--------------------+---------------+-------+
 *
 *    <-------------- *tio->len_ptr --------------->
 *                      <------- bi_size ------->
 *                      <-- n_sectors -->
 *
 * Region 1 was already iterated over with bio_advance or similar function.
 *	(it may be empty if the target doesn't use bio_advance)
 * Region 2 is the remaining bio size that the target wants to process.
 *	(it may be empty if region 1 is non-empty, although there is no reason
 *	 to make it empty)
 * The target requires that region 3 is to be sent in the next bio.
 *
 * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
 * the partially processed part (the sum of regions 1+2) must be the same for all
 * copies of the bio.
 */
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
	BUG_ON(bio->bi_opf & REQ_PREFLUSH);
	BUG_ON(bi_size > *tio->len_ptr);
	BUG_ON(n_sectors > bi_size);
	*tio->len_ptr -= bi_size - n_sectors;
	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
}
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);

static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch)
{
	mutex_lock(&md->swap_bios_lock);
	while (latch < md->swap_bios) {
		cond_resched();
		down(&md->swap_bios_semaphore);
		md->swap_bios--;
	}
	while (latch > md->swap_bios) {
		cond_resched();
		up(&md->swap_bios_semaphore);
		md->swap_bios++;
	}
	mutex_unlock(&md->swap_bios_lock);
}

static blk_qc_t __map_bio(struct dm_target_io *tio)
{
	int r;
	sector_t sector;
	struct bio *clone = &tio->clone;
	struct dm_io *io = tio->io;
	struct dm_target *ti = tio->ti;
	blk_qc_t ret = BLK_QC_T_NONE;

	clone->bi_end_io = clone_endio;

	/*
	 * Map the clone. If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&io->io_count);
	sector = clone->bi_iter.bi_sector;

	if (unlikely(swap_bios_limit(ti, clone))) {
		struct mapped_device *md = io->md;
		int latch = get_swap_bios();
		if (unlikely(latch != md->swap_bios))
			__set_swap_bios_limit(md, latch);
		down(&md->swap_bios_semaphore);
	}

	r = ti->type->map(ti, clone);
	switch (r) {
	case DM_MAPIO_SUBMITTED:
		break;
	case DM_MAPIO_REMAPPED:
		/* the bio has been remapped so dispatch it */
		trace_block_bio_remap(clone, bio_dev(io->orig_bio), sector);
		ret = submit_bio_noacct(clone);
		break;
	case DM_MAPIO_KILL:
		if (unlikely(swap_bios_limit(ti, clone))) {
			struct mapped_device *md = io->md;
			up(&md->swap_bios_semaphore);
		}
		free_tio(tio);
		dec_pending(io, BLK_STS_IOERR);
		break;
	case DM_MAPIO_REQUEUE:
		if (unlikely(swap_bios_limit(ti, clone))) {
			struct mapped_device *md = io->md;
			up(&md->swap_bios_semaphore);
		}
		free_tio(tio);
		dec_pending(io, BLK_STS_DM_REQUEUE);
		break;
	default:
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}

	return ret;
}

static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
{
	bio->bi_iter.bi_sector = sector;
	bio->bi_iter.bi_size = to_bytes(len);
}
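
/*
 * Illustrative sketch (not part of this file's code): what a hypothetical
 * target's ->map method might look like, matching the DM_MAPIO_* return
 * values handled by __map_bio() above and the dm_accept_partial_bio()
 * contract documented earlier.  The "example" names, the ->dev/->start
 * context fields and the 8-sector limit are assumptions made up for the
 * sketch.
 *
 *	struct example_target {
 *		struct dm_dev *dev;
 *		sector_t start;
 *	};
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		struct example_target *et = ti->private;
 *
 *		// Pretend we can only handle the first 8 sectors of this clone;
 *		// DM core will resubmit the remainder as a new bio.
 *		if (bio_sectors(bio) > 8)
 *			dm_accept_partial_bio(bio, 8);
 *
 *		bio_set_dev(bio, et->dev->bdev);
 *		bio->bi_iter.bi_sector = et->start +
 *			dm_target_offset(ti, bio->bi_iter.bi_sector);
 *		return DM_MAPIO_REMAPPED;	// __map_bio() dispatches the clone
 *	}
 */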

/*
 * Creates a bio that consists of range of complete bvecs.
 */
static int clone_bio(struct dm_target_io *tio, struct bio *bio,
		     sector_t sector, unsigned len)
{
	struct bio *clone = &tio->clone;
	int r;

	__bio_clone_fast(clone, bio);

	r = bio_crypt_clone(clone, bio, GFP_NOIO);
	if (r < 0)
		return r;

	if (bio_integrity(bio)) {
		if (unlikely(!dm_target_has_integrity(tio->ti->type) &&
			     !dm_target_passes_integrity(tio->ti->type))) {
			DMWARN("%s: the target %s doesn't support integrity data.",
				dm_device_name(tio->io->md),
				tio->ti->type->name);
			return -EIO;
		}

		r = bio_integrity_clone(clone, bio, GFP_NOIO);
		if (r < 0)
			return r;
	}

	bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
	clone->bi_iter.bi_size = to_bytes(len);

	if (bio_integrity(bio))
		bio_integrity_trim(clone);

	return 0;
}

static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
				struct dm_target *ti, unsigned num_bios)
{
	struct dm_target_io *tio;
	int try;

	if (!num_bios)
		return;

	if (num_bios == 1) {
		tio = alloc_tio(ci, ti, 0, GFP_NOIO);
		bio_list_add(blist, &tio->clone);
		return;
	}

	for (try = 0; try < 2; try++) {
		int bio_nr;
		struct bio *bio;

		if (try)
			mutex_lock(&ci->io->md->table_devices_lock);
		for (bio_nr = 0; bio_nr < num_bios; bio_nr++) {
			tio = alloc_tio(ci, ti, bio_nr, try ? GFP_NOIO : GFP_NOWAIT);
			if (!tio)
				break;

			bio_list_add(blist, &tio->clone);
		}
		if (try)
			mutex_unlock(&ci->io->md->table_devices_lock);
		if (bio_nr == num_bios)
			return;

		while ((bio = bio_list_pop(blist))) {
			tio = container_of(bio, struct dm_target_io, clone);
			free_tio(tio);
		}
	}
}

static blk_qc_t __clone_and_map_simple_bio(struct clone_info *ci,
					   struct dm_target_io *tio, unsigned *len)
{
	struct bio *clone = &tio->clone;

	tio->len_ptr = len;

	__bio_clone_fast(clone, ci->bio);
	if (len)
		bio_setup_sector(clone, ci->sector, *len);

	return __map_bio(tio);
}

static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
				  unsigned num_bios, unsigned *len)
{
	struct bio_list blist = BIO_EMPTY_LIST;
	struct bio *bio;
	struct dm_target_io *tio;

	alloc_multiple_bios(&blist, ci, ti, num_bios);

	while ((bio = bio_list_pop(&blist))) {
		tio = container_of(bio, struct dm_target_io, clone);
		(void) __clone_and_map_simple_bio(ci, tio, len);
	}
}

static int __send_empty_flush(struct clone_info *ci)
{
	unsigned target_nr = 0;
	struct dm_target *ti;
	struct bio flush_bio;

	/*
	 * Use an on-stack bio for this, it's safe since we don't
	 * need to reference it after submit. It's just used as
	 * the basis for the clone(s).
	 */
	bio_init(&flush_bio, NULL, 0);
	flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
	bio_set_dev(&flush_bio, ci->io->md->disk->part0);

	ci->bio = &flush_bio;
	ci->sector_count = 0;

	BUG_ON(bio_has_data(ci->bio));
	while ((ti = dm_table_get_target(ci->map, target_nr++)))
		__send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);

	bio_uninit(ci->bio);
	return 0;
}

static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
				    sector_t sector, unsigned *len)
{
	struct bio *bio = ci->bio;
	struct dm_target_io *tio;
	int r;

	tio = alloc_tio(ci, ti, 0, GFP_NOIO);
	tio->len_ptr = len;
	r = clone_bio(tio, bio, sector, *len);
	if (r < 0) {
		free_tio(tio);
		return r;
	}
	(void) __map_bio(tio);

	return 0;
}

static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
				       unsigned num_bios)
{
	unsigned len;

	/*
	 * Even though the device advertised support for this type of
	 * request, that does not mean every target supports it, and
	 * reconfiguration might also have changed that since the
	 * check was performed.
	 */
	if (!num_bios)
		return -EOPNOTSUPP;

	len = min_t(sector_t, ci->sector_count,
		    max_io_len_target_boundary(ti, dm_target_offset(ti, ci->sector)));

	__send_duplicate_bios(ci, ti, num_bios, &len);

	ci->sector += len;
	ci->sector_count -= len;

	return 0;
}

static bool is_abnormal_io(struct bio *bio)
{
	bool r = false;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_SAME:
	case REQ_OP_WRITE_ZEROES:
		r = true;
		break;
	}

	return r;
}

static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
				  int *result)
{
	struct bio *bio = ci->bio;
	unsigned num_bios = 0;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
		num_bios = ti->num_discard_bios;
		break;
	case REQ_OP_SECURE_ERASE:
		num_bios = ti->num_secure_erase_bios;
		break;
	case REQ_OP_WRITE_SAME:
		num_bios = ti->num_write_same_bios;
		break;
	case REQ_OP_WRITE_ZEROES:
		num_bios = ti->num_write_zeroes_bios;
		break;
	default:
		return false;
	}

	*result = __send_changing_extent_only(ci, ti, num_bios);
	return true;
}
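
/*
 * Illustrative sketch (not part of this file's code): the num_*_bios
 * counters consulted by __process_abnormal_io() above are advertised by
 * the target itself, typically from its constructor.  The "example_ctr"
 * name is an assumption; the dm_target fields are real.
 *
 *	static int example_ctr(struct dm_target *ti, unsigned argc, char **argv)
 *	{
 *		// Accept one clone per flush/discard/etc. sent to this target.
 *		ti->num_flush_bios = 1;
 *		ti->num_discard_bios = 1;
 *		ti->num_secure_erase_bios = 1;
 *		ti->num_write_same_bios = 1;
 *		ti->num_write_zeroes_bios = 1;
 *		return 0;
 *	}
 */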

/*
 * Select the correct strategy for processing a non-flush bio.
 */
static int __split_and_process_non_flush(struct clone_info *ci)
{
	struct dm_target *ti;
	unsigned len;
	int r;

	ti = dm_table_find_target(ci->map, ci->sector);
	if (!ti)
		return -EIO;

	if (__process_abnormal_io(ci, ti, &r))
		return r;

	len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count);

	r = __clone_and_map_data_bio(ci, ti, ci->sector, &len);
	if (r < 0)
		return r;

	ci->sector += len;
	ci->sector_count -= len;

	return 0;
}

static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
			    struct dm_table *map, struct bio *bio)
{
	ci->map = map;
	ci->io = alloc_io(md, bio);
	ci->sector = bio->bi_iter.bi_sector;
}

#define __dm_part_stat_sub(part, field, subnd)	\
	(part_stat_get(part, field) -= (subnd))

/*
 * Entry point to split a bio into clones and submit them to the targets.
 */
static blk_qc_t __split_and_process_bio(struct mapped_device *md,
					struct dm_table *map, struct bio *bio)
{
	struct clone_info ci;
	blk_qc_t ret = BLK_QC_T_NONE;
	int error = 0;

	init_clone_info(&ci, md, map, bio);

	if (bio->bi_opf & REQ_PREFLUSH) {
		error = __send_empty_flush(&ci);
		/* dec_pending submits any data associated with flush */
	} else if (op_is_zone_mgmt(bio_op(bio))) {
		ci.bio = bio;
		ci.sector_count = 0;
		error = __split_and_process_non_flush(&ci);
	} else {
		ci.bio = bio;
		ci.sector_count = bio_sectors(bio);
		while (ci.sector_count && !error) {
			error = __split_and_process_non_flush(&ci);
			if (ci.sector_count && !error) {
				/*
				 * Remainder must be passed to submit_bio_noacct()
				 * so that it gets handled *after* bios already submitted
				 * have been completely processed.
				 * We take a clone of the original to store in
				 * ci.io->orig_bio to be used by end_io_acct() and
				 * for dec_pending to use for completion handling.
				 */
				struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count,
							  GFP_NOIO, &md->queue->bio_split);
				ci.io->orig_bio = b;

				/*
				 * Adjust IO stats for each split, otherwise upon queue
				 * reentry there will be redundant IO accounting.
				 * NOTE: this is a stop-gap fix, a proper fix involves
				 * significant refactoring of DM core's bio splitting
				 * (by eliminating DM's splitting and just using bio_split)
				 */
				part_stat_lock();
				__dm_part_stat_sub(dm_disk(md)->part0,
						   sectors[op_stat_group(bio_op(bio))], ci.sector_count);
				part_stat_unlock();

				bio_chain(b, bio);
				trace_block_split(b, bio->bi_iter.bi_sector);
				ret = submit_bio_noacct(bio);
				break;
			}
		}
	}

	/* drop the extra reference count */
	dec_pending(ci.io, errno_to_blk_status(error));
	return ret;
}

static blk_qc_t dm_submit_bio(struct bio *bio)
{
	struct mapped_device *md = bio->bi_bdev->bd_disk->private_data;
	blk_qc_t ret = BLK_QC_T_NONE;
	int srcu_idx;
	struct dm_table *map;

	map = dm_get_live_table(md, &srcu_idx);
	if (unlikely(!map)) {
		DMERR_LIMIT("%s: mapping table unavailable, erroring io",
			    dm_device_name(md));
		bio_io_error(bio);
		goto out;
	}

	/* If suspended, queue this IO for later */
	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
		if (bio->bi_opf & REQ_NOWAIT)
			bio_wouldblock_error(bio);
		else if (bio->bi_opf & REQ_RAHEAD)
			bio_io_error(bio);
		else
			queue_io(md, bio);
		goto out;
	}

	/*
	 * Use blk_queue_split() for abnormal IO (e.g. discard, writesame, etc)
	 * otherwise associated queue_limits won't be imposed.
	 */
	if (is_abnormal_io(bio))
		blk_queue_split(&bio);

	ret = __split_and_process_bio(md, map, bio);
out:
	dm_put_live_table(md, srcu_idx);
	return ret;
}

/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static void free_minor(int minor)
{
	spin_lock(&_minor_lock);
	idr_remove(&_minor_idr, minor);
	spin_unlock(&_minor_lock);
}

/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(int minor)
{
	int r;

	if (minor >= (1 << MINORBITS))
		return -EINVAL;

	idr_preload(GFP_KERNEL);
	spin_lock(&_minor_lock);

	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);

	spin_unlock(&_minor_lock);
	idr_preload_end();
	if (r < 0)
		return r == -ENOSPC ? -EBUSY : r;
	return 0;
}

static int next_free_minor(int *minor)
{
	int r;

	idr_preload(GFP_KERNEL);
	spin_lock(&_minor_lock);

	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);

	spin_unlock(&_minor_lock);
	idr_preload_end();
	if (r < 0)
		return r;
	*minor = r;
	return 0;
}

static const struct block_device_operations dm_blk_dops;
static const struct block_device_operations dm_rq_blk_dops;
static const struct dax_operations dm_dax_ops;

static void dm_wq_work(struct work_struct *work);

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
static void dm_queue_destroy_keyslot_manager(struct request_queue *q)
{
	dm_destroy_keyslot_manager(q->ksm);
}

#else /* CONFIG_BLK_INLINE_ENCRYPTION */

static inline void dm_queue_destroy_keyslot_manager(struct request_queue *q)
{
}
#endif /* !CONFIG_BLK_INLINE_ENCRYPTION */

static void cleanup_mapped_device(struct mapped_device *md)
{
	if (md->wq)
		destroy_workqueue(md->wq);
	bioset_exit(&md->bs);
	bioset_exit(&md->io_bs);

	if (md->dax_dev) {
		kill_dax(md->dax_dev);
		put_dax(md->dax_dev);
		md->dax_dev = NULL;
	}

	if (md->disk) {
		spin_lock(&_minor_lock);
		md->disk->private_data = NULL;
		spin_unlock(&_minor_lock);
		del_gendisk(md->disk);
		put_disk(md->disk);
	}

	if (md->queue) {
		dm_queue_destroy_keyslot_manager(md->queue);
		blk_cleanup_queue(md->queue);
	}

	cleanup_srcu_struct(&md->io_barrier);

	mutex_destroy(&md->suspend_lock);
	mutex_destroy(&md->type_lock);
	mutex_destroy(&md->table_devices_lock);
	mutex_destroy(&md->swap_bios_lock);

	dm_mq_cleanup_mapped_device(md);
}

/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(int minor)
{
	int r, numa_node_id = dm_get_numa_node();
	struct mapped_device *md;
	void *old_md;

	md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	if (!try_module_get(THIS_MODULE))
		goto bad_module_get;

	/* get a minor number for the dev */
	if (minor == DM_ANY_MINOR)
		r = next_free_minor(&minor);
	else
		r = specific_minor(minor);
	if (r < 0)
		goto bad_minor;

	r = init_srcu_struct(&md->io_barrier);
	if (r < 0)
		goto bad_io_barrier;

	md->numa_node_id = numa_node_id;
	md->init_tio_pdu = false;
	md->type = DM_TYPE_NONE;
	mutex_init(&md->suspend_lock);
	mutex_init(&md->type_lock);
	mutex_init(&md->table_devices_lock);
	spin_lock_init(&md->deferred_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->open_count, 0);
	atomic_set(&md->event_nr, 0);
	atomic_set(&md->uevent_seq, 0);
	INIT_LIST_HEAD(&md->uevent_list);
	INIT_LIST_HEAD(&md->table_devices);
	spin_lock_init(&md->uevent_lock);

	/*
	 * default to bio-based until DM table is loaded and md->type
	 * established. If request-based table is loaded: blk-mq will
	 * override accordingly.
	 */
	md->queue = blk_alloc_queue(numa_node_id);
	if (!md->queue)
		goto bad;

	md->disk = alloc_disk_node(1, md->numa_node_id);
	if (!md->disk)
		goto bad;

	init_waitqueue_head(&md->wait);
	INIT_WORK(&md->work, dm_wq_work);
	init_waitqueue_head(&md->eventq);
	init_completion(&md->kobj_holder.completion);

	md->swap_bios = get_swap_bios();
	sema_init(&md->swap_bios_semaphore, md->swap_bios);
	mutex_init(&md->swap_bios_lock);

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);

	if (IS_ENABLED(CONFIG_DAX_DRIVER)) {
		md->dax_dev = alloc_dax(md, md->disk->disk_name,
					&dm_dax_ops, 0);
		if (IS_ERR(md->dax_dev))
			goto bad;
	}

	add_disk_no_queue_reg(md->disk);
	format_dev_t(md->name, MKDEV(_major, minor));

	md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
	if (!md->wq)
		goto bad;

	dm_stats_init(&md->stats);

	/* Populate the mapping, nobody knows we exist yet */
	spin_lock(&_minor_lock);
	old_md = idr_replace(&_minor_idr, md, minor);
	spin_unlock(&_minor_lock);

	BUG_ON(old_md != MINOR_ALLOCED);

	return md;

bad:
	cleanup_mapped_device(md);
bad_io_barrier:
	free_minor(minor);
bad_minor:
	module_put(THIS_MODULE);
bad_module_get:
	kvfree(md);
	return NULL;
}

static void unlock_fs(struct mapped_device *md);

static void free_dev(struct mapped_device *md)
{
	int minor = MINOR(disk_devt(md->disk));

	unlock_fs(md);

	cleanup_mapped_device(md);

	free_table_devices(&md->table_devices);
	dm_stats_cleanup(&md->stats);
	free_minor(minor);

	module_put(THIS_MODULE);
	kvfree(md);
}

static int __bind_mempools(struct mapped_device *md, struct dm_table *t)
{
	struct dm_md_mempools *p = dm_table_get_md_mempools(t);
	int ret = 0;

	if (dm_table_bio_based(t)) {
		/*
		 * The md may already have mempools that need changing.
		 * If so, reload bioset because front_pad may have changed
		 * because a different table was loaded.
		 */
		bioset_exit(&md->bs);
		bioset_exit(&md->io_bs);

	} else if (bioset_initialized(&md->bs)) {
		/*
		 * There's no need to reload with request-based dm
		 * because the size of front_pad doesn't change.
		 * Note for future: If you are to reload bioset,
		 * prep-ed requests in the queue may refer
		 * to bio from the old bioset, so you must walk
		 * through the queue to unprep.
		 */
		goto out;
	}

	BUG_ON(!p ||
	       bioset_initialized(&md->bs) ||
	       bioset_initialized(&md->io_bs));

	ret = bioset_init_from_src(&md->bs, &p->bs);
	if (ret)
		goto out;
	ret = bioset_init_from_src(&md->io_bs, &p->io_bs);
	if (ret)
		bioset_exit(&md->bs);
out:
	/* mempool bind completed, no longer need any mempools in the table */
	dm_table_free_md_mempools(t);
	return ret;
}

/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
{
	unsigned long flags;
	LIST_HEAD(uevents);
	struct mapped_device *md = (struct mapped_device *) context;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_splice_init(&md->uevent_list, &uevents);
	spin_unlock_irqrestore(&md->uevent_lock, flags);

	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);

	atomic_inc(&md->event_nr);
	wake_up(&md->eventq);
	dm_issue_global_event();
}

/*
 * Returns old map, which caller must destroy.
 */
static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
			       struct queue_limits *limits)
{
	struct dm_table *old_map;
	struct request_queue *q = md->queue;
	bool request_based = dm_table_request_based(t);
	sector_t size;
	int ret;

	lockdep_assert_held(&md->suspend_lock);

	size = dm_table_get_size(t);

	/*
	 * Wipe any geometry if the size of the table changed.
	 */
	if (size != dm_get_size(md))
		memset(&md->geometry, 0, sizeof(md->geometry));

	if (!get_capacity(md->disk))
		set_capacity(md->disk, size);
	else
		set_capacity_and_notify(md->disk, size);

	dm_table_event_callback(t, event_callback, md);

	/*
	 * The queue hasn't been stopped yet, if the old table type wasn't
	 * for request-based during suspension. So stop it to prevent
	 * I/O mapping before resume.
	 * This must be done before setting the queue restrictions,
	 * because request-based dm may be run just after the setting.
	 */
	if (request_based)
		dm_stop_queue(q);

	if (request_based) {
		/*
		 * Leverage the fact that request-based DM targets are
		 * immutable singletons - used to optimize dm_mq_queue_rq.
		 */
		md->immutable_target = dm_table_get_immutable_target(t);
	}

	ret = __bind_mempools(md, t);
	if (ret) {
		old_map = ERR_PTR(ret);
		goto out;
	}

	old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
	rcu_assign_pointer(md->map, (void *)t);
	md->immutable_target_type = dm_table_get_immutable_target_type(t);

	dm_table_set_restrictions(t, q, limits);
	if (old_map)
		dm_sync_table(md);

out:
	return old_map;
}

/*
 * Returns unbound table for the caller to free.
 */
static struct dm_table *__unbind(struct mapped_device *md)
{
	struct dm_table *map = rcu_dereference_protected(md->map, 1);

	if (!map)
		return NULL;

	dm_table_event_callback(map, NULL, NULL);
	RCU_INIT_POINTER(md->map, NULL);
	dm_sync_table(md);

	return map;
}

/*
 * Constructor for a new device.
 */
int dm_create(int minor, struct mapped_device **result)
{
	int r;
	struct mapped_device *md;

	md = alloc_dev(minor);
	if (!md)
		return -ENXIO;

	r = dm_sysfs_init(md);
	if (r) {
		free_dev(md);
		return r;
	}

	*result = md;
	return 0;
}
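
/*
 * Illustrative sketch (not part of this file's code): roughly how a
 * caller such as the DM ioctl layer is expected to combine the md->type
 * locking helpers below with dm_setup_md_queue() when the first table is
 * loaded.  Error handling is omitted and the exact surrounding logic is
 * an assumption; only the lock ordering is the point.
 *
 *	dm_lock_md_type(md);
 *	if (dm_get_md_type(md) == DM_TYPE_NONE) {
 *		dm_set_md_type(md, dm_table_get_type(t));
 *		r = dm_setup_md_queue(md, t);
 *	}
 *	dm_unlock_md_type(md);
 */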

/*
 * Functions to manage md->type.
 * All are required to hold md->type_lock.
 */
void dm_lock_md_type(struct mapped_device *md)
{
	mutex_lock(&md->type_lock);
}

void dm_unlock_md_type(struct mapped_device *md)
{
	mutex_unlock(&md->type_lock);
}

void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type)
{
	BUG_ON(!mutex_is_locked(&md->type_lock));
	md->type = type;
}

enum dm_queue_mode dm_get_md_type(struct mapped_device *md)
{
	return md->type;
}

struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
{
	return md->immutable_target_type;
}

/*
 * The queue_limits are only valid as long as you have a reference
 * count on 'md'.
 */
struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
{
	BUG_ON(!atomic_read(&md->holders));
	return &md->queue->limits;
}
EXPORT_SYMBOL_GPL(dm_get_queue_limits);

/*
 * Setup the DM device's queue based on md's type
 */
int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
{
	int r;
	struct queue_limits limits;
	enum dm_queue_mode type = dm_get_md_type(md);

	switch (type) {
	case DM_TYPE_REQUEST_BASED:
		md->disk->fops = &dm_rq_blk_dops;
		r = dm_mq_init_request_queue(md, t);
		if (r) {
			DMERR("Cannot initialize queue for request-based dm mapped device");
			return r;
		}
		break;
	case DM_TYPE_BIO_BASED:
	case DM_TYPE_DAX_BIO_BASED:
		break;
	case DM_TYPE_NONE:
		WARN_ON_ONCE(true);
		break;
	}

	r = dm_calculate_queue_limits(t, &limits);
	if (r) {
		DMERR("Cannot calculate initial queue limits");
		return r;
	}
	dm_table_set_restrictions(t, md->queue, &limits);
	blk_register_queue(md->disk);

	return 0;
}

struct mapped_device *dm_get_md(dev_t dev)
{
	struct mapped_device *md;
	unsigned minor = MINOR(dev);

	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
		return NULL;

	spin_lock(&_minor_lock);

	md = idr_find(&_minor_idr, minor);
	if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) ||
	    test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}
	dm_get(md);
out:
	spin_unlock(&_minor_lock);

	return md;
}
EXPORT_SYMBOL_GPL(dm_get_md);

void *dm_get_mdptr(struct mapped_device *md)
{
	return md->interface_ptr;
}

void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
	md->interface_ptr = ptr;
}

void dm_get(struct mapped_device *md)
{
	atomic_inc(&md->holders);
	BUG_ON(test_bit(DMF_FREEING, &md->flags));
}

int dm_hold(struct mapped_device *md)
{
	spin_lock(&_minor_lock);
	if (test_bit(DMF_FREEING, &md->flags)) {
		spin_unlock(&_minor_lock);
		return -EBUSY;
	}
	dm_get(md);
	spin_unlock(&_minor_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(dm_hold);

const char *dm_device_name(struct mapped_device *md)
{
	return md->name;
}
EXPORT_SYMBOL_GPL(dm_device_name);

static void __dm_destroy(struct mapped_device *md, bool wait)
{
	struct dm_table *map;
	int srcu_idx;

	might_sleep();

	spin_lock(&_minor_lock);
	idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
	set_bit(DMF_FREEING, &md->flags);
	spin_unlock(&_minor_lock);

	blk_set_queue_dying(md->queue);

	/*
	 * Take suspend_lock so that presuspend and postsuspend methods
	 * do not race with internal suspend.
	 */
	mutex_lock(&md->suspend_lock);
	map = dm_get_live_table(md, &srcu_idx);
	if (!dm_suspended_md(md)) {
		dm_table_presuspend_targets(map);
		set_bit(DMF_SUSPENDED, &md->flags);
		set_bit(DMF_POST_SUSPENDING, &md->flags);
		dm_table_postsuspend_targets(map);
	}
	/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
	dm_put_live_table(md, srcu_idx);
	mutex_unlock(&md->suspend_lock);

	/*
	 * Rarely, there may still be outstanding I/O (for example, requests
	 * that have yet to complete).  Wait for all references to disappear.
	 * No one may increment the reference count of the mapped_device once
	 * its state becomes DMF_FREEING.
	 */
	if (wait)
		while (atomic_read(&md->holders))
			msleep(1);
	else if (atomic_read(&md->holders))
		DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
		       dm_device_name(md), atomic_read(&md->holders));

	dm_sysfs_exit(md);
	dm_table_destroy(__unbind(md));
	free_dev(md);
}

void dm_destroy(struct mapped_device *md)
{
	__dm_destroy(md, true);
}

void dm_destroy_immediate(struct mapped_device *md)
{
	__dm_destroy(md, false);
}

void dm_put(struct mapped_device *md)
{
	atomic_dec(&md->holders);
}
EXPORT_SYMBOL_GPL(dm_put);

static bool md_in_flight_bios(struct mapped_device *md)
{
	int cpu;
	struct block_device *part = dm_disk(md)->part0;
	long sum = 0;

	for_each_possible_cpu(cpu) {
		sum += part_stat_local_read_cpu(part, in_flight[0], cpu);
		sum += part_stat_local_read_cpu(part, in_flight[1], cpu);
	}

	return sum != 0;
}

static int dm_wait_for_bios_completion(struct mapped_device *md, long task_state)
{
	int r = 0;
	DEFINE_WAIT(wait);

	while (true) {
		prepare_to_wait(&md->wait, &wait, task_state);

		if (!md_in_flight_bios(md))
			break;

		if (signal_pending_state(task_state, current)) {
			r = -EINTR;
			break;
		}

		io_schedule();
	}
	finish_wait(&md->wait, &wait);

	return r;
}

static int dm_wait_for_completion(struct mapped_device *md, long task_state)
{
	int r = 0;

	if (!queue_is_mq(md->queue))
		return dm_wait_for_bios_completion(md, task_state);

	while (true) {
		if (!blk_mq_queue_inflight(md->queue))
			break;

		if (signal_pending_state(task_state, current)) {
			r = -EINTR;
			break;
		}

		msleep(5);
	}

	return r;
}

/*
 * Process the deferred bios
 */
static void dm_wq_work(struct work_struct *work)
{
	struct mapped_device *md = container_of(work, struct mapped_device, work);
	struct bio *bio;

	while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		spin_lock_irq(&md->deferred_lock);
		bio = bio_list_pop(&md->deferred);
		spin_unlock_irq(&md->deferred_lock);

		if (!bio)
			break;

		submit_bio_noacct(bio);
	}
}

static void dm_queue_flush(struct mapped_device *md)
{
	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	smp_mb__after_atomic();
	queue_work(md->wq, &md->work);
}
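
/*
 * Illustrative sketch (compiled out): the reference-counting contract around
 * dm_get_md()/dm_put() and __dm_destroy() above.  A caller that looks a
 * device up by dev_t must drop its reference again, since the destroy path
 * waits for md->holders to reach zero.  The example_* name is hypothetical.
 */
#if 0
static void example_inspect_device(dev_t dev)
{
	struct mapped_device *md = dm_get_md(dev);	/* takes a reference */

	if (!md)
		return;

	DMDEBUG("found device %s", dm_device_name(md));

	dm_put(md);					/* drop the reference */
}
#endif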

/*
 * Swap in a new table, returning the old one for the caller to destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
	struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
	struct queue_limits limits;
	int r;

	mutex_lock(&md->suspend_lock);

	/* device must be suspended */
	if (!dm_suspended_md(md))
		goto out;

	/*
	 * If the new table has no data devices, retain the existing limits.
	 * This helps multipath with queue_if_no_path: if all paths disappear,
	 * new I/O is still queued based on these limits until some paths
	 * reappear.
	 */
	if (dm_table_has_no_data_devices(table)) {
		live_map = dm_get_live_table_fast(md);
		if (live_map)
			limits = md->queue->limits;
		dm_put_live_table_fast(md);
	}

	if (!live_map) {
		r = dm_calculate_queue_limits(table, &limits);
		if (r) {
			map = ERR_PTR(r);
			goto out;
		}
	}

	map = __bind(md, table, &limits);
	dm_issue_global_event();

out:
	mutex_unlock(&md->suspend_lock);
	return map;
}

/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int lock_fs(struct mapped_device *md)
{
	int r;

	WARN_ON(test_bit(DMF_FROZEN, &md->flags));

	r = freeze_bdev(md->disk->part0);
	if (!r)
		set_bit(DMF_FROZEN, &md->flags);
	return r;
}

static void unlock_fs(struct mapped_device *md)
{
	if (!test_bit(DMF_FROZEN, &md->flags))
		return;
	thaw_bdev(md->disk->part0);
	clear_bit(DMF_FROZEN, &md->flags);
}

/*
 * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG
 * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE
 * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY
 *
 * If __dm_suspend returns 0, the device is now completely quiescent:
 * there is no request-processing activity, and all new requests are
 * added to the md->deferred list.
 */
static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
			unsigned suspend_flags, long task_state,
			int dmf_suspended_flag)
{
	bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
	bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
	int r;

	lockdep_assert_held(&md->suspend_lock);

	/*
	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
	 * This flag is cleared before dm_suspend returns.
	 */
	if (noflush)
		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
	else
		DMDEBUG("%s: suspending with flush", dm_device_name(md));

	/*
	 * This gets reverted if there's an error later and the targets
	 * provide the .presuspend_undo hook.
	 */
	dm_table_presuspend_targets(map);

	/*
	 * Flush I/O to the device.
	 * Any I/O submitted after lock_fs() may not be flushed.
	 * noflush takes precedence over do_lockfs.
	 * (lock_fs() flushes I/Os and waits for them to complete.)
	 */
	if (!noflush && do_lockfs) {
		r = lock_fs(md);
		if (r) {
			dm_table_presuspend_undo_targets(map);
			return r;
		}
	}

	/*
	 * Here we must make sure that no processes are submitting requests
	 * to target drivers i.e. no one may be executing
	 * __split_and_process_bio from dm_submit_bio.
	 *
	 * To get all processes out of __split_and_process_bio in dm_submit_bio,
	 * we take the write lock. To prevent any process from reentering
	 * __split_and_process_bio from dm_submit_bio and quiesce the thread
	 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
	 * flush_workqueue(md->wq).
	 */
	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	if (map)
		synchronize_srcu(&md->io_barrier);

	/*
	 * Stop md->queue before flushing md->wq in case request-based
	 * dm defers requests to md->wq from md->queue.
	 */
	if (dm_request_based(md))
		dm_stop_queue(md->queue);

	flush_workqueue(md->wq);

	/*
	 * At this point no more requests are entering target request routines.
	 * We call dm_wait_for_completion to wait for all existing requests
	 * to finish.
	 */
	r = dm_wait_for_completion(md, task_state);
	if (!r)
		set_bit(dmf_suspended_flag, &md->flags);

	if (noflush)
		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
	if (map)
		synchronize_srcu(&md->io_barrier);

	/* were we interrupted? */
	if (r < 0) {
		dm_queue_flush(md);

		if (dm_request_based(md))
			dm_start_queue(md->queue);

		unlock_fs(md);
		dm_table_presuspend_undo_targets(map);
		/* pushback list is already flushed, so skip flush */
	}

	return r;
}

/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example, we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in-flight
 * bios and ensure that any further I/O gets deferred.
 */
/*
 * Suspend mechanism in request-based dm.
 *
 * 1. Flush all I/Os by lock_fs() if needed.
 * 2. Stop dispatching any I/O by stopping the request_queue.
 * 3. Wait for all in-flight I/Os to be completed or requeued.
 *
 * To abort suspend, start the request_queue.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;
	int r = 0;

retry:
	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);

	if (dm_suspended_md(md)) {
		r = -EINVAL;
		goto out_unlock;
	}

	if (dm_suspended_internally_md(md)) {
		/* already internally suspended, wait for internal resume */
		mutex_unlock(&md->suspend_lock);
		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
		if (r)
			return r;
		goto retry;
	}

	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));

	r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);
	if (r)
		goto out_unlock;

	set_bit(DMF_POST_SUSPENDING, &md->flags);
	dm_table_postsuspend_targets(map);
	clear_bit(DMF_POST_SUSPENDING, &md->flags);

out_unlock:
	mutex_unlock(&md->suspend_lock);
	return r;
}
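
/*
 * Illustrative sketch (compiled out): the suspend/swap/resume sequence a
 * caller such as the ioctl resume path might use around dm_swap_table().
 * The new table is assumed to have been loaded and completed elsewhere;
 * the example_* name is hypothetical and error handling is abbreviated.
 */
#if 0
static int example_replace_table(struct mapped_device *md,
				 struct dm_table *new_map)
{
	struct dm_table *old_map;
	int r;

	/* dm_swap_table() requires the device to be suspended. */
	if (!dm_suspended_md(md)) {
		r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
		if (r)
			return r;
	}

	old_map = dm_swap_table(md, new_map);
	if (IS_ERR(old_map))
		return PTR_ERR(old_map);
	if (old_map)
		dm_table_destroy(old_map);

	return dm_resume(md);
}
#endif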

static int __dm_resume(struct mapped_device *md, struct dm_table *map)
{
	if (map) {
		int r = dm_table_resume_targets(map);
		if (r)
			return r;
	}

	dm_queue_flush(md);

	/*
	 * Flushing deferred I/Os must be done after targets are resumed
	 * so that mapping of targets can work correctly.
	 * Request-based dm is queueing the deferred I/Os in its request_queue.
	 */
	if (dm_request_based(md))
		dm_start_queue(md->queue);

	unlock_fs(md);

	return 0;
}

int dm_resume(struct mapped_device *md)
{
	int r;
	struct dm_table *map = NULL;

retry:
	r = -EINVAL;
	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);

	if (!dm_suspended_md(md))
		goto out;

	if (dm_suspended_internally_md(md)) {
		/* already internally suspended, wait for internal resume */
		mutex_unlock(&md->suspend_lock);
		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
		if (r)
			return r;
		goto retry;
	}

	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
	if (!map || !dm_table_get_size(map))
		goto out;

	r = __dm_resume(md, map);
	if (r)
		goto out;

	clear_bit(DMF_SUSPENDED, &md->flags);
out:
	mutex_unlock(&md->suspend_lock);

	return r;
}

/*
 * Internal suspend/resume works like userspace-driven suspend. It waits
 * until all bios finish and prevents issuing new bios to the target drivers.
 * It may be used only from the kernel.
 */

static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;

	lockdep_assert_held(&md->suspend_lock);

	if (md->internal_suspend_count++)
		return; /* nested internal suspend */

	if (dm_suspended_md(md)) {
		set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
		return; /* nest suspend */
	}

	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));

	/*
	 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is
	 * supported.  Properly supporting a TASK_INTERRUPTIBLE internal suspend
	 * would require changing .presuspend to return an error -- avoid this
	 * until there is a need for more elaborate variants of internal suspend.
	 */
	(void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
			    DMF_SUSPENDED_INTERNALLY);

	set_bit(DMF_POST_SUSPENDING, &md->flags);
	dm_table_postsuspend_targets(map);
	clear_bit(DMF_POST_SUSPENDING, &md->flags);
}

static void __dm_internal_resume(struct mapped_device *md)
{
	BUG_ON(!md->internal_suspend_count);

	if (--md->internal_suspend_count)
		return; /* resume from nested internal suspend */

	if (dm_suspended_md(md))
		goto done; /* resume from nested suspend */

	/*
	 * NOTE: existing callers don't need to call dm_table_resume_targets
	 * (which may fail -- so best to avoid it for now by passing NULL map)
	 */
	(void) __dm_resume(md, NULL);

done:
	clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
	smp_mb__after_atomic();
	wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY);
}

void dm_internal_suspend_noflush(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	__dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush);

void dm_internal_resume(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	__dm_internal_resume(md);
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_resume);

/*
 * Fast variants of internal suspend/resume hold md->suspend_lock,
 * which prevents interaction with userspace-driven suspend.
 */

void dm_internal_suspend_fast(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
		return;

	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	synchronize_srcu(&md->io_barrier);
	flush_workqueue(md->wq);
	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);

void dm_internal_resume_fast(struct mapped_device *md)
{
	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
		goto done;

	dm_queue_flush(md);

done:
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
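
/*
 * Illustrative sketch (compiled out): how kernel-internal code is expected to
 * bracket work with the noflush internal suspend/resume pair exported above,
 * so that no bios reach the targets while it runs.  The example_* name is
 * hypothetical.
 */
#if 0
static void example_with_io_quiesced(struct mapped_device *md,
				     void (*work)(struct mapped_device *))
{
	dm_internal_suspend_noflush(md);	/* waits for in-flight bios */
	work(md);				/* device is quiescent here */
	dm_internal_resume(md);			/* allow I/O again */
}
#endif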

/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
		      unsigned cookie)
{
	int r;
	unsigned noio_flag;
	char udev_cookie[DM_COOKIE_LENGTH];
	char *envp[] = { udev_cookie, NULL };

	noio_flag = memalloc_noio_save();

	if (!cookie)
		r = kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
	else {
		snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
			 DM_COOKIE_ENV_VAR_NAME, cookie);
		r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
				       action, envp);
	}

	memalloc_noio_restore(noio_flag);

	return r;
}

uint32_t dm_next_uevent_seq(struct mapped_device *md)
{
	return atomic_add_return(1, &md->uevent_seq);
}

uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}

void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
{
	unsigned long flags;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_add(elist, &md->uevent_list);
	spin_unlock_irqrestore(&md->uevent_lock, flags);
}

/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}
EXPORT_SYMBOL_GPL(dm_disk);

struct kobject *dm_kobject(struct mapped_device *md)
{
	return &md->kobj_holder.kobj;
}

struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{
	struct mapped_device *md;

	md = container_of(kobj, struct mapped_device, kobj_holder.kobj);

	spin_lock(&_minor_lock);
	if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}
	dm_get(md);
out:
	spin_unlock(&_minor_lock);

	return md;
}

int dm_suspended_md(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}

static int dm_post_suspending_md(struct mapped_device *md)
{
	return test_bit(DMF_POST_SUSPENDING, &md->flags);
}

int dm_suspended_internally_md(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
}

int dm_test_deferred_remove_flag(struct mapped_device *md)
{
	return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
}

int dm_suspended(struct dm_target *ti)
{
	return dm_suspended_md(ti->table->md);
}
EXPORT_SYMBOL_GPL(dm_suspended);

int dm_post_suspending(struct dm_target *ti)
{
	return dm_post_suspending_md(ti->table->md);
}
EXPORT_SYMBOL_GPL(dm_post_suspending);

int dm_noflush_suspending(struct dm_target *ti)
{
	return __noflush_suspending(ti->table->md);
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);

struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
					    unsigned integrity, unsigned per_io_data_size,
					    unsigned min_pool_size)
{
	struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
	unsigned int pool_size = 0;
	unsigned int front_pad, io_front_pad;
	int ret;

	if (!pools)
		return NULL;

	switch (type) {
	case DM_TYPE_BIO_BASED:
	case DM_TYPE_DAX_BIO_BASED:
		pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
		front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + DM_TARGET_IO_BIO_OFFSET;
		io_front_pad = roundup(per_io_data_size, __alignof__(struct dm_io)) + DM_IO_BIO_OFFSET;
		ret = bioset_init(&pools->io_bs, pool_size, io_front_pad, 0);
		if (ret)
			goto out;
		if (integrity && bioset_integrity_create(&pools->io_bs, pool_size))
			goto out;
		break;
	case DM_TYPE_REQUEST_BASED:
		pool_size = max(dm_get_reserved_rq_based_ios(), min_pool_size);
		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
		/* per_io_data_size is used for blk-mq pdu at queue allocation */
		break;
	default:
		BUG();
	}

	ret = bioset_init(&pools->bs, pool_size, front_pad, 0);
	if (ret)
		goto out;

	if (integrity && bioset_integrity_create(&pools->bs, pool_size))
		goto out;

	return pools;

out:
	dm_free_md_mempools(pools);

	return NULL;
}

void dm_free_md_mempools(struct dm_md_mempools *pools)
{
	if (!pools)
		return;

	bioset_exit(&pools->bs);
	bioset_exit(&pools->io_bs);

	kfree(pools);
}

struct dm_pr {
	u64	old_key;
	u64	new_key;
	u32	flags;
	bool	fail_early;
};

static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn,
		      void *data)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	struct dm_table *table;
	struct dm_target *ti;
	int ret = -ENOTTY, srcu_idx;

	table = dm_get_live_table(md, &srcu_idx);
	if (!table || !dm_table_get_size(table))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(table) != 1)
		goto out;
	ti = dm_table_get_target(table, 0);

	ret = -EINVAL;
	if (!ti->type->iterate_devices)
		goto out;

	ret = ti->type->iterate_devices(ti, fn, data);
out:
	dm_put_live_table(md, srcu_idx);
	return ret;
}
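
/*
 * Illustrative sketch (compiled out): dm_call_pr() fans a callout over the
 * data devices of the single target, in the same way the persistent
 * reservation handlers below do.  The example callout simply counts the
 * underlying paths; the example_* names are hypothetical.
 */
#if 0
static int example_count_path(struct dm_target *ti, struct dm_dev *dev,
			      sector_t start, sector_t len, void *data)
{
	unsigned int *count = data;

	(*count)++;
	return 0;
}

static int example_count_paths(struct block_device *bdev, unsigned int *count)
{
	*count = 0;
	return dm_call_pr(bdev, example_count_path, count);
}
#endif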

/*
 * For register / unregister we need to manually call out to every path.
 */
static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev,
			    sector_t start, sector_t len, void *data)
{
	struct dm_pr *pr = data;
	const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;

	if (!ops || !ops->pr_register)
		return -EOPNOTSUPP;
	return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags);
}

static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
			  u32 flags)
{
	struct dm_pr pr = {
		.old_key	= old_key,
		.new_key	= new_key,
		.flags		= flags,
		.fail_early	= true,
	};
	int ret;

	ret = dm_call_pr(bdev, __dm_pr_register, &pr);
	if (ret && new_key) {
		/* unregister all paths if we failed to register any path */
		pr.old_key = new_key;
		pr.new_key = 0;
		pr.flags = 0;
		pr.fail_early = false;
		dm_call_pr(bdev, __dm_pr_register, &pr);
	}

	return ret;
}

static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
			 u32 flags)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_reserve)
		r = ops->pr_reserve(bdev, key, type, flags);
	else
		r = -EOPNOTSUPP;
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}

static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_release)
		r = ops->pr_release(bdev, key, type);
	else
		r = -EOPNOTSUPP;
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}

static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
			 enum pr_type type, bool abort)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_preempt)
		r = ops->pr_preempt(bdev, old_key, new_key, type, abort);
	else
		r = -EOPNOTSUPP;
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}

static int dm_pr_clear(struct block_device *bdev, u64 key)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_clear)
		r = ops->pr_clear(bdev, key);
	else
		r = -EOPNOTSUPP;
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}

static const struct pr_ops dm_pr_ops = {
	.pr_register	= dm_pr_register,
	.pr_reserve	= dm_pr_reserve,
	.pr_release	= dm_pr_release,
	.pr_preempt	= dm_pr_preempt,
	.pr_clear	= dm_pr_clear,
};

static const struct block_device_operations dm_blk_dops = {
	.submit_bio = dm_submit_bio,
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
	.report_zones = dm_blk_report_zones,
	.pr_ops = &dm_pr_ops,
	.owner = THIS_MODULE
};

static const struct block_device_operations dm_rq_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
	.pr_ops = &dm_pr_ops,
	.owner = THIS_MODULE
};

static const struct dax_operations dm_dax_ops = {
	.direct_access = dm_dax_direct_access,
	.dax_supported = dm_dax_supported,
	.copy_from_iter = dm_dax_copy_from_iter,
	.copy_to_iter = dm_dax_copy_to_iter,
	.zero_page_range = dm_dax_zero_page_range,
};

/*
 * module hooks
 */
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");

module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");

module_param(dm_numa_node, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");

module_param(swap_bios, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(swap_bios, "Maximum allowed inflight swap IOs");

MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");