1 /* 2 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited. 3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. 4 * 5 * This file is released under the GPL. 6 */ 7 8 #include "dm-core.h" 9 #include "dm-rq.h" 10 #include "dm-uevent.h" 11 12 #include <linux/init.h> 13 #include <linux/module.h> 14 #include <linux/mutex.h> 15 #include <linux/sched/mm.h> 16 #include <linux/sched/signal.h> 17 #include <linux/blkpg.h> 18 #include <linux/bio.h> 19 #include <linux/mempool.h> 20 #include <linux/dax.h> 21 #include <linux/slab.h> 22 #include <linux/idr.h> 23 #include <linux/uio.h> 24 #include <linux/hdreg.h> 25 #include <linux/delay.h> 26 #include <linux/wait.h> 27 #include <linux/pr.h> 28 #include <linux/refcount.h> 29 #include <linux/part_stat.h> 30 #include <linux/blk-crypto.h> 31 32 #define DM_MSG_PREFIX "core" 33 34 /* 35 * Cookies are numeric values sent with CHANGE and REMOVE 36 * uevents while resuming, removing or renaming the device. 37 */ 38 #define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE" 39 #define DM_COOKIE_LENGTH 24 40 41 static const char *_name = DM_NAME; 42 43 static unsigned int major = 0; 44 static unsigned int _major = 0; 45 46 static DEFINE_IDR(_minor_idr); 47 48 static DEFINE_SPINLOCK(_minor_lock); 49 50 static void do_deferred_remove(struct work_struct *w); 51 52 static DECLARE_WORK(deferred_remove_work, do_deferred_remove); 53 54 static struct workqueue_struct *deferred_remove_workqueue; 55 56 atomic_t dm_global_event_nr = ATOMIC_INIT(0); 57 DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq); 58 59 void dm_issue_global_event(void) 60 { 61 atomic_inc(&dm_global_event_nr); 62 wake_up(&dm_global_eventq); 63 } 64 65 /* 66 * One of these is allocated (on-stack) per original bio. 67 */ 68 struct clone_info { 69 struct dm_table *map; 70 struct bio *bio; 71 struct dm_io *io; 72 sector_t sector; 73 unsigned sector_count; 74 }; 75 76 /* 77 * One of these is allocated per clone bio. 78 */ 79 #define DM_TIO_MAGIC 7282014 80 struct dm_target_io { 81 unsigned magic; 82 struct dm_io *io; 83 struct dm_target *ti; 84 unsigned target_bio_nr; 85 unsigned *len_ptr; 86 bool inside_dm_io; 87 struct bio clone; 88 }; 89 90 /* 91 * One of these is allocated per original bio. 92 * It contains the first clone used for that original. 
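 *
 * The dm_target_io (whose last member is the embedded clone bio) is kept as
 * the last member here so that a clone allocated from the md->io_bs front-pad
 * can be mapped back to its dm_io with container_of().  Per-bio data requested
 * by a target via ti->per_io_data_size sits immediately in front of this
 * structure and is reached with dm_per_bio_data(), e.g. (illustrative only,
 * "struct per_bio_data" being a made-up target-private type):
 *
 *	struct per_bio_data *pb = dm_per_bio_data(bio, ti->per_io_data_size);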
93 */ 94 #define DM_IO_MAGIC 5191977 95 struct dm_io { 96 unsigned magic; 97 struct mapped_device *md; 98 blk_status_t status; 99 atomic_t io_count; 100 struct bio *orig_bio; 101 unsigned long start_time; 102 spinlock_t endio_lock; 103 struct dm_stats_aux stats_aux; 104 /* last member of dm_target_io is 'struct bio' */ 105 struct dm_target_io tio; 106 }; 107 108 void *dm_per_bio_data(struct bio *bio, size_t data_size) 109 { 110 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); 111 if (!tio->inside_dm_io) 112 return (char *)bio - offsetof(struct dm_target_io, clone) - data_size; 113 return (char *)bio - offsetof(struct dm_target_io, clone) - offsetof(struct dm_io, tio) - data_size; 114 } 115 EXPORT_SYMBOL_GPL(dm_per_bio_data); 116 117 struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size) 118 { 119 struct dm_io *io = (struct dm_io *)((char *)data + data_size); 120 if (io->magic == DM_IO_MAGIC) 121 return (struct bio *)((char *)io + offsetof(struct dm_io, tio) + offsetof(struct dm_target_io, clone)); 122 BUG_ON(io->magic != DM_TIO_MAGIC); 123 return (struct bio *)((char *)io + offsetof(struct dm_target_io, clone)); 124 } 125 EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data); 126 127 unsigned dm_bio_get_target_bio_nr(const struct bio *bio) 128 { 129 return container_of(bio, struct dm_target_io, clone)->target_bio_nr; 130 } 131 EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr); 132 133 #define MINOR_ALLOCED ((void *)-1) 134 135 /* 136 * Bits for the md->flags field. 137 */ 138 #define DMF_BLOCK_IO_FOR_SUSPEND 0 139 #define DMF_SUSPENDED 1 140 #define DMF_FROZEN 2 141 #define DMF_FREEING 3 142 #define DMF_DELETING 4 143 #define DMF_NOFLUSH_SUSPENDING 5 144 #define DMF_DEFERRED_REMOVE 6 145 #define DMF_SUSPENDED_INTERNALLY 7 146 #define DMF_POST_SUSPENDING 8 147 148 #define DM_NUMA_NODE NUMA_NO_NODE 149 static int dm_numa_node = DM_NUMA_NODE; 150 151 /* 152 * For mempools pre-allocation at the table loading time. 153 */ 154 struct dm_md_mempools { 155 struct bio_set bs; 156 struct bio_set io_bs; 157 }; 158 159 struct table_device { 160 struct list_head list; 161 refcount_t count; 162 struct dm_dev dm_dev; 163 }; 164 165 /* 166 * Bio-based DM's mempools' reserved IOs set by the user. 
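 * (A value of 0 selects the RESERVED_BIO_BASED_IOS default and anything above
 * DM_RESERVED_MAX_IOS is clamped; see dm_get_reserved_bio_based_ios() and
 * __dm_get_module_param() below.)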
 */
#define RESERVED_BIO_BASED_IOS		16
static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;

static int __dm_get_module_param_int(int *module_param, int min, int max)
{
	int param = READ_ONCE(*module_param);
	int modified_param = 0;
	bool modified = true;

	if (param < min)
		modified_param = min;
	else if (param > max)
		modified_param = max;
	else
		modified = false;

	if (modified) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned __dm_get_module_param(unsigned *module_param,
			       unsigned def, unsigned max)
{
	unsigned param = READ_ONCE(*module_param);
	unsigned modified_param = 0;

	if (!param)
		modified_param = def;
	else if (param > max)
		modified_param = max;

	if (modified_param) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned dm_get_reserved_bio_based_ios(void)
{
	return __dm_get_module_param(&reserved_bio_based_ios,
				     RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);

static int dm_get_numa_node(void)
{
	return __dm_get_module_param_int(&dm_numa_node,
					 DM_NUMA_NODE, num_online_nodes() - 1);
}

static int __init local_init(void)
{
	int r;

	r = dm_uevent_init();
	if (r)
		return r;

	deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
	if (!deferred_remove_workqueue) {
		r = -ENOMEM;
		goto out_uevent_exit;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_free_workqueue;

	if (!_major)
		_major = r;

	return 0;

out_free_workqueue:
	destroy_workqueue(deferred_remove_workqueue);
out_uevent_exit:
	dm_uevent_exit();

	return r;
}

static void local_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(deferred_remove_workqueue);

	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_io_init,
	dm_kcopyd_init,
	dm_interface_init,
	dm_statistics_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_io_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
	dm_statistics_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();

	/*
	 * Should be empty by this point.
321 */ 322 idr_destroy(&_minor_idr); 323 } 324 325 /* 326 * Block device functions 327 */ 328 int dm_deleting_md(struct mapped_device *md) 329 { 330 return test_bit(DMF_DELETING, &md->flags); 331 } 332 333 static int dm_blk_open(struct block_device *bdev, fmode_t mode) 334 { 335 struct mapped_device *md; 336 337 spin_lock(&_minor_lock); 338 339 md = bdev->bd_disk->private_data; 340 if (!md) 341 goto out; 342 343 if (test_bit(DMF_FREEING, &md->flags) || 344 dm_deleting_md(md)) { 345 md = NULL; 346 goto out; 347 } 348 349 dm_get(md); 350 atomic_inc(&md->open_count); 351 out: 352 spin_unlock(&_minor_lock); 353 354 return md ? 0 : -ENXIO; 355 } 356 357 static void dm_blk_close(struct gendisk *disk, fmode_t mode) 358 { 359 struct mapped_device *md; 360 361 spin_lock(&_minor_lock); 362 363 md = disk->private_data; 364 if (WARN_ON(!md)) 365 goto out; 366 367 if (atomic_dec_and_test(&md->open_count) && 368 (test_bit(DMF_DEFERRED_REMOVE, &md->flags))) 369 queue_work(deferred_remove_workqueue, &deferred_remove_work); 370 371 dm_put(md); 372 out: 373 spin_unlock(&_minor_lock); 374 } 375 376 int dm_open_count(struct mapped_device *md) 377 { 378 return atomic_read(&md->open_count); 379 } 380 381 /* 382 * Guarantees nothing is using the device before it's deleted. 383 */ 384 int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred) 385 { 386 int r = 0; 387 388 spin_lock(&_minor_lock); 389 390 if (dm_open_count(md)) { 391 r = -EBUSY; 392 if (mark_deferred) 393 set_bit(DMF_DEFERRED_REMOVE, &md->flags); 394 } else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags)) 395 r = -EEXIST; 396 else 397 set_bit(DMF_DELETING, &md->flags); 398 399 spin_unlock(&_minor_lock); 400 401 return r; 402 } 403 404 int dm_cancel_deferred_remove(struct mapped_device *md) 405 { 406 int r = 0; 407 408 spin_lock(&_minor_lock); 409 410 if (test_bit(DMF_DELETING, &md->flags)) 411 r = -EBUSY; 412 else 413 clear_bit(DMF_DEFERRED_REMOVE, &md->flags); 414 415 spin_unlock(&_minor_lock); 416 417 return r; 418 } 419 420 static void do_deferred_remove(struct work_struct *w) 421 { 422 dm_deferred_remove(); 423 } 424 425 static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo) 426 { 427 struct mapped_device *md = bdev->bd_disk->private_data; 428 429 return dm_get_geometry(md, geo); 430 } 431 432 #ifdef CONFIG_BLK_DEV_ZONED 433 int dm_report_zones_cb(struct blk_zone *zone, unsigned int idx, void *data) 434 { 435 struct dm_report_zones_args *args = data; 436 sector_t sector_diff = args->tgt->begin - args->start; 437 438 /* 439 * Ignore zones beyond the target range. 440 */ 441 if (zone->start >= args->start + args->tgt->len) 442 return 0; 443 444 /* 445 * Remap the start sector and write pointer position of the zone 446 * to match its position in the target range. 
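	 * Conventional zones have no valid write pointer, so only sequential
	 * zones are touched: a FULL zone's write pointer is pinned to the
	 * zone end, an EMPTY zone's to the zone start, and otherwise the
	 * write pointer is shifted by the same sector_diff as the zone start.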
447 */ 448 zone->start += sector_diff; 449 if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL) { 450 if (zone->cond == BLK_ZONE_COND_FULL) 451 zone->wp = zone->start + zone->len; 452 else if (zone->cond == BLK_ZONE_COND_EMPTY) 453 zone->wp = zone->start; 454 else 455 zone->wp += sector_diff; 456 } 457 458 args->next_sector = zone->start + zone->len; 459 return args->orig_cb(zone, args->zone_idx++, args->orig_data); 460 } 461 EXPORT_SYMBOL_GPL(dm_report_zones_cb); 462 463 static int dm_blk_report_zones(struct gendisk *disk, sector_t sector, 464 unsigned int nr_zones, report_zones_cb cb, void *data) 465 { 466 struct mapped_device *md = disk->private_data; 467 struct dm_table *map; 468 int srcu_idx, ret; 469 struct dm_report_zones_args args = { 470 .next_sector = sector, 471 .orig_data = data, 472 .orig_cb = cb, 473 }; 474 475 if (dm_suspended_md(md)) 476 return -EAGAIN; 477 478 map = dm_get_live_table(md, &srcu_idx); 479 if (!map) { 480 ret = -EIO; 481 goto out; 482 } 483 484 do { 485 struct dm_target *tgt; 486 487 tgt = dm_table_find_target(map, args.next_sector); 488 if (WARN_ON_ONCE(!tgt->type->report_zones)) { 489 ret = -EIO; 490 goto out; 491 } 492 493 args.tgt = tgt; 494 ret = tgt->type->report_zones(tgt, &args, 495 nr_zones - args.zone_idx); 496 if (ret < 0) 497 goto out; 498 } while (args.zone_idx < nr_zones && 499 args.next_sector < get_capacity(disk)); 500 501 ret = args.zone_idx; 502 out: 503 dm_put_live_table(md, srcu_idx); 504 return ret; 505 } 506 #else 507 #define dm_blk_report_zones NULL 508 #endif /* CONFIG_BLK_DEV_ZONED */ 509 510 static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx, 511 struct block_device **bdev) 512 __acquires(md->io_barrier) 513 { 514 struct dm_target *tgt; 515 struct dm_table *map; 516 int r; 517 518 retry: 519 r = -ENOTTY; 520 map = dm_get_live_table(md, srcu_idx); 521 if (!map || !dm_table_get_size(map)) 522 return r; 523 524 /* We only support devices that have a single target */ 525 if (dm_table_get_num_targets(map) != 1) 526 return r; 527 528 tgt = dm_table_get_target(map, 0); 529 if (!tgt->type->prepare_ioctl) 530 return r; 531 532 if (dm_suspended_md(md)) 533 return -EAGAIN; 534 535 r = tgt->type->prepare_ioctl(tgt, bdev); 536 if (r == -ENOTCONN && !fatal_signal_pending(current)) { 537 dm_put_live_table(md, *srcu_idx); 538 msleep(10); 539 goto retry; 540 } 541 542 return r; 543 } 544 545 static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx) 546 __releases(md->io_barrier) 547 { 548 dm_put_live_table(md, srcu_idx); 549 } 550 551 static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode, 552 unsigned int cmd, unsigned long arg) 553 { 554 struct mapped_device *md = bdev->bd_disk->private_data; 555 int r, srcu_idx; 556 557 r = dm_prepare_ioctl(md, &srcu_idx, &bdev); 558 if (r < 0) 559 goto out; 560 561 if (r > 0) { 562 /* 563 * Target determined this ioctl is being issued against a 564 * subset of the parent bdev; require extra privileges. 
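		 * (A target's prepare_ioctl method signals this by returning
		 * a positive value, e.g. a linear target that maps only a
		 * part of its underlying device.)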
565 */ 566 if (!capable(CAP_SYS_RAWIO)) { 567 DMWARN_LIMIT( 568 "%s: sending ioctl %x to DM device without required privilege.", 569 current->comm, cmd); 570 r = -ENOIOCTLCMD; 571 goto out; 572 } 573 } 574 575 r = __blkdev_driver_ioctl(bdev, mode, cmd, arg); 576 out: 577 dm_unprepare_ioctl(md, srcu_idx); 578 return r; 579 } 580 581 u64 dm_start_time_ns_from_clone(struct bio *bio) 582 { 583 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); 584 struct dm_io *io = tio->io; 585 586 return jiffies_to_nsecs(io->start_time); 587 } 588 EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone); 589 590 static void start_io_acct(struct dm_io *io) 591 { 592 struct mapped_device *md = io->md; 593 struct bio *bio = io->orig_bio; 594 595 io->start_time = bio_start_io_acct(bio); 596 if (unlikely(dm_stats_used(&md->stats))) 597 dm_stats_account_io(&md->stats, bio_data_dir(bio), 598 bio->bi_iter.bi_sector, bio_sectors(bio), 599 false, 0, &io->stats_aux); 600 } 601 602 static void end_io_acct(struct dm_io *io) 603 { 604 struct mapped_device *md = io->md; 605 struct bio *bio = io->orig_bio; 606 unsigned long duration = jiffies - io->start_time; 607 608 bio_end_io_acct(bio, io->start_time); 609 610 if (unlikely(dm_stats_used(&md->stats))) 611 dm_stats_account_io(&md->stats, bio_data_dir(bio), 612 bio->bi_iter.bi_sector, bio_sectors(bio), 613 true, duration, &io->stats_aux); 614 615 /* nudge anyone waiting on suspend queue */ 616 if (unlikely(wq_has_sleeper(&md->wait))) 617 wake_up(&md->wait); 618 } 619 620 static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio) 621 { 622 struct dm_io *io; 623 struct dm_target_io *tio; 624 struct bio *clone; 625 626 clone = bio_alloc_bioset(GFP_NOIO, 0, &md->io_bs); 627 if (!clone) 628 return NULL; 629 630 tio = container_of(clone, struct dm_target_io, clone); 631 tio->inside_dm_io = true; 632 tio->io = NULL; 633 634 io = container_of(tio, struct dm_io, tio); 635 io->magic = DM_IO_MAGIC; 636 io->status = 0; 637 atomic_set(&io->io_count, 1); 638 io->orig_bio = bio; 639 io->md = md; 640 spin_lock_init(&io->endio_lock); 641 642 start_io_acct(io); 643 644 return io; 645 } 646 647 static void free_io(struct mapped_device *md, struct dm_io *io) 648 { 649 bio_put(&io->tio.clone); 650 } 651 652 static struct dm_target_io *alloc_tio(struct clone_info *ci, struct dm_target *ti, 653 unsigned target_bio_nr, gfp_t gfp_mask) 654 { 655 struct dm_target_io *tio; 656 657 if (!ci->io->tio.io) { 658 /* the dm_target_io embedded in ci->io is available */ 659 tio = &ci->io->tio; 660 } else { 661 struct bio *clone = bio_alloc_bioset(gfp_mask, 0, &ci->io->md->bs); 662 if (!clone) 663 return NULL; 664 665 tio = container_of(clone, struct dm_target_io, clone); 666 tio->inside_dm_io = false; 667 } 668 669 tio->magic = DM_TIO_MAGIC; 670 tio->io = ci->io; 671 tio->ti = ti; 672 tio->target_bio_nr = target_bio_nr; 673 674 return tio; 675 } 676 677 static void free_tio(struct dm_target_io *tio) 678 { 679 if (tio->inside_dm_io) 680 return; 681 bio_put(&tio->clone); 682 } 683 684 /* 685 * Add the bio to the list of deferred io. 
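 * Deferred bios are resubmitted by dm_wq_work() once
 * DMF_BLOCK_IO_FOR_SUSPEND has been cleared again (see dm_queue_flush()).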
686 */ 687 static void queue_io(struct mapped_device *md, struct bio *bio) 688 { 689 unsigned long flags; 690 691 spin_lock_irqsave(&md->deferred_lock, flags); 692 bio_list_add(&md->deferred, bio); 693 spin_unlock_irqrestore(&md->deferred_lock, flags); 694 queue_work(md->wq, &md->work); 695 } 696 697 /* 698 * Everyone (including functions in this file), should use this 699 * function to access the md->map field, and make sure they call 700 * dm_put_live_table() when finished. 701 */ 702 struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier) 703 { 704 *srcu_idx = srcu_read_lock(&md->io_barrier); 705 706 return srcu_dereference(md->map, &md->io_barrier); 707 } 708 709 void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier) 710 { 711 srcu_read_unlock(&md->io_barrier, srcu_idx); 712 } 713 714 void dm_sync_table(struct mapped_device *md) 715 { 716 synchronize_srcu(&md->io_barrier); 717 synchronize_rcu_expedited(); 718 } 719 720 /* 721 * A fast alternative to dm_get_live_table/dm_put_live_table. 722 * The caller must not block between these two functions. 723 */ 724 static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU) 725 { 726 rcu_read_lock(); 727 return rcu_dereference(md->map); 728 } 729 730 static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU) 731 { 732 rcu_read_unlock(); 733 } 734 735 static char *_dm_claim_ptr = "I belong to device-mapper"; 736 737 /* 738 * Open a table device so we can use it as a map destination. 739 */ 740 static int open_table_device(struct table_device *td, dev_t dev, 741 struct mapped_device *md) 742 { 743 struct block_device *bdev; 744 745 int r; 746 747 BUG_ON(td->dm_dev.bdev); 748 749 bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _dm_claim_ptr); 750 if (IS_ERR(bdev)) 751 return PTR_ERR(bdev); 752 753 r = bd_link_disk_holder(bdev, dm_disk(md)); 754 if (r) { 755 blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL); 756 return r; 757 } 758 759 td->dm_dev.bdev = bdev; 760 td->dm_dev.dax_dev = dax_get_by_host(bdev->bd_disk->disk_name); 761 return 0; 762 } 763 764 /* 765 * Close a table device that we've been using. 
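 * Targets normally reach these helpers through dm_get_device()/dm_put_device()
 * (dm-table.c), which wrap dm_get_table_device()/dm_put_table_device() and so
 * keep the per-device reference count below balanced.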
 */
static void close_table_device(struct table_device *td, struct mapped_device *md)
{
	if (!td->dm_dev.bdev)
		return;

	bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
	blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
	put_dax(td->dm_dev.dax_dev);
	td->dm_dev.bdev = NULL;
	td->dm_dev.dax_dev = NULL;
}

static struct table_device *find_table_device(struct list_head *l, dev_t dev,
					      fmode_t mode)
{
	struct table_device *td;

	list_for_each_entry(td, l, list)
		if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
			return td;

	return NULL;
}

int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
			struct dm_dev **result)
{
	int r;
	struct table_device *td;

	mutex_lock(&md->table_devices_lock);
	td = find_table_device(&md->table_devices, dev, mode);
	if (!td) {
		td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
		if (!td) {
			mutex_unlock(&md->table_devices_lock);
			return -ENOMEM;
		}

		td->dm_dev.mode = mode;
		td->dm_dev.bdev = NULL;

		if ((r = open_table_device(td, dev, md))) {
			mutex_unlock(&md->table_devices_lock);
			kfree(td);
			return r;
		}

		format_dev_t(td->dm_dev.name, dev);

		refcount_set(&td->count, 1);
		list_add(&td->list, &md->table_devices);
	} else {
		refcount_inc(&td->count);
	}
	mutex_unlock(&md->table_devices_lock);

	*result = &td->dm_dev;
	return 0;
}
EXPORT_SYMBOL_GPL(dm_get_table_device);

void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
{
	struct table_device *td = container_of(d, struct table_device, dm_dev);

	mutex_lock(&md->table_devices_lock);
	if (refcount_dec_and_test(&td->count)) {
		close_table_device(td, md);
		list_del(&td->list);
		kfree(td);
	}
	mutex_unlock(&md->table_devices_lock);
}
EXPORT_SYMBOL(dm_put_table_device);

static void free_table_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct table_device *td = list_entry(tmp, struct table_device, list);

		DMWARN("dm_destroy: %s still exists with %d references",
		       td->dm_dev.name, refcount_read(&td->count));
		kfree(td);
	}
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
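 *
 * io_count starts at 1 in alloc_io(); __map_bio() takes one extra reference
 * per clone it issues and __split_and_process_bio() drops the initial
 * reference once it has finished splitting, so the original bio is completed
 * only after the last outstanding clone has ended.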
891 */ 892 static void dec_pending(struct dm_io *io, blk_status_t error) 893 { 894 unsigned long flags; 895 blk_status_t io_error; 896 struct bio *bio; 897 struct mapped_device *md = io->md; 898 899 /* Push-back supersedes any I/O errors */ 900 if (unlikely(error)) { 901 spin_lock_irqsave(&io->endio_lock, flags); 902 if (!(io->status == BLK_STS_DM_REQUEUE && __noflush_suspending(md))) 903 io->status = error; 904 spin_unlock_irqrestore(&io->endio_lock, flags); 905 } 906 907 if (atomic_dec_and_test(&io->io_count)) { 908 if (io->status == BLK_STS_DM_REQUEUE) { 909 /* 910 * Target requested pushing back the I/O. 911 */ 912 spin_lock_irqsave(&md->deferred_lock, flags); 913 if (__noflush_suspending(md)) 914 /* NOTE early return due to BLK_STS_DM_REQUEUE below */ 915 bio_list_add_head(&md->deferred, io->orig_bio); 916 else 917 /* noflush suspend was interrupted. */ 918 io->status = BLK_STS_IOERR; 919 spin_unlock_irqrestore(&md->deferred_lock, flags); 920 } 921 922 io_error = io->status; 923 bio = io->orig_bio; 924 end_io_acct(io); 925 free_io(md, io); 926 927 if (io_error == BLK_STS_DM_REQUEUE) 928 return; 929 930 if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) { 931 /* 932 * Preflush done for flush with data, reissue 933 * without REQ_PREFLUSH. 934 */ 935 bio->bi_opf &= ~REQ_PREFLUSH; 936 queue_io(md, bio); 937 } else { 938 /* done with normal IO or empty flush */ 939 if (io_error) 940 bio->bi_status = io_error; 941 bio_endio(bio); 942 } 943 } 944 } 945 946 void disable_discard(struct mapped_device *md) 947 { 948 struct queue_limits *limits = dm_get_queue_limits(md); 949 950 /* device doesn't really support DISCARD, disable it */ 951 limits->max_discard_sectors = 0; 952 blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue); 953 } 954 955 void disable_write_same(struct mapped_device *md) 956 { 957 struct queue_limits *limits = dm_get_queue_limits(md); 958 959 /* device doesn't really support WRITE SAME, disable it */ 960 limits->max_write_same_sectors = 0; 961 } 962 963 void disable_write_zeroes(struct mapped_device *md) 964 { 965 struct queue_limits *limits = dm_get_queue_limits(md); 966 967 /* device doesn't really support WRITE ZEROES, disable it */ 968 limits->max_write_zeroes_sectors = 0; 969 } 970 971 static void clone_endio(struct bio *bio) 972 { 973 blk_status_t error = bio->bi_status; 974 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); 975 struct dm_io *io = tio->io; 976 struct mapped_device *md = tio->io->md; 977 dm_endio_fn endio = tio->ti->type->end_io; 978 struct bio *orig_bio = io->orig_bio; 979 980 if (unlikely(error == BLK_STS_TARGET)) { 981 if (bio_op(bio) == REQ_OP_DISCARD && 982 !bio->bi_disk->queue->limits.max_discard_sectors) 983 disable_discard(md); 984 else if (bio_op(bio) == REQ_OP_WRITE_SAME && 985 !bio->bi_disk->queue->limits.max_write_same_sectors) 986 disable_write_same(md); 987 else if (bio_op(bio) == REQ_OP_WRITE_ZEROES && 988 !bio->bi_disk->queue->limits.max_write_zeroes_sectors) 989 disable_write_zeroes(md); 990 } 991 992 /* 993 * For zone-append bios get offset in zone of the written 994 * sector and add that to the original bio sector pos. 
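	 * Illustrative example (assuming the power-of-two zone size the
	 * block layer requires): with zone_sectors == 524288 and a clone
	 * written at sector 1048976, the offset within the zone is
	 * 1048976 & (524288 - 1) == 400, which is added below.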
995 */ 996 if (bio_op(orig_bio) == REQ_OP_ZONE_APPEND) { 997 sector_t written_sector = bio->bi_iter.bi_sector; 998 struct request_queue *q = orig_bio->bi_disk->queue; 999 u64 mask = (u64)blk_queue_zone_sectors(q) - 1; 1000 1001 orig_bio->bi_iter.bi_sector += written_sector & mask; 1002 } 1003 1004 if (endio) { 1005 int r = endio(tio->ti, bio, &error); 1006 switch (r) { 1007 case DM_ENDIO_REQUEUE: 1008 error = BLK_STS_DM_REQUEUE; 1009 fallthrough; 1010 case DM_ENDIO_DONE: 1011 break; 1012 case DM_ENDIO_INCOMPLETE: 1013 /* The target will handle the io */ 1014 return; 1015 default: 1016 DMWARN("unimplemented target endio return value: %d", r); 1017 BUG(); 1018 } 1019 } 1020 1021 free_tio(tio); 1022 dec_pending(io, error); 1023 } 1024 1025 /* 1026 * Return maximum size of I/O possible at the supplied sector up to the current 1027 * target boundary. 1028 */ 1029 static inline sector_t max_io_len_target_boundary(struct dm_target *ti, 1030 sector_t target_offset) 1031 { 1032 return ti->len - target_offset; 1033 } 1034 1035 static sector_t max_io_len(struct dm_target *ti, sector_t sector) 1036 { 1037 sector_t target_offset = dm_target_offset(ti, sector); 1038 sector_t len = max_io_len_target_boundary(ti, target_offset); 1039 sector_t max_len; 1040 1041 /* 1042 * Does the target need to split even further? 1043 * - q->limits.chunk_sectors reflects ti->max_io_len so 1044 * blk_max_size_offset() provides required splitting. 1045 * - blk_max_size_offset() also respects q->limits.max_sectors 1046 */ 1047 max_len = blk_max_size_offset(ti->table->md->queue, 1048 target_offset); 1049 if (len > max_len) 1050 len = max_len; 1051 1052 return len; 1053 } 1054 1055 int dm_set_target_max_io_len(struct dm_target *ti, sector_t len) 1056 { 1057 if (len > UINT_MAX) { 1058 DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)", 1059 (unsigned long long)len, UINT_MAX); 1060 ti->error = "Maximum size of target IO is too large"; 1061 return -EINVAL; 1062 } 1063 1064 ti->max_io_len = (uint32_t) len; 1065 1066 return 0; 1067 } 1068 EXPORT_SYMBOL_GPL(dm_set_target_max_io_len); 1069 1070 static struct dm_target *dm_dax_get_live_target(struct mapped_device *md, 1071 sector_t sector, int *srcu_idx) 1072 __acquires(md->io_barrier) 1073 { 1074 struct dm_table *map; 1075 struct dm_target *ti; 1076 1077 map = dm_get_live_table(md, srcu_idx); 1078 if (!map) 1079 return NULL; 1080 1081 ti = dm_table_find_target(map, sector); 1082 if (!ti) 1083 return NULL; 1084 1085 return ti; 1086 } 1087 1088 static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, 1089 long nr_pages, void **kaddr, pfn_t *pfn) 1090 { 1091 struct mapped_device *md = dax_get_private(dax_dev); 1092 sector_t sector = pgoff * PAGE_SECTORS; 1093 struct dm_target *ti; 1094 long len, ret = -EIO; 1095 int srcu_idx; 1096 1097 ti = dm_dax_get_live_target(md, sector, &srcu_idx); 1098 1099 if (!ti) 1100 goto out; 1101 if (!ti->type->direct_access) 1102 goto out; 1103 len = max_io_len(ti, sector) / PAGE_SECTORS; 1104 if (len < 1) 1105 goto out; 1106 nr_pages = min(len, nr_pages); 1107 ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn); 1108 1109 out: 1110 dm_put_live_table(md, srcu_idx); 1111 1112 return ret; 1113 } 1114 1115 static bool dm_dax_supported(struct dax_device *dax_dev, struct block_device *bdev, 1116 int blocksize, sector_t start, sector_t len) 1117 { 1118 struct mapped_device *md = dax_get_private(dax_dev); 1119 struct dm_table *map; 1120 bool ret = false; 1121 int srcu_idx; 1122 1123 map = dm_get_live_table(md, 
				     &srcu_idx);
	if (!map)
		goto out;

	ret = dm_table_supports_dax(map, device_supports_dax, &blocksize);

out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
				    void *addr, size_t bytes, struct iov_iter *i)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long ret = 0;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->dax_copy_from_iter) {
		ret = copy_from_iter(addr, bytes, i);
		goto out;
	}
	ret = ti->type->dax_copy_from_iter(ti, pgoff, addr, bytes, i);
out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

static size_t dm_dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
				  void *addr, size_t bytes, struct iov_iter *i)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long ret = 0;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->dax_copy_to_iter) {
		ret = copy_to_iter(addr, bytes, i);
		goto out;
	}
	ret = ti->type->dax_copy_to_iter(ti, pgoff, addr, bytes, i);
out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
				  size_t nr_pages)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	int ret = -EIO;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (WARN_ON(!ti->type->dax_zero_page_range)) {
		/*
		 * ->zero_page_range() is a mandatory dax operation. If we are
		 * here, something is wrong.
		 */
		dm_put_live_table(md, srcu_idx);
		goto out;
	}
	ret = ti->type->dax_zero_page_range(ti, pgoff, nr_pages);

out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

/*
 * A target may call dm_accept_partial_bio only from the map routine. It is
 * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_RESET,
 * REQ_OP_ZONE_OPEN, REQ_OP_ZONE_CLOSE and REQ_OP_ZONE_FINISH.
 *
 * dm_accept_partial_bio informs the dm that the target only wants to process
 * additional n_sectors sectors of the bio and the rest of the data should be
 * sent in a subsequent bio.
 *
 * A diagram that explains the arithmetic:
 * +--------------------+---------------+-------+
 * |         1          |       2       |   3   |
 * +--------------------+---------------+-------+
 *
 * <-------------- *tio->len_ptr --------------->
 *                      <------- bi_size ------->
 *                      <-- n_sectors -->
 *
 * Region 1 was already iterated over with bio_advance or similar function.
 *	(it may be empty if the target doesn't use bio_advance)
 * Region 2 is the remaining bio size that the target wants to process.
 *	(it may be empty if region 1 is non-empty, although there is no reason
 *	 to make it empty)
 * The target requires that region 3 be sent in the next bio.
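 *
 * For example (an illustrative sketch only, not lifted from a real target):
 * a map method that cannot cross a "chunk" boundary might do
 *
 *	sector_t max = chunk_end_sector - bio->bi_iter.bi_sector;
 *
 *	if (bio_sectors(bio) > max)
 *		dm_accept_partial_bio(bio, max);
 *
 * then remap the bio and return DM_MAPIO_REMAPPED as usual; region 3 is
 * resubmitted by DM as a further bio.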
1236 * 1237 * If the target wants to receive multiple copies of the bio (via num_*bios, etc), 1238 * the partially processed part (the sum of regions 1+2) must be the same for all 1239 * copies of the bio. 1240 */ 1241 void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors) 1242 { 1243 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); 1244 unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT; 1245 BUG_ON(bio->bi_opf & REQ_PREFLUSH); 1246 BUG_ON(bi_size > *tio->len_ptr); 1247 BUG_ON(n_sectors > bi_size); 1248 *tio->len_ptr -= bi_size - n_sectors; 1249 bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT; 1250 } 1251 EXPORT_SYMBOL_GPL(dm_accept_partial_bio); 1252 1253 static blk_qc_t __map_bio(struct dm_target_io *tio) 1254 { 1255 int r; 1256 sector_t sector; 1257 struct bio *clone = &tio->clone; 1258 struct dm_io *io = tio->io; 1259 struct dm_target *ti = tio->ti; 1260 blk_qc_t ret = BLK_QC_T_NONE; 1261 1262 clone->bi_end_io = clone_endio; 1263 1264 /* 1265 * Map the clone. If r == 0 we don't need to do 1266 * anything, the target has assumed ownership of 1267 * this io. 1268 */ 1269 atomic_inc(&io->io_count); 1270 sector = clone->bi_iter.bi_sector; 1271 1272 r = ti->type->map(ti, clone); 1273 switch (r) { 1274 case DM_MAPIO_SUBMITTED: 1275 break; 1276 case DM_MAPIO_REMAPPED: 1277 /* the bio has been remapped so dispatch it */ 1278 trace_block_bio_remap(clone->bi_disk->queue, clone, 1279 bio_dev(io->orig_bio), sector); 1280 ret = submit_bio_noacct(clone); 1281 break; 1282 case DM_MAPIO_KILL: 1283 free_tio(tio); 1284 dec_pending(io, BLK_STS_IOERR); 1285 break; 1286 case DM_MAPIO_REQUEUE: 1287 free_tio(tio); 1288 dec_pending(io, BLK_STS_DM_REQUEUE); 1289 break; 1290 default: 1291 DMWARN("unimplemented target map return value: %d", r); 1292 BUG(); 1293 } 1294 1295 return ret; 1296 } 1297 1298 static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len) 1299 { 1300 bio->bi_iter.bi_sector = sector; 1301 bio->bi_iter.bi_size = to_bytes(len); 1302 } 1303 1304 /* 1305 * Creates a bio that consists of range of complete bvecs. 
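 * The clone shares the original bio's bvec array (__bio_clone_fast()); only
 * the iterator is trimmed to the requested range, and the crypt context and
 * any integrity payload are cloned separately, so no data pages are copied.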
1306 */ 1307 static int clone_bio(struct dm_target_io *tio, struct bio *bio, 1308 sector_t sector, unsigned len) 1309 { 1310 struct bio *clone = &tio->clone; 1311 int r; 1312 1313 __bio_clone_fast(clone, bio); 1314 1315 r = bio_crypt_clone(clone, bio, GFP_NOIO); 1316 if (r < 0) 1317 return r; 1318 1319 if (bio_integrity(bio)) { 1320 if (unlikely(!dm_target_has_integrity(tio->ti->type) && 1321 !dm_target_passes_integrity(tio->ti->type))) { 1322 DMWARN("%s: the target %s doesn't support integrity data.", 1323 dm_device_name(tio->io->md), 1324 tio->ti->type->name); 1325 return -EIO; 1326 } 1327 1328 r = bio_integrity_clone(clone, bio, GFP_NOIO); 1329 if (r < 0) 1330 return r; 1331 } 1332 1333 bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector)); 1334 clone->bi_iter.bi_size = to_bytes(len); 1335 1336 if (bio_integrity(bio)) 1337 bio_integrity_trim(clone); 1338 1339 return 0; 1340 } 1341 1342 static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci, 1343 struct dm_target *ti, unsigned num_bios) 1344 { 1345 struct dm_target_io *tio; 1346 int try; 1347 1348 if (!num_bios) 1349 return; 1350 1351 if (num_bios == 1) { 1352 tio = alloc_tio(ci, ti, 0, GFP_NOIO); 1353 bio_list_add(blist, &tio->clone); 1354 return; 1355 } 1356 1357 for (try = 0; try < 2; try++) { 1358 int bio_nr; 1359 struct bio *bio; 1360 1361 if (try) 1362 mutex_lock(&ci->io->md->table_devices_lock); 1363 for (bio_nr = 0; bio_nr < num_bios; bio_nr++) { 1364 tio = alloc_tio(ci, ti, bio_nr, try ? GFP_NOIO : GFP_NOWAIT); 1365 if (!tio) 1366 break; 1367 1368 bio_list_add(blist, &tio->clone); 1369 } 1370 if (try) 1371 mutex_unlock(&ci->io->md->table_devices_lock); 1372 if (bio_nr == num_bios) 1373 return; 1374 1375 while ((bio = bio_list_pop(blist))) { 1376 tio = container_of(bio, struct dm_target_io, clone); 1377 free_tio(tio); 1378 } 1379 } 1380 } 1381 1382 static blk_qc_t __clone_and_map_simple_bio(struct clone_info *ci, 1383 struct dm_target_io *tio, unsigned *len) 1384 { 1385 struct bio *clone = &tio->clone; 1386 1387 tio->len_ptr = len; 1388 1389 __bio_clone_fast(clone, ci->bio); 1390 if (len) 1391 bio_setup_sector(clone, ci->sector, *len); 1392 1393 return __map_bio(tio); 1394 } 1395 1396 static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti, 1397 unsigned num_bios, unsigned *len) 1398 { 1399 struct bio_list blist = BIO_EMPTY_LIST; 1400 struct bio *bio; 1401 struct dm_target_io *tio; 1402 1403 alloc_multiple_bios(&blist, ci, ti, num_bios); 1404 1405 while ((bio = bio_list_pop(&blist))) { 1406 tio = container_of(bio, struct dm_target_io, clone); 1407 (void) __clone_and_map_simple_bio(ci, tio, len); 1408 } 1409 } 1410 1411 static int __send_empty_flush(struct clone_info *ci) 1412 { 1413 unsigned target_nr = 0; 1414 struct dm_target *ti; 1415 struct bio flush_bio; 1416 1417 /* 1418 * Use an on-stack bio for this, it's safe since we don't 1419 * need to reference it after submit. It's just used as 1420 * the basis for the clone(s). 1421 */ 1422 bio_init(&flush_bio, NULL, 0); 1423 flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC; 1424 ci->bio = &flush_bio; 1425 ci->sector_count = 0; 1426 1427 /* 1428 * Empty flush uses a statically initialized bio, as the base for 1429 * cloning. However, blkg association requires that a bdev is 1430 * associated with a gendisk, which doesn't happen until the bdev is 1431 * opened. So, blkg association is done at issue time of the flush 1432 * rather than when the device is created in alloc_dev(). 
1433 */ 1434 bio_set_dev(ci->bio, ci->io->md->bdev); 1435 1436 BUG_ON(bio_has_data(ci->bio)); 1437 while ((ti = dm_table_get_target(ci->map, target_nr++))) 1438 __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL); 1439 1440 bio_uninit(ci->bio); 1441 return 0; 1442 } 1443 1444 static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti, 1445 sector_t sector, unsigned *len) 1446 { 1447 struct bio *bio = ci->bio; 1448 struct dm_target_io *tio; 1449 int r; 1450 1451 tio = alloc_tio(ci, ti, 0, GFP_NOIO); 1452 tio->len_ptr = len; 1453 r = clone_bio(tio, bio, sector, *len); 1454 if (r < 0) { 1455 free_tio(tio); 1456 return r; 1457 } 1458 (void) __map_bio(tio); 1459 1460 return 0; 1461 } 1462 1463 static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti, 1464 unsigned num_bios) 1465 { 1466 unsigned len; 1467 1468 /* 1469 * Even though the device advertised support for this type of 1470 * request, that does not mean every target supports it, and 1471 * reconfiguration might also have changed that since the 1472 * check was performed. 1473 */ 1474 if (!num_bios) 1475 return -EOPNOTSUPP; 1476 1477 len = min_t(sector_t, ci->sector_count, 1478 max_io_len_target_boundary(ti, dm_target_offset(ti, ci->sector))); 1479 1480 __send_duplicate_bios(ci, ti, num_bios, &len); 1481 1482 ci->sector += len; 1483 ci->sector_count -= len; 1484 1485 return 0; 1486 } 1487 1488 static bool is_abnormal_io(struct bio *bio) 1489 { 1490 bool r = false; 1491 1492 switch (bio_op(bio)) { 1493 case REQ_OP_DISCARD: 1494 case REQ_OP_SECURE_ERASE: 1495 case REQ_OP_WRITE_SAME: 1496 case REQ_OP_WRITE_ZEROES: 1497 r = true; 1498 break; 1499 } 1500 1501 return r; 1502 } 1503 1504 static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti, 1505 int *result) 1506 { 1507 struct bio *bio = ci->bio; 1508 unsigned num_bios = 0; 1509 1510 switch (bio_op(bio)) { 1511 case REQ_OP_DISCARD: 1512 num_bios = ti->num_discard_bios; 1513 break; 1514 case REQ_OP_SECURE_ERASE: 1515 num_bios = ti->num_secure_erase_bios; 1516 break; 1517 case REQ_OP_WRITE_SAME: 1518 num_bios = ti->num_write_same_bios; 1519 break; 1520 case REQ_OP_WRITE_ZEROES: 1521 num_bios = ti->num_write_zeroes_bios; 1522 break; 1523 default: 1524 return false; 1525 } 1526 1527 *result = __send_changing_extent_only(ci, ti, num_bios); 1528 return true; 1529 } 1530 1531 /* 1532 * Select the correct strategy for processing a non-flush bio. 1533 */ 1534 static int __split_and_process_non_flush(struct clone_info *ci) 1535 { 1536 struct dm_target *ti; 1537 unsigned len; 1538 int r; 1539 1540 ti = dm_table_find_target(ci->map, ci->sector); 1541 if (!ti) 1542 return -EIO; 1543 1544 if (__process_abnormal_io(ci, ti, &r)) 1545 return r; 1546 1547 len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count); 1548 1549 r = __clone_and_map_data_bio(ci, ti, ci->sector, &len); 1550 if (r < 0) 1551 return r; 1552 1553 ci->sector += len; 1554 ci->sector_count -= len; 1555 1556 return 0; 1557 } 1558 1559 static void init_clone_info(struct clone_info *ci, struct mapped_device *md, 1560 struct dm_table *map, struct bio *bio) 1561 { 1562 ci->map = map; 1563 ci->io = alloc_io(md, bio); 1564 ci->sector = bio->bi_iter.bi_sector; 1565 } 1566 1567 #define __dm_part_stat_sub(part, field, subnd) \ 1568 (part_stat_get(part, field) -= (subnd)) 1569 1570 /* 1571 * Entry point to split a bio into clones and submit them to the targets. 
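 * A dm_io is allocated for the original bio, the targets covering it are
 * walked range by range, and if the whole bio cannot be handled in one pass
 * the remainder is split off and fed back through submit_bio_noacct() so that
 * clones already issued are processed first.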
1572 */ 1573 static blk_qc_t __split_and_process_bio(struct mapped_device *md, 1574 struct dm_table *map, struct bio *bio) 1575 { 1576 struct clone_info ci; 1577 blk_qc_t ret = BLK_QC_T_NONE; 1578 int error = 0; 1579 1580 init_clone_info(&ci, md, map, bio); 1581 1582 if (bio->bi_opf & REQ_PREFLUSH) { 1583 error = __send_empty_flush(&ci); 1584 /* dec_pending submits any data associated with flush */ 1585 } else if (op_is_zone_mgmt(bio_op(bio))) { 1586 ci.bio = bio; 1587 ci.sector_count = 0; 1588 error = __split_and_process_non_flush(&ci); 1589 } else { 1590 ci.bio = bio; 1591 ci.sector_count = bio_sectors(bio); 1592 while (ci.sector_count && !error) { 1593 error = __split_and_process_non_flush(&ci); 1594 if (current->bio_list && ci.sector_count && !error) { 1595 /* 1596 * Remainder must be passed to submit_bio_noacct() 1597 * so that it gets handled *after* bios already submitted 1598 * have been completely processed. 1599 * We take a clone of the original to store in 1600 * ci.io->orig_bio to be used by end_io_acct() and 1601 * for dec_pending to use for completion handling. 1602 */ 1603 struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count, 1604 GFP_NOIO, &md->queue->bio_split); 1605 ci.io->orig_bio = b; 1606 1607 /* 1608 * Adjust IO stats for each split, otherwise upon queue 1609 * reentry there will be redundant IO accounting. 1610 * NOTE: this is a stop-gap fix, a proper fix involves 1611 * significant refactoring of DM core's bio splitting 1612 * (by eliminating DM's splitting and just using bio_split) 1613 */ 1614 part_stat_lock(); 1615 __dm_part_stat_sub(&dm_disk(md)->part0, 1616 sectors[op_stat_group(bio_op(bio))], ci.sector_count); 1617 part_stat_unlock(); 1618 1619 bio_chain(b, bio); 1620 trace_block_split(md->queue, b, bio->bi_iter.bi_sector); 1621 ret = submit_bio_noacct(bio); 1622 break; 1623 } 1624 } 1625 } 1626 1627 /* drop the extra reference count */ 1628 dec_pending(ci.io, errno_to_blk_status(error)); 1629 return ret; 1630 } 1631 1632 static blk_qc_t dm_submit_bio(struct bio *bio) 1633 { 1634 struct mapped_device *md = bio->bi_disk->private_data; 1635 blk_qc_t ret = BLK_QC_T_NONE; 1636 int srcu_idx; 1637 struct dm_table *map; 1638 1639 map = dm_get_live_table(md, &srcu_idx); 1640 if (unlikely(!map)) { 1641 DMERR_LIMIT("%s: mapping table unavailable, erroring io", 1642 dm_device_name(md)); 1643 bio_io_error(bio); 1644 goto out; 1645 } 1646 1647 /* If suspended, queue this IO for later */ 1648 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { 1649 if (bio->bi_opf & REQ_NOWAIT) 1650 bio_wouldblock_error(bio); 1651 else if (bio->bi_opf & REQ_RAHEAD) 1652 bio_io_error(bio); 1653 else 1654 queue_io(md, bio); 1655 goto out; 1656 } 1657 1658 /* 1659 * Use blk_queue_split() for abnormal IO (e.g. discard, writesame, etc) 1660 * otherwise associated queue_limits won't be imposed. 1661 */ 1662 if (is_abnormal_io(bio)) 1663 blk_queue_split(&bio); 1664 1665 ret = __split_and_process_bio(md, map, bio); 1666 out: 1667 dm_put_live_table(md, srcu_idx); 1668 return ret; 1669 } 1670 1671 /*----------------------------------------------------------------- 1672 * An IDR is used to keep track of allocated minor numbers. 1673 *---------------------------------------------------------------*/ 1674 static void free_minor(int minor) 1675 { 1676 spin_lock(&_minor_lock); 1677 idr_remove(&_minor_idr, minor); 1678 spin_unlock(&_minor_lock); 1679 } 1680 1681 /* 1682 * See if the device with a specific minor # is free. 
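 * (The IDR slot is first claimed with the MINOR_ALLOCED placeholder and only
 * replaced with the real mapped_device pointer at the end of alloc_dev().)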
1683 */ 1684 static int specific_minor(int minor) 1685 { 1686 int r; 1687 1688 if (minor >= (1 << MINORBITS)) 1689 return -EINVAL; 1690 1691 idr_preload(GFP_KERNEL); 1692 spin_lock(&_minor_lock); 1693 1694 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT); 1695 1696 spin_unlock(&_minor_lock); 1697 idr_preload_end(); 1698 if (r < 0) 1699 return r == -ENOSPC ? -EBUSY : r; 1700 return 0; 1701 } 1702 1703 static int next_free_minor(int *minor) 1704 { 1705 int r; 1706 1707 idr_preload(GFP_KERNEL); 1708 spin_lock(&_minor_lock); 1709 1710 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT); 1711 1712 spin_unlock(&_minor_lock); 1713 idr_preload_end(); 1714 if (r < 0) 1715 return r; 1716 *minor = r; 1717 return 0; 1718 } 1719 1720 static const struct block_device_operations dm_blk_dops; 1721 static const struct block_device_operations dm_rq_blk_dops; 1722 static const struct dax_operations dm_dax_ops; 1723 1724 static void dm_wq_work(struct work_struct *work); 1725 1726 static void cleanup_mapped_device(struct mapped_device *md) 1727 { 1728 if (md->wq) 1729 destroy_workqueue(md->wq); 1730 bioset_exit(&md->bs); 1731 bioset_exit(&md->io_bs); 1732 1733 if (md->dax_dev) { 1734 kill_dax(md->dax_dev); 1735 put_dax(md->dax_dev); 1736 md->dax_dev = NULL; 1737 } 1738 1739 if (md->disk) { 1740 spin_lock(&_minor_lock); 1741 md->disk->private_data = NULL; 1742 spin_unlock(&_minor_lock); 1743 del_gendisk(md->disk); 1744 put_disk(md->disk); 1745 } 1746 1747 if (md->queue) 1748 blk_cleanup_queue(md->queue); 1749 1750 cleanup_srcu_struct(&md->io_barrier); 1751 1752 if (md->bdev) { 1753 bdput(md->bdev); 1754 md->bdev = NULL; 1755 } 1756 1757 mutex_destroy(&md->suspend_lock); 1758 mutex_destroy(&md->type_lock); 1759 mutex_destroy(&md->table_devices_lock); 1760 1761 dm_mq_cleanup_mapped_device(md); 1762 } 1763 1764 /* 1765 * Allocate and initialise a blank device with a given minor. 1766 */ 1767 static struct mapped_device *alloc_dev(int minor) 1768 { 1769 int r, numa_node_id = dm_get_numa_node(); 1770 struct mapped_device *md; 1771 void *old_md; 1772 1773 md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id); 1774 if (!md) { 1775 DMWARN("unable to allocate device, out of memory."); 1776 return NULL; 1777 } 1778 1779 if (!try_module_get(THIS_MODULE)) 1780 goto bad_module_get; 1781 1782 /* get a minor number for the dev */ 1783 if (minor == DM_ANY_MINOR) 1784 r = next_free_minor(&minor); 1785 else 1786 r = specific_minor(minor); 1787 if (r < 0) 1788 goto bad_minor; 1789 1790 r = init_srcu_struct(&md->io_barrier); 1791 if (r < 0) 1792 goto bad_io_barrier; 1793 1794 md->numa_node_id = numa_node_id; 1795 md->init_tio_pdu = false; 1796 md->type = DM_TYPE_NONE; 1797 mutex_init(&md->suspend_lock); 1798 mutex_init(&md->type_lock); 1799 mutex_init(&md->table_devices_lock); 1800 spin_lock_init(&md->deferred_lock); 1801 atomic_set(&md->holders, 1); 1802 atomic_set(&md->open_count, 0); 1803 atomic_set(&md->event_nr, 0); 1804 atomic_set(&md->uevent_seq, 0); 1805 INIT_LIST_HEAD(&md->uevent_list); 1806 INIT_LIST_HEAD(&md->table_devices); 1807 spin_lock_init(&md->uevent_lock); 1808 1809 /* 1810 * default to bio-based until DM table is loaded and md->type 1811 * established. If request-based table is loaded: blk-mq will 1812 * override accordingly. 
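	 * (For DM_TYPE_REQUEST_BASED tables dm_setup_md_queue() later switches
	 * md->disk->fops to dm_rq_blk_dops.)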
1813 */ 1814 md->queue = blk_alloc_queue(numa_node_id); 1815 if (!md->queue) 1816 goto bad; 1817 1818 md->disk = alloc_disk_node(1, md->numa_node_id); 1819 if (!md->disk) 1820 goto bad; 1821 1822 init_waitqueue_head(&md->wait); 1823 INIT_WORK(&md->work, dm_wq_work); 1824 init_waitqueue_head(&md->eventq); 1825 init_completion(&md->kobj_holder.completion); 1826 1827 md->disk->major = _major; 1828 md->disk->first_minor = minor; 1829 md->disk->fops = &dm_blk_dops; 1830 md->disk->queue = md->queue; 1831 md->disk->private_data = md; 1832 sprintf(md->disk->disk_name, "dm-%d", minor); 1833 1834 if (IS_ENABLED(CONFIG_DAX_DRIVER)) { 1835 md->dax_dev = alloc_dax(md, md->disk->disk_name, 1836 &dm_dax_ops, 0); 1837 if (IS_ERR(md->dax_dev)) 1838 goto bad; 1839 } 1840 1841 add_disk_no_queue_reg(md->disk); 1842 format_dev_t(md->name, MKDEV(_major, minor)); 1843 1844 md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0); 1845 if (!md->wq) 1846 goto bad; 1847 1848 md->bdev = bdget_disk(md->disk, 0); 1849 if (!md->bdev) 1850 goto bad; 1851 1852 dm_stats_init(&md->stats); 1853 1854 /* Populate the mapping, nobody knows we exist yet */ 1855 spin_lock(&_minor_lock); 1856 old_md = idr_replace(&_minor_idr, md, minor); 1857 spin_unlock(&_minor_lock); 1858 1859 BUG_ON(old_md != MINOR_ALLOCED); 1860 1861 return md; 1862 1863 bad: 1864 cleanup_mapped_device(md); 1865 bad_io_barrier: 1866 free_minor(minor); 1867 bad_minor: 1868 module_put(THIS_MODULE); 1869 bad_module_get: 1870 kvfree(md); 1871 return NULL; 1872 } 1873 1874 static void unlock_fs(struct mapped_device *md); 1875 1876 static void free_dev(struct mapped_device *md) 1877 { 1878 int minor = MINOR(disk_devt(md->disk)); 1879 1880 unlock_fs(md); 1881 1882 cleanup_mapped_device(md); 1883 1884 free_table_devices(&md->table_devices); 1885 dm_stats_cleanup(&md->stats); 1886 free_minor(minor); 1887 1888 module_put(THIS_MODULE); 1889 kvfree(md); 1890 } 1891 1892 static int __bind_mempools(struct mapped_device *md, struct dm_table *t) 1893 { 1894 struct dm_md_mempools *p = dm_table_get_md_mempools(t); 1895 int ret = 0; 1896 1897 if (dm_table_bio_based(t)) { 1898 /* 1899 * The md may already have mempools that need changing. 1900 * If so, reload bioset because front_pad may have changed 1901 * because a different table was loaded. 1902 */ 1903 bioset_exit(&md->bs); 1904 bioset_exit(&md->io_bs); 1905 1906 } else if (bioset_initialized(&md->bs)) { 1907 /* 1908 * There's no need to reload with request-based dm 1909 * because the size of front_pad doesn't change. 1910 * Note for future: If you are to reload bioset, 1911 * prep-ed requests in the queue may refer 1912 * to bio from the old bioset, so you must walk 1913 * through the queue to unprep. 1914 */ 1915 goto out; 1916 } 1917 1918 BUG_ON(!p || 1919 bioset_initialized(&md->bs) || 1920 bioset_initialized(&md->io_bs)); 1921 1922 ret = bioset_init_from_src(&md->bs, &p->bs); 1923 if (ret) 1924 goto out; 1925 ret = bioset_init_from_src(&md->io_bs, &p->io_bs); 1926 if (ret) 1927 bioset_exit(&md->bs); 1928 out: 1929 /* mempool bind completed, no longer need any mempools in the table */ 1930 dm_table_free_md_mempools(t); 1931 return ret; 1932 } 1933 1934 /* 1935 * Bind a table to the device. 
1936 */ 1937 static void event_callback(void *context) 1938 { 1939 unsigned long flags; 1940 LIST_HEAD(uevents); 1941 struct mapped_device *md = (struct mapped_device *) context; 1942 1943 spin_lock_irqsave(&md->uevent_lock, flags); 1944 list_splice_init(&md->uevent_list, &uevents); 1945 spin_unlock_irqrestore(&md->uevent_lock, flags); 1946 1947 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); 1948 1949 atomic_inc(&md->event_nr); 1950 wake_up(&md->eventq); 1951 dm_issue_global_event(); 1952 } 1953 1954 /* 1955 * Returns old map, which caller must destroy. 1956 */ 1957 static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, 1958 struct queue_limits *limits) 1959 { 1960 struct dm_table *old_map; 1961 struct request_queue *q = md->queue; 1962 bool request_based = dm_table_request_based(t); 1963 sector_t size; 1964 int ret; 1965 1966 lockdep_assert_held(&md->suspend_lock); 1967 1968 size = dm_table_get_size(t); 1969 1970 /* 1971 * Wipe any geometry if the size of the table changed. 1972 */ 1973 if (size != dm_get_size(md)) 1974 memset(&md->geometry, 0, sizeof(md->geometry)); 1975 1976 set_capacity(md->disk, size); 1977 bd_set_nr_sectors(md->bdev, size); 1978 1979 dm_table_event_callback(t, event_callback, md); 1980 1981 /* 1982 * The queue hasn't been stopped yet, if the old table type wasn't 1983 * for request-based during suspension. So stop it to prevent 1984 * I/O mapping before resume. 1985 * This must be done before setting the queue restrictions, 1986 * because request-based dm may be run just after the setting. 1987 */ 1988 if (request_based) 1989 dm_stop_queue(q); 1990 1991 if (request_based) { 1992 /* 1993 * Leverage the fact that request-based DM targets are 1994 * immutable singletons - used to optimize dm_mq_queue_rq. 1995 */ 1996 md->immutable_target = dm_table_get_immutable_target(t); 1997 } 1998 1999 ret = __bind_mempools(md, t); 2000 if (ret) { 2001 old_map = ERR_PTR(ret); 2002 goto out; 2003 } 2004 2005 old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2006 rcu_assign_pointer(md->map, (void *)t); 2007 md->immutable_target_type = dm_table_get_immutable_target_type(t); 2008 2009 dm_table_set_restrictions(t, q, limits); 2010 if (old_map) 2011 dm_sync_table(md); 2012 2013 out: 2014 return old_map; 2015 } 2016 2017 /* 2018 * Returns unbound table for the caller to free. 2019 */ 2020 static struct dm_table *__unbind(struct mapped_device *md) 2021 { 2022 struct dm_table *map = rcu_dereference_protected(md->map, 1); 2023 2024 if (!map) 2025 return NULL; 2026 2027 dm_table_event_callback(map, NULL, NULL); 2028 RCU_INIT_POINTER(md->map, NULL); 2029 dm_sync_table(md); 2030 2031 return map; 2032 } 2033 2034 /* 2035 * Constructor for a new device. 2036 */ 2037 int dm_create(int minor, struct mapped_device **result) 2038 { 2039 int r; 2040 struct mapped_device *md; 2041 2042 md = alloc_dev(minor); 2043 if (!md) 2044 return -ENXIO; 2045 2046 r = dm_sysfs_init(md); 2047 if (r) { 2048 free_dev(md); 2049 return r; 2050 } 2051 2052 *result = md; 2053 return 0; 2054 } 2055 2056 /* 2057 * Functions to manage md->type. 2058 * All are required to hold md->type_lock. 
2059 */ 2060 void dm_lock_md_type(struct mapped_device *md) 2061 { 2062 mutex_lock(&md->type_lock); 2063 } 2064 2065 void dm_unlock_md_type(struct mapped_device *md) 2066 { 2067 mutex_unlock(&md->type_lock); 2068 } 2069 2070 void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type) 2071 { 2072 BUG_ON(!mutex_is_locked(&md->type_lock)); 2073 md->type = type; 2074 } 2075 2076 enum dm_queue_mode dm_get_md_type(struct mapped_device *md) 2077 { 2078 return md->type; 2079 } 2080 2081 struct target_type *dm_get_immutable_target_type(struct mapped_device *md) 2082 { 2083 return md->immutable_target_type; 2084 } 2085 2086 /* 2087 * The queue_limits are only valid as long as you have a reference 2088 * count on 'md'. 2089 */ 2090 struct queue_limits *dm_get_queue_limits(struct mapped_device *md) 2091 { 2092 BUG_ON(!atomic_read(&md->holders)); 2093 return &md->queue->limits; 2094 } 2095 EXPORT_SYMBOL_GPL(dm_get_queue_limits); 2096 2097 /* 2098 * Setup the DM device's queue based on md's type 2099 */ 2100 int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) 2101 { 2102 int r; 2103 struct queue_limits limits; 2104 enum dm_queue_mode type = dm_get_md_type(md); 2105 2106 switch (type) { 2107 case DM_TYPE_REQUEST_BASED: 2108 md->disk->fops = &dm_rq_blk_dops; 2109 r = dm_mq_init_request_queue(md, t); 2110 if (r) { 2111 DMERR("Cannot initialize queue for request-based dm mapped device"); 2112 return r; 2113 } 2114 break; 2115 case DM_TYPE_BIO_BASED: 2116 case DM_TYPE_DAX_BIO_BASED: 2117 break; 2118 case DM_TYPE_NONE: 2119 WARN_ON_ONCE(true); 2120 break; 2121 } 2122 2123 r = dm_calculate_queue_limits(t, &limits); 2124 if (r) { 2125 DMERR("Cannot calculate initial queue limits"); 2126 return r; 2127 } 2128 dm_table_set_restrictions(t, md->queue, &limits); 2129 blk_register_queue(md->disk); 2130 2131 return 0; 2132 } 2133 2134 struct mapped_device *dm_get_md(dev_t dev) 2135 { 2136 struct mapped_device *md; 2137 unsigned minor = MINOR(dev); 2138 2139 if (MAJOR(dev) != _major || minor >= (1 << MINORBITS)) 2140 return NULL; 2141 2142 spin_lock(&_minor_lock); 2143 2144 md = idr_find(&_minor_idr, minor); 2145 if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) || 2146 test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { 2147 md = NULL; 2148 goto out; 2149 } 2150 dm_get(md); 2151 out: 2152 spin_unlock(&_minor_lock); 2153 2154 return md; 2155 } 2156 EXPORT_SYMBOL_GPL(dm_get_md); 2157 2158 void *dm_get_mdptr(struct mapped_device *md) 2159 { 2160 return md->interface_ptr; 2161 } 2162 2163 void dm_set_mdptr(struct mapped_device *md, void *ptr) 2164 { 2165 md->interface_ptr = ptr; 2166 } 2167 2168 void dm_get(struct mapped_device *md) 2169 { 2170 atomic_inc(&md->holders); 2171 BUG_ON(test_bit(DMF_FREEING, &md->flags)); 2172 } 2173 2174 int dm_hold(struct mapped_device *md) 2175 { 2176 spin_lock(&_minor_lock); 2177 if (test_bit(DMF_FREEING, &md->flags)) { 2178 spin_unlock(&_minor_lock); 2179 return -EBUSY; 2180 } 2181 dm_get(md); 2182 spin_unlock(&_minor_lock); 2183 return 0; 2184 } 2185 EXPORT_SYMBOL_GPL(dm_hold); 2186 2187 const char *dm_device_name(struct mapped_device *md) 2188 { 2189 return md->name; 2190 } 2191 EXPORT_SYMBOL_GPL(dm_device_name); 2192 2193 static void __dm_destroy(struct mapped_device *md, bool wait) 2194 { 2195 struct dm_table *map; 2196 int srcu_idx; 2197 2198 might_sleep(); 2199 2200 spin_lock(&_minor_lock); 2201 idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); 2202 set_bit(DMF_FREEING, &md->flags); 2203 
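	/*
	 * New opens and dm_get_md()/dm_hold() lookups will now fail:
	 * they all check DMF_FREEING under _minor_lock.
	 */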

static void __dm_destroy(struct mapped_device *md, bool wait)
{
        struct dm_table *map;
        int srcu_idx;

        might_sleep();

        spin_lock(&_minor_lock);
        idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
        set_bit(DMF_FREEING, &md->flags);
        spin_unlock(&_minor_lock);

        blk_set_queue_dying(md->queue);

        /*
         * Take suspend_lock so that presuspend and postsuspend methods
         * do not race with internal suspend.
         */
        mutex_lock(&md->suspend_lock);
        map = dm_get_live_table(md, &srcu_idx);
        if (!dm_suspended_md(md)) {
                dm_table_presuspend_targets(map);
                set_bit(DMF_SUSPENDED, &md->flags);
                set_bit(DMF_POST_SUSPENDING, &md->flags);
                dm_table_postsuspend_targets(map);
        }
        /* dm_put_live_table must be called before msleep, otherwise a deadlock is possible */
        dm_put_live_table(md, srcu_idx);
        mutex_unlock(&md->suspend_lock);

        /*
         * Rarely, I/O requests may still be in flight and completing.
         * Wait for all references to disappear.  No one may increment the
         * reference count of the mapped_device once its state becomes
         * DMF_FREEING.
         */
        if (wait)
                while (atomic_read(&md->holders))
                        msleep(1);
        else if (atomic_read(&md->holders))
                DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
                       dm_device_name(md), atomic_read(&md->holders));

        dm_sysfs_exit(md);
        dm_table_destroy(__unbind(md));
        free_dev(md);
}

void dm_destroy(struct mapped_device *md)
{
        __dm_destroy(md, true);
}

void dm_destroy_immediate(struct mapped_device *md)
{
        __dm_destroy(md, false);
}

void dm_put(struct mapped_device *md)
{
        atomic_dec(&md->holders);
}
EXPORT_SYMBOL_GPL(dm_put);

static bool md_in_flight_bios(struct mapped_device *md)
{
        int cpu;
        struct hd_struct *part = &dm_disk(md)->part0;
        long sum = 0;

        for_each_possible_cpu(cpu) {
                sum += part_stat_local_read_cpu(part, in_flight[0], cpu);
                sum += part_stat_local_read_cpu(part, in_flight[1], cpu);
        }

        return sum != 0;
}

static int dm_wait_for_bios_completion(struct mapped_device *md, long task_state)
{
        int r = 0;
        DEFINE_WAIT(wait);

        while (true) {
                prepare_to_wait(&md->wait, &wait, task_state);

                if (!md_in_flight_bios(md))
                        break;

                if (signal_pending_state(task_state, current)) {
                        r = -EINTR;
                        break;
                }

                io_schedule();
        }
        finish_wait(&md->wait, &wait);

        return r;
}

static int dm_wait_for_completion(struct mapped_device *md, long task_state)
{
        int r = 0;

        if (!queue_is_mq(md->queue))
                return dm_wait_for_bios_completion(md, task_state);

        while (true) {
                if (!blk_mq_queue_inflight(md->queue))
                        break;

                if (signal_pending_state(task_state, current)) {
                        r = -EINTR;
                        break;
                }

                msleep(5);
        }

        return r;
}

/*
 * Process the deferred bios
 */
static void dm_wq_work(struct work_struct *work)
{
        struct mapped_device *md = container_of(work, struct mapped_device, work);
        struct bio *bio;

        while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
                spin_lock_irq(&md->deferred_lock);
                bio = bio_list_pop(&md->deferred);
                spin_unlock_irq(&md->deferred_lock);

                if (!bio)
                        break;

                submit_bio_noacct(bio);
        }
}

static void dm_queue_flush(struct mapped_device *md)
{
        clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
        smp_mb__after_atomic();
        queue_work(md->wq, &md->work);
}

/*
 * Swap in a new table, returning the old one for the caller to destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
        struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
        struct queue_limits limits;
        int r;

        mutex_lock(&md->suspend_lock);

        /* device must be suspended */
        if (!dm_suspended_md(md))
                goto out;

        /*
         * If the new table has no data devices, retain the existing limits.
         * This helps multipath with queue_if_no_path: if all paths disappear,
         * new I/O is queued based on these limits, and the paths may later
         * reappear.
         */
        if (dm_table_has_no_data_devices(table)) {
                live_map = dm_get_live_table_fast(md);
                if (live_map)
                        limits = md->queue->limits;
                dm_put_live_table_fast(md);
        }

        if (!live_map) {
                r = dm_calculate_queue_limits(table, &limits);
                if (r) {
                        map = ERR_PTR(r);
                        goto out;
                }
        }

        map = __bind(md, table, &limits);
        dm_issue_global_event();

out:
        mutex_unlock(&md->suspend_lock);
        return map;
}

/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int lock_fs(struct mapped_device *md)
{
        int r;

        WARN_ON(md->frozen_sb);

        md->frozen_sb = freeze_bdev(md->bdev);
        if (IS_ERR(md->frozen_sb)) {
                r = PTR_ERR(md->frozen_sb);
                md->frozen_sb = NULL;
                return r;
        }

        set_bit(DMF_FROZEN, &md->flags);

        return 0;
}

static void unlock_fs(struct mapped_device *md)
{
        if (!test_bit(DMF_FROZEN, &md->flags))
                return;

        thaw_bdev(md->bdev, md->frozen_sb);
        md->frozen_sb = NULL;
        clear_bit(DMF_FROZEN, &md->flags);
}

/*
 * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG
 * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE
 * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY
 *
 * If __dm_suspend returns 0, the device is completely quiescent
 * now. There is no request-processing activity. All new requests
 * are added to the md->deferred list.
 */
static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
                        unsigned suspend_flags, long task_state,
                        int dmf_suspended_flag)
{
        bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
        bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
        int r;

        lockdep_assert_held(&md->suspend_lock);

        /*
         * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
         * This flag is cleared before dm_suspend returns.
         */
        if (noflush)
                set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
        else
                DMDEBUG("%s: suspending with flush", dm_device_name(md));

        /*
         * This gets reverted if there's an error later and the targets
         * provide the .presuspend_undo hook.
         */
        dm_table_presuspend_targets(map);

        /*
         * Flush I/O to the device.
         * Any I/O submitted after lock_fs() may not be flushed.
         * noflush takes precedence over do_lockfs.
         * (lock_fs() flushes I/Os and waits for them to complete.)
         */
        if (!noflush && do_lockfs) {
                r = lock_fs(md);
                if (r) {
                        dm_table_presuspend_undo_targets(map);
                        return r;
                }
        }

        /*
         * Here we must make sure that no processes are submitting requests
         * to target drivers, i.e. no one may be executing
         * __split_and_process_bio from dm_submit_bio.
         *
         * To get all processes out of __split_and_process_bio in dm_submit_bio,
         * we take the write lock. To prevent any process from reentering
         * __split_and_process_bio from dm_submit_bio and quiesce the thread
         * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
         * flush_workqueue(md->wq).
         */
        set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
        if (map)
                synchronize_srcu(&md->io_barrier);

        /*
         * Stop md->queue before flushing md->wq in case request-based
         * dm defers requests to md->wq from md->queue.
         */
        if (dm_request_based(md))
                dm_stop_queue(md->queue);

        flush_workqueue(md->wq);

        /*
         * At this point no more requests are entering target request routines.
         * We call dm_wait_for_completion to wait for all existing requests
         * to finish.
         */
        r = dm_wait_for_completion(md, task_state);
        if (!r)
                set_bit(dmf_suspended_flag, &md->flags);

        if (noflush)
                clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
        if (map)
                synchronize_srcu(&md->io_barrier);

        /* were we interrupted ? */
        if (r < 0) {
                dm_queue_flush(md);

                if (dm_request_based(md))
                        dm_start_queue(md->queue);

                unlock_fs(md);
                dm_table_presuspend_undo_targets(map);
                /* pushback list is already flushed, so skip flush */
        }

        return r;
}

/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in-flight
 * bios and ensure that any further I/O gets deferred.
 */
/*
 * Suspend mechanism in request-based dm.
 *
 * 1. Flush all I/Os by lock_fs() if needed.
 * 2. Stop dispatching any I/O by stopping the request_queue.
 * 3. Wait for all in-flight I/Os to be completed or requeued.
 *
 * To abort suspend, start the request_queue.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
        struct dm_table *map = NULL;
        int r = 0;

retry:
        mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);

        if (dm_suspended_md(md)) {
                r = -EINVAL;
                goto out_unlock;
        }

        if (dm_suspended_internally_md(md)) {
                /* already internally suspended, wait for internal resume */
                mutex_unlock(&md->suspend_lock);
                r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
                if (r)
                        return r;
                goto retry;
        }

        map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));

        r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);
        if (r)
                goto out_unlock;

        set_bit(DMF_POST_SUSPENDING, &md->flags);
        dm_table_postsuspend_targets(map);
        clear_bit(DMF_POST_SUSPENDING, &md->flags);

out_unlock:
        mutex_unlock(&md->suspend_lock);
        return r;
}
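
/*
 * Illustrative sketch (editor's addition, not part of the driver): the
 * sequence described in the comments above, as a caller such as the DM
 * ioctl layer would drive it to replace the live table under a mounted
 * filesystem.  The function name is hypothetical and error handling is
 * simplified.
 */
#if 0
static int example_replace_table(struct mapped_device *md, struct dm_table *new_table)
{
        struct dm_table *old_map;
        int r;

        /* 1. Quiesce: flush in-flight I/O and defer anything new. */
        r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
        if (r)
                return r;

        /* 2. Swap: __bind() the new table, get the old one back. */
        old_map = dm_swap_table(md, new_table);
        if (IS_ERR(old_map)) {
                dm_resume(md);
                return PTR_ERR(old_map);
        }

        /* 3. Resume: re-issue deferred bios against the new table. */
        r = dm_resume(md);

        if (old_map)
                dm_table_destroy(old_map);      /* caller must destroy the old map */
        return r;
}
#endif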

static int __dm_resume(struct mapped_device *md, struct dm_table *map)
{
        if (map) {
                int r = dm_table_resume_targets(map);
                if (r)
                        return r;
        }

        dm_queue_flush(md);

        /*
         * Flushing deferred I/Os must be done after targets are resumed
         * so that mapping of targets can work correctly.
         * Request-based dm queues the deferred I/Os in its request_queue.
         */
        if (dm_request_based(md))
                dm_start_queue(md->queue);

        unlock_fs(md);

        return 0;
}

int dm_resume(struct mapped_device *md)
{
        int r;
        struct dm_table *map = NULL;

retry:
        r = -EINVAL;
        mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);

        if (!dm_suspended_md(md))
                goto out;

        if (dm_suspended_internally_md(md)) {
                /* already internally suspended, wait for internal resume */
                mutex_unlock(&md->suspend_lock);
                r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
                if (r)
                        return r;
                goto retry;
        }

        map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
        if (!map || !dm_table_get_size(map))
                goto out;

        r = __dm_resume(md, map);
        if (r)
                goto out;

        clear_bit(DMF_SUSPENDED, &md->flags);
out:
        mutex_unlock(&md->suspend_lock);

        return r;
}

/*
 * Internal suspend/resume works like userspace-driven suspend. It waits
 * until all bios finish and prevents issuing new bios to the target drivers.
 * It may be used only from the kernel.
 */

static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags)
{
        struct dm_table *map = NULL;

        lockdep_assert_held(&md->suspend_lock);

        if (md->internal_suspend_count++)
                return; /* nested internal suspend */

        if (dm_suspended_md(md)) {
                set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
                return; /* nest suspend */
        }

        map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));

        /*
         * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is
         * supported.  Properly supporting a TASK_INTERRUPTIBLE internal suspend
         * would require changing .presuspend to return an error -- avoid this
         * until there is a need for more elaborate variants of internal suspend.
         */
        (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
                            DMF_SUSPENDED_INTERNALLY);

        set_bit(DMF_POST_SUSPENDING, &md->flags);
        dm_table_postsuspend_targets(map);
        clear_bit(DMF_POST_SUSPENDING, &md->flags);
}

static void __dm_internal_resume(struct mapped_device *md)
{
        BUG_ON(!md->internal_suspend_count);

        if (--md->internal_suspend_count)
                return; /* resume from nested internal suspend */

        if (dm_suspended_md(md))
                goto done; /* resume from nested suspend */

        /*
         * NOTE: existing callers don't need to call dm_table_resume_targets
         * (which may fail -- so best to avoid it for now by passing NULL map)
         */
        (void) __dm_resume(md, NULL);

done:
        clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
        smp_mb__after_atomic();
        wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY);
}

void dm_internal_suspend_noflush(struct mapped_device *md)
{
        mutex_lock(&md->suspend_lock);
        __dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
        mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush);

void dm_internal_resume(struct mapped_device *md)
{
        mutex_lock(&md->suspend_lock);
        __dm_internal_resume(md);
        mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_resume);

/*
 * Fast variants of internal suspend/resume hold md->suspend_lock,
 * which prevents interaction with userspace-driven suspend.
 */

void dm_internal_suspend_fast(struct mapped_device *md)
{
        mutex_lock(&md->suspend_lock);
        if (dm_suspended_md(md) || dm_suspended_internally_md(md))
                return;

        set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
        synchronize_srcu(&md->io_barrier);
        flush_workqueue(md->wq);
        dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);

void dm_internal_resume_fast(struct mapped_device *md)
{
        if (dm_suspended_md(md) || dm_suspended_internally_md(md))
                goto done;

        dm_queue_flush(md);

done:
        mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
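
/*
 * Illustrative sketch (editor's addition, not part of the driver): internal
 * suspend is always used as a matched pair around an operation that must
 * see no in-flight bios, called from kernel code that already holds a
 * reference on the mapped_device.  Note that the "fast" variants keep
 * md->suspend_lock held in between.  The function name is hypothetical.
 */
#if 0
static void example_quiesced_operation(struct mapped_device *md)
{
        dm_internal_suspend_fast(md);

        /* ... operate while no bios are in flight and new ones are deferred ... */

        dm_internal_resume_fast(md);
}
#endif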

/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
                      unsigned cookie)
{
        int r;
        unsigned noio_flag;
        char udev_cookie[DM_COOKIE_LENGTH];
        char *envp[] = { udev_cookie, NULL };

        noio_flag = memalloc_noio_save();

        if (!cookie)
                r = kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
        else {
                snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
                         DM_COOKIE_ENV_VAR_NAME, cookie);
                r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
                                       action, envp);
        }

        memalloc_noio_restore(noio_flag);

        return r;
}

uint32_t dm_next_uevent_seq(struct mapped_device *md)
{
        return atomic_add_return(1, &md->uevent_seq);
}

uint32_t dm_get_event_nr(struct mapped_device *md)
{
        return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
        return wait_event_interruptible(md->eventq,
                        (event_nr != atomic_read(&md->event_nr)));
}

void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
{
        unsigned long flags;

        spin_lock_irqsave(&md->uevent_lock, flags);
        list_add(elist, &md->uevent_list);
        spin_unlock_irqrestore(&md->uevent_lock, flags);
}

/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
        return md->disk;
}
EXPORT_SYMBOL_GPL(dm_disk);

struct kobject *dm_kobject(struct mapped_device *md)
{
        return &md->kobj_holder.kobj;
}

struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{
        struct mapped_device *md;

        md = container_of(kobj, struct mapped_device, kobj_holder.kobj);

        spin_lock(&_minor_lock);
        if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
                md = NULL;
                goto out;
        }
        dm_get(md);
out:
        spin_unlock(&_minor_lock);

        return md;
}

int dm_suspended_md(struct mapped_device *md)
{
        return test_bit(DMF_SUSPENDED, &md->flags);
}

static int dm_post_suspending_md(struct mapped_device *md)
{
        return test_bit(DMF_POST_SUSPENDING, &md->flags);
}

int dm_suspended_internally_md(struct mapped_device *md)
{
        return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
}

int dm_test_deferred_remove_flag(struct mapped_device *md)
{
        return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
}

int dm_suspended(struct dm_target *ti)
{
        return dm_suspended_md(ti->table->md);
}
EXPORT_SYMBOL_GPL(dm_suspended);

int dm_post_suspending(struct dm_target *ti)
{
        return dm_post_suspending_md(ti->table->md);
}
EXPORT_SYMBOL_GPL(dm_post_suspending);

int dm_noflush_suspending(struct dm_target *ti)
{
        return __noflush_suspending(ti->table->md);
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);
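
/*
 * Illustrative sketch (editor's addition, not part of the driver): targets
 * typically consult the predicates above from their I/O paths.  A
 * multipath-style end_io hook, for example, asks the core to requeue
 * failed bios while a noflush suspend is in progress instead of failing
 * them.  The function name is hypothetical.
 */
#if 0
static int example_end_io(struct dm_target *ti, struct bio *bio,
                          blk_status_t *error)
{
        if (*error && dm_noflush_suspending(ti))
                return DM_ENDIO_REQUEUE;        /* dm core requeues the bio until resume */

        return DM_ENDIO_DONE;
}
#endif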

struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
                                            unsigned integrity, unsigned per_io_data_size,
                                            unsigned min_pool_size)
{
        struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
        unsigned int pool_size = 0;
        unsigned int front_pad, io_front_pad;
        int ret;

        if (!pools)
                return NULL;

        switch (type) {
        case DM_TYPE_BIO_BASED:
        case DM_TYPE_DAX_BIO_BASED:
                pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
                front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
                io_front_pad = roundup(front_pad, __alignof__(struct dm_io)) + offsetof(struct dm_io, tio);
                ret = bioset_init(&pools->io_bs, pool_size, io_front_pad, 0);
                if (ret)
                        goto out;
                if (integrity && bioset_integrity_create(&pools->io_bs, pool_size))
                        goto out;
                break;
        case DM_TYPE_REQUEST_BASED:
                pool_size = max(dm_get_reserved_rq_based_ios(), min_pool_size);
                front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
                /* per_io_data_size is used for blk-mq pdu at queue allocation */
                break;
        default:
                BUG();
        }

        ret = bioset_init(&pools->bs, pool_size, front_pad, 0);
        if (ret)
                goto out;

        if (integrity && bioset_integrity_create(&pools->bs, pool_size))
                goto out;

        return pools;

out:
        dm_free_md_mempools(pools);

        return NULL;
}

void dm_free_md_mempools(struct dm_md_mempools *pools)
{
        if (!pools)
                return;

        bioset_exit(&pools->bs);
        bioset_exit(&pools->io_bs);

        kfree(pools);
}
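
/*
 * Illustrative sketch (editor's addition, not part of the driver): the
 * per_io_data_size passed into dm_alloc_md_mempools() above is what a
 * target requests in its constructor; the front_pad arithmetic reserves
 * that much space in front of every clone bio so the target can reach it
 * with dm_per_bio_data().  The structure and function names below are
 * hypothetical, and a real map function would also redirect the bio.
 */
#if 0
struct example_per_bio_data {
        sector_t original_sector;
};

static int example_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
        ti->per_io_data_size = sizeof(struct example_per_bio_data);
        return 0;
}

static int example_map(struct dm_target *ti, struct bio *bio)
{
        struct example_per_bio_data *pb =
                dm_per_bio_data(bio, sizeof(struct example_per_bio_data));

        pb->original_sector = bio->bi_iter.bi_sector;   /* stash for end_io */
        return DM_MAPIO_REMAPPED;
}
#endif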

struct dm_pr {
        u64     old_key;
        u64     new_key;
        u32     flags;
        bool    fail_early;
};

static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn,
                      void *data)
{
        struct mapped_device *md = bdev->bd_disk->private_data;
        struct dm_table *table;
        struct dm_target *ti;
        int ret = -ENOTTY, srcu_idx;

        table = dm_get_live_table(md, &srcu_idx);
        if (!table || !dm_table_get_size(table))
                goto out;

        /* We only support devices that have a single target */
        if (dm_table_get_num_targets(table) != 1)
                goto out;
        ti = dm_table_get_target(table, 0);

        ret = -EINVAL;
        if (!ti->type->iterate_devices)
                goto out;

        ret = ti->type->iterate_devices(ti, fn, data);
out:
        dm_put_live_table(md, srcu_idx);
        return ret;
}

/*
 * For register / unregister we need to manually call out to every path.
 */
static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev,
                            sector_t start, sector_t len, void *data)
{
        struct dm_pr *pr = data;
        const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;

        if (!ops || !ops->pr_register)
                return -EOPNOTSUPP;
        return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags);
}

static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
                          u32 flags)
{
        struct dm_pr pr = {
                .old_key        = old_key,
                .new_key        = new_key,
                .flags          = flags,
                .fail_early     = true,
        };
        int ret;

        ret = dm_call_pr(bdev, __dm_pr_register, &pr);
        if (ret && new_key) {
                /* unregister all paths if we failed to register any path */
                pr.old_key = new_key;
                pr.new_key = 0;
                pr.flags = 0;
                pr.fail_early = false;
                dm_call_pr(bdev, __dm_pr_register, &pr);
        }

        return ret;
}

static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
                         u32 flags)
{
        struct mapped_device *md = bdev->bd_disk->private_data;
        const struct pr_ops *ops;
        int r, srcu_idx;

        r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
        if (r < 0)
                goto out;

        ops = bdev->bd_disk->fops->pr_ops;
        if (ops && ops->pr_reserve)
                r = ops->pr_reserve(bdev, key, type, flags);
        else
                r = -EOPNOTSUPP;
out:
        dm_unprepare_ioctl(md, srcu_idx);
        return r;
}

static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
        struct mapped_device *md = bdev->bd_disk->private_data;
        const struct pr_ops *ops;
        int r, srcu_idx;

        r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
        if (r < 0)
                goto out;

        ops = bdev->bd_disk->fops->pr_ops;
        if (ops && ops->pr_release)
                r = ops->pr_release(bdev, key, type);
        else
                r = -EOPNOTSUPP;
out:
        dm_unprepare_ioctl(md, srcu_idx);
        return r;
}

static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
                         enum pr_type type, bool abort)
{
        struct mapped_device *md = bdev->bd_disk->private_data;
        const struct pr_ops *ops;
        int r, srcu_idx;

        r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
        if (r < 0)
                goto out;

        ops = bdev->bd_disk->fops->pr_ops;
        if (ops && ops->pr_preempt)
                r = ops->pr_preempt(bdev, old_key, new_key, type, abort);
        else
                r = -EOPNOTSUPP;
out:
        dm_unprepare_ioctl(md, srcu_idx);
        return r;
}

static int dm_pr_clear(struct block_device *bdev, u64 key)
{
        struct mapped_device *md = bdev->bd_disk->private_data;
        const struct pr_ops *ops;
        int r, srcu_idx;

        r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
        if (r < 0)
                goto out;

        ops = bdev->bd_disk->fops->pr_ops;
        if (ops && ops->pr_clear)
                r = ops->pr_clear(bdev, key);
        else
                r = -EOPNOTSUPP;
out:
        dm_unprepare_ioctl(md, srcu_idx);
        return r;
}

static const struct pr_ops dm_pr_ops = {
        .pr_register    = dm_pr_register,
        .pr_reserve     = dm_pr_reserve,
        .pr_release     = dm_pr_release,
        .pr_preempt     = dm_pr_preempt,
        .pr_clear       = dm_pr_clear,
};
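
/*
 * Illustrative sketch (editor's addition, not part of the driver): the
 * pr_ops above are reached from userspace through the generic persistent
 * reservation ioctls on the dm block node.  This is userspace code, shown
 * here only for context; the device path and function name are examples.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/pr.h>

int example_pr_register(void)
{
        struct pr_registration reg = { .old_key = 0, .new_key = 0x1234 };
        int fd = open("/dev/dm-0", O_RDWR);
        int r;

        if (fd < 0)
                return -1;
        /* routed to dm_pr_register(), which registers every underlying path */
        r = ioctl(fd, IOC_PR_REGISTER, &reg);
        close(fd);
        return r;
}
#endif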

static const struct block_device_operations dm_blk_dops = {
        .submit_bio = dm_submit_bio,
        .open = dm_blk_open,
        .release = dm_blk_close,
        .ioctl = dm_blk_ioctl,
        .getgeo = dm_blk_getgeo,
        .report_zones = dm_blk_report_zones,
        .pr_ops = &dm_pr_ops,
        .owner = THIS_MODULE
};

static const struct block_device_operations dm_rq_blk_dops = {
        .open = dm_blk_open,
        .release = dm_blk_close,
        .ioctl = dm_blk_ioctl,
        .getgeo = dm_blk_getgeo,
        .pr_ops = &dm_pr_ops,
        .owner = THIS_MODULE
};

static const struct dax_operations dm_dax_ops = {
        .direct_access = dm_dax_direct_access,
        .dax_supported = dm_dax_supported,
        .copy_from_iter = dm_dax_copy_from_iter,
        .copy_to_iter = dm_dax_copy_to_iter,
        .zero_page_range = dm_dax_zero_page_range,
};

/*
 * module hooks
 */
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");

module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");

module_param(dm_numa_node, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");

MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");