// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"
#include "dm-uevent.h"
#include "dm-ima.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/uio.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/pr.h>
#include <linux/refcount.h>
#include <linux/part_stat.h>
#include <linux/blk-crypto.h>
#include <linux/blk-crypto-profile.h>

#define DM_MSG_PREFIX "core"

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

/*
 * For REQ_POLLED fs bio, this flag is set if we link mapped underlying
 * dm_io into one list, and reuse bio->bi_private as the list head. Before
 * ending this fs bio, we will recover its ->bi_private.
 */
#define REQ_DM_POLL_LIST REQ_DRV

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_IDR(_minor_idr);

static DEFINE_SPINLOCK(_minor_lock);

static void do_deferred_remove(struct work_struct *w);

static DECLARE_WORK(deferred_remove_work, do_deferred_remove);

static struct workqueue_struct *deferred_remove_workqueue;

atomic_t dm_global_event_nr = ATOMIC_INIT(0);
DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);

void dm_issue_global_event(void)
{
        atomic_inc(&dm_global_event_nr);
        wake_up(&dm_global_eventq);
}

DEFINE_STATIC_KEY_FALSE(stats_enabled);
DEFINE_STATIC_KEY_FALSE(swap_bios_enabled);
DEFINE_STATIC_KEY_FALSE(zoned_enabled);

/*
 * One of these is allocated (on-stack) per original bio.
 */
struct clone_info {
        struct dm_table *map;
        struct bio *bio;
        struct dm_io *io;
        sector_t sector;
        unsigned int sector_count;
        bool is_abnormal_io:1;
        bool submit_as_polled:1;
};

static inline struct dm_target_io *clone_to_tio(struct bio *clone)
{
        return container_of(clone, struct dm_target_io, clone);
}

void *dm_per_bio_data(struct bio *bio, size_t data_size)
{
        if (!dm_tio_flagged(clone_to_tio(bio), DM_TIO_INSIDE_DM_IO))
                return (char *)bio - DM_TARGET_IO_BIO_OFFSET - data_size;
        return (char *)bio - DM_IO_BIO_OFFSET - data_size;
}
EXPORT_SYMBOL_GPL(dm_per_bio_data);

struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
{
        struct dm_io *io = (struct dm_io *)((char *)data + data_size);

        if (io->magic == DM_IO_MAGIC)
                return (struct bio *)((char *)io + DM_IO_BIO_OFFSET);
        BUG_ON(io->magic != DM_TIO_MAGIC);
        return (struct bio *)((char *)io + DM_TARGET_IO_BIO_OFFSET);
}
EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data);
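/*
 * Return this clone's index among the duplicate bios issued to the
 * target (see __send_duplicate_bios()).
 */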
unsigned int dm_bio_get_target_bio_nr(const struct bio *bio)
{
        return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
}
EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);

#define MINOR_ALLOCED ((void *)-1)

#define DM_NUMA_NODE NUMA_NO_NODE
static int dm_numa_node = DM_NUMA_NODE;

#define DEFAULT_SWAP_BIOS (8 * 1048576 / PAGE_SIZE)
static int swap_bios = DEFAULT_SWAP_BIOS;
static int get_swap_bios(void)
{
        int latch = READ_ONCE(swap_bios);

        if (unlikely(latch <= 0))
                latch = DEFAULT_SWAP_BIOS;
        return latch;
}

struct table_device {
        struct list_head list;
        refcount_t count;
        struct dm_dev dm_dev;
};

/*
 * Bio-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_BIO_BASED_IOS          16
static unsigned int reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;

static int __dm_get_module_param_int(int *module_param, int min, int max)
{
        int param = READ_ONCE(*module_param);
        int modified_param = 0;
        bool modified = true;

        if (param < min)
                modified_param = min;
        else if (param > max)
                modified_param = max;
        else
                modified = false;

        if (modified) {
                (void)cmpxchg(module_param, param, modified_param);
                param = modified_param;
        }

        return param;
}

unsigned int __dm_get_module_param(unsigned int *module_param,
                                   unsigned int def, unsigned int max)
{
        unsigned int param = READ_ONCE(*module_param);
        unsigned int modified_param = 0;

        if (!param)
                modified_param = def;
        else if (param > max)
                modified_param = max;

        if (modified_param) {
                (void)cmpxchg(module_param, param, modified_param);
                param = modified_param;
        }

        return param;
}

unsigned int dm_get_reserved_bio_based_ios(void)
{
        return __dm_get_module_param(&reserved_bio_based_ios,
                                     RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);
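/*
 * Clamp the dm_numa_node module parameter between NUMA_NO_NODE and the
 * highest online node.
 */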
static unsigned int dm_get_numa_node(void)
{
        return __dm_get_module_param_int(&dm_numa_node,
                                         DM_NUMA_NODE, num_online_nodes() - 1);
}

static int __init local_init(void)
{
        int r;

        r = dm_uevent_init();
        if (r)
                return r;

        deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
        if (!deferred_remove_workqueue) {
                r = -ENOMEM;
                goto out_uevent_exit;
        }

        _major = major;
        r = register_blkdev(_major, _name);
        if (r < 0)
                goto out_free_workqueue;

        if (!_major)
                _major = r;

        return 0;

out_free_workqueue:
        destroy_workqueue(deferred_remove_workqueue);
out_uevent_exit:
        dm_uevent_exit();

        return r;
}

static void local_exit(void)
{
        flush_scheduled_work();
        destroy_workqueue(deferred_remove_workqueue);

        unregister_blkdev(_major, _name);
        dm_uevent_exit();

        _major = 0;

        DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
        local_init,
        dm_target_init,
        dm_linear_init,
        dm_stripe_init,
        dm_io_init,
        dm_kcopyd_init,
        dm_interface_init,
        dm_statistics_init,
};

static void (*_exits[])(void) = {
        local_exit,
        dm_target_exit,
        dm_linear_exit,
        dm_stripe_exit,
        dm_io_exit,
        dm_kcopyd_exit,
        dm_interface_exit,
        dm_statistics_exit,
};
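/*
 * Run every constructor in _inits[]; on failure, unwind the ones that
 * already succeeded in reverse order.
 */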
static int __init dm_init(void)
{
        const int count = ARRAY_SIZE(_inits);
        int r, i;

#if (IS_ENABLED(CONFIG_IMA) && !IS_ENABLED(CONFIG_IMA_DISABLE_HTABLE))
        DMWARN("CONFIG_IMA_DISABLE_HTABLE is disabled."
               " Duplicate IMA measurements will not be recorded in the IMA log.");
#endif

        for (i = 0; i < count; i++) {
                r = _inits[i]();
                if (r)
                        goto bad;
        }

        return 0;
bad:
        while (i--)
                _exits[i]();

        return r;
}

static void __exit dm_exit(void)
{
        int i = ARRAY_SIZE(_exits);

        while (i--)
                _exits[i]();

        /*
         * Should be empty by this point.
         */
        idr_destroy(&_minor_idr);
}

/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{
        return test_bit(DMF_DELETING, &md->flags);
}
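/*
 * Opening the block device takes a reference on the mapped_device and
 * bumps its open count; the open fails while the device is being
 * freed or deleted.
 */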
static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
        struct mapped_device *md;

        spin_lock(&_minor_lock);

        md = bdev->bd_disk->private_data;
        if (!md)
                goto out;

        if (test_bit(DMF_FREEING, &md->flags) ||
            dm_deleting_md(md)) {
                md = NULL;
                goto out;
        }

        dm_get(md);
        atomic_inc(&md->open_count);
out:
        spin_unlock(&_minor_lock);

        return md ? 0 : -ENXIO;
}

static void dm_blk_close(struct gendisk *disk, fmode_t mode)
{
        struct mapped_device *md;

        spin_lock(&_minor_lock);

        md = disk->private_data;
        if (WARN_ON(!md))
                goto out;

        if (atomic_dec_and_test(&md->open_count) &&
            (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
                queue_work(deferred_remove_workqueue, &deferred_remove_work);

        dm_put(md);
out:
        spin_unlock(&_minor_lock);
}

int dm_open_count(struct mapped_device *md)
{
        return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
{
        int r = 0;

        spin_lock(&_minor_lock);

        if (dm_open_count(md)) {
                r = -EBUSY;
                if (mark_deferred)
                        set_bit(DMF_DEFERRED_REMOVE, &md->flags);
        } else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
                r = -EEXIST;
        else
                set_bit(DMF_DELETING, &md->flags);

        spin_unlock(&_minor_lock);

        return r;
}

int dm_cancel_deferred_remove(struct mapped_device *md)
{
        int r = 0;

        spin_lock(&_minor_lock);

        if (test_bit(DMF_DELETING, &md->flags))
                r = -EBUSY;
        else
                clear_bit(DMF_DEFERRED_REMOVE, &md->flags);

        spin_unlock(&_minor_lock);

        return r;
}

static void do_deferred_remove(struct work_struct *w)
{
        dm_deferred_remove();
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        struct mapped_device *md = bdev->bd_disk->private_data;

        return dm_get_geometry(md, geo);
}
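/*
 * Look up the single target of md's live table so an ioctl can be
 * passed through to it; retries while the target returns -ENOTCONN
 * and no fatal signal is pending.
 */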
static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
                            struct block_device **bdev)
{
        struct dm_target *ti;
        struct dm_table *map;
        int r;

retry:
        r = -ENOTTY;
        map = dm_get_live_table(md, srcu_idx);
        if (!map || !dm_table_get_size(map))
                return r;

        /* We only support devices that have a single target */
        if (map->num_targets != 1)
                return r;

        ti = dm_table_get_target(map, 0);
        if (!ti->type->prepare_ioctl)
                return r;

        if (dm_suspended_md(md))
                return -EAGAIN;

        r = ti->type->prepare_ioctl(ti, bdev);
        if (r == -ENOTCONN && !fatal_signal_pending(current)) {
                dm_put_live_table(md, *srcu_idx);
                fsleep(10000);
                goto retry;
        }

        return r;
}

static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
{
        dm_put_live_table(md, srcu_idx);
}
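/*
 * Forward an ioctl to the device underneath; if the target maps only
 * part of that device, CAP_SYS_RAWIO is required as well.
 */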
static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
                        unsigned int cmd, unsigned long arg)
{
        struct mapped_device *md = bdev->bd_disk->private_data;
        int r, srcu_idx;

        r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
        if (r < 0)
                goto out;

        if (r > 0) {
                /*
                 * Target determined this ioctl is being issued against a
                 * subset of the parent bdev; require extra privileges.
                 */
                if (!capable(CAP_SYS_RAWIO)) {
                        DMDEBUG_LIMIT("%s: sending ioctl %x to DM device without required privilege.",
                                      current->comm, cmd);
                        r = -ENOIOCTLCMD;
                        goto out;
                }
        }

        if (!bdev->bd_disk->fops->ioctl)
                r = -ENOTTY;
        else
                r = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
out:
        dm_unprepare_ioctl(md, srcu_idx);
        return r;
}

u64 dm_start_time_ns_from_clone(struct bio *bio)
{
        return jiffies_to_nsecs(clone_to_tio(bio)->io->start_time);
}
EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone);

static bool bio_is_flush_with_data(struct bio *bio)
{
        return ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size);
}
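/*
 * One helper for both halves of bio accounting: @end selects start vs
 * end accounting; a split dm_io is accounted with its own sector count
 * and offset rather than those of the original bio.
 */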
static void dm_io_acct(struct dm_io *io, bool end)
{
        struct dm_stats_aux *stats_aux = &io->stats_aux;
        unsigned long start_time = io->start_time;
        struct mapped_device *md = io->md;
        struct bio *bio = io->orig_bio;
        unsigned int sectors;

        /*
         * If REQ_PREFLUSH set, don't account payload, it will be
         * submitted (and accounted) after this flush completes.
         */
        if (bio_is_flush_with_data(bio))
                sectors = 0;
        else if (likely(!(dm_io_flagged(io, DM_IO_WAS_SPLIT))))
                sectors = bio_sectors(bio);
        else
                sectors = io->sectors;

        if (!end)
                bdev_start_io_acct(bio->bi_bdev, sectors, bio_op(bio),
                                   start_time);
        else
                bdev_end_io_acct(bio->bi_bdev, bio_op(bio), start_time);

        if (static_branch_unlikely(&stats_enabled) &&
            unlikely(dm_stats_used(&md->stats))) {
                sector_t sector;

                if (likely(!dm_io_flagged(io, DM_IO_WAS_SPLIT)))
                        sector = bio->bi_iter.bi_sector;
                else
                        sector = bio_end_sector(bio) - io->sector_offset;

                dm_stats_account_io(&md->stats, bio_data_dir(bio),
                                    sector, sectors,
                                    end, start_time, stats_aux);
        }
}

static void __dm_start_io_acct(struct dm_io *io)
{
        dm_io_acct(io, false);
}
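/*
 * Start accounting exactly once per dm_io; only duplicate bios
 * (DM_TIO_IS_DUPLICATE_BIO) can race to get here, so only they take
 * io->lock.
 */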
static void dm_start_io_acct(struct dm_io *io, struct bio *clone)
{
        /*
         * Ensure IO accounting is only ever started once.
         */
        if (dm_io_flagged(io, DM_IO_ACCOUNTED))
                return;

        /* Expect no possibility for race unless DM_TIO_IS_DUPLICATE_BIO. */
        if (!clone || likely(dm_tio_is_normal(clone_to_tio(clone)))) {
                dm_io_set_flag(io, DM_IO_ACCOUNTED);
        } else {
                unsigned long flags;
                /* Can afford locking given DM_TIO_IS_DUPLICATE_BIO */
                spin_lock_irqsave(&io->lock, flags);
                if (dm_io_flagged(io, DM_IO_ACCOUNTED)) {
                        spin_unlock_irqrestore(&io->lock, flags);
                        return;
                }
                dm_io_set_flag(io, DM_IO_ACCOUNTED);
                spin_unlock_irqrestore(&io->lock, flags);
        }

        __dm_start_io_acct(io);
}

static void dm_end_io_acct(struct dm_io *io)
{
        dm_io_acct(io, true);
}

static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
{
        struct dm_io *io;
        struct dm_target_io *tio;
        struct bio *clone;

        clone = bio_alloc_clone(NULL, bio, GFP_NOIO, &md->mempools->io_bs);
        tio = clone_to_tio(clone);
        tio->flags = 0;
        dm_tio_set_flag(tio, DM_TIO_INSIDE_DM_IO);
        tio->io = NULL;

        io = container_of(tio, struct dm_io, tio);
        io->magic = DM_IO_MAGIC;
        io->status = BLK_STS_OK;

        /* one ref is for submission, the other is for completion */
        atomic_set(&io->io_count, 2);
        this_cpu_inc(*md->pending_io);
        io->orig_bio = bio;
        io->md = md;
        spin_lock_init(&io->lock);
        io->start_time = jiffies;
        io->flags = 0;

        if (static_branch_unlikely(&stats_enabled))
                dm_stats_record_start(&md->stats, &io->stats_aux);

        return io;
}

static void free_io(struct dm_io *io)
{
        bio_put(&io->tio.clone);
}
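/*
 * Set up a dm_target_io and its clone bio for @ti, reusing the
 * dm_target_io embedded in ci->io when it is still free; returns NULL
 * only if a fresh clone cannot be allocated with @gfp_mask.
 */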
static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti,
                             unsigned int target_bio_nr, unsigned int *len,
                             gfp_t gfp_mask)
{
        struct mapped_device *md = ci->io->md;
        struct dm_target_io *tio;
        struct bio *clone;

        if (!ci->io->tio.io) {
                /* the dm_target_io embedded in ci->io is available */
                tio = &ci->io->tio;
                /* alloc_io() already initialized embedded clone */
                clone = &tio->clone;
        } else {
                clone = bio_alloc_clone(NULL, ci->bio, gfp_mask,
                                        &md->mempools->bs);
                if (!clone)
                        return NULL;

                /* REQ_DM_POLL_LIST shouldn't be inherited */
                clone->bi_opf &= ~REQ_DM_POLL_LIST;

                tio = clone_to_tio(clone);
                tio->flags = 0; /* also clears DM_TIO_INSIDE_DM_IO */
        }

        tio->magic = DM_TIO_MAGIC;
        tio->io = ci->io;
        tio->ti = ti;
        tio->target_bio_nr = target_bio_nr;
        tio->len_ptr = len;
        tio->old_sector = 0;

        /* Set default bdev, but target must bio_set_dev() before issuing IO */
        clone->bi_bdev = md->disk->part0;
        if (unlikely(ti->needs_bio_set_dev))
                bio_set_dev(clone, md->disk->part0);

        if (len) {
                clone->bi_iter.bi_size = to_bytes(*len);
                if (bio_integrity(clone))
                        bio_integrity_trim(clone);
        }

        return clone;
}

static void free_tio(struct bio *clone)
{
        if (dm_tio_flagged(clone_to_tio(clone), DM_TIO_INSIDE_DM_IO))
                return;
        bio_put(clone);
}

/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
        unsigned long flags;

        spin_lock_irqsave(&md->deferred_lock, flags);
        bio_list_add(&md->deferred, bio);
        spin_unlock_irqrestore(&md->deferred_lock, flags);
        queue_work(md->wq, &md->work);
}
/*
 * Everyone (including functions in this file), should use this
 * function to access the md->map field, and make sure they call
 * dm_put_live_table() when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md,
                                   int *srcu_idx) __acquires(md->io_barrier)
{
        *srcu_idx = srcu_read_lock(&md->io_barrier);

        return srcu_dereference(md->map, &md->io_barrier);
}

void dm_put_live_table(struct mapped_device *md,
                       int srcu_idx) __releases(md->io_barrier)
{
        srcu_read_unlock(&md->io_barrier, srcu_idx);
}

void dm_sync_table(struct mapped_device *md)
{
        synchronize_srcu(&md->io_barrier);
        synchronize_rcu_expedited();
}

/*
 * A fast alternative to dm_get_live_table/dm_put_live_table.
 * The caller must not block between these two functions.
 */
static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
{
        rcu_read_lock();
        return rcu_dereference(md->map);
}

static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
{
        rcu_read_unlock();
}

static inline struct dm_table *dm_get_live_table_bio(struct mapped_device *md,
                                                     int *srcu_idx, blk_opf_t bio_opf)
{
        if (bio_opf & REQ_NOWAIT)
                return dm_get_live_table_fast(md);
        else
                return dm_get_live_table(md, srcu_idx);
}

static inline void dm_put_live_table_bio(struct mapped_device *md, int srcu_idx,
                                         blk_opf_t bio_opf)
{
        if (bio_opf & REQ_NOWAIT)
                dm_put_live_table_fast(md);
        else
                dm_put_live_table(md, srcu_idx);
}
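/* Holder string passed to blkdev_get_by_dev() when claiming table devices. */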
static char *_dm_claim_ptr = "I belong to device-mapper";

/*
 * Open a table device so we can use it as a map destination.
 */
static struct table_device *open_table_device(struct mapped_device *md,
                dev_t dev, fmode_t mode)
{
        struct table_device *td;
        struct block_device *bdev;
        u64 part_off;
        int r;

        td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
        if (!td)
                return ERR_PTR(-ENOMEM);
        refcount_set(&td->count, 1);

        bdev = blkdev_get_by_dev(dev, mode | FMODE_EXCL, _dm_claim_ptr);
        if (IS_ERR(bdev)) {
                r = PTR_ERR(bdev);
                goto out_free_td;
        }

        /*
         * We can be called before the dm disk is added. In that case we can't
         * register the holder relation here. It will be done once add_disk()
         * has been called.
         */
        if (md->disk->slave_dir) {
                r = bd_link_disk_holder(bdev, md->disk);
                if (r)
                        goto out_blkdev_put;
        }

        td->dm_dev.mode = mode;
        td->dm_dev.bdev = bdev;
        td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev, &part_off, NULL, NULL);
        format_dev_t(td->dm_dev.name, dev);
        list_add(&td->list, &md->table_devices);
        return td;

out_blkdev_put:
        blkdev_put(bdev, mode | FMODE_EXCL);
out_free_td:
        kfree(td);
        return ERR_PTR(r);
}

/*
 * Close a table device that we've been using.
 */
static void close_table_device(struct table_device *td, struct mapped_device *md)
{
        if (md->disk->slave_dir)
                bd_unlink_disk_holder(td->dm_dev.bdev, md->disk);
        blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
        put_dax(td->dm_dev.dax_dev);
        list_del(&td->list);
        kfree(td);
}

static struct table_device *find_table_device(struct list_head *l, dev_t dev,
                                              fmode_t mode)
{
        struct table_device *td;

        list_for_each_entry(td, l, list)
                if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
                        return td;

        return NULL;
}

int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
                        struct dm_dev **result)
{
        struct table_device *td;

        mutex_lock(&md->table_devices_lock);
        td = find_table_device(&md->table_devices, dev, mode);
        if (!td) {
                td = open_table_device(md, dev, mode);
                if (IS_ERR(td)) {
                        mutex_unlock(&md->table_devices_lock);
                        return PTR_ERR(td);
                }
        } else {
                refcount_inc(&td->count);
        }
        mutex_unlock(&md->table_devices_lock);

        *result = &td->dm_dev;
        return 0;
}

void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
{
        struct table_device *td = container_of(d, struct table_device, dm_dev);

        mutex_lock(&md->table_devices_lock);
        if (refcount_dec_and_test(&td->count))
                close_table_device(td, md);
        mutex_unlock(&md->table_devices_lock);
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
        *geo = md->geometry;

        return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
        sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

        if (geo->start > sz) {
                DMERR("Start sector is beyond the geometry limits.");
                return -EINVAL;
        }

        md->geometry = *geo;

        return 0;
}

static int __noflush_suspending(struct mapped_device *md)
{
        return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}
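/*
 * Stash a failed dm_io for requeue: a first-stage requeue chains the
 * dm_io onto md->requeue_list so dm_io_rewind() can run from process
 * context; otherwise the original bio goes back onto md->deferred.
 */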
static void dm_requeue_add_io(struct dm_io *io, bool first_stage)
{
        struct mapped_device *md = io->md;

        if (first_stage) {
                struct dm_io *next = md->requeue_list;

                md->requeue_list = io;
                io->next = next;
        } else {
                bio_list_add_head(&md->deferred, io->orig_bio);
        }
}

static void dm_kick_requeue(struct mapped_device *md, bool first_stage)
{
        if (first_stage)
                queue_work(md->wq, &md->requeue_work);
        else
                queue_work(md->wq, &md->work);
}

/*
 * Return true if the dm_io's original bio is requeued.
 * io->status is updated with an error if the requeue is disallowed.
 */
static bool dm_handle_requeue(struct dm_io *io, bool first_stage)
{
        struct bio *bio = io->orig_bio;
        bool handle_requeue = (io->status == BLK_STS_DM_REQUEUE);
        bool handle_polled_eagain = ((io->status == BLK_STS_AGAIN) &&
                                     (bio->bi_opf & REQ_POLLED));
        struct mapped_device *md = io->md;
        bool requeued = false;

        if (handle_requeue || handle_polled_eagain) {
                unsigned long flags;

                if (bio->bi_opf & REQ_POLLED) {
                        /*
                         * Upper layer won't help us poll split bio
                         * (io->orig_bio may only reflect a subset of the
                         * pre-split original) so clear REQ_POLLED.
                         */
                        bio_clear_polled(bio);
                }

                /*
                 * Target requested pushing back the I/O or
                 * polled IO hit BLK_STS_AGAIN.
                 */
                spin_lock_irqsave(&md->deferred_lock, flags);
                if ((__noflush_suspending(md) &&
                     !WARN_ON_ONCE(dm_is_zone_write(md, bio))) ||
                    handle_polled_eagain || first_stage) {
                        dm_requeue_add_io(io, first_stage);
                        requeued = true;
                } else {
                        /*
                         * noflush suspend was interrupted or this is
                         * a write to a zoned target.
                         */
                        io->status = BLK_STS_IOERR;
                }
                spin_unlock_irqrestore(&md->deferred_lock, flags);
        }

        if (requeued)
                dm_kick_requeue(md, first_stage);

        return requeued;
}
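/*
 * Common completion path: handle a possible requeue, account the io,
 * then either resubmit a flush's payload or end the original bio.
 */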
static void __dm_io_complete(struct dm_io *io, bool first_stage)
{
        struct bio *bio = io->orig_bio;
        struct mapped_device *md = io->md;
        blk_status_t io_error;
        bool requeued;

        requeued = dm_handle_requeue(io, first_stage);
        if (requeued && first_stage)
                return;

        io_error = io->status;
        if (dm_io_flagged(io, DM_IO_ACCOUNTED))
                dm_end_io_acct(io);
        else if (!io_error) {
                /*
                 * Must handle target that DM_MAPIO_SUBMITTED only to
                 * then bio_endio() rather than dm_submit_bio_remap()
                 */
                __dm_start_io_acct(io);
                dm_end_io_acct(io);
        }
        free_io(io);
        smp_wmb();
        this_cpu_dec(*md->pending_io);

        /* nudge anyone waiting on suspend queue */
        if (unlikely(wq_has_sleeper(&md->wait)))
                wake_up(&md->wait);

        /* Return early if the original bio was requeued */
        if (requeued)
                return;

        if (bio_is_flush_with_data(bio)) {
                /*
                 * Preflush done for flush with data, reissue
                 * without REQ_PREFLUSH.
                 */
                bio->bi_opf &= ~REQ_PREFLUSH;
                queue_io(md, bio);
        } else {
                /* done with normal IO or empty flush */
                if (io_error)
                        bio->bi_status = io_error;
                bio_endio(bio);
        }
}

static void dm_wq_requeue_work(struct work_struct *work)
{
        struct mapped_device *md = container_of(work, struct mapped_device,
                                                requeue_work);
        unsigned long flags;
        struct dm_io *io;

        /* reuse deferred lock to simplify dm_handle_requeue */
        spin_lock_irqsave(&md->deferred_lock, flags);
        io = md->requeue_list;
        md->requeue_list = NULL;
        spin_unlock_irqrestore(&md->deferred_lock, flags);

        while (io) {
                struct dm_io *next = io->next;

                dm_io_rewind(io, &md->disk->bio_split);

                io->next = NULL;
                __dm_io_complete(io, false);
                io = next;
        }
}

/*
 * Two staged requeue:
 *
 * 1) io->orig_bio points to the real original bio, and the part mapped to
 *    this io must be requeued, instead of other parts of the original bio.
 *
 * 2) io->orig_bio points to new cloned bio which matches the requeued dm_io.
 */
static void dm_io_complete(struct dm_io *io)
{
        bool first_requeue;

        /*
         * Only dm_io that has been split needs two stage requeue, otherwise
         * we may run into long bio clone chain during suspend and OOM could
         * be triggered.
         *
         * Also flush data dm_io won't be marked as DM_IO_WAS_SPLIT, so they
         * also aren't handled via the first stage requeue.
         */
        if (dm_io_flagged(io, DM_IO_WAS_SPLIT))
                first_requeue = true;
        else
                first_requeue = false;

        __dm_io_complete(io, first_requeue);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static inline void __dm_io_dec_pending(struct dm_io *io)
{
        if (atomic_dec_and_test(&io->io_count))
                dm_io_complete(io);
}

static void dm_io_set_error(struct dm_io *io, blk_status_t error)
{
        unsigned long flags;

        /* Push-back supersedes any I/O errors */
        spin_lock_irqsave(&io->lock, flags);
        if (!(io->status == BLK_STS_DM_REQUEUE &&
              __noflush_suspending(io->md))) {
                io->status = error;
        }
        spin_unlock_irqrestore(&io->lock, flags);
}

static void dm_io_dec_pending(struct dm_io *io, blk_status_t error)
{
        if (unlikely(error))
                dm_io_set_error(io, error);

        __dm_io_dec_pending(io);
}

void disable_discard(struct mapped_device *md)
{
        struct queue_limits *limits = dm_get_queue_limits(md);

        /* device doesn't really support DISCARD, disable it */
        limits->max_discard_sectors = 0;
}

void disable_write_zeroes(struct mapped_device *md)
{
        struct queue_limits *limits = dm_get_queue_limits(md);

        /* device doesn't really support WRITE ZEROES, disable it */
        limits->max_write_zeroes_sectors = 0;
}

static bool swap_bios_limit(struct dm_target *ti, struct bio *bio)
{
        return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios);
}
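/*
 * Completion handler for every clone bio issued to a target: feeds the
 * result through the target's ->end_io (if any) before dropping this
 * clone's reference on its dm_io.
 */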
static void clone_endio(struct bio *bio)
{
        blk_status_t error = bio->bi_status;
        struct dm_target_io *tio = clone_to_tio(bio);
        struct dm_target *ti = tio->ti;
        dm_endio_fn endio = ti->type->end_io;
        struct dm_io *io = tio->io;
        struct mapped_device *md = io->md;

        if (unlikely(error == BLK_STS_TARGET)) {
                if (bio_op(bio) == REQ_OP_DISCARD &&
                    !bdev_max_discard_sectors(bio->bi_bdev))
                        disable_discard(md);
                else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
                         !bdev_write_zeroes_sectors(bio->bi_bdev))
                        disable_write_zeroes(md);
        }

        if (static_branch_unlikely(&zoned_enabled) &&
            unlikely(bdev_is_zoned(bio->bi_bdev)))
                dm_zone_endio(io, bio);

        if (endio) {
                int r = endio(ti, bio, &error);

                switch (r) {
                case DM_ENDIO_REQUEUE:
                        if (static_branch_unlikely(&zoned_enabled)) {
                                /*
                                 * Requeuing writes to a sequential zone of a zoned
                                 * target will break the sequential write pattern:
                                 * fail such IO.
                                 */
                                if (WARN_ON_ONCE(dm_is_zone_write(md, bio)))
                                        error = BLK_STS_IOERR;
                                else
                                        error = BLK_STS_DM_REQUEUE;
                        } else
                                error = BLK_STS_DM_REQUEUE;
                        fallthrough;
                case DM_ENDIO_DONE:
                        break;
                case DM_ENDIO_INCOMPLETE:
                        /* The target will handle the io */
                        return;
                default:
                        DMCRIT("unimplemented target endio return value: %d", r);
                        BUG();
                }
        }

        if (static_branch_unlikely(&swap_bios_enabled) &&
            unlikely(swap_bios_limit(ti, bio)))
                up(&md->swap_bios_semaphore);

        free_tio(bio);
        dm_io_dec_pending(io, error);
}

/*
 * Return maximum size of I/O possible at the supplied sector up to the current
 * target boundary.
 */
static inline sector_t max_io_len_target_boundary(struct dm_target *ti,
                                                  sector_t target_offset)
{
        return ti->len - target_offset;
}
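/*
 * Clamp the I/O size at @sector to the target boundary, the queue's
 * max_sectors and ti->max_io_len.
 */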
static sector_t max_io_len(struct dm_target *ti, sector_t sector)
{
        sector_t target_offset = dm_target_offset(ti, sector);
        sector_t len = max_io_len_target_boundary(ti, target_offset);

        /*
         * Does the target need to split IO even further?
         * - varied (per target) IO splitting is a tenet of DM; this
         *   explains why stacked chunk_sectors based splitting via
         *   bio_split_to_limits() isn't possible here.
         */
        if (!ti->max_io_len)
                return len;
        return min_t(sector_t, len,
                     min(queue_max_sectors(ti->table->md->queue),
                         blk_chunk_sectors_left(target_offset, ti->max_io_len)));
}

int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
{
        if (len > UINT_MAX) {
                DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
                      (unsigned long long)len, UINT_MAX);
                ti->error = "Maximum size of target IO is too large";
                return -EINVAL;
        }

        ti->max_io_len = (uint32_t) len;

        return 0;
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);

static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
                                                sector_t sector, int *srcu_idx)
        __acquires(md->io_barrier)
{
        struct dm_table *map;
        struct dm_target *ti;

        map = dm_get_live_table(md, srcu_idx);
        if (!map)
                return NULL;

        ti = dm_table_find_target(map, sector);
        if (!ti)
                return NULL;

        return ti;
}

static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
                long nr_pages, enum dax_access_mode mode, void **kaddr,
                pfn_t *pfn)
{
        struct mapped_device *md = dax_get_private(dax_dev);
        sector_t sector = pgoff * PAGE_SECTORS;
        struct dm_target *ti;
        long len, ret = -EIO;
        int srcu_idx;

        ti = dm_dax_get_live_target(md, sector, &srcu_idx);

        if (!ti)
                goto out;
        if (!ti->type->direct_access)
                goto out;
        len = max_io_len(ti, sector) / PAGE_SECTORS;
        if (len < 1)
                goto out;
        nr_pages = min(len, nr_pages);
        ret = ti->type->direct_access(ti, pgoff, nr_pages, mode, kaddr, pfn);

out:
        dm_put_live_table(md, srcu_idx);

        return ret;
}
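/* Pass a DAX zero-page-range request to the target mapping @pgoff. */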
pgoff * PAGE_SECTORS; 1246cdf6cdcdSVivek Goyal struct dm_target *ti; 1247cdf6cdcdSVivek Goyal int ret = -EIO; 1248cdf6cdcdSVivek Goyal int srcu_idx; 1249cdf6cdcdSVivek Goyal 1250cdf6cdcdSVivek Goyal ti = dm_dax_get_live_target(md, sector, &srcu_idx); 1251cdf6cdcdSVivek Goyal 1252cdf6cdcdSVivek Goyal if (!ti) 1253cdf6cdcdSVivek Goyal goto out; 1254cdf6cdcdSVivek Goyal if (WARN_ON(!ti->type->dax_zero_page_range)) { 1255cdf6cdcdSVivek Goyal /* 1256cdf6cdcdSVivek Goyal * ->zero_page_range() is a mandatory dax operation. If we are 1257cdf6cdcdSVivek Goyal * here, something is wrong. 1258cdf6cdcdSVivek Goyal */ 1259cdf6cdcdSVivek Goyal goto out; 1260cdf6cdcdSVivek Goyal } 1261cdf6cdcdSVivek Goyal ret = ti->type->dax_zero_page_range(ti, pgoff, nr_pages); 1262cdf6cdcdSVivek Goyal out: 1263cdf6cdcdSVivek Goyal dm_put_live_table(md, srcu_idx); 1264cdf6cdcdSVivek Goyal 1265cdf6cdcdSVivek Goyal return ret; 1266cdf6cdcdSVivek Goyal } 1267cdf6cdcdSVivek Goyal 1268047218ecSJane Chu static size_t dm_dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff, 1269047218ecSJane Chu void *addr, size_t bytes, struct iov_iter *i) 1270047218ecSJane Chu { 1271047218ecSJane Chu struct mapped_device *md = dax_get_private(dax_dev); 1272047218ecSJane Chu sector_t sector = pgoff * PAGE_SECTORS; 1273047218ecSJane Chu struct dm_target *ti; 1274047218ecSJane Chu int srcu_idx; 1275047218ecSJane Chu long ret = 0; 1276047218ecSJane Chu 1277047218ecSJane Chu ti = dm_dax_get_live_target(md, sector, &srcu_idx); 1278047218ecSJane Chu if (!ti || !ti->type->dax_recovery_write) 1279047218ecSJane Chu goto out; 1280047218ecSJane Chu 1281047218ecSJane Chu ret = ti->type->dax_recovery_write(ti, pgoff, addr, bytes, i); 1282047218ecSJane Chu out: 1283047218ecSJane Chu dm_put_live_table(md, srcu_idx); 1284047218ecSJane Chu return ret; 1285047218ecSJane Chu } 1286047218ecSJane Chu 12871dd40c3eSMikulas Patocka /* 12881dd40c3eSMikulas Patocka * A target may call dm_accept_partial_bio only from the map routine. It is 12896842d264SDamien Le Moal * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_* zone management 1290e6fc9f62SMike Snitzer * operations, REQ_OP_ZONE_APPEND (zone append writes) and any bio serviced by 1291e6fc9f62SMike Snitzer * __send_duplicate_bios(). 12921dd40c3eSMikulas Patocka * 12931dd40c3eSMikulas Patocka * dm_accept_partial_bio informs the dm that the target only wants to process 12941dd40c3eSMikulas Patocka * additional n_sectors sectors of the bio and the rest of the data should be 12951dd40c3eSMikulas Patocka * sent in the next bio. 12961dd40c3eSMikulas Patocka * 12971dd40c3eSMikulas Patocka * A diagram that explains the arithmetic: 12981dd40c3eSMikulas Patocka * +--------------------+---------------+-------+ 12991dd40c3eSMikulas Patocka * | 1 | 2 | 3 | 13001dd40c3eSMikulas Patocka * +--------------------+---------------+-------+ 13011dd40c3eSMikulas Patocka * 13021dd40c3eSMikulas Patocka * <-------------- *tio->len_ptr ---------------> 1303bdb34759SMike Snitzer * <----- bio_sectors -----> 13041dd40c3eSMikulas Patocka * <-- n_sectors --> 13051dd40c3eSMikulas Patocka * 13061dd40c3eSMikulas Patocka * Region 1 was already iterated over with bio_advance or similar function. 13071dd40c3eSMikulas Patocka * (it may be empty if the target doesn't use bio_advance) 13081dd40c3eSMikulas Patocka * Region 2 is the remaining bio size that the target wants to process.
13091dd40c3eSMikulas Patocka * (it may be empty if region 1 is non-empty, although there is no reason 13101dd40c3eSMikulas Patocka * to make it empty) 13111dd40c3eSMikulas Patocka * The target requires that region 3 be sent in the next bio. 13121dd40c3eSMikulas Patocka * 13131dd40c3eSMikulas Patocka * If the target wants to receive multiple copies of the bio (via num_*bios, etc), 13141dd40c3eSMikulas Patocka * the partially processed part (the sum of regions 1+2) must be the same for all 13151dd40c3eSMikulas Patocka * copies of the bio. 13161dd40c3eSMikulas Patocka */ 1317*86a3238cSHeinz Mauelshagen void dm_accept_partial_bio(struct bio *bio, unsigned int n_sectors) 13181dd40c3eSMikulas Patocka { 13196c23f0bdSChristoph Hellwig struct dm_target_io *tio = clone_to_tio(bio); 13208b211aacSMing Lei struct dm_io *io = tio->io; 1321*86a3238cSHeinz Mauelshagen unsigned int bio_sectors = bio_sectors(bio); 13226842d264SDamien Le Moal 1323655f3aadSMike Snitzer BUG_ON(dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO)); 13246842d264SDamien Le Moal BUG_ON(op_is_zone_mgmt(bio_op(bio))); 13256842d264SDamien Le Moal BUG_ON(bio_op(bio) == REQ_OP_ZONE_APPEND); 1326bdb34759SMike Snitzer BUG_ON(bio_sectors > *tio->len_ptr); 1327bdb34759SMike Snitzer BUG_ON(n_sectors > bio_sectors); 13286842d264SDamien Le Moal 1329bdb34759SMike Snitzer *tio->len_ptr -= bio_sectors - n_sectors; 13301dd40c3eSMikulas Patocka bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT; 13317dd76d1fSMing Lei 13327dd76d1fSMing Lei /* 13337dd76d1fSMing Lei * __split_and_process_bio() may have already saved mapped part 13347dd76d1fSMing Lei * for accounting but it is being reduced so update accordingly. 13357dd76d1fSMing Lei */ 13368b211aacSMing Lei dm_io_set_flag(io, DM_IO_WAS_SPLIT); 13378b211aacSMing Lei io->sectors = n_sectors; 13388b211aacSMing Lei io->sector_offset = bio_sectors(io->orig_bio); 13391dd40c3eSMikulas Patocka } 13401dd40c3eSMikulas Patocka EXPORT_SYMBOL_GPL(dm_accept_partial_bio); 13411dd40c3eSMikulas Patocka 13420fbb4d93SMike Snitzer /* 13430fbb4d93SMike Snitzer * @clone: clone bio that DM core passed to target's .map function 13440fbb4d93SMike Snitzer * @tgt_clone: clone of @clone bio that target needs submitted 13450fbb4d93SMike Snitzer * 13460fbb4d93SMike Snitzer * Targets should use this interface to submit bios they take 13470fbb4d93SMike Snitzer * ownership of when returning DM_MAPIO_SUBMITTED. 13480fbb4d93SMike Snitzer * 13490fbb4d93SMike Snitzer * Targets should also set ti->accounts_remapped_io 13500fbb4d93SMike Snitzer */ 1351b7f8dff0SMike Snitzer void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone) 13520fbb4d93SMike Snitzer { 13530fbb4d93SMike Snitzer struct dm_target_io *tio = clone_to_tio(clone); 13540fbb4d93SMike Snitzer struct dm_io *io = tio->io; 13550fbb4d93SMike Snitzer 13560fbb4d93SMike Snitzer /* establish bio that will get submitted */ 13570fbb4d93SMike Snitzer if (!tgt_clone) 13580fbb4d93SMike Snitzer tgt_clone = clone; 13590fbb4d93SMike Snitzer 13600fbb4d93SMike Snitzer /* 13610fbb4d93SMike Snitzer * Account io->orig_bio to DM dev on behalf of target 13620fbb4d93SMike Snitzer * that took ownership of IO with DM_MAPIO_SUBMITTED.
13630fbb4d93SMike Snitzer */ 13640fbb4d93SMike Snitzer dm_start_io_acct(io, clone); 13650fbb4d93SMike Snitzer 13669d20653fSMike Snitzer trace_block_bio_remap(tgt_clone, disk_devt(io->md->disk), 13670fbb4d93SMike Snitzer tio->old_sector); 13689d20653fSMike Snitzer submit_bio_noacct(tgt_clone); 13690fbb4d93SMike Snitzer } 13700fbb4d93SMike Snitzer EXPORT_SYMBOL_GPL(dm_submit_bio_remap); 13710fbb4d93SMike Snitzer 1372a666e5c0SMikulas Patocka static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch) 1373a666e5c0SMikulas Patocka { 1374a666e5c0SMikulas Patocka mutex_lock(&md->swap_bios_lock); 1375a666e5c0SMikulas Patocka while (latch < md->swap_bios) { 1376a666e5c0SMikulas Patocka cond_resched(); 1377a666e5c0SMikulas Patocka down(&md->swap_bios_semaphore); 1378a666e5c0SMikulas Patocka md->swap_bios--; 1379a666e5c0SMikulas Patocka } 1380a666e5c0SMikulas Patocka while (latch > md->swap_bios) { 1381a666e5c0SMikulas Patocka cond_resched(); 1382a666e5c0SMikulas Patocka up(&md->swap_bios_semaphore); 1383a666e5c0SMikulas Patocka md->swap_bios++; 1384a666e5c0SMikulas Patocka } 1385a666e5c0SMikulas Patocka mutex_unlock(&md->swap_bios_lock); 1386a666e5c0SMikulas Patocka } 1387a666e5c0SMikulas Patocka 13881561b396SChristoph Hellwig static void __map_bio(struct bio *clone) 13891da177e4SLinus Torvalds { 13901561b396SChristoph Hellwig struct dm_target_io *tio = clone_to_tio(clone); 1391bd2a49b8SAlasdair G Kergon struct dm_target *ti = tio->ti; 13926cbce280SMike Snitzer struct dm_io *io = tio->io; 13936cbce280SMike Snitzer struct mapped_device *md = io->md; 13946cbce280SMike Snitzer int r; 13951da177e4SLinus Torvalds 13961da177e4SLinus Torvalds clone->bi_end_io = clone_endio; 13971da177e4SLinus Torvalds 13981da177e4SLinus Torvalds /* 13990fbb4d93SMike Snitzer * Map the clone. 14001da177e4SLinus Torvalds */ 1401743598f0SMike Snitzer tio->old_sector = clone->bi_iter.bi_sector; 1402d67a5f4bSMikulas Patocka 1403442761fdSMike Snitzer if (static_branch_unlikely(&swap_bios_enabled) && 1404442761fdSMike Snitzer unlikely(swap_bios_limit(ti, clone))) { 1405a666e5c0SMikulas Patocka int latch = get_swap_bios(); 1406a666e5c0SMikulas Patocka if (unlikely(latch != md->swap_bios)) 1407a666e5c0SMikulas Patocka __set_swap_bios_limit(md, latch); 1408a666e5c0SMikulas Patocka down(&md->swap_bios_semaphore); 1409a666e5c0SMikulas Patocka } 1410a666e5c0SMikulas Patocka 1411442761fdSMike Snitzer if (static_branch_unlikely(&zoned_enabled)) { 1412bb37d772SDamien Le Moal /* 1413442761fdSMike Snitzer * Check if the IO needs a special mapping due to zone append 1414442761fdSMike Snitzer * emulation on zoned target. In this case, dm_zone_map_bio() 1415442761fdSMike Snitzer * calls the target map operation. 
1416bb37d772SDamien Le Moal */ 14176cbce280SMike Snitzer if (unlikely(dm_emulate_zone_append(md))) 1418bb37d772SDamien Le Moal r = dm_zone_map_bio(tio); 1419bb37d772SDamien Le Moal else 14207de3ee57SMikulas Patocka r = ti->type->map(ti, clone); 1421442761fdSMike Snitzer } else 1422442761fdSMike Snitzer r = ti->type->map(ti, clone); 1423bb37d772SDamien Le Moal 1424846785e6SChristoph Hellwig switch (r) { 1425846785e6SChristoph Hellwig case DM_MAPIO_SUBMITTED: 14260fbb4d93SMike Snitzer /* target has assumed ownership of this io */ 14270fbb4d93SMike Snitzer if (!ti->accounts_remapped_io) 14289d20653fSMike Snitzer dm_start_io_acct(io, clone); 1429846785e6SChristoph Hellwig break; 1430846785e6SChristoph Hellwig case DM_MAPIO_REMAPPED: 14319d20653fSMike Snitzer dm_submit_bio_remap(clone, NULL); 1432846785e6SChristoph Hellwig break; 1433846785e6SChristoph Hellwig case DM_MAPIO_KILL: 1434846785e6SChristoph Hellwig case DM_MAPIO_REQUEUE: 1435442761fdSMike Snitzer if (static_branch_unlikely(&swap_bios_enabled) && 1436442761fdSMike Snitzer unlikely(swap_bios_limit(ti, clone))) 14376cbce280SMike Snitzer up(&md->swap_bios_semaphore); 14381d1068ceSChristoph Hellwig free_tio(clone); 143990a2326eSMike Snitzer if (r == DM_MAPIO_KILL) 144090a2326eSMike Snitzer dm_io_dec_pending(io, BLK_STS_IOERR); 144190a2326eSMike Snitzer else 1442e2118b3cSDamien Le Moal dm_io_dec_pending(io, BLK_STS_DM_REQUEUE); 1443846785e6SChristoph Hellwig break; 1444846785e6SChristoph Hellwig default: 144543e6c111SMikulas Patocka DMCRIT("unimplemented target map return value: %d", r); 144645cbcd79SKiyoshi Ueda BUG(); 14471da177e4SLinus Torvalds } 14481da177e4SLinus Torvalds } 14491da177e4SLinus Torvalds 1450*86a3238cSHeinz Mauelshagen static void setup_split_accounting(struct clone_info *ci, unsigned int len) 14517dd76d1fSMing Lei { 14527dd76d1fSMing Lei struct dm_io *io = ci->io; 14537dd76d1fSMing Lei 14547dd76d1fSMing Lei if (ci->sector_count > len) { 14557dd76d1fSMing Lei /* 14567dd76d1fSMing Lei * Split needed, save the mapped part for accounting. 14577dd76d1fSMing Lei * NOTE: dm_accept_partial_bio() will update accordingly. 14587dd76d1fSMing Lei */ 14597dd76d1fSMing Lei dm_io_set_flag(io, DM_IO_WAS_SPLIT); 14607dd76d1fSMing Lei io->sectors = len; 14618b211aacSMing Lei io->sector_offset = bio_sectors(ci->bio); 14627dd76d1fSMing Lei } 14637dd76d1fSMing Lei } 14647dd76d1fSMing Lei 1465318716ddSMike Snitzer static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci, 1466*86a3238cSHeinz Mauelshagen struct dm_target *ti, unsigned int num_bios) 1467f9ab94ceSMikulas Patocka { 14681d1068ceSChristoph Hellwig struct bio *bio; 1469318716ddSMike Snitzer int try; 1470dba14160SMikulas Patocka 1471318716ddSMike Snitzer for (try = 0; try < 2; try++) { 1472318716ddSMike Snitzer int bio_nr; 1473318716ddSMike Snitzer 1474318716ddSMike Snitzer if (try) 1475bc02cdbeSMike Snitzer mutex_lock(&ci->io->md->table_devices_lock); 1476318716ddSMike Snitzer for (bio_nr = 0; bio_nr < num_bios; bio_nr++) { 14777dd06a25SMike Snitzer bio = alloc_tio(ci, ti, bio_nr, NULL, 1478dc8e2021SChristoph Hellwig try ? 
GFP_NOIO : GFP_NOWAIT); 14791d1068ceSChristoph Hellwig if (!bio) 1480318716ddSMike Snitzer break; 1481318716ddSMike Snitzer 14821d1068ceSChristoph Hellwig bio_list_add(blist, bio); 1483318716ddSMike Snitzer } 1484318716ddSMike Snitzer if (try) 1485bc02cdbeSMike Snitzer mutex_unlock(&ci->io->md->table_devices_lock); 1486318716ddSMike Snitzer if (bio_nr == num_bios) 1487318716ddSMike Snitzer return; 1488318716ddSMike Snitzer 14896c23f0bdSChristoph Hellwig while ((bio = bio_list_pop(blist))) 14901d1068ceSChristoph Hellwig free_tio(bio); 1491318716ddSMike Snitzer } 1492318716ddSMike Snitzer } 1493f9ab94ceSMikulas Patocka 14940f14d60aSMing Lei static int __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti, 1495*86a3238cSHeinz Mauelshagen unsigned int num_bios, unsigned int *len) 149606a426ceSMike Snitzer { 1497318716ddSMike Snitzer struct bio_list blist = BIO_EMPTY_LIST; 14988eabf5d0SChristoph Hellwig struct bio *clone; 1499564b5c54SMike Snitzer unsigned int ret = 0; 150006a426ceSMike Snitzer 1501891fced6SChristoph Hellwig switch (num_bios) { 1502891fced6SChristoph Hellwig case 0: 1503891fced6SChristoph Hellwig break; 1504891fced6SChristoph Hellwig case 1: 15057dd76d1fSMing Lei if (len) 15067dd76d1fSMing Lei setup_split_accounting(ci, *len); 1507891fced6SChristoph Hellwig clone = alloc_tio(ci, ti, 0, len, GFP_NOIO); 1508891fced6SChristoph Hellwig __map_bio(clone); 15090f14d60aSMing Lei ret = 1; 1510891fced6SChristoph Hellwig break; 1511891fced6SChristoph Hellwig default: 15127dd06a25SMike Snitzer /* dm_accept_partial_bio() is not supported with shared tio->len_ptr */ 15137dd06a25SMike Snitzer alloc_multiple_bios(&blist, ci, ti, num_bios); 15148eabf5d0SChristoph Hellwig while ((clone = bio_list_pop(&blist))) { 1515655f3aadSMike Snitzer dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO); 15161561b396SChristoph Hellwig __map_bio(clone); 15170f14d60aSMing Lei ret += 1; 1518f9ab94ceSMikulas Patocka } 1519891fced6SChristoph Hellwig break; 1520318716ddSMike Snitzer } 15210f14d60aSMing Lei 15220f14d60aSMing Lei return ret; 152306a426ceSMike Snitzer } 152406a426ceSMike Snitzer 1525332f2b1eSMike Snitzer static void __send_empty_flush(struct clone_info *ci) 1526f9ab94ceSMikulas Patocka { 1527564b5c54SMike Snitzer struct dm_table *t = ci->map; 1528828678b8SMike Snitzer struct bio flush_bio; 1529828678b8SMike Snitzer 1530828678b8SMike Snitzer /* 1531828678b8SMike Snitzer * Use an on-stack bio for this, it's safe since we don't 1532828678b8SMike Snitzer * need to reference it after submit. It's just used as 1533828678b8SMike Snitzer * the basis for the clone(s). 
1534828678b8SMike Snitzer */ 153549add496SChristoph Hellwig bio_init(&flush_bio, ci->io->md->disk->part0, NULL, 0, 153649add496SChristoph Hellwig REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC); 153747d95102SChristoph Hellwig 1538828678b8SMike Snitzer ci->bio = &flush_bio; 1539828678b8SMike Snitzer ci->sector_count = 0; 154092b914e2SShin'ichiro Kawasaki ci->io->tio.clone.bi_iter.bi_size = 0; 1541f9ab94ceSMikulas Patocka 1542564b5c54SMike Snitzer for (unsigned int i = 0; i < t->num_targets; i++) { 1543564b5c54SMike Snitzer unsigned int bios; 1544564b5c54SMike Snitzer struct dm_target *ti = dm_table_get_target(t, i); 15450f14d60aSMing Lei 15460f14d60aSMing Lei atomic_add(ti->num_flush_bios, &ci->io->io_count); 15470f14d60aSMing Lei bios = __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL); 15480f14d60aSMing Lei atomic_sub(ti->num_flush_bios - bios, &ci->io->io_count); 15490f14d60aSMing Lei } 15500f14d60aSMing Lei 15510f14d60aSMing Lei /* 15520f14d60aSMing Lei * alloc_io() takes one extra reference for submission, so the 15530f14d60aSMing Lei * reference won't reach 0 without the following subtraction 15540f14d60aSMing Lei */ 15550f14d60aSMing Lei atomic_sub(1, &ci->io->io_count); 1556828678b8SMike Snitzer 1557828678b8SMike Snitzer bio_uninit(ci->bio); 1558f9ab94ceSMikulas Patocka } 1559f9ab94ceSMikulas Patocka 1560e6fc9f62SMike Snitzer static void __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti, 1561*86a3238cSHeinz Mauelshagen unsigned int num_bios) 15625ae89a87SMike Snitzer { 1563*86a3238cSHeinz Mauelshagen unsigned int len, bios; 15645ae89a87SMike Snitzer 15653720281dSMike Snitzer len = min_t(sector_t, ci->sector_count, 15663720281dSMike Snitzer max_io_len_target_boundary(ti, dm_target_offset(ti, ci->sector))); 156751b86f9aSMichael Lass 15680f14d60aSMing Lei atomic_add(num_bios, &ci->io->io_count); 15690f14d60aSMing Lei bios = __send_duplicate_bios(ci, ti, num_bios, &len); 15700f14d60aSMing Lei /* 15710f14d60aSMing Lei * alloc_io() takes one extra reference for submission, so the 15720f14d60aSMing Lei * reference won't reach 0 without the following (+1) subtraction 15730f14d60aSMing Lei */ 15740f14d60aSMing Lei atomic_sub(num_bios - bios + 1, &ci->io->io_count); 15757dd06a25SMike Snitzer 1576a79245b3SMike Snitzer ci->sector += len; 15773d7f4562SMike Snitzer ci->sector_count -= len; 15785ae89a87SMike Snitzer } 15795ae89a87SMike Snitzer 1580568c73a3SMike Snitzer static bool is_abnormal_io(struct bio *bio) 1581568c73a3SMike Snitzer { 1582a3282b43SBart Van Assche enum req_op op = bio_op(bio); 1583568c73a3SMike Snitzer 15844edadf6dSMike Snitzer if (op != REQ_OP_READ && op != REQ_OP_WRITE && op != REQ_OP_FLUSH) { 15854edadf6dSMike Snitzer switch (op) { 1586568c73a3SMike Snitzer case REQ_OP_DISCARD: 1587568c73a3SMike Snitzer case REQ_OP_SECURE_ERASE: 1588568c73a3SMike Snitzer case REQ_OP_WRITE_ZEROES: 15894edadf6dSMike Snitzer return true; 15904edadf6dSMike Snitzer default: 1591568c73a3SMike Snitzer break; 1592568c73a3SMike Snitzer } 1593568c73a3SMike Snitzer } 1594568c73a3SMike Snitzer 15954edadf6dSMike Snitzer return false; 15964edadf6dSMike Snitzer } 15974edadf6dSMike Snitzer 15984edadf6dSMike Snitzer static blk_status_t __process_abnormal_io(struct clone_info *ci, 15994edadf6dSMike Snitzer struct dm_target *ti) 16000519c71eSMike Snitzer { 1601*86a3238cSHeinz Mauelshagen unsigned int num_bios = 0; 16020519c71eSMike Snitzer 1603e6fc9f62SMike Snitzer switch (bio_op(ci->bio)) { 16049679b5a7SMike Snitzer case REQ_OP_DISCARD: 16059679b5a7SMike Snitzer num_bios = 
ti->num_discard_bios; 16069679b5a7SMike Snitzer break; 16079679b5a7SMike Snitzer case REQ_OP_SECURE_ERASE: 16089679b5a7SMike Snitzer num_bios = ti->num_secure_erase_bios; 16099679b5a7SMike Snitzer break; 16109679b5a7SMike Snitzer case REQ_OP_WRITE_ZEROES: 16119679b5a7SMike Snitzer num_bios = ti->num_write_zeroes_bios; 16129679b5a7SMike Snitzer break; 16132d9b02beSBart Van Assche default: 16142d9b02beSBart Van Assche break; 16159679b5a7SMike Snitzer } 16160519c71eSMike Snitzer 1617e6fc9f62SMike Snitzer /* 1618e6fc9f62SMike Snitzer * Even though the device advertised support for this type of 1619e6fc9f62SMike Snitzer * request, that does not mean every target supports it, and 1620e6fc9f62SMike Snitzer * reconfiguration might also have changed that since the 1621e6fc9f62SMike Snitzer * check was performed. 1622e6fc9f62SMike Snitzer */ 162384b98f4cSMike Snitzer if (unlikely(!num_bios)) 16244edadf6dSMike Snitzer return BLK_STS_NOTSUPP; 16254edadf6dSMike Snitzer 1626e6fc9f62SMike Snitzer __send_changing_extent_only(ci, ti, num_bios); 16274edadf6dSMike Snitzer return BLK_STS_OK; 16280519c71eSMike Snitzer } 16290519c71eSMike Snitzer 1630e4c93811SAlasdair G Kergon /* 1631ec211631SMing Lei * Reuse ->bi_private as dm_io list head for storing all dm_io instances 1632b99fdcdcSMing Lei * associated with this bio, and this bio's bi_private needs to be 1633b99fdcdcSMing Lei * stored in dm_io->data before the reuse. 1634b99fdcdcSMing Lei * 1635b99fdcdcSMing Lei * bio->bi_private is owned by fs or upper layer, so block layer won't 1636b99fdcdcSMing Lei * touch it after splitting. Meantime it won't be changed by anyone after 1637b99fdcdcSMing Lei * bio is submitted. So this reuse is safe. 1638b99fdcdcSMing Lei */ 1639ec211631SMing Lei static inline struct dm_io **dm_poll_list_head(struct bio *bio) 1640b99fdcdcSMing Lei { 1641ec211631SMing Lei return (struct dm_io **)&bio->bi_private; 1642b99fdcdcSMing Lei } 1643b99fdcdcSMing Lei 1644b99fdcdcSMing Lei static void dm_queue_poll_io(struct bio *bio, struct dm_io *io) 1645b99fdcdcSMing Lei { 1646ec211631SMing Lei struct dm_io **head = dm_poll_list_head(bio); 1647b99fdcdcSMing Lei 1648b99fdcdcSMing Lei if (!(bio->bi_opf & REQ_DM_POLL_LIST)) { 1649b99fdcdcSMing Lei bio->bi_opf |= REQ_DM_POLL_LIST; 1650b99fdcdcSMing Lei /* 1651b99fdcdcSMing Lei * Save .bi_private into dm_io, so that we can reuse 1652ec211631SMing Lei * .bi_private as dm_io list head for storing dm_io list 1653b99fdcdcSMing Lei */ 1654b99fdcdcSMing Lei io->data = bio->bi_private; 1655b99fdcdcSMing Lei 1656b99fdcdcSMing Lei /* tell block layer to poll for completion */ 1657b99fdcdcSMing Lei bio->bi_cookie = ~BLK_QC_T_NONE; 1658ec211631SMing Lei 1659ec211631SMing Lei io->next = NULL; 1660b99fdcdcSMing Lei } else { 1661b99fdcdcSMing Lei /* 1662b99fdcdcSMing Lei * bio recursed due to split, reuse original poll list, 1663b99fdcdcSMing Lei * and save bio->bi_private too. 1664b99fdcdcSMing Lei */ 1665ec211631SMing Lei io->data = (*head)->data; 1666ec211631SMing Lei io->next = *head; 1667b99fdcdcSMing Lei } 1668b99fdcdcSMing Lei 1669ec211631SMing Lei *head = io; 1670b99fdcdcSMing Lei } 1671b99fdcdcSMing Lei 1672b99fdcdcSMing Lei /* 1673e4c93811SAlasdair G Kergon * Select the correct strategy for processing a non-flush bio. 
1674e4c93811SAlasdair G Kergon */ 167584b98f4cSMike Snitzer static blk_status_t __split_and_process_bio(struct clone_info *ci) 1676e4c93811SAlasdair G Kergon { 167766bdaa43SMike Snitzer struct bio *clone; 1678e4c93811SAlasdair G Kergon struct dm_target *ti; 1679*86a3238cSHeinz Mauelshagen unsigned int len; 1680e4c93811SAlasdair G Kergon 1681e4c93811SAlasdair G Kergon ti = dm_table_find_target(ci->map, ci->sector); 16824edadf6dSMike Snitzer if (unlikely(!ti)) 16834edadf6dSMike Snitzer return BLK_STS_IOERR; 16841ee88de3SMikulas Patocka 16851ee88de3SMikulas Patocka if (unlikely((ci->bio->bi_opf & REQ_NOWAIT) != 0) && 16861ee88de3SMikulas Patocka unlikely(!dm_target_supports_nowait(ti->type))) 16871ee88de3SMikulas Patocka return BLK_STS_NOTSUPP; 16881ee88de3SMikulas Patocka 16891ee88de3SMikulas Patocka if (unlikely(ci->is_abnormal_io)) 16904edadf6dSMike Snitzer return __process_abnormal_io(ci, ti); 16913d7f4562SMike Snitzer 1692b99fdcdcSMing Lei /* 1693b99fdcdcSMing Lei * Only support bio polling for normal IO, and the target io is 1694b99fdcdcSMing Lei * exactly inside the dm_io instance (verified in dm_poll_dm_io) 1695b99fdcdcSMing Lei */ 1696a3282b43SBart Van Assche ci->submit_as_polled = !!(ci->bio->bi_opf & REQ_POLLED); 1697e4c93811SAlasdair G Kergon 1698e4c93811SAlasdair G Kergon len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count); 16997dd76d1fSMing Lei setup_split_accounting(ci, len); 170066bdaa43SMike Snitzer clone = alloc_tio(ci, ti, 0, &len, GFP_NOIO); 170166bdaa43SMike Snitzer __map_bio(clone); 1702e4c93811SAlasdair G Kergon 1703e4c93811SAlasdair G Kergon ci->sector += len; 1704e4c93811SAlasdair G Kergon ci->sector_count -= len; 1705e4c93811SAlasdair G Kergon 170684b98f4cSMike Snitzer return BLK_STS_OK; 1707e4c93811SAlasdair G Kergon } 1708e4c93811SAlasdair G Kergon 1709978e51baSMike Snitzer static void init_clone_info(struct clone_info *ci, struct mapped_device *md, 17104edadf6dSMike Snitzer struct dm_table *map, struct bio *bio, bool is_abnormal) 1711978e51baSMike Snitzer { 1712978e51baSMike Snitzer ci->map = map; 1713978e51baSMike Snitzer ci->io = alloc_io(md, bio); 1714d41e077aSMike Snitzer ci->bio = bio; 17154edadf6dSMike Snitzer ci->is_abnormal_io = is_abnormal; 1716b99fdcdcSMing Lei ci->submit_as_polled = false; 1717978e51baSMike Snitzer ci->sector = bio->bi_iter.bi_sector; 1718d41e077aSMike Snitzer ci->sector_count = bio_sectors(bio); 1719d41e077aSMike Snitzer 1720d41e077aSMike Snitzer /* Shouldn't happen but sector_count was being set to 0 so... */ 1721442761fdSMike Snitzer if (static_branch_unlikely(&zoned_enabled) && 1722442761fdSMike Snitzer WARN_ON_ONCE(op_is_zone_mgmt(bio_op(bio)) && ci->sector_count)) 1723d41e077aSMike Snitzer ci->sector_count = 0; 1724978e51baSMike Snitzer } 1725978e51baSMike Snitzer 1726e4c93811SAlasdair G Kergon /* 172714fe594dSAlasdair G Kergon * Entry point to split a bio into clones and submit them to the targets. 
17281da177e4SLinus Torvalds */ 172996c9865cSMike Snitzer static void dm_split_and_process_bio(struct mapped_device *md, 173083d5e5b0SMikulas Patocka struct dm_table *map, struct bio *bio) 17311da177e4SLinus Torvalds { 17321da177e4SLinus Torvalds struct clone_info ci; 17334857abf6SMike Snitzer struct dm_io *io; 173484b98f4cSMike Snitzer blk_status_t error = BLK_STS_OK; 17354edadf6dSMike Snitzer bool is_abnormal; 17361da177e4SLinus Torvalds 17374edadf6dSMike Snitzer is_abnormal = is_abnormal_io(bio); 17384edadf6dSMike Snitzer if (unlikely(is_abnormal)) { 17394edadf6dSMike Snitzer /* 17405a97806fSChristoph Hellwig * Use bio_split_to_limits() for abnormal IO (e.g. discard, etc) 17414edadf6dSMike Snitzer * otherwise associated queue_limits won't be imposed. 17424edadf6dSMike Snitzer */ 17435a97806fSChristoph Hellwig bio = bio_split_to_limits(bio); 1744613b1488SJens Axboe if (!bio) 1745613b1488SJens Axboe return; 17464edadf6dSMike Snitzer } 17474edadf6dSMike Snitzer 17484edadf6dSMike Snitzer init_clone_info(&ci, md, map, bio, is_abnormal); 17494857abf6SMike Snitzer io = ci.io; 1750bd2a49b8SAlasdair G Kergon 17511eff9d32SJens Axboe if (bio->bi_opf & REQ_PREFLUSH) { 1752332f2b1eSMike Snitzer __send_empty_flush(&ci); 1753e2736347SMike Snitzer /* dm_io_complete submits any data associated with flush */ 1754d41e077aSMike Snitzer goto out; 1755d41e077aSMike Snitzer } 1756d41e077aSMike Snitzer 175796c9865cSMike Snitzer error = __split_and_process_bio(&ci); 1758d41e077aSMike Snitzer if (error || !ci.sector_count) 1759d41e077aSMike Snitzer goto out; 176018a25da8SNeilBrown /* 1761d41e077aSMike Snitzer * Remainder must be passed to submit_bio_noacct() so it gets handled 1762d41e077aSMike Snitzer * *after* bios already submitted have been completely processed. 176318a25da8SNeilBrown */ 17648b211aacSMing Lei bio_trim(bio, io->sectors, ci.sector_count); 17658b211aacSMing Lei trace_block_split(bio, bio->bi_iter.bi_sector); 17668b211aacSMing Lei bio_inc_remaining(bio); 17673e08773cSChristoph Hellwig submit_bio_noacct(bio); 1768d41e077aSMike Snitzer out: 1769b99fdcdcSMing Lei /* 1770b99fdcdcSMing Lei * Drop the extra reference count for non-POLLED bio, and hold one 1771b99fdcdcSMing Lei * reference for POLLED bio, which will be released in dm_poll_bio 1772b99fdcdcSMing Lei * 1773ec211631SMing Lei * Add every dm_io instance into the dm_io list head which is stored 1774ec211631SMing Lei * in bio->bi_private, so that dm_poll_bio can poll them all. 
1775b99fdcdcSMing Lei */ 17760f14d60aSMing Lei if (error || !ci.submit_as_polled) { 17770f14d60aSMing Lei /* 17780f14d60aSMing Lei * In case of submission failure, the extra reference for 17790f14d60aSMing Lei * submitting io isn't consumed yet 17800f14d60aSMing Lei */ 17810f14d60aSMing Lei if (error) 17820f14d60aSMing Lei atomic_dec(&io->io_count); 17830f14d60aSMing Lei dm_io_dec_pending(io, error); 17840f14d60aSMing Lei } else 17854857abf6SMike Snitzer dm_queue_poll_io(bio, io); 17861da177e4SLinus Torvalds } 17871da177e4SLinus Torvalds 17883e08773cSChristoph Hellwig static void dm_submit_bio(struct bio *bio) 17891da177e4SLinus Torvalds { 1790309dca30SChristoph Hellwig struct mapped_device *md = bio->bi_bdev->bd_disk->private_data; 179183d5e5b0SMikulas Patocka int srcu_idx; 179283d5e5b0SMikulas Patocka struct dm_table *map; 1793a3282b43SBart Van Assche blk_opf_t bio_opf = bio->bi_opf; 17941da177e4SLinus Torvalds 17955d7362d0SMikulas Patocka map = dm_get_live_table_bio(md, &srcu_idx, bio_opf); 17968cf7961dSChristoph Hellwig 1797fa247089SMike Snitzer /* If suspended, or map not yet available, queue this IO for later */ 1798fa247089SMike Snitzer if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) || 1799fa247089SMike Snitzer unlikely(!map)) { 18006abc4946SKonstantin Khlebnikov if (bio->bi_opf & REQ_NOWAIT) 18016abc4946SKonstantin Khlebnikov bio_wouldblock_error(bio); 1802b2abdb1bSMike Snitzer else if (bio->bi_opf & REQ_RAHEAD) 18036a8736d1STejun Heo bio_io_error(bio); 1804b2abdb1bSMike Snitzer else 1805b2abdb1bSMike Snitzer queue_io(md, bio); 1806b2abdb1bSMike Snitzer goto out; 18071da177e4SLinus Torvalds } 18081da177e4SLinus Torvalds 180996c9865cSMike Snitzer dm_split_and_process_bio(md, map, bio); 1810b2abdb1bSMike Snitzer out: 18115d7362d0SMikulas Patocka dm_put_live_table_bio(md, srcu_idx, bio_opf); 1812978e51baSMike Snitzer } 1813978e51baSMike Snitzer 1814b99fdcdcSMing Lei static bool dm_poll_dm_io(struct dm_io *io, struct io_comp_batch *iob, 1815b99fdcdcSMing Lei unsigned int flags) 1816b99fdcdcSMing Lei { 1817655f3aadSMike Snitzer WARN_ON_ONCE(!dm_tio_is_normal(&io->tio)); 1818b99fdcdcSMing Lei 1819b99fdcdcSMing Lei /* don't poll if the mapped io is done */ 1820b99fdcdcSMing Lei if (atomic_read(&io->io_count) > 1) 1821b99fdcdcSMing Lei bio_poll(&io->tio.clone, iob, flags); 1822b99fdcdcSMing Lei 1823b99fdcdcSMing Lei /* bio_poll holds the last reference */ 1824b99fdcdcSMing Lei return atomic_read(&io->io_count) == 1; 1825b99fdcdcSMing Lei } 1826b99fdcdcSMing Lei 1827b99fdcdcSMing Lei static int dm_poll_bio(struct bio *bio, struct io_comp_batch *iob, 1828b99fdcdcSMing Lei unsigned int flags) 1829b99fdcdcSMing Lei { 1830ec211631SMing Lei struct dm_io **head = dm_poll_list_head(bio); 1831ec211631SMing Lei struct dm_io *list = *head; 1832ec211631SMing Lei struct dm_io *tmp = NULL; 1833ec211631SMing Lei struct dm_io *curr, *next; 1834b99fdcdcSMing Lei 1835b99fdcdcSMing Lei /* Only poll normal bio which was marked as REQ_DM_POLL_LIST */ 1836b99fdcdcSMing Lei if (!(bio->bi_opf & REQ_DM_POLL_LIST)) 1837b99fdcdcSMing Lei return 0; 1838b99fdcdcSMing Lei 1839ec211631SMing Lei WARN_ON_ONCE(!list); 1840b99fdcdcSMing Lei 1841b99fdcdcSMing Lei /* 1842b99fdcdcSMing Lei * Restore .bi_private before possibly completing dm_io. 1843b99fdcdcSMing Lei * 1844b99fdcdcSMing Lei * bio_poll() is only possible once @bio has been completely 1845b99fdcdcSMing Lei * submitted via submit_bio_noacct()'s depth-first submission. 
1846b99fdcdcSMing Lei * So there is no dm_queue_poll_io() race associated with 1847b99fdcdcSMing Lei * clearing REQ_DM_POLL_LIST here. 1848b99fdcdcSMing Lei */ 1849b99fdcdcSMing Lei bio->bi_opf &= ~REQ_DM_POLL_LIST; 1850ec211631SMing Lei bio->bi_private = list->data; 1851b99fdcdcSMing Lei 1852ec211631SMing Lei for (curr = list, next = curr->next; curr; curr = next, next = 1853ec211631SMing Lei curr ? curr->next : NULL) { 1854ec211631SMing Lei if (dm_poll_dm_io(curr, iob, flags)) { 1855b99fdcdcSMing Lei /* 185684b98f4cSMike Snitzer * clone_endio() has already occurred, so no 185784b98f4cSMike Snitzer * error handling is needed here. 1858b99fdcdcSMing Lei */ 1859ec211631SMing Lei __dm_io_dec_pending(curr); 1860ec211631SMing Lei } else { 1861ec211631SMing Lei curr->next = tmp; 1862ec211631SMing Lei tmp = curr; 1863b99fdcdcSMing Lei } 1864b99fdcdcSMing Lei } 1865b99fdcdcSMing Lei 1866b99fdcdcSMing Lei /* Not done? */ 1867ec211631SMing Lei if (tmp) { 1868b99fdcdcSMing Lei bio->bi_opf |= REQ_DM_POLL_LIST; 1869b99fdcdcSMing Lei /* Reset bio->bi_private to dm_io list head */ 1870ec211631SMing Lei *head = tmp; 1871b99fdcdcSMing Lei return 0; 1872b99fdcdcSMing Lei } 1873b99fdcdcSMing Lei return 1; 1874b99fdcdcSMing Lei } 1875b99fdcdcSMing Lei 18761da177e4SLinus Torvalds /*----------------------------------------------------------------- 18771da177e4SLinus Torvalds * An IDR is used to keep track of allocated minor numbers. 18781da177e4SLinus Torvalds *---------------------------------------------------------------*/ 18792b06cfffSAlasdair G Kergon static void free_minor(int minor) 18801da177e4SLinus Torvalds { 1881f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 18821da177e4SLinus Torvalds idr_remove(&_minor_idr, minor); 1883f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 18841da177e4SLinus Torvalds } 18851da177e4SLinus Torvalds 18861da177e4SLinus Torvalds /* 18871da177e4SLinus Torvalds * See if the device with a specific minor # is free. 18881da177e4SLinus Torvalds */ 1889cf13ab8eSFrederik Deweerdt static int specific_minor(int minor) 18901da177e4SLinus Torvalds { 1891c9d76be6STejun Heo int r; 18921da177e4SLinus Torvalds 18931da177e4SLinus Torvalds if (minor >= (1 << MINORBITS)) 18941da177e4SLinus Torvalds return -EINVAL; 18951da177e4SLinus Torvalds 1896c9d76be6STejun Heo idr_preload(GFP_KERNEL); 1897f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 18981da177e4SLinus Torvalds 1899c9d76be6STejun Heo r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT); 19001da177e4SLinus Torvalds 1901f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 1902c9d76be6STejun Heo idr_preload_end(); 1903c9d76be6STejun Heo if (r < 0) 1904c9d76be6STejun Heo return r == -ENOSPC ? 
-EBUSY : r; 1905c9d76be6STejun Heo return 0; 19061da177e4SLinus Torvalds } 19071da177e4SLinus Torvalds 1908cf13ab8eSFrederik Deweerdt static int next_free_minor(int *minor) 19091da177e4SLinus Torvalds { 1910c9d76be6STejun Heo int r; 19111da177e4SLinus Torvalds 1912c9d76be6STejun Heo idr_preload(GFP_KERNEL); 1913f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 19141da177e4SLinus Torvalds 1915c9d76be6STejun Heo r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT); 19161da177e4SLinus Torvalds 1917f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 1918c9d76be6STejun Heo idr_preload_end(); 1919c9d76be6STejun Heo if (r < 0) 19201da177e4SLinus Torvalds return r; 1921c9d76be6STejun Heo *minor = r; 1922c9d76be6STejun Heo return 0; 19231da177e4SLinus Torvalds } 19241da177e4SLinus Torvalds 192583d5cde4SAlexey Dobriyan static const struct block_device_operations dm_blk_dops; 1926681cc5e8SMike Snitzer static const struct block_device_operations dm_rq_blk_dops; 1927f26c5719SDan Williams static const struct dax_operations dm_dax_ops; 19281da177e4SLinus Torvalds 192953d5914fSMikulas Patocka static void dm_wq_work(struct work_struct *work); 193053d5914fSMikulas Patocka 1931aa6ce87aSSatya Tangirala #ifdef CONFIG_BLK_INLINE_ENCRYPTION 1932cb77cb5aSEric Biggers static void dm_queue_destroy_crypto_profile(struct request_queue *q) 1933aa6ce87aSSatya Tangirala { 1934cb77cb5aSEric Biggers dm_destroy_crypto_profile(q->crypto_profile); 1935aa6ce87aSSatya Tangirala } 1936aa6ce87aSSatya Tangirala 1937aa6ce87aSSatya Tangirala #else /* CONFIG_BLK_INLINE_ENCRYPTION */ 1938aa6ce87aSSatya Tangirala 1939cb77cb5aSEric Biggers static inline void dm_queue_destroy_crypto_profile(struct request_queue *q) 1940aa6ce87aSSatya Tangirala { 1941aa6ce87aSSatya Tangirala } 1942aa6ce87aSSatya Tangirala #endif /* !CONFIG_BLK_INLINE_ENCRYPTION */ 1943aa6ce87aSSatya Tangirala 19440f20972fSMike Snitzer static void cleanup_mapped_device(struct mapped_device *md) 19450f20972fSMike Snitzer { 19460f20972fSMike Snitzer if (md->wq) 19470f20972fSMike Snitzer destroy_workqueue(md->wq); 194829dec90aSChristoph Hellwig dm_free_md_mempools(md->mempools); 19490f20972fSMike Snitzer 1950f26c5719SDan Williams if (md->dax_dev) { 1951fb08a190SChristoph Hellwig dax_remove_host(md->disk); 1952f26c5719SDan Williams kill_dax(md->dax_dev); 1953f26c5719SDan Williams put_dax(md->dax_dev); 1954f26c5719SDan Williams md->dax_dev = NULL; 1955f26c5719SDan Williams } 1956f26c5719SDan Williams 1957588b7f5dSKirill Tkhai dm_cleanup_zoned_dev(md); 19580f20972fSMike Snitzer if (md->disk) { 19590f20972fSMike Snitzer spin_lock(&_minor_lock); 19600f20972fSMike Snitzer md->disk->private_data = NULL; 19610f20972fSMike Snitzer spin_unlock(&_minor_lock); 196289f871afSChristoph Hellwig if (dm_get_md_type(md) != DM_TYPE_NONE) { 19631a581b72SChristoph Hellwig struct table_device *td; 19641a581b72SChristoph Hellwig 196589f871afSChristoph Hellwig dm_sysfs_exit(md); 19661a581b72SChristoph Hellwig list_for_each_entry(td, &md->table_devices, list) { 19671a581b72SChristoph Hellwig bd_unlink_disk_holder(td->dm_dev.bdev, 19681a581b72SChristoph Hellwig md->disk); 19691a581b72SChristoph Hellwig } 1970d563792cSYu Kuai 1971d563792cSYu Kuai /* 1972d563792cSYu Kuai * Hold lock to make sure del_gendisk() won't run concurrently 1973d563792cSYu Kuai * with open/close_table_device().
1974d563792cSYu Kuai */ 1975d563792cSYu Kuai mutex_lock(&md->table_devices_lock); 19760f20972fSMike Snitzer del_gendisk(md->disk); 1977d563792cSYu Kuai mutex_unlock(&md->table_devices_lock); 197889f871afSChristoph Hellwig } 1979cb77cb5aSEric Biggers dm_queue_destroy_crypto_profile(md->queue); 19808b9ab626SChristoph Hellwig put_disk(md->disk); 198174a2b6ecSChristoph Hellwig } 19820f20972fSMike Snitzer 19839f6dc633SMike Snitzer if (md->pending_io) { 19849f6dc633SMike Snitzer free_percpu(md->pending_io); 19859f6dc633SMike Snitzer md->pending_io = NULL; 19869f6dc633SMike Snitzer } 19879f6dc633SMike Snitzer 1988d09960b0STahsin Erdogan cleanup_srcu_struct(&md->io_barrier); 1989d09960b0STahsin Erdogan 1990d5ffebddSMike Snitzer mutex_destroy(&md->suspend_lock); 1991d5ffebddSMike Snitzer mutex_destroy(&md->type_lock); 1992d5ffebddSMike Snitzer mutex_destroy(&md->table_devices_lock); 1993a666e5c0SMikulas Patocka mutex_destroy(&md->swap_bios_lock); 1994d5ffebddSMike Snitzer 19954cc96131SMike Snitzer dm_mq_cleanup_mapped_device(md); 19960f20972fSMike Snitzer } 19970f20972fSMike Snitzer 19981da177e4SLinus Torvalds /* 19991da177e4SLinus Torvalds * Allocate and initialise a blank device with a given minor. 20001da177e4SLinus Torvalds */ 20012b06cfffSAlasdair G Kergon static struct mapped_device *alloc_dev(int minor) 20021da177e4SLinus Torvalds { 2003115485e8SMike Snitzer int r, numa_node_id = dm_get_numa_node(); 2004115485e8SMike Snitzer struct mapped_device *md; 2005ba61fdd1SJeff Mahoney void *old_md; 20061da177e4SLinus Torvalds 2007856eb091SMikulas Patocka md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id); 20081da177e4SLinus Torvalds if (!md) { 200943e6c111SMikulas Patocka DMERR("unable to allocate device, out of memory."); 20101da177e4SLinus Torvalds return NULL; 20111da177e4SLinus Torvalds } 20121da177e4SLinus Torvalds 201310da4f79SJeff Mahoney if (!try_module_get(THIS_MODULE)) 20146ed7ade8SMilan Broz goto bad_module_get; 201510da4f79SJeff Mahoney 20161da177e4SLinus Torvalds /* get a minor number for the dev */ 20172b06cfffSAlasdair G Kergon if (minor == DM_ANY_MINOR) 2018cf13ab8eSFrederik Deweerdt r = next_free_minor(&minor); 20192b06cfffSAlasdair G Kergon else 2020cf13ab8eSFrederik Deweerdt r = specific_minor(minor); 20211da177e4SLinus Torvalds if (r < 0) 20226ed7ade8SMilan Broz goto bad_minor; 20231da177e4SLinus Torvalds 202483d5e5b0SMikulas Patocka r = init_srcu_struct(&md->io_barrier); 202583d5e5b0SMikulas Patocka if (r < 0) 202683d5e5b0SMikulas Patocka goto bad_io_barrier; 202783d5e5b0SMikulas Patocka 2028115485e8SMike Snitzer md->numa_node_id = numa_node_id; 2029591ddcfcSMike Snitzer md->init_tio_pdu = false; 2030a5664dadSMike Snitzer md->type = DM_TYPE_NONE; 2031e61290a4SDaniel Walker mutex_init(&md->suspend_lock); 2032a5664dadSMike Snitzer mutex_init(&md->type_lock); 203386f1152bSBenjamin Marzinski mutex_init(&md->table_devices_lock); 2034022c2611SMikulas Patocka spin_lock_init(&md->deferred_lock); 20351da177e4SLinus Torvalds atomic_set(&md->holders, 1); 20365c6bd75dSAlasdair G Kergon atomic_set(&md->open_count, 0); 20371da177e4SLinus Torvalds atomic_set(&md->event_nr, 0); 20387a8c3d3bSMike Anderson atomic_set(&md->uevent_seq, 0); 20397a8c3d3bSMike Anderson INIT_LIST_HEAD(&md->uevent_list); 204086f1152bSBenjamin Marzinski INIT_LIST_HEAD(&md->table_devices); 20417a8c3d3bSMike Anderson spin_lock_init(&md->uevent_lock); 20421da177e4SLinus Torvalds 204347ace7e0SMike Snitzer /* 2044c62b37d9SChristoph Hellwig * default to bio-based until DM table is loaded and md->type 
2045c62b37d9SChristoph Hellwig * established. If request-based table is loaded: blk-mq will 2046c62b37d9SChristoph Hellwig * override accordingly. 204747ace7e0SMike Snitzer */ 204874fe6ba9SChristoph Hellwig md->disk = blk_alloc_disk(md->numa_node_id); 20491da177e4SLinus Torvalds if (!md->disk) 20500f20972fSMike Snitzer goto bad; 205174fe6ba9SChristoph Hellwig md->queue = md->disk->queue; 20521da177e4SLinus Torvalds 2053f0b04115SJeff Mahoney init_waitqueue_head(&md->wait); 205453d5914fSMikulas Patocka INIT_WORK(&md->work, dm_wq_work); 20558b211aacSMing Lei INIT_WORK(&md->requeue_work, dm_wq_requeue_work); 2056f0b04115SJeff Mahoney init_waitqueue_head(&md->eventq); 20572995fa78SMikulas Patocka init_completion(&md->kobj_holder.completion); 2058f0b04115SJeff Mahoney 20598b211aacSMing Lei md->requeue_list = NULL; 2060a666e5c0SMikulas Patocka md->swap_bios = get_swap_bios(); 2061a666e5c0SMikulas Patocka sema_init(&md->swap_bios_semaphore, md->swap_bios); 2062a666e5c0SMikulas Patocka mutex_init(&md->swap_bios_lock); 2063a666e5c0SMikulas Patocka 20641da177e4SLinus Torvalds md->disk->major = _major; 20651da177e4SLinus Torvalds md->disk->first_minor = minor; 206674fe6ba9SChristoph Hellwig md->disk->minors = 1; 20671ebe2e5fSChristoph Hellwig md->disk->flags |= GENHD_FL_NO_PART; 20681da177e4SLinus Torvalds md->disk->fops = &dm_blk_dops; 20691da177e4SLinus Torvalds md->disk->private_data = md; 20701da177e4SLinus Torvalds sprintf(md->disk->disk_name, "dm-%d", minor); 2071f26c5719SDan Williams 20725d2a228bSChristoph Hellwig if (IS_ENABLED(CONFIG_FS_DAX)) { 207330c6828aSChristoph Hellwig md->dax_dev = alloc_dax(md, &dm_dax_ops); 2074d7519392SChristoph Hellwig if (IS_ERR(md->dax_dev)) { 2075d7519392SChristoph Hellwig md->dax_dev = NULL; 2076f26c5719SDan Williams goto bad; 2077976431b0SDan Williams } 20787ac5360cSChristoph Hellwig set_dax_nocache(md->dax_dev); 20797ac5360cSChristoph Hellwig set_dax_nomc(md->dax_dev); 2080fb08a190SChristoph Hellwig if (dax_add_host(md->dax_dev, md->disk)) 2081f26c5719SDan Williams goto bad; 2082f26c5719SDan Williams } 20831da177e4SLinus Torvalds 20847e51f257SMike Anderson format_dev_t(md->name, MKDEV(_major, minor)); 20851da177e4SLinus Torvalds 2086c7c879eeSMichał Mirosław md->wq = alloc_workqueue("kdmflush/%s", WQ_MEM_RECLAIM, 0, md->name); 2087304f3f6aSMilan Broz if (!md->wq) 20880f20972fSMike Snitzer goto bad; 2089304f3f6aSMilan Broz 20909f6dc633SMike Snitzer md->pending_io = alloc_percpu(unsigned long); 20919f6dc633SMike Snitzer if (!md->pending_io) 20929f6dc633SMike Snitzer goto bad; 20939f6dc633SMike Snitzer 2094fd2ed4d2SMikulas Patocka dm_stats_init(&md->stats); 2095fd2ed4d2SMikulas Patocka 2096ba61fdd1SJeff Mahoney /* Populate the mapping, nobody knows we exist yet */ 2097f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 2098ba61fdd1SJeff Mahoney old_md = idr_replace(&_minor_idr, md, minor); 2099f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 2100ba61fdd1SJeff Mahoney 2101ba61fdd1SJeff Mahoney BUG_ON(old_md != MINOR_ALLOCED); 2102ba61fdd1SJeff Mahoney 21031da177e4SLinus Torvalds return md; 21041da177e4SLinus Torvalds 21050f20972fSMike Snitzer bad: 21060f20972fSMike Snitzer cleanup_mapped_device(md); 210783d5e5b0SMikulas Patocka bad_io_barrier: 21081da177e4SLinus Torvalds free_minor(minor); 21096ed7ade8SMilan Broz bad_minor: 211010da4f79SJeff Mahoney module_put(THIS_MODULE); 21116ed7ade8SMilan Broz bad_module_get: 2112856eb091SMikulas Patocka kvfree(md); 21131da177e4SLinus Torvalds return NULL; 21141da177e4SLinus Torvalds } 21151da177e4SLinus Torvalds 
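/*
 * A minimal sketch (not part of dm.c) of the unwind idiom alloc_dev()
 * above relies on: error labels are laid out in reverse order of
 * acquisition, so each failure point jumps to a label that releases
 * only what was successfully set up before it. struct my_dev,
 * my_dev_alloc() and MY_BUF_SIZE are hypothetical names used for
 * illustration; only the kvzalloc()/kvfree() and
 * try_module_get()/module_put() pairings mirror the real code.
 */
#include <linux/module.h>
#include <linux/slab.h>

#define MY_BUF_SIZE 4096

struct my_dev {
	int minor;
	void *buf;
};

static struct my_dev *my_dev_alloc(int minor)
{
	struct my_dev *d = kvzalloc(sizeof(*d), GFP_KERNEL);

	if (!d)
		return NULL;		/* nothing to unwind yet */

	if (!try_module_get(THIS_MODULE))
		goto bad_module_get;	/* only the allocation needs undoing */

	d->minor = minor;		/* stand-in for specific_minor()/next_free_minor() */

	d->buf = kvzalloc(MY_BUF_SIZE, GFP_KERNEL);
	if (!d->buf)
		goto bad_buf;		/* module ref and allocation need undoing */

	return d;

bad_buf:
	module_put(THIS_MODULE);	/* reverse of try_module_get() */
bad_module_get:
	kvfree(d);			/* reverse of kvzalloc() */
	return NULL;
}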
2116ae9da83fSJun'ichi Nomura static void unlock_fs(struct mapped_device *md); 2117ae9da83fSJun'ichi Nomura 21181da177e4SLinus Torvalds static void free_dev(struct mapped_device *md) 21191da177e4SLinus Torvalds { 2120f331c029STejun Heo int minor = MINOR(disk_devt(md->disk)); 212163d94e48SJun'ichi Nomura 2122ae9da83fSJun'ichi Nomura unlock_fs(md); 21232eb6e1e3SKeith Busch 21240f20972fSMike Snitzer cleanup_mapped_device(md); 21250f20972fSMike Snitzer 2126992ec6a9SChristoph Hellwig WARN_ON_ONCE(!list_empty(&md->table_devices)); 21270f20972fSMike Snitzer dm_stats_cleanup(&md->stats); 212863a4f065SMike Snitzer free_minor(minor); 212963a4f065SMike Snitzer 213010da4f79SJeff Mahoney module_put(THIS_MODULE); 2131856eb091SMikulas Patocka kvfree(md); 21321da177e4SLinus Torvalds } 21331da177e4SLinus Torvalds 21341da177e4SLinus Torvalds /* 21351da177e4SLinus Torvalds * Bind a table to the device. 21361da177e4SLinus Torvalds */ 21371da177e4SLinus Torvalds static void event_callback(void *context) 21381da177e4SLinus Torvalds { 21397a8c3d3bSMike Anderson unsigned long flags; 21407a8c3d3bSMike Anderson LIST_HEAD(uevents); 21411da177e4SLinus Torvalds struct mapped_device *md = (struct mapped_device *) context; 21421da177e4SLinus Torvalds 21437a8c3d3bSMike Anderson spin_lock_irqsave(&md->uevent_lock, flags); 21447a8c3d3bSMike Anderson list_splice_init(&md->uevent_list, &uevents); 21457a8c3d3bSMike Anderson spin_unlock_irqrestore(&md->uevent_lock, flags); 21467a8c3d3bSMike Anderson 2147ed9e1982STejun Heo dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); 21487a8c3d3bSMike Anderson 21491da177e4SLinus Torvalds atomic_inc(&md->event_nr); 21501da177e4SLinus Torvalds wake_up(&md->eventq); 215162e08243SMikulas Patocka dm_issue_global_event(); 21521da177e4SLinus Torvalds } 21531da177e4SLinus Torvalds 2154c217649bSMike Snitzer /* 2155042d2a9bSAlasdair G Kergon * Returns old map, which caller must destroy. 2156042d2a9bSAlasdair G Kergon */ 2157042d2a9bSAlasdair G Kergon static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, 2158754c5fc7SMike Snitzer struct queue_limits *limits) 21591da177e4SLinus Torvalds { 2160042d2a9bSAlasdair G Kergon struct dm_table *old_map; 21611da177e4SLinus Torvalds sector_t size; 21622a2a4c51SJens Axboe int ret; 21631da177e4SLinus Torvalds 21645a8f1f80SBart Van Assche lockdep_assert_held(&md->suspend_lock); 21655a8f1f80SBart Van Assche 21661da177e4SLinus Torvalds size = dm_table_get_size(t); 21673ac51e74SDarrick J. Wong 21683ac51e74SDarrick J. Wong /* 21693ac51e74SDarrick J. Wong * Wipe any geometry if the size of the table changed. 21703ac51e74SDarrick J. Wong */ 2171fd2ed4d2SMikulas Patocka if (size != dm_get_size(md)) 21723ac51e74SDarrick J. Wong memset(&md->geometry, 0, sizeof(md->geometry)); 21733ac51e74SDarrick J. Wong 21745424a0b8SMikulas Patocka set_capacity(md->disk, size); 21751da177e4SLinus Torvalds 2176cf222b37SAlasdair G Kergon dm_table_event_callback(t, event_callback, md); 21772ca3310eSAlasdair G Kergon 2178f5b4aee1SMike Snitzer if (dm_table_request_based(t)) { 217916f12266SMike Snitzer /* 21809c37de29SMike Snitzer * Leverage the fact that request-based DM targets are 21819c37de29SMike Snitzer * immutable singletons - used to optimize dm_mq_queue_rq. 
218216f12266SMike Snitzer */ 218316f12266SMike Snitzer md->immutable_target = dm_table_get_immutable_target(t); 2184e6ee8c0bSKiyoshi Ueda 218529dec90aSChristoph Hellwig /* 218629dec90aSChristoph Hellwig * There is no need to reload with request-based dm because the 218729dec90aSChristoph Hellwig * size of front_pad doesn't change. 218829dec90aSChristoph Hellwig * 218929dec90aSChristoph Hellwig * Note for the future: if you reload the bioset, prepped 219029dec90aSChristoph Hellwig * requests in the queue may still refer to bios from the old bioset, 219129dec90aSChristoph Hellwig * so you must walk through the queue to unprep them. 219229dec90aSChristoph Hellwig */ 219329dec90aSChristoph Hellwig if (!md->mempools) { 219429dec90aSChristoph Hellwig md->mempools = t->mempools; 219529dec90aSChristoph Hellwig t->mempools = NULL; 219629dec90aSChristoph Hellwig } 219729dec90aSChristoph Hellwig } else { 219829dec90aSChristoph Hellwig /* 219929dec90aSChristoph Hellwig * The md may already have mempools that need changing. 220029dec90aSChristoph Hellwig * If so, reload the bioset because front_pad may have changed 220129dec90aSChristoph Hellwig * when a different table was loaded. 220229dec90aSChristoph Hellwig */ 220329dec90aSChristoph Hellwig dm_free_md_mempools(md->mempools); 220429dec90aSChristoph Hellwig md->mempools = t->mempools; 220529dec90aSChristoph Hellwig t->mempools = NULL; 22062a2a4c51SJens Axboe } 2207e6ee8c0bSKiyoshi Ueda 2208f5b4aee1SMike Snitzer ret = dm_table_set_restrictions(t, md->queue, limits); 2209bb37d772SDamien Le Moal if (ret) { 2210bb37d772SDamien Le Moal old_map = ERR_PTR(ret); 2211bb37d772SDamien Le Moal goto out; 2212bb37d772SDamien Le Moal } 2213bb37d772SDamien Le Moal 2214a12f5d48SEric Dumazet old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 22151d3aa6f6SMike Snitzer rcu_assign_pointer(md->map, (void *)t); 221636a0456fSAlasdair G Kergon md->immutable_target_type = dm_table_get_immutable_target_type(t); 221736a0456fSAlasdair G Kergon 221841abc4e1SHannes Reinecke if (old_map) 221983d5e5b0SMikulas Patocka dm_sync_table(md); 22202a2a4c51SJens Axboe out: 2221042d2a9bSAlasdair G Kergon return old_map; 22221da177e4SLinus Torvalds } 22231da177e4SLinus Torvalds 2224a7940155SAlasdair G Kergon /* 2225a7940155SAlasdair G Kergon * Returns unbound table for the caller to free. 2226a7940155SAlasdair G Kergon */ 2227a7940155SAlasdair G Kergon static struct dm_table *__unbind(struct mapped_device *md) 22281da177e4SLinus Torvalds { 2229a12f5d48SEric Dumazet struct dm_table *map = rcu_dereference_protected(md->map, 1); 22301da177e4SLinus Torvalds 22311da177e4SLinus Torvalds if (!map) 2232a7940155SAlasdair G Kergon return NULL; 22331da177e4SLinus Torvalds 22341da177e4SLinus Torvalds dm_table_event_callback(map, NULL, NULL); 22359cdb8520SMonam Agarwal RCU_INIT_POINTER(md->map, NULL); 223683d5e5b0SMikulas Patocka dm_sync_table(md); 2237a7940155SAlasdair G Kergon 2238a7940155SAlasdair G Kergon return map; 22391da177e4SLinus Torvalds } 22401da177e4SLinus Torvalds 22411da177e4SLinus Torvalds /* 22421da177e4SLinus Torvalds * Constructor for a new device.
22431da177e4SLinus Torvalds */ 22442b06cfffSAlasdair G Kergon int dm_create(int minor, struct mapped_device **result) 22451da177e4SLinus Torvalds { 22461da177e4SLinus Torvalds struct mapped_device *md; 22471da177e4SLinus Torvalds 22482b06cfffSAlasdair G Kergon md = alloc_dev(minor); 22491da177e4SLinus Torvalds if (!md) 22501da177e4SLinus Torvalds return -ENXIO; 22511da177e4SLinus Torvalds 225291ccbbacSTushar Sugandhi dm_ima_reset_data(md); 225391ccbbacSTushar Sugandhi 22541da177e4SLinus Torvalds *result = md; 22551da177e4SLinus Torvalds return 0; 22561da177e4SLinus Torvalds } 22571da177e4SLinus Torvalds 2258a5664dadSMike Snitzer /* 2259a5664dadSMike Snitzer * Functions to manage md->type. 2260a5664dadSMike Snitzer * All are required to hold md->type_lock. 2261a5664dadSMike Snitzer */ 2262a5664dadSMike Snitzer void dm_lock_md_type(struct mapped_device *md) 2263a5664dadSMike Snitzer { 2264a5664dadSMike Snitzer mutex_lock(&md->type_lock); 2265a5664dadSMike Snitzer } 2266a5664dadSMike Snitzer 2267a5664dadSMike Snitzer void dm_unlock_md_type(struct mapped_device *md) 2268a5664dadSMike Snitzer { 2269a5664dadSMike Snitzer mutex_unlock(&md->type_lock); 2270a5664dadSMike Snitzer } 2271a5664dadSMike Snitzer 22727e0d574fSBart Van Assche void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type) 2273a5664dadSMike Snitzer { 227400c4fc3bSMike Snitzer BUG_ON(!mutex_is_locked(&md->type_lock)); 2275a5664dadSMike Snitzer md->type = type; 2276a5664dadSMike Snitzer } 2277a5664dadSMike Snitzer 22787e0d574fSBart Van Assche enum dm_queue_mode dm_get_md_type(struct mapped_device *md) 2279a5664dadSMike Snitzer { 2280a5664dadSMike Snitzer return md->type; 2281a5664dadSMike Snitzer } 2282a5664dadSMike Snitzer 228336a0456fSAlasdair G Kergon struct target_type *dm_get_immutable_target_type(struct mapped_device *md) 228436a0456fSAlasdair G Kergon { 228536a0456fSAlasdair G Kergon return md->immutable_target_type; 228636a0456fSAlasdair G Kergon } 228736a0456fSAlasdair G Kergon 22884a0b4ddfSMike Snitzer /* 2289f84cb8a4SMike Snitzer * The queue_limits are only valid as long as you have a reference 2290f84cb8a4SMike Snitzer * count on 'md'. 
2291f84cb8a4SMike Snitzer */ 2292f84cb8a4SMike Snitzer struct queue_limits *dm_get_queue_limits(struct mapped_device *md) 2293f84cb8a4SMike Snitzer { 2294f84cb8a4SMike Snitzer BUG_ON(!atomic_read(&md->holders)); 2295f84cb8a4SMike Snitzer return &md->queue->limits; 2296f84cb8a4SMike Snitzer } 2297f84cb8a4SMike Snitzer EXPORT_SYMBOL_GPL(dm_get_queue_limits); 2298f84cb8a4SMike Snitzer 22994a0b4ddfSMike Snitzer /* 23004a0b4ddfSMike Snitzer * Set up the DM device's queue based on md's type 23014a0b4ddfSMike Snitzer */ 2302591ddcfcSMike Snitzer int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) 23034a0b4ddfSMike Snitzer { 2304ba305859SChristoph Hellwig enum dm_queue_mode type = dm_table_get_type(t); 2305c100ec49SMike Snitzer struct queue_limits limits; 23061a581b72SChristoph Hellwig struct table_device *td; 2307ba305859SChristoph Hellwig int r; 2308bfebd1cdSMike Snitzer 2309545ed20eSToshi Kani switch (type) { 2310bfebd1cdSMike Snitzer case DM_TYPE_REQUEST_BASED: 2311681cc5e8SMike Snitzer md->disk->fops = &dm_rq_blk_dops; 2312e83068a5SMike Snitzer r = dm_mq_init_request_queue(md, t); 2313bfebd1cdSMike Snitzer if (r) { 2314681cc5e8SMike Snitzer DMERR("Cannot initialize queue for request-based dm mapped device"); 2315bfebd1cdSMike Snitzer return r; 2316bfebd1cdSMike Snitzer } 2317bfebd1cdSMike Snitzer break; 2318bfebd1cdSMike Snitzer case DM_TYPE_BIO_BASED: 2319545ed20eSToshi Kani case DM_TYPE_DAX_BIO_BASED: 2320bfebd1cdSMike Snitzer break; 23217e0d574fSBart Van Assche case DM_TYPE_NONE: 23227e0d574fSBart Van Assche WARN_ON_ONCE(true); 23237e0d574fSBart Van Assche break; 2324ff36ab34SMike Snitzer } 23254a0b4ddfSMike Snitzer 2326c100ec49SMike Snitzer r = dm_calculate_queue_limits(t, &limits); 2327c100ec49SMike Snitzer if (r) { 2328c100ec49SMike Snitzer DMERR("Cannot calculate initial queue limits"); 2329c100ec49SMike Snitzer return r; 2330c100ec49SMike Snitzer } 2331bb37d772SDamien Le Moal r = dm_table_set_restrictions(t, md->queue, &limits); 2332bb37d772SDamien Le Moal if (r) 2333bb37d772SDamien Le Moal return r; 233489f871afSChristoph Hellwig 2335d563792cSYu Kuai /* 2336d563792cSYu Kuai * Hold lock to make sure add_disk() and del_gendisk() won't run concurrently 2337d563792cSYu Kuai * with open_table_device() and close_table_device(). 2338d563792cSYu Kuai */ 2339d563792cSYu Kuai mutex_lock(&md->table_devices_lock); 2340e7089f65SLuis Chamberlain r = add_disk(md->disk); 2341d563792cSYu Kuai mutex_unlock(&md->table_devices_lock); 2342e7089f65SLuis Chamberlain if (r) 2343e7089f65SLuis Chamberlain return r; 234489f871afSChristoph Hellwig 23451a581b72SChristoph Hellwig /* 23461a581b72SChristoph Hellwig * Register the holder relationship for devices added before the disk 23471a581b72SChristoph Hellwig * was live.
23481a581b72SChristoph Hellwig */ 23491a581b72SChristoph Hellwig list_for_each_entry(td, &md->table_devices, list) { 23501a581b72SChristoph Hellwig r = bd_link_disk_holder(td->dm_dev.bdev, md->disk); 23511a581b72SChristoph Hellwig if (r) 23521a581b72SChristoph Hellwig goto out_undo_holders; 23531a581b72SChristoph Hellwig } 23541a581b72SChristoph Hellwig 235589f871afSChristoph Hellwig r = dm_sysfs_init(md); 23561a581b72SChristoph Hellwig if (r) 23571a581b72SChristoph Hellwig goto out_undo_holders; 23581a581b72SChristoph Hellwig 23591a581b72SChristoph Hellwig md->type = type; 23601a581b72SChristoph Hellwig return 0; 23611a581b72SChristoph Hellwig 23621a581b72SChristoph Hellwig out_undo_holders: 23631a581b72SChristoph Hellwig list_for_each_entry_continue_reverse(td, &md->table_devices, list) 23641a581b72SChristoph Hellwig bd_unlink_disk_holder(td->dm_dev.bdev, md->disk); 2365d563792cSYu Kuai mutex_lock(&md->table_devices_lock); 236689f871afSChristoph Hellwig del_gendisk(md->disk); 2367d563792cSYu Kuai mutex_unlock(&md->table_devices_lock); 236889f871afSChristoph Hellwig return r; 236989f871afSChristoph Hellwig } 2370d563792cSYu Kuai 23712bec1f4aSMikulas Patocka struct mapped_device *dm_get_md(dev_t dev) 23721da177e4SLinus Torvalds { 23731da177e4SLinus Torvalds struct mapped_device *md; 2374*86a3238cSHeinz Mauelshagen unsigned int minor = MINOR(dev); 23751da177e4SLinus Torvalds 23761da177e4SLinus Torvalds if (MAJOR(dev) != _major || minor >= (1 << MINORBITS)) 23771da177e4SLinus Torvalds return NULL; 23781da177e4SLinus Torvalds 2379f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 23801da177e4SLinus Torvalds 23811da177e4SLinus Torvalds md = idr_find(&_minor_idr, minor); 238249de5769SMike Snitzer if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) || 238349de5769SMike Snitzer test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { 2384637842cfSDavid Teigland md = NULL; 2385fba9f90eSJeff Mahoney goto out; 2386fba9f90eSJeff Mahoney } 23872bec1f4aSMikulas Patocka dm_get(md); 2388fba9f90eSJeff Mahoney out: 2389f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 23901da177e4SLinus Torvalds 2391637842cfSDavid Teigland return md; 2392637842cfSDavid Teigland } 23933cf2e4baSAlasdair G Kergon EXPORT_SYMBOL_GPL(dm_get_md); 2394d229a958SDavid Teigland 23959ade92a9SAlasdair G Kergon void *dm_get_mdptr(struct mapped_device *md) 2396637842cfSDavid Teigland { 23979ade92a9SAlasdair G Kergon return md->interface_ptr; 23981da177e4SLinus Torvalds } 23991da177e4SLinus Torvalds 24001da177e4SLinus Torvalds void dm_set_mdptr(struct mapped_device *md, void *ptr) 24011da177e4SLinus Torvalds { 24021da177e4SLinus Torvalds md->interface_ptr = ptr; 24031da177e4SLinus Torvalds } 24041da177e4SLinus Torvalds 24051da177e4SLinus Torvalds void dm_get(struct mapped_device *md) 24061da177e4SLinus Torvalds { 24071da177e4SLinus Torvalds atomic_inc(&md->holders); 24083f77316dSKiyoshi Ueda BUG_ON(test_bit(DMF_FREEING, &md->flags)); 24091da177e4SLinus Torvalds } 24101da177e4SLinus Torvalds 241109ee96b2SMikulas Patocka int dm_hold(struct mapped_device *md) 241209ee96b2SMikulas Patocka { 241309ee96b2SMikulas Patocka spin_lock(&_minor_lock); 241409ee96b2SMikulas Patocka if (test_bit(DMF_FREEING, &md->flags)) { 241509ee96b2SMikulas Patocka spin_unlock(&_minor_lock); 241609ee96b2SMikulas Patocka return -EBUSY; 241709ee96b2SMikulas Patocka } 241809ee96b2SMikulas Patocka dm_get(md); 241909ee96b2SMikulas Patocka spin_unlock(&_minor_lock); 242009ee96b2SMikulas Patocka return 0; 242109ee96b2SMikulas Patocka } 
242209ee96b2SMikulas Patocka EXPORT_SYMBOL_GPL(dm_hold); 242309ee96b2SMikulas Patocka 242472d94861SAlasdair G Kergon const char *dm_device_name(struct mapped_device *md) 242572d94861SAlasdair G Kergon { 242672d94861SAlasdair G Kergon return md->name; 242772d94861SAlasdair G Kergon } 242872d94861SAlasdair G Kergon EXPORT_SYMBOL_GPL(dm_device_name); 242972d94861SAlasdair G Kergon 24303f77316dSKiyoshi Ueda static void __dm_destroy(struct mapped_device *md, bool wait) 24311da177e4SLinus Torvalds { 24321134e5aeSMike Anderson struct dm_table *map; 243383d5e5b0SMikulas Patocka int srcu_idx; 24341da177e4SLinus Torvalds 24353f77316dSKiyoshi Ueda might_sleep(); 2436fba9f90eSJeff Mahoney 243763a4f065SMike Snitzer spin_lock(&_minor_lock); 24383f77316dSKiyoshi Ueda idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); 2439fba9f90eSJeff Mahoney set_bit(DMF_FREEING, &md->flags); 2440f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 24413f77316dSKiyoshi Ueda 24427a5428dcSChristoph Hellwig blk_mark_disk_dead(md->disk); 24433b785fbcSBart Van Assche 2444ab7c7bb6SMikulas Patocka /* 2445ab7c7bb6SMikulas Patocka * Take suspend_lock so that presuspend and postsuspend methods 2446ab7c7bb6SMikulas Patocka * do not race with internal suspend. 2447ab7c7bb6SMikulas Patocka */ 2448ab7c7bb6SMikulas Patocka mutex_lock(&md->suspend_lock); 24492a708cffSJunichi Nomura map = dm_get_live_table(md, &srcu_idx); 24504f186f8bSKiyoshi Ueda if (!dm_suspended_md(md)) { 24511da177e4SLinus Torvalds dm_table_presuspend_targets(map); 2452adc0daadSMikulas Patocka set_bit(DMF_SUSPENDED, &md->flags); 24535df96f2bSMikulas Patocka set_bit(DMF_POST_SUSPENDING, &md->flags); 24541da177e4SLinus Torvalds dm_table_postsuspend_targets(map); 24551da177e4SLinus Torvalds } 2456238d991fSHeinz Mauelshagen /* dm_put_live_table must come before fsleep, otherwise a deadlock is possible */ 245783d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 24582a708cffSJunichi Nomura mutex_unlock(&md->suspend_lock); 245983d5e5b0SMikulas Patocka 24603f77316dSKiyoshi Ueda /* 24613f77316dSKiyoshi Ueda * Rare, but there may still be I/O requests in flight that need 24623f77316dSKiyoshi Ueda * to complete. Wait for all references to disappear. 24633f77316dSKiyoshi Ueda * No one should increment the reference count of the mapped_device 24643f77316dSKiyoshi Ueda * after its state becomes DMF_FREEING. 24653f77316dSKiyoshi Ueda */ 24663f77316dSKiyoshi Ueda if (wait) 24673f77316dSKiyoshi Ueda while (atomic_read(&md->holders)) 2468238d991fSHeinz Mauelshagen fsleep(1000); 24693f77316dSKiyoshi Ueda else if (atomic_read(&md->holders)) 24703f77316dSKiyoshi Ueda DMWARN("%s: Forcibly removing mapped_device still in use!
(%d users)", 24713f77316dSKiyoshi Ueda dm_device_name(md), atomic_read(&md->holders)); 24723f77316dSKiyoshi Ueda 2473a7940155SAlasdair G Kergon dm_table_destroy(__unbind(md)); 24741da177e4SLinus Torvalds free_dev(md); 24751da177e4SLinus Torvalds } 24763f77316dSKiyoshi Ueda 24773f77316dSKiyoshi Ueda void dm_destroy(struct mapped_device *md) 24783f77316dSKiyoshi Ueda { 24793f77316dSKiyoshi Ueda __dm_destroy(md, true); 24803f77316dSKiyoshi Ueda } 24813f77316dSKiyoshi Ueda 24823f77316dSKiyoshi Ueda void dm_destroy_immediate(struct mapped_device *md) 24833f77316dSKiyoshi Ueda { 24843f77316dSKiyoshi Ueda __dm_destroy(md, false); 24853f77316dSKiyoshi Ueda } 24863f77316dSKiyoshi Ueda 24873f77316dSKiyoshi Ueda void dm_put(struct mapped_device *md) 24883f77316dSKiyoshi Ueda { 24893f77316dSKiyoshi Ueda atomic_dec(&md->holders); 24901da177e4SLinus Torvalds } 249179eb885cSEdward Goggin EXPORT_SYMBOL_GPL(dm_put); 24921da177e4SLinus Torvalds 24939f6dc633SMike Snitzer static bool dm_in_flight_bios(struct mapped_device *md) 249485067747SMing Lei { 249585067747SMing Lei int cpu; 24969f6dc633SMike Snitzer unsigned long sum = 0; 249785067747SMing Lei 24989f6dc633SMike Snitzer for_each_possible_cpu(cpu) 24999f6dc633SMike Snitzer sum += *per_cpu_ptr(md->pending_io, cpu); 250085067747SMing Lei 250185067747SMing Lei return sum != 0; 250285067747SMing Lei } 250385067747SMing Lei 25042f064a59SPeter Zijlstra static int dm_wait_for_bios_completion(struct mapped_device *md, unsigned int task_state) 250546125c1cSMilan Broz { 250646125c1cSMilan Broz int r = 0; 25079f4c3f87SBart Van Assche DEFINE_WAIT(wait); 250846125c1cSMilan Broz 250985067747SMing Lei while (true) { 25109f4c3f87SBart Van Assche prepare_to_wait(&md->wait, &wait, task_state); 251146125c1cSMilan Broz 25129f6dc633SMike Snitzer if (!dm_in_flight_bios(md)) 251346125c1cSMilan Broz break; 251446125c1cSMilan Broz 2515e3fabdfdSBart Van Assche if (signal_pending_state(task_state, current)) { 251646125c1cSMilan Broz r = -EINTR; 251746125c1cSMilan Broz break; 251846125c1cSMilan Broz } 251946125c1cSMilan Broz 252046125c1cSMilan Broz io_schedule(); 252146125c1cSMilan Broz } 25229f4c3f87SBart Van Assche finish_wait(&md->wait, &wait); 2523b44ebeb0SMikulas Patocka 25249f6dc633SMike Snitzer smp_rmb(); 25259f6dc633SMike Snitzer 252646125c1cSMilan Broz return r; 252746125c1cSMilan Broz } 252846125c1cSMilan Broz 25292f064a59SPeter Zijlstra static int dm_wait_for_completion(struct mapped_device *md, unsigned int task_state) 253085067747SMing Lei { 253185067747SMing Lei int r = 0; 253285067747SMing Lei 253385067747SMing Lei if (!queue_is_mq(md->queue)) 253485067747SMing Lei return dm_wait_for_bios_completion(md, task_state); 253585067747SMing Lei 253685067747SMing Lei while (true) { 253785067747SMing Lei if (!blk_mq_queue_inflight(md->queue)) 253885067747SMing Lei break; 253985067747SMing Lei 254085067747SMing Lei if (signal_pending_state(task_state, current)) { 254185067747SMing Lei r = -EINTR; 254285067747SMing Lei break; 254385067747SMing Lei } 254485067747SMing Lei 2545238d991fSHeinz Mauelshagen fsleep(5000); 254685067747SMing Lei } 254785067747SMing Lei 254885067747SMing Lei return r; 254985067747SMing Lei } 255085067747SMing Lei 25511da177e4SLinus Torvalds /* 25521da177e4SLinus Torvalds * Process the deferred bios 25531da177e4SLinus Torvalds */ 2554ef208587SMikulas Patocka static void dm_wq_work(struct work_struct *work) 25551da177e4SLinus Torvalds { 25560c2915b8SMike Snitzer struct mapped_device *md = container_of(work, struct mapped_device, work); 25570c2915b8SMike 
Snitzer struct bio *bio; 2558ef208587SMikulas Patocka 25593b00b203SMikulas Patocka while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 2560022c2611SMikulas Patocka spin_lock_irq(&md->deferred_lock); 25610c2915b8SMike Snitzer bio = bio_list_pop(&md->deferred); 2562022c2611SMikulas Patocka spin_unlock_irq(&md->deferred_lock); 2563022c2611SMikulas Patocka 25640c2915b8SMike Snitzer if (!bio) 2565df12ee99SAlasdair G Kergon break; 256673d410c0SMilan Broz 25670c2915b8SMike Snitzer submit_bio_noacct(bio); 2568e6ee8c0bSKiyoshi Ueda } 25691da177e4SLinus Torvalds } 25701da177e4SLinus Torvalds 25719a1fb464SMikulas Patocka static void dm_queue_flush(struct mapped_device *md) 2572304f3f6aSMilan Broz { 25733b00b203SMikulas Patocka clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 25744e857c58SPeter Zijlstra smp_mb__after_atomic(); 257553d5914fSMikulas Patocka queue_work(md->wq, &md->work); 2576304f3f6aSMilan Broz } 2577304f3f6aSMilan Broz 25781da177e4SLinus Torvalds /* 2579042d2a9bSAlasdair G Kergon * Swap in a new table, returning the old one for the caller to destroy. 25801da177e4SLinus Torvalds */ 2581042d2a9bSAlasdair G Kergon struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) 25821da177e4SLinus Torvalds { 258387eb5b21SMike Christie struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL); 2584754c5fc7SMike Snitzer struct queue_limits limits; 2585042d2a9bSAlasdair G Kergon int r; 25861da177e4SLinus Torvalds 2587e61290a4SDaniel Walker mutex_lock(&md->suspend_lock); 25881da177e4SLinus Torvalds 25891da177e4SLinus Torvalds /* device must be suspended */ 25904f186f8bSKiyoshi Ueda if (!dm_suspended_md(md)) 259193c534aeSAlasdair G Kergon goto out; 25921da177e4SLinus Torvalds 25933ae70656SMike Snitzer /* 25943ae70656SMike Snitzer * If the new table has no data devices, retain the existing limits. 25953ae70656SMike Snitzer * This helps multipath with queue_if_no_path if all paths disappear, 25963ae70656SMike Snitzer * then new I/O is queued based on these limits, and then some paths 25973ae70656SMike Snitzer * reappear. 25983ae70656SMike Snitzer */ 25993ae70656SMike Snitzer if (dm_table_has_no_data_devices(table)) { 260083d5e5b0SMikulas Patocka live_map = dm_get_live_table_fast(md); 26013ae70656SMike Snitzer if (live_map) 26023ae70656SMike Snitzer limits = md->queue->limits; 260383d5e5b0SMikulas Patocka dm_put_live_table_fast(md); 26043ae70656SMike Snitzer } 26053ae70656SMike Snitzer 260687eb5b21SMike Christie if (!live_map) { 2607754c5fc7SMike Snitzer r = dm_calculate_queue_limits(table, &limits); 2608042d2a9bSAlasdair G Kergon if (r) { 2609042d2a9bSAlasdair G Kergon map = ERR_PTR(r); 2610754c5fc7SMike Snitzer goto out; 2611042d2a9bSAlasdair G Kergon } 261287eb5b21SMike Christie } 2613754c5fc7SMike Snitzer 2614042d2a9bSAlasdair G Kergon map = __bind(md, table, &limits); 261562e08243SMikulas Patocka dm_issue_global_event(); 26161da177e4SLinus Torvalds 261793c534aeSAlasdair G Kergon out: 2618e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock); 2619042d2a9bSAlasdair G Kergon return map; 26201da177e4SLinus Torvalds } 26211da177e4SLinus Torvalds 26221da177e4SLinus Torvalds /* 26231da177e4SLinus Torvalds * Functions to lock and unlock any filesystem running on the 26241da177e4SLinus Torvalds * device. 
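 *
 * In sketch form (lock_fs() below freezes via freeze_bdev() and records
 * DMF_FROZEN; unlock_fs() thaws only when that flag is set):
 *
 *	if (!lock_fs(md)) {
 *		... operate on the quiesced device ...
 *		unlock_fs(md);
 *	}
 *
 * Because unlock_fs() checks DMF_FROZEN first, it is also safe to call
 * on error paths where the freeze never happened.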
26251da177e4SLinus Torvalds */ 26262ca3310eSAlasdair G Kergon static int lock_fs(struct mapped_device *md) 26271da177e4SLinus Torvalds { 2628e39e2e95SAlasdair G Kergon int r; 26291da177e4SLinus Torvalds 2630040f04bdSChristoph Hellwig WARN_ON(test_bit(DMF_FROZEN, &md->flags)); 2631dfbe03f6SAlasdair G Kergon 2632977115c0SChristoph Hellwig r = freeze_bdev(md->disk->part0); 2633040f04bdSChristoph Hellwig if (!r) 2634aa8d7c2fSAlasdair G Kergon set_bit(DMF_FROZEN, &md->flags); 2635040f04bdSChristoph Hellwig return r; 26361da177e4SLinus Torvalds } 26371da177e4SLinus Torvalds 26382ca3310eSAlasdair G Kergon static void unlock_fs(struct mapped_device *md) 26391da177e4SLinus Torvalds { 2640aa8d7c2fSAlasdair G Kergon if (!test_bit(DMF_FROZEN, &md->flags)) 2641aa8d7c2fSAlasdair G Kergon return; 2642977115c0SChristoph Hellwig thaw_bdev(md->disk->part0); 2643aa8d7c2fSAlasdair G Kergon clear_bit(DMF_FROZEN, &md->flags); 26441da177e4SLinus Torvalds } 26451da177e4SLinus Torvalds 26461da177e4SLinus Torvalds /* 2647b48633f8SBart Van Assche * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG 2648b48633f8SBart Van Assche * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE 2649b48633f8SBart Van Assche * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY 2650b48633f8SBart Van Assche * 2651ffcc3936SMike Snitzer * If __dm_suspend returns 0, the device is completely quiescent 2652ffcc3936SMike Snitzer * now. There is no request-processing activity. All new requests 2653ffcc3936SMike Snitzer * are being added to md->deferred list. 2654cec47e3dSKiyoshi Ueda */ 2655ffcc3936SMike Snitzer static int __dm_suspend(struct mapped_device *md, struct dm_table *map, 2656*86a3238cSHeinz Mauelshagen unsigned int suspend_flags, unsigned int task_state, 2657eaf9a736SMike Snitzer int dmf_suspended_flag) 26581da177e4SLinus Torvalds { 2659ffcc3936SMike Snitzer bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG; 2660ffcc3936SMike Snitzer bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG; 2661ffcc3936SMike Snitzer int r; 2662cf222b37SAlasdair G Kergon 26635a8f1f80SBart Van Assche lockdep_assert_held(&md->suspend_lock); 26645a8f1f80SBart Van Assche 26652e93ccc1SKiyoshi Ueda /* 26662e93ccc1SKiyoshi Ueda * DMF_NOFLUSH_SUSPENDING must be set before presuspend. 26672e93ccc1SKiyoshi Ueda * This flag is cleared before dm_suspend returns. 26682e93ccc1SKiyoshi Ueda */ 26692e93ccc1SKiyoshi Ueda if (noflush) 26702e93ccc1SKiyoshi Ueda set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 267186331f39SBart Van Assche else 2672ac75b09fSMike Snitzer DMDEBUG("%s: suspending with flush", dm_device_name(md)); 26732e93ccc1SKiyoshi Ueda 2674d67ee213SMike Snitzer /* 2675d67ee213SMike Snitzer * This gets reverted if there's an error later and the targets 2676d67ee213SMike Snitzer * provide the .presuspend_undo hook. 2677d67ee213SMike Snitzer */ 26781da177e4SLinus Torvalds dm_table_presuspend_targets(map); 26791da177e4SLinus Torvalds 26802e93ccc1SKiyoshi Ueda /* 26819f518b27SKiyoshi Ueda * Flush I/O to the device. 26829f518b27SKiyoshi Ueda * Any I/O submitted after lock_fs() may not be flushed. 26839f518b27SKiyoshi Ueda * noflush takes precedence over do_lockfs. 26849f518b27SKiyoshi Ueda * (lock_fs() flushes I/Os and waits for them to complete.) 
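 *
 * In table form (derived from the check just below):
 *
 *	noflush	do_lockfs	lock_fs() called?
 *	yes	either		no (noflush takes precedence)
 *	no	yes		yes
 *	no	no		no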
26852e93ccc1SKiyoshi Ueda */ 268632a926daSMikulas Patocka if (!noflush && do_lockfs) { 26872ca3310eSAlasdair G Kergon r = lock_fs(md); 2688d67ee213SMike Snitzer if (r) { 2689d67ee213SMike Snitzer dm_table_presuspend_undo_targets(map); 2690ffcc3936SMike Snitzer return r; 2691aa8d7c2fSAlasdair G Kergon } 2692d67ee213SMike Snitzer } 26931da177e4SLinus Torvalds 26941da177e4SLinus Torvalds /* 26953b00b203SMikulas Patocka * Here we must make sure that no processes are submitting requests 26963b00b203SMikulas Patocka * to target drivers, i.e. no one may be executing 269796c9865cSMike Snitzer * dm_split_and_process_bio from dm_submit_bio. 26983b00b203SMikulas Patocka * 269996c9865cSMike Snitzer * To get all processes out of dm_split_and_process_bio in dm_submit_bio, 27003b00b203SMikulas Patocka * we take the write lock. To prevent any process from reentering 270196c9865cSMike Snitzer * dm_split_and_process_bio from dm_submit_bio and to quiesce the thread 27020cede372SMike Snitzer * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call 27036a8736d1STejun Heo * flush_workqueue(md->wq). 27041da177e4SLinus Torvalds */ 27051eb787ecSAlasdair G Kergon set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 270641abc4e1SHannes Reinecke if (map) 270783d5e5b0SMikulas Patocka synchronize_srcu(&md->io_barrier); 27081da177e4SLinus Torvalds 2709d0bcb878SKiyoshi Ueda /* 271029e4013dSTejun Heo * Stop md->queue before flushing md->wq in case request-based 271129e4013dSTejun Heo * dm defers requests to md->wq from md->queue. 2712d0bcb878SKiyoshi Ueda */ 27136a23e05cSJens Axboe if (dm_request_based(md)) 2714eca7ee6dSMike Snitzer dm_stop_queue(md->queue); 2715cec47e3dSKiyoshi Ueda 2716d0bcb878SKiyoshi Ueda flush_workqueue(md->wq); 2717d0bcb878SKiyoshi Ueda 27181da177e4SLinus Torvalds /* 27193b00b203SMikulas Patocka * At this point no more requests are entering target request routines. 27203b00b203SMikulas Patocka * We call dm_wait_for_completion to wait for all existing requests 27213b00b203SMikulas Patocka * to finish. 27221da177e4SLinus Torvalds */ 2723b48633f8SBart Van Assche r = dm_wait_for_completion(md, task_state); 2724eaf9a736SMike Snitzer if (!r) 2725eaf9a736SMike Snitzer set_bit(dmf_suspended_flag, &md->flags); 27261da177e4SLinus Torvalds 27276d6f10dfSMilan Broz if (noflush) 2728022c2611SMikulas Patocka clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 272941abc4e1SHannes Reinecke if (map) 273083d5e5b0SMikulas Patocka synchronize_srcu(&md->io_barrier); 27312e93ccc1SKiyoshi Ueda 27321da177e4SLinus Torvalds /* were we interrupted? */ 273346125c1cSMilan Broz if (r < 0) { 27349a1fb464SMikulas Patocka dm_queue_flush(md); 273573d410c0SMilan Broz 2736cec47e3dSKiyoshi Ueda if (dm_request_based(md)) 2737eca7ee6dSMike Snitzer dm_start_queue(md->queue); 2738cec47e3dSKiyoshi Ueda 27392ca3310eSAlasdair G Kergon unlock_fs(md); 2740d67ee213SMike Snitzer dm_table_presuspend_undo_targets(map); 2741ffcc3936SMike Snitzer /* pushback list is already flushed, so skip flush */ 2742ffcc3936SMike Snitzer } 2743ffcc3936SMike Snitzer 2744ffcc3936SMike Snitzer return r; 27452ca3310eSAlasdair G Kergon } 27462ca3310eSAlasdair G Kergon 27473b00b203SMikulas Patocka /* 2748ffcc3936SMike Snitzer * We need to be able to change a mapping table under a mounted 2749ffcc3936SMike Snitzer * filesystem. For example we might want to move some data in 2750ffcc3936SMike Snitzer * the background.
Before the table can be swapped with 2751ffcc3936SMike Snitzer * dm_bind_table, dm_suspend must be called to flush any in 2752ffcc3936SMike Snitzer * flight bios and ensure that any further io gets deferred. 27533b00b203SMikulas Patocka */ 2754ffcc3936SMike Snitzer /* 2755ffcc3936SMike Snitzer * Suspend mechanism in request-based dm. 2756ffcc3936SMike Snitzer * 2757ffcc3936SMike Snitzer * 1. Flush all I/Os by lock_fs() if needed. 2758ffcc3936SMike Snitzer * 2. Stop dispatching any I/O by stopping the request_queue. 2759ffcc3936SMike Snitzer * 3. Wait for all in-flight I/Os to be completed or requeued. 2760ffcc3936SMike Snitzer * 2761ffcc3936SMike Snitzer * To abort suspend, start the request_queue. 2762ffcc3936SMike Snitzer */ 2763*86a3238cSHeinz Mauelshagen int dm_suspend(struct mapped_device *md, unsigned int suspend_flags) 2764ffcc3936SMike Snitzer { 2765ffcc3936SMike Snitzer struct dm_table *map = NULL; 2766ffcc3936SMike Snitzer int r = 0; 2767ffcc3936SMike Snitzer 2768ffcc3936SMike Snitzer retry: 2769ffcc3936SMike Snitzer mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); 2770ffcc3936SMike Snitzer 2771ffcc3936SMike Snitzer if (dm_suspended_md(md)) { 2772ffcc3936SMike Snitzer r = -EINVAL; 2773ffcc3936SMike Snitzer goto out_unlock; 2774ffcc3936SMike Snitzer } 2775ffcc3936SMike Snitzer 2776ffcc3936SMike Snitzer if (dm_suspended_internally_md(md)) { 2777ffcc3936SMike Snitzer /* already internally suspended, wait for internal resume */ 2778ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 2779ffcc3936SMike Snitzer r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); 2780ffcc3936SMike Snitzer if (r) 2781ffcc3936SMike Snitzer return r; 2782ffcc3936SMike Snitzer goto retry; 2783ffcc3936SMike Snitzer } 2784ffcc3936SMike Snitzer 2785a12f5d48SEric Dumazet map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2786ffcc3936SMike Snitzer 2787eaf9a736SMike Snitzer r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED); 2788ffcc3936SMike Snitzer if (r) 2789ffcc3936SMike Snitzer goto out_unlock; 27903b00b203SMikulas Patocka 27915df96f2bSMikulas Patocka set_bit(DMF_POST_SUSPENDING, &md->flags); 27924d4471cbSKiyoshi Ueda dm_table_postsuspend_targets(map); 27935df96f2bSMikulas Patocka clear_bit(DMF_POST_SUSPENDING, &md->flags); 27944d4471cbSKiyoshi Ueda 2795d287483dSAlasdair G Kergon out_unlock: 2796e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock); 2797cf222b37SAlasdair G Kergon return r; 27981da177e4SLinus Torvalds } 27991da177e4SLinus Torvalds 2800ffcc3936SMike Snitzer static int __dm_resume(struct mapped_device *md, struct dm_table *map) 28011da177e4SLinus Torvalds { 2802ffcc3936SMike Snitzer if (map) { 2803ffcc3936SMike Snitzer int r = dm_table_resume_targets(map); 28048757b776SMilan Broz if (r) 2805ffcc3936SMike Snitzer return r; 2806ffcc3936SMike Snitzer } 28072ca3310eSAlasdair G Kergon 28089a1fb464SMikulas Patocka dm_queue_flush(md); 28092ca3310eSAlasdair G Kergon 2810cec47e3dSKiyoshi Ueda /* 2811cec47e3dSKiyoshi Ueda * Flushing deferred I/Os must be done after targets are resumed 2812cec47e3dSKiyoshi Ueda * so that mapping of targets can work correctly. 2813cec47e3dSKiyoshi Ueda * Request-based dm is queueing the deferred I/Os in its request_queue. 
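 *
 * Overall, __dm_resume() therefore runs, in order:
 *	1. dm_table_resume_targets()	- targets are ready to map again
 *	2. dm_queue_flush()		- re-issue the deferred bios
 *	3. dm_start_queue()		- request-based dm only
 *	4. unlock_fs()			- thaw the filesystem last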
2814cec47e3dSKiyoshi Ueda */ 2815cec47e3dSKiyoshi Ueda if (dm_request_based(md)) 2816eca7ee6dSMike Snitzer dm_start_queue(md->queue); 2817cec47e3dSKiyoshi Ueda 28182ca3310eSAlasdair G Kergon unlock_fs(md); 28192ca3310eSAlasdair G Kergon 2820ffcc3936SMike Snitzer return 0; 2821ffcc3936SMike Snitzer } 2822ffcc3936SMike Snitzer 2823ffcc3936SMike Snitzer int dm_resume(struct mapped_device *md) 2824ffcc3936SMike Snitzer { 28258dc23658SMinfei Huang int r; 2826ffcc3936SMike Snitzer struct dm_table *map = NULL; 2827ffcc3936SMike Snitzer 2828ffcc3936SMike Snitzer retry: 28298dc23658SMinfei Huang r = -EINVAL; 2830ffcc3936SMike Snitzer mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); 2831ffcc3936SMike Snitzer 2832ffcc3936SMike Snitzer if (!dm_suspended_md(md)) 2833ffcc3936SMike Snitzer goto out; 2834ffcc3936SMike Snitzer 2835ffcc3936SMike Snitzer if (dm_suspended_internally_md(md)) { 2836ffcc3936SMike Snitzer /* already internally suspended, wait for internal resume */ 2837ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 2838ffcc3936SMike Snitzer r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); 2839ffcc3936SMike Snitzer if (r) 2840ffcc3936SMike Snitzer return r; 2841ffcc3936SMike Snitzer goto retry; 2842ffcc3936SMike Snitzer } 2843ffcc3936SMike Snitzer 2844a12f5d48SEric Dumazet map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2845ffcc3936SMike Snitzer if (!map || !dm_table_get_size(map)) 2846ffcc3936SMike Snitzer goto out; 2847ffcc3936SMike Snitzer 2848ffcc3936SMike Snitzer r = __dm_resume(md, map); 2849ffcc3936SMike Snitzer if (r) 2850ffcc3936SMike Snitzer goto out; 2851ffcc3936SMike Snitzer 28522ca3310eSAlasdair G Kergon clear_bit(DMF_SUSPENDED, &md->flags); 2853cf222b37SAlasdair G Kergon out: 2854e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock); 28552ca3310eSAlasdair G Kergon 2856cf222b37SAlasdair G Kergon return r; 28571da177e4SLinus Torvalds } 28581da177e4SLinus Torvalds 2859fd2ed4d2SMikulas Patocka /* 2860fd2ed4d2SMikulas Patocka * Internal suspend/resume works like userspace-driven suspend. It waits 2861fd2ed4d2SMikulas Patocka * until all bios finish and prevents issuing new bios to the target drivers. 2862fd2ed4d2SMikulas Patocka * It may be used only from the kernel. 2863fd2ed4d2SMikulas Patocka */ 2864fd2ed4d2SMikulas Patocka 2865*86a3238cSHeinz Mauelshagen static void __dm_internal_suspend(struct mapped_device *md, unsigned int suspend_flags) 2866ffcc3936SMike Snitzer { 2867ffcc3936SMike Snitzer struct dm_table *map = NULL; 2868ffcc3936SMike Snitzer 28691ea0654eSBart Van Assche lockdep_assert_held(&md->suspend_lock); 28701ea0654eSBart Van Assche 287196b26c8cSMikulas Patocka if (md->internal_suspend_count++) 2872ffcc3936SMike Snitzer return; /* nested internal suspend */ 2873ffcc3936SMike Snitzer 2874ffcc3936SMike Snitzer if (dm_suspended_md(md)) { 2875ffcc3936SMike Snitzer set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 2876ffcc3936SMike Snitzer return; /* nest suspend */ 2877ffcc3936SMike Snitzer } 2878ffcc3936SMike Snitzer 2879a12f5d48SEric Dumazet map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2880ffcc3936SMike Snitzer 2881ffcc3936SMike Snitzer /* 2882ffcc3936SMike Snitzer * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is 2883ffcc3936SMike Snitzer * supported. 
Properly supporting a TASK_INTERRUPTIBLE internal suspend 2884ffcc3936SMike Snitzer * would require changing .presuspend to return an error -- avoid this 2885ffcc3936SMike Snitzer * until there is a need for more elaborate variants of internal suspend. 2886ffcc3936SMike Snitzer */ 2887eaf9a736SMike Snitzer (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE, 2888eaf9a736SMike Snitzer DMF_SUSPENDED_INTERNALLY); 2889ffcc3936SMike Snitzer 28905df96f2bSMikulas Patocka set_bit(DMF_POST_SUSPENDING, &md->flags); 2891ffcc3936SMike Snitzer dm_table_postsuspend_targets(map); 28925df96f2bSMikulas Patocka clear_bit(DMF_POST_SUSPENDING, &md->flags); 2893ffcc3936SMike Snitzer } 2894ffcc3936SMike Snitzer 2895ffcc3936SMike Snitzer static void __dm_internal_resume(struct mapped_device *md) 2896ffcc3936SMike Snitzer { 289796b26c8cSMikulas Patocka BUG_ON(!md->internal_suspend_count); 289896b26c8cSMikulas Patocka 289996b26c8cSMikulas Patocka if (--md->internal_suspend_count) 2900ffcc3936SMike Snitzer return; /* resume from nested internal suspend */ 2901ffcc3936SMike Snitzer 2902ffcc3936SMike Snitzer if (dm_suspended_md(md)) 2903ffcc3936SMike Snitzer goto done; /* resume from nested suspend */ 2904ffcc3936SMike Snitzer 2905ffcc3936SMike Snitzer /* 2906ffcc3936SMike Snitzer * NOTE: existing callers don't need to call dm_table_resume_targets 2907ffcc3936SMike Snitzer * (which may fail -- so best to avoid it for now by passing NULL map) 2908ffcc3936SMike Snitzer */ 2909ffcc3936SMike Snitzer (void) __dm_resume(md, NULL); 2910ffcc3936SMike Snitzer 2911ffcc3936SMike Snitzer done: 2912ffcc3936SMike Snitzer clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 2913ffcc3936SMike Snitzer smp_mb__after_atomic(); 2914ffcc3936SMike Snitzer wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY); 2915ffcc3936SMike Snitzer } 2916ffcc3936SMike Snitzer 2917ffcc3936SMike Snitzer void dm_internal_suspend_noflush(struct mapped_device *md) 2918fd2ed4d2SMikulas Patocka { 2919fd2ed4d2SMikulas Patocka mutex_lock(&md->suspend_lock); 2920ffcc3936SMike Snitzer __dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG); 2921ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 2922ffcc3936SMike Snitzer } 2923ffcc3936SMike Snitzer EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush); 2924ffcc3936SMike Snitzer 2925ffcc3936SMike Snitzer void dm_internal_resume(struct mapped_device *md) 2926ffcc3936SMike Snitzer { 2927ffcc3936SMike Snitzer mutex_lock(&md->suspend_lock); 2928ffcc3936SMike Snitzer __dm_internal_resume(md); 2929ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 2930ffcc3936SMike Snitzer } 2931ffcc3936SMike Snitzer EXPORT_SYMBOL_GPL(dm_internal_resume); 2932ffcc3936SMike Snitzer 2933ffcc3936SMike Snitzer /* 2934ffcc3936SMike Snitzer * Fast variants of internal suspend/resume hold md->suspend_lock, 2935ffcc3936SMike Snitzer * which prevents interaction with userspace-driven suspend. 
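 *
 * Note that dm_internal_suspend_fast() deliberately returns with the
 * lock still held and dm_internal_resume_fast() is what releases it,
 * so the two must always be called as a pair from the same context:
 *
 *	dm_internal_suspend_fast(md);
 *	... md is quiesced, suspend_lock is held ...
 *	dm_internal_resume_fast(md);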
2936ffcc3936SMike Snitzer */ 2937ffcc3936SMike Snitzer 2938ffcc3936SMike Snitzer void dm_internal_suspend_fast(struct mapped_device *md) 2939ffcc3936SMike Snitzer { 2940ffcc3936SMike Snitzer mutex_lock(&md->suspend_lock); 2941ffcc3936SMike Snitzer if (dm_suspended_md(md) || dm_suspended_internally_md(md)) 2942fd2ed4d2SMikulas Patocka return; 2943fd2ed4d2SMikulas Patocka 2944fd2ed4d2SMikulas Patocka set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 2945fd2ed4d2SMikulas Patocka synchronize_srcu(&md->io_barrier); 2946fd2ed4d2SMikulas Patocka flush_workqueue(md->wq); 2947fd2ed4d2SMikulas Patocka dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); 2948fd2ed4d2SMikulas Patocka } 2949b735fedeSMikulas Patocka EXPORT_SYMBOL_GPL(dm_internal_suspend_fast); 2950fd2ed4d2SMikulas Patocka 2951ffcc3936SMike Snitzer void dm_internal_resume_fast(struct mapped_device *md) 2952fd2ed4d2SMikulas Patocka { 2953ffcc3936SMike Snitzer if (dm_suspended_md(md) || dm_suspended_internally_md(md)) 2954fd2ed4d2SMikulas Patocka goto done; 2955fd2ed4d2SMikulas Patocka 2956fd2ed4d2SMikulas Patocka dm_queue_flush(md); 2957fd2ed4d2SMikulas Patocka 2958fd2ed4d2SMikulas Patocka done: 2959fd2ed4d2SMikulas Patocka mutex_unlock(&md->suspend_lock); 2960fd2ed4d2SMikulas Patocka } 2961b735fedeSMikulas Patocka EXPORT_SYMBOL_GPL(dm_internal_resume_fast); 2962fd2ed4d2SMikulas Patocka 29631da177e4SLinus Torvalds /*----------------------------------------------------------------- 29641da177e4SLinus Torvalds * Event notification. 29651da177e4SLinus Torvalds *---------------------------------------------------------------*/ 29663abf85b5SPeter Rajnoha int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, 2967*86a3238cSHeinz Mauelshagen unsigned int cookie, bool need_resize_uevent) 296869267a30SAlasdair G Kergon { 29696958c1c6SMikulas Patocka int r; 2970*86a3238cSHeinz Mauelshagen unsigned int noio_flag; 297160935eb2SMilan Broz char udev_cookie[DM_COOKIE_LENGTH]; 29727533afa1SMikulas Patocka char *envp[3] = { NULL, NULL, NULL }; 29737533afa1SMikulas Patocka char **envpp = envp; 29747533afa1SMikulas Patocka if (cookie) { 29757533afa1SMikulas Patocka snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u", 29767533afa1SMikulas Patocka DM_COOKIE_ENV_VAR_NAME, cookie); 29777533afa1SMikulas Patocka *envpp++ = udev_cookie; 29787533afa1SMikulas Patocka } 29797533afa1SMikulas Patocka if (need_resize_uevent) { 29807533afa1SMikulas Patocka *envpp++ = "RESIZE=1"; 29817533afa1SMikulas Patocka } 298260935eb2SMilan Broz 29836958c1c6SMikulas Patocka noio_flag = memalloc_noio_save(); 29846958c1c6SMikulas Patocka 29857533afa1SMikulas Patocka r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj, action, envp); 29866958c1c6SMikulas Patocka 29876958c1c6SMikulas Patocka memalloc_noio_restore(noio_flag); 29886958c1c6SMikulas Patocka 29896958c1c6SMikulas Patocka return r; 299069267a30SAlasdair G Kergon } 299169267a30SAlasdair G Kergon 29927a8c3d3bSMike Anderson uint32_t dm_next_uevent_seq(struct mapped_device *md) 29937a8c3d3bSMike Anderson { 29947a8c3d3bSMike Anderson return atomic_add_return(1, &md->uevent_seq); 29957a8c3d3bSMike Anderson } 29967a8c3d3bSMike Anderson 29971da177e4SLinus Torvalds uint32_t dm_get_event_nr(struct mapped_device *md) 29981da177e4SLinus Torvalds { 29991da177e4SLinus Torvalds return atomic_read(&md->event_nr); 30001da177e4SLinus Torvalds } 30011da177e4SLinus Torvalds 30021da177e4SLinus Torvalds int dm_wait_event(struct mapped_device *md, int event_nr) 30031da177e4SLinus Torvalds { 30041da177e4SLinus Torvalds return 
wait_event_interruptible(md->eventq, 30051da177e4SLinus Torvalds (event_nr != atomic_read(&md->event_nr))); 30061da177e4SLinus Torvalds } 30071da177e4SLinus Torvalds 30087a8c3d3bSMike Anderson void dm_uevent_add(struct mapped_device *md, struct list_head *elist) 30097a8c3d3bSMike Anderson { 30107a8c3d3bSMike Anderson unsigned long flags; 30117a8c3d3bSMike Anderson 30127a8c3d3bSMike Anderson spin_lock_irqsave(&md->uevent_lock, flags); 30137a8c3d3bSMike Anderson list_add(elist, &md->uevent_list); 30147a8c3d3bSMike Anderson spin_unlock_irqrestore(&md->uevent_lock, flags); 30157a8c3d3bSMike Anderson } 30167a8c3d3bSMike Anderson 30171da177e4SLinus Torvalds /* 30181da177e4SLinus Torvalds * The gendisk is only valid as long as you have a reference 30191da177e4SLinus Torvalds * count on 'md'. 30201da177e4SLinus Torvalds */ 30211da177e4SLinus Torvalds struct gendisk *dm_disk(struct mapped_device *md) 30221da177e4SLinus Torvalds { 30231da177e4SLinus Torvalds return md->disk; 30241da177e4SLinus Torvalds } 302565ff5b7dSSami Tolvanen EXPORT_SYMBOL_GPL(dm_disk); 30261da177e4SLinus Torvalds 3027784aae73SMilan Broz struct kobject *dm_kobject(struct mapped_device *md) 3028784aae73SMilan Broz { 30292995fa78SMikulas Patocka return &md->kobj_holder.kobj; 3030784aae73SMilan Broz } 3031784aae73SMilan Broz 3032784aae73SMilan Broz struct mapped_device *dm_get_from_kobject(struct kobject *kobj) 3033784aae73SMilan Broz { 3034784aae73SMilan Broz struct mapped_device *md; 3035784aae73SMilan Broz 30362995fa78SMikulas Patocka md = container_of(kobj, struct mapped_device, kobj_holder.kobj); 3037784aae73SMilan Broz 3038b9a41d21SHou Tao spin_lock(&_minor_lock); 3039b9a41d21SHou Tao if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { 3040b9a41d21SHou Tao md = NULL; 3041b9a41d21SHou Tao goto out; 3042b9a41d21SHou Tao } 3043784aae73SMilan Broz dm_get(md); 3044b9a41d21SHou Tao out: 3045b9a41d21SHou Tao spin_unlock(&_minor_lock); 3046b9a41d21SHou Tao 3047784aae73SMilan Broz return md; 3048784aae73SMilan Broz } 3049784aae73SMilan Broz 30504f186f8bSKiyoshi Ueda int dm_suspended_md(struct mapped_device *md) 30511da177e4SLinus Torvalds { 30521da177e4SLinus Torvalds return test_bit(DMF_SUSPENDED, &md->flags); 30531da177e4SLinus Torvalds } 30541da177e4SLinus Torvalds 30555df96f2bSMikulas Patocka static int dm_post_suspending_md(struct mapped_device *md) 30565df96f2bSMikulas Patocka { 30575df96f2bSMikulas Patocka return test_bit(DMF_POST_SUSPENDING, &md->flags); 30585df96f2bSMikulas Patocka } 30595df96f2bSMikulas Patocka 3060ffcc3936SMike Snitzer int dm_suspended_internally_md(struct mapped_device *md) 3061ffcc3936SMike Snitzer { 3062ffcc3936SMike Snitzer return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 3063ffcc3936SMike Snitzer } 3064ffcc3936SMike Snitzer 30652c140a24SMikulas Patocka int dm_test_deferred_remove_flag(struct mapped_device *md) 30662c140a24SMikulas Patocka { 30672c140a24SMikulas Patocka return test_bit(DMF_DEFERRED_REMOVE, &md->flags); 30682c140a24SMikulas Patocka } 30692c140a24SMikulas Patocka 307064dbce58SKiyoshi Ueda int dm_suspended(struct dm_target *ti) 307164dbce58SKiyoshi Ueda { 307233bd6f06SMike Snitzer return dm_suspended_md(ti->table->md); 307364dbce58SKiyoshi Ueda } 307464dbce58SKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_suspended); 307564dbce58SKiyoshi Ueda 30765df96f2bSMikulas Patocka int dm_post_suspending(struct dm_target *ti) 30775df96f2bSMikulas Patocka { 307833bd6f06SMike Snitzer return dm_post_suspending_md(ti->table->md); 30795df96f2bSMikulas Patocka } 30805df96f2bSMikulas Patocka 
EXPORT_SYMBOL_GPL(dm_post_suspending); 30815df96f2bSMikulas Patocka 30822e93ccc1SKiyoshi Ueda int dm_noflush_suspending(struct dm_target *ti) 30832e93ccc1SKiyoshi Ueda { 308433bd6f06SMike Snitzer return __noflush_suspending(ti->table->md); 30852e93ccc1SKiyoshi Ueda } 30862e93ccc1SKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_noflush_suspending); 30872e93ccc1SKiyoshi Ueda 3088e6ee8c0bSKiyoshi Ueda void dm_free_md_mempools(struct dm_md_mempools *pools) 3089e6ee8c0bSKiyoshi Ueda { 3090e6ee8c0bSKiyoshi Ueda if (!pools) 3091e6ee8c0bSKiyoshi Ueda return; 3092e6ee8c0bSKiyoshi Ueda 30936f1c819cSKent Overstreet bioset_exit(&pools->bs); 30946f1c819cSKent Overstreet bioset_exit(&pools->io_bs); 3095e6ee8c0bSKiyoshi Ueda 3096e6ee8c0bSKiyoshi Ueda kfree(pools); 3097e6ee8c0bSKiyoshi Ueda } 3098e6ee8c0bSKiyoshi Ueda 30999c72bad1SChristoph Hellwig struct dm_pr { 31009c72bad1SChristoph Hellwig u64 old_key; 31019c72bad1SChristoph Hellwig u64 new_key; 31029c72bad1SChristoph Hellwig u32 flags; 3103c6adada5SMike Christie bool abort; 31049c72bad1SChristoph Hellwig bool fail_early; 31058dd87f3cSMike Christie int ret; 310670151087SMike Christie enum pr_type type; 31079c72bad1SChristoph Hellwig }; 31089c72bad1SChristoph Hellwig 31099c72bad1SChristoph Hellwig static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn, 31108dd87f3cSMike Christie struct dm_pr *pr) 31119c72bad1SChristoph Hellwig { 31129c72bad1SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data; 31139c72bad1SChristoph Hellwig struct dm_table *table; 31149c72bad1SChristoph Hellwig struct dm_target *ti; 31159c72bad1SChristoph Hellwig int ret = -ENOTTY, srcu_idx; 31169c72bad1SChristoph Hellwig 31179c72bad1SChristoph Hellwig table = dm_get_live_table(md, &srcu_idx); 31189c72bad1SChristoph Hellwig if (!table || !dm_table_get_size(table)) 31199c72bad1SChristoph Hellwig goto out; 31209c72bad1SChristoph Hellwig 31219c72bad1SChristoph Hellwig /* We only support devices that have a single target */ 31222aec377aSMike Snitzer if (table->num_targets != 1) 31239c72bad1SChristoph Hellwig goto out; 31249c72bad1SChristoph Hellwig ti = dm_table_get_target(table, 0); 31259c72bad1SChristoph Hellwig 3126e120a5f1SMike Snitzer if (dm_suspended_md(md)) { 3127e120a5f1SMike Snitzer ret = -EAGAIN; 3128e120a5f1SMike Snitzer goto out; 3129e120a5f1SMike Snitzer } 3130e120a5f1SMike Snitzer 31319c72bad1SChristoph Hellwig ret = -EINVAL; 31329c72bad1SChristoph Hellwig if (!ti->type->iterate_devices) 31339c72bad1SChristoph Hellwig goto out; 31349c72bad1SChristoph Hellwig 31358dd87f3cSMike Christie ti->type->iterate_devices(ti, fn, pr); 31368dd87f3cSMike Christie ret = 0; 31379c72bad1SChristoph Hellwig out: 31389c72bad1SChristoph Hellwig dm_put_live_table(md, srcu_idx); 31399c72bad1SChristoph Hellwig return ret; 31409c72bad1SChristoph Hellwig } 31419c72bad1SChristoph Hellwig 31429c72bad1SChristoph Hellwig /* 31439c72bad1SChristoph Hellwig * For register / unregister we need to manually call out to every path. 
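 *
 * Each __dm_pr_* helper below follows the same iterate_devices callout
 * contract: record the per-device outcome in dm_pr.ret and return 0 to
 * continue with the next path, or -1 to stop iterating.  A caller sketch
 * (names illustrative, mirroring dm_pr_register() below):
 *
 *	struct dm_pr pr = { .old_key = key, .fail_early = true, .ret = 0 };
 *	int ret = dm_call_pr(bdev, __dm_pr_register, &pr);
 *	if (!ret)
 *		ret = pr.ret;	(first per-device error, if any)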
31449c72bad1SChristoph Hellwig */ 31459c72bad1SChristoph Hellwig static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev, 31469c72bad1SChristoph Hellwig sector_t start, sector_t len, void *data) 31479c72bad1SChristoph Hellwig { 31489c72bad1SChristoph Hellwig struct dm_pr *pr = data; 31499c72bad1SChristoph Hellwig const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; 31508dd87f3cSMike Christie int ret; 31519c72bad1SChristoph Hellwig 31528dd87f3cSMike Christie if (!ops || !ops->pr_register) { 31538dd87f3cSMike Christie pr->ret = -EOPNOTSUPP; 31548dd87f3cSMike Christie return -1; 31558dd87f3cSMike Christie } 31568dd87f3cSMike Christie 31578dd87f3cSMike Christie ret = ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags); 31588dd87f3cSMike Christie if (!ret) 31598dd87f3cSMike Christie return 0; 31608dd87f3cSMike Christie 31618dd87f3cSMike Christie if (!pr->ret) 31628dd87f3cSMike Christie pr->ret = ret; 31638dd87f3cSMike Christie 31648dd87f3cSMike Christie if (pr->fail_early) 31658dd87f3cSMike Christie return -1; 31668dd87f3cSMike Christie 31678dd87f3cSMike Christie return 0; 31689c72bad1SChristoph Hellwig } 31699c72bad1SChristoph Hellwig 317071cdb697SChristoph Hellwig static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key, 317171cdb697SChristoph Hellwig u32 flags) 317271cdb697SChristoph Hellwig { 31739c72bad1SChristoph Hellwig struct dm_pr pr = { 31749c72bad1SChristoph Hellwig .old_key = old_key, 31759c72bad1SChristoph Hellwig .new_key = new_key, 31769c72bad1SChristoph Hellwig .flags = flags, 31779c72bad1SChristoph Hellwig .fail_early = true, 31788dd87f3cSMike Christie .ret = 0, 31799c72bad1SChristoph Hellwig }; 31809c72bad1SChristoph Hellwig int ret; 318171cdb697SChristoph Hellwig 31829c72bad1SChristoph Hellwig ret = dm_call_pr(bdev, __dm_pr_register, &pr); 31838dd87f3cSMike Christie if (ret) { 31848dd87f3cSMike Christie /* Didn't even get to register a path */ 31858dd87f3cSMike Christie return ret; 31868dd87f3cSMike Christie } 31878dd87f3cSMike Christie 31888dd87f3cSMike Christie if (!pr.ret) 31898dd87f3cSMike Christie return 0; 31908dd87f3cSMike Christie ret = pr.ret; 31918dd87f3cSMike Christie 31928dd87f3cSMike Christie if (!new_key) 31938dd87f3cSMike Christie return ret; 31948dd87f3cSMike Christie 31959c72bad1SChristoph Hellwig /* unregister all paths if we failed to register any path */ 31969c72bad1SChristoph Hellwig pr.old_key = new_key; 31979c72bad1SChristoph Hellwig pr.new_key = 0; 31989c72bad1SChristoph Hellwig pr.flags = 0; 31999c72bad1SChristoph Hellwig pr.fail_early = false; 32008dd87f3cSMike Christie (void) dm_call_pr(bdev, __dm_pr_register, &pr); 32019c72bad1SChristoph Hellwig return ret; 320271cdb697SChristoph Hellwig } 3203956a4025SMike Snitzer 320470151087SMike Christie 320570151087SMike Christie static int __dm_pr_reserve(struct dm_target *ti, struct dm_dev *dev, 320670151087SMike Christie sector_t start, sector_t len, void *data) 320770151087SMike Christie { 320870151087SMike Christie struct dm_pr *pr = data; 320970151087SMike Christie const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; 321070151087SMike Christie 321170151087SMike Christie if (!ops || !ops->pr_reserve) { 321270151087SMike Christie pr->ret = -EOPNOTSUPP; 321370151087SMike Christie return -1; 321470151087SMike Christie } 321570151087SMike Christie 321670151087SMike Christie pr->ret = ops->pr_reserve(dev->bdev, pr->old_key, pr->type, pr->flags); 321770151087SMike Christie if (!pr->ret) 321870151087SMike Christie return -1; 321970151087SMike 
Christie 322070151087SMike Christie return 0; 322171cdb697SChristoph Hellwig } 322271cdb697SChristoph Hellwig 322371cdb697SChristoph Hellwig static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type, 322471cdb697SChristoph Hellwig u32 flags) 322571cdb697SChristoph Hellwig { 322670151087SMike Christie struct dm_pr pr = { 322770151087SMike Christie .old_key = key, 322870151087SMike Christie .flags = flags, 322970151087SMike Christie .type = type, 323070151087SMike Christie .fail_early = false, 323170151087SMike Christie .ret = 0, 323270151087SMike Christie }; 323370151087SMike Christie int ret; 323471cdb697SChristoph Hellwig 323570151087SMike Christie ret = dm_call_pr(bdev, __dm_pr_reserve, &pr); 323670151087SMike Christie if (ret) 323770151087SMike Christie return ret; 323871cdb697SChristoph Hellwig 323970151087SMike Christie return pr.ret; 324071cdb697SChristoph Hellwig } 324171cdb697SChristoph Hellwig 324208a3c338SMike Christie /* 324308a3c338SMike Christie * If there is a non-All Registrants type of reservation, the release must be 324408a3c338SMike Christie * sent down the holding path. For the cases where there is no reservation, or 324508a3c338SMike Christie * the path is not the holder, the device will also return success, so we must 324608a3c338SMike Christie * try each path to make sure we reach the correct one. 324708a3c338SMike Christie */ 324808a3c338SMike Christie static int __dm_pr_release(struct dm_target *ti, struct dm_dev *dev, 324908a3c338SMike Christie sector_t start, sector_t len, void *data) 325008a3c338SMike Christie { 325108a3c338SMike Christie struct dm_pr *pr = data; 325208a3c338SMike Christie const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; 325308a3c338SMike Christie 325408a3c338SMike Christie if (!ops || !ops->pr_release) { 325508a3c338SMike Christie pr->ret = -EOPNOTSUPP; 325608a3c338SMike Christie return -1; 325708a3c338SMike Christie } 325808a3c338SMike Christie 325908a3c338SMike Christie pr->ret = ops->pr_release(dev->bdev, pr->old_key, pr->type); 326008a3c338SMike Christie if (pr->ret) 326108a3c338SMike Christie return -1; 326208a3c338SMike Christie 326308a3c338SMike Christie return 0; 326471cdb697SChristoph Hellwig } 326571cdb697SChristoph Hellwig 326671cdb697SChristoph Hellwig static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type) 326771cdb697SChristoph Hellwig { 326808a3c338SMike Christie struct dm_pr pr = { 326908a3c338SMike Christie .old_key = key, 327008a3c338SMike Christie .type = type, 327108a3c338SMike Christie .fail_early = false, 327208a3c338SMike Christie }; 327308a3c338SMike Christie int ret; 327471cdb697SChristoph Hellwig 327508a3c338SMike Christie ret = dm_call_pr(bdev, __dm_pr_release, &pr); 327608a3c338SMike Christie if (ret) 327708a3c338SMike Christie return ret; 327871cdb697SChristoph Hellwig 327908a3c338SMike Christie return pr.ret; 328071cdb697SChristoph Hellwig } 328171cdb697SChristoph Hellwig 3282c6adada5SMike Christie static int __dm_pr_preempt(struct dm_target *ti, struct dm_dev *dev, 3283c6adada5SMike Christie sector_t start, sector_t len, void *data) 3284c6adada5SMike Christie { 3285c6adada5SMike Christie struct dm_pr *pr = data; 3286c6adada5SMike Christie const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; 3287c6adada5SMike Christie 3288c6adada5SMike Christie if (!ops || !ops->pr_preempt) { 3289c6adada5SMike Christie pr->ret = -EOPNOTSUPP; 3290c6adada5SMike Christie return -1; 3291c6adada5SMike Christie } 3292c6adada5SMike Christie 3293c6adada5SMike Christie pr->ret =
ops->pr_preempt(dev->bdev, pr->old_key, pr->new_key, pr->type, 3294c6adada5SMike Christie pr->abort); 3295c6adada5SMike Christie if (!pr->ret) 3296c6adada5SMike Christie return -1; 3297c6adada5SMike Christie 3298c6adada5SMike Christie return 0; 329971cdb697SChristoph Hellwig } 330071cdb697SChristoph Hellwig 330171cdb697SChristoph Hellwig static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key, 330271cdb697SChristoph Hellwig enum pr_type type, bool abort) 330371cdb697SChristoph Hellwig { 3304c6adada5SMike Christie struct dm_pr pr = { 3305c6adada5SMike Christie .new_key = new_key, 3306c6adada5SMike Christie .old_key = old_key, 3307c6adada5SMike Christie .type = type, 3308c6adada5SMike Christie .fail_early = false, 3309c6adada5SMike Christie }; 3310c6adada5SMike Christie int ret; 331171cdb697SChristoph Hellwig 3312c6adada5SMike Christie ret = dm_call_pr(bdev, __dm_pr_preempt, &pr); 3313c6adada5SMike Christie if (ret) 3314c6adada5SMike Christie return ret; 331571cdb697SChristoph Hellwig 3316c6adada5SMike Christie return pr.ret; 331771cdb697SChristoph Hellwig } 331871cdb697SChristoph Hellwig 331971cdb697SChristoph Hellwig static int dm_pr_clear(struct block_device *bdev, u64 key) 332071cdb697SChristoph Hellwig { 332171cdb697SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data; 332271cdb697SChristoph Hellwig const struct pr_ops *ops; 3323971888c4SMike Snitzer int r, srcu_idx; 332471cdb697SChristoph Hellwig 33255bd5e8d8SMike Snitzer r = dm_prepare_ioctl(md, &srcu_idx, &bdev); 332671cdb697SChristoph Hellwig if (r < 0) 3327971888c4SMike Snitzer goto out; 332871cdb697SChristoph Hellwig 332971cdb697SChristoph Hellwig ops = bdev->bd_disk->fops->pr_ops; 333071cdb697SChristoph Hellwig if (ops && ops->pr_clear) 333171cdb697SChristoph Hellwig r = ops->pr_clear(bdev, key); 333271cdb697SChristoph Hellwig else 333371cdb697SChristoph Hellwig r = -EOPNOTSUPP; 3334971888c4SMike Snitzer out: 3335971888c4SMike Snitzer dm_unprepare_ioctl(md, srcu_idx); 333671cdb697SChristoph Hellwig return r; 333771cdb697SChristoph Hellwig } 333871cdb697SChristoph Hellwig 333971cdb697SChristoph Hellwig static const struct pr_ops dm_pr_ops = { 334071cdb697SChristoph Hellwig .pr_register = dm_pr_register, 334171cdb697SChristoph Hellwig .pr_reserve = dm_pr_reserve, 334271cdb697SChristoph Hellwig .pr_release = dm_pr_release, 334371cdb697SChristoph Hellwig .pr_preempt = dm_pr_preempt, 334471cdb697SChristoph Hellwig .pr_clear = dm_pr_clear, 334571cdb697SChristoph Hellwig }; 334671cdb697SChristoph Hellwig 334783d5cde4SAlexey Dobriyan static const struct block_device_operations dm_blk_dops = { 3348c62b37d9SChristoph Hellwig .submit_bio = dm_submit_bio, 3349b99fdcdcSMing Lei .poll_bio = dm_poll_bio, 33501da177e4SLinus Torvalds .open = dm_blk_open, 33511da177e4SLinus Torvalds .release = dm_blk_close, 3352aa129a22SMilan Broz .ioctl = dm_blk_ioctl, 33533ac51e74SDarrick J. 
Wong .getgeo = dm_blk_getgeo, 3354e76239a3SChristoph Hellwig .report_zones = dm_blk_report_zones, 335571cdb697SChristoph Hellwig .pr_ops = &dm_pr_ops, 33561da177e4SLinus Torvalds .owner = THIS_MODULE 33571da177e4SLinus Torvalds }; 33581da177e4SLinus Torvalds 3359681cc5e8SMike Snitzer static const struct block_device_operations dm_rq_blk_dops = { 3360681cc5e8SMike Snitzer .open = dm_blk_open, 3361681cc5e8SMike Snitzer .release = dm_blk_close, 3362681cc5e8SMike Snitzer .ioctl = dm_blk_ioctl, 3363681cc5e8SMike Snitzer .getgeo = dm_blk_getgeo, 3364681cc5e8SMike Snitzer .pr_ops = &dm_pr_ops, 3365681cc5e8SMike Snitzer .owner = THIS_MODULE 3366681cc5e8SMike Snitzer }; 3367681cc5e8SMike Snitzer 3368f26c5719SDan Williams static const struct dax_operations dm_dax_ops = { 3369f26c5719SDan Williams .direct_access = dm_dax_direct_access, 3370cdf6cdcdSVivek Goyal .zero_page_range = dm_dax_zero_page_range, 3371047218ecSJane Chu .recovery_write = dm_dax_recovery_write, 3372f26c5719SDan Williams }; 3373f26c5719SDan Williams 33741da177e4SLinus Torvalds /* 33751da177e4SLinus Torvalds * module hooks 33761da177e4SLinus Torvalds */ 33771da177e4SLinus Torvalds module_init(dm_init); 33781da177e4SLinus Torvalds module_exit(dm_exit); 33791da177e4SLinus Torvalds 33801da177e4SLinus Torvalds module_param(major, uint, 0); 33811da177e4SLinus Torvalds MODULE_PARM_DESC(major, "The major number of the device mapper"); 3382f4790826SMike Snitzer 3383e8603136SMike Snitzer module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR); 3384e8603136SMike Snitzer MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools"); 3385e8603136SMike Snitzer 3386115485e8SMike Snitzer module_param(dm_numa_node, int, S_IRUGO | S_IWUSR); 3387115485e8SMike Snitzer MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations"); 3388115485e8SMike Snitzer 3389a666e5c0SMikulas Patocka module_param(swap_bios, int, S_IRUGO | S_IWUSR); 3390a666e5c0SMikulas Patocka MODULE_PARM_DESC(swap_bios, "Maximum allowed inflight swap IOs"); 3391a666e5c0SMikulas Patocka 33921da177e4SLinus Torvalds MODULE_DESCRIPTION(DM_NAME " driver"); 33931da177e4SLinus Torvalds MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); 33941da177e4SLinus Torvalds MODULE_LICENSE("GPL");
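/*
 * Usage note (illustrative only, not from this file): the module
 * parameters declared above can be set at load time, e.g.
 *
 *	modprobe dm-mod major=240 reserved_bio_based_ios=16
 *
 * or, with device-mapper built in, on the kernel command line via the
 * dm_mod. prefix (e.g. dm_mod.major=240).
 */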