/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"
#include "dm-uevent.h"
#include "dm-ima.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/uio.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/pr.h>
#include <linux/refcount.h>
#include <linux/part_stat.h>
#include <linux/blk-crypto.h>
#include <linux/blk-crypto-profile.h>

#define DM_MSG_PREFIX "core"

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

/*
 * For REQ_POLLED fs bio, this flag is set if we link mapped underlying
 * dm_io into one list, and reuse bio->bi_private as the list head. Before
 * ending this fs bio, we will recover its ->bi_private.
 */
#define REQ_DM_POLL_LIST	REQ_DRV

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_IDR(_minor_idr);

static DEFINE_SPINLOCK(_minor_lock);

static void do_deferred_remove(struct work_struct *w);

static DECLARE_WORK(deferred_remove_work, do_deferred_remove);

static struct workqueue_struct *deferred_remove_workqueue;

atomic_t dm_global_event_nr = ATOMIC_INIT(0);
DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);

void dm_issue_global_event(void)
{
	atomic_inc(&dm_global_event_nr);
	wake_up(&dm_global_eventq);
}

DEFINE_STATIC_KEY_FALSE(stats_enabled);
DEFINE_STATIC_KEY_FALSE(swap_bios_enabled);
DEFINE_STATIC_KEY_FALSE(zoned_enabled);

/*
 * One of these is allocated (on-stack) per original bio.
 */
struct clone_info {
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	unsigned sector_count;
	bool is_abnormal_io:1;
	bool submit_as_polled:1;
};

#define DM_TARGET_IO_BIO_OFFSET (offsetof(struct dm_target_io, clone))
#define DM_IO_BIO_OFFSET \
	(offsetof(struct dm_target_io, clone) + offsetof(struct dm_io, tio))

static inline struct dm_target_io *clone_to_tio(struct bio *clone)
{
	return container_of(clone, struct dm_target_io, clone);
}

void *dm_per_bio_data(struct bio *bio, size_t data_size)
{
	if (!dm_tio_flagged(clone_to_tio(bio), DM_TIO_INSIDE_DM_IO))
		return (char *)bio - DM_TARGET_IO_BIO_OFFSET - data_size;
	return (char *)bio - DM_IO_BIO_OFFSET - data_size;
}
EXPORT_SYMBOL_GPL(dm_per_bio_data);

struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
{
	struct dm_io *io = (struct dm_io *)((char *)data + data_size);

	if (io->magic == DM_IO_MAGIC)
		return (struct bio *)((char *)io + DM_IO_BIO_OFFSET);
	BUG_ON(io->magic != DM_TIO_MAGIC);
	return (struct bio *)((char *)io + DM_TARGET_IO_BIO_OFFSET);
}
EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data);
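/*
 * Illustrative sketch, not part of dm.c: how a bio-based target uses the
 * per-bio-data accessors above. The "example_*" names and struct are
 * hypothetical; a real target sets ti->per_io_data_size in its ctr so DM
 * reserves the space in front of each clone bio.
 */
#if 0
struct example_ctx {
	sector_t mapped_sector;
};

static int example_map(struct dm_target *ti, struct bio *bio)
{
	struct example_ctx *ctx = dm_per_bio_data(bio, sizeof(struct example_ctx));

	ctx->mapped_sector = bio->bi_iter.bi_sector;	/* stash state for end_io */
	return DM_MAPIO_REMAPPED;
}

static int example_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *error)
{
	struct example_ctx *ctx = dm_per_bio_data(bio, sizeof(struct example_ctx));
	/* the data pointer can be mapped back to its bio, e.g. off a deferred list */
	struct bio *same = dm_bio_from_per_bio_data(ctx, sizeof(struct example_ctx));

	WARN_ON(same != bio);
	return DM_ENDIO_DONE;
}
#endif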
unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
{
	return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
}
EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);

#define MINOR_ALLOCED ((void *)-1)

#define DM_NUMA_NODE NUMA_NO_NODE
static int dm_numa_node = DM_NUMA_NODE;

#define DEFAULT_SWAP_BIOS	(8 * 1048576 / PAGE_SIZE)
static int swap_bios = DEFAULT_SWAP_BIOS;
static int get_swap_bios(void)
{
	int latch = READ_ONCE(swap_bios);
	if (unlikely(latch <= 0))
		latch = DEFAULT_SWAP_BIOS;
	return latch;
}

struct table_device {
	struct list_head list;
	refcount_t count;
	struct dm_dev dm_dev;
};

/*
 * Bio-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_BIO_BASED_IOS		16
static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;

static int __dm_get_module_param_int(int *module_param, int min, int max)
{
	int param = READ_ONCE(*module_param);
	int modified_param = 0;
	bool modified = true;

	if (param < min)
		modified_param = min;
	else if (param > max)
		modified_param = max;
	else
		modified = false;

	if (modified) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned __dm_get_module_param(unsigned *module_param,
			       unsigned def, unsigned max)
{
	unsigned param = READ_ONCE(*module_param);
	unsigned modified_param = 0;

	if (!param)
		modified_param = def;
	else if (param > max)
		modified_param = max;

	if (modified_param) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned dm_get_reserved_bio_based_ios(void)
{
	return __dm_get_module_param(&reserved_bio_based_ios,
				     RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);

static unsigned dm_get_numa_node(void)
{
	return __dm_get_module_param_int(&dm_numa_node,
					 DM_NUMA_NODE, num_online_nodes() - 1);
}

static int __init local_init(void)
{
	int r;

	r = dm_uevent_init();
	if (r)
		return r;

	deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
	if (!deferred_remove_workqueue) {
		r = -ENOMEM;
		goto out_uevent_exit;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_free_workqueue;

	if (!_major)
		_major = r;

	return 0;

out_free_workqueue:
	destroy_workqueue(deferred_remove_workqueue);
out_uevent_exit:
	dm_uevent_exit();

	return r;
}

static void local_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(deferred_remove_workqueue);

	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_io_init,
	dm_kcopyd_init,
	dm_interface_init,
	dm_statistics_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_io_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
	dm_statistics_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);
	int r, i;

#if (IS_ENABLED(CONFIG_IMA) && !IS_ENABLED(CONFIG_IMA_DISABLE_HTABLE))
	DMWARN("CONFIG_IMA_DISABLE_HTABLE is disabled."
	       " Duplicate IMA measurements will not be recorded in the IMA log.");
#endif

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;
bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();

	/*
	 * Should be empty by this point.
	 */
	idr_destroy(&_minor_idr);
}

/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{
	return test_bit(DMF_DELETING, &md->flags);
}

static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);
out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}

static void dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = disk->private_data;
	if (WARN_ON(!md))
		goto out;

	if (atomic_dec_and_test(&md->open_count) &&
	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
		queue_work(deferred_remove_workqueue, &deferred_remove_work);

	dm_put(md);
out:
	spin_unlock(&_minor_lock);
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md)) {
		r = -EBUSY;
		if (mark_deferred)
			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
		r = -EEXIST;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

int dm_cancel_deferred_remove(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (test_bit(DMF_DELETING, &md->flags))
		r = -EBUSY;
	else
		clear_bit(DMF_DEFERRED_REMOVE, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static void do_deferred_remove(struct work_struct *w)
{
	dm_deferred_remove();
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
			    struct block_device **bdev)
{
	struct dm_target *tgt;
	struct dm_table *map;
	int r;

retry:
	r = -ENOTTY;
	map = dm_get_live_table(md, srcu_idx);
	if (!map || !dm_table_get_size(map))
		return r;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		return r;

	tgt = dm_table_get_target(map, 0);
	if (!tgt->type->prepare_ioctl)
		return r;

	if (dm_suspended_md(md))
		return -EAGAIN;

	r = tgt->type->prepare_ioctl(tgt, bdev);
	if (r == -ENOTCONN && !fatal_signal_pending(current)) {
		dm_put_live_table(md, *srcu_idx);
		msleep(10);
		goto retry;
	}

	return r;
}

static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
{
	dm_put_live_table(md, srcu_idx);
}

static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	if (r > 0) {
		/*
		 * Target determined this ioctl is being issued against a
		 * subset of the parent bdev; require extra privileges.
		 */
		if (!capable(CAP_SYS_RAWIO)) {
			DMDEBUG_LIMIT(
	"%s: sending ioctl %x to DM device without required privilege.",
				current->comm, cmd);
			r = -ENOIOCTLCMD;
			goto out;
		}
	}

	if (!bdev->bd_disk->fops->ioctl)
		r = -ENOTTY;
	else
		r = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}

u64 dm_start_time_ns_from_clone(struct bio *bio)
{
	return jiffies_to_nsecs(clone_to_tio(bio)->io->start_time);
}
EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone);
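/*
 * Illustrative sketch, not part of dm.c: a target's end_io hook using
 * dm_start_time_ns_from_clone() above to measure per-bio service time.
 * The "example_end_io" name is hypothetical.
 */
#if 0
static int example_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *error)
{
	u64 start_ns = dm_start_time_ns_from_clone(bio);
	u64 now_ns = jiffies_to_nsecs(jiffies);

	pr_debug("bio serviced in %llu ns\n", now_ns - start_ns);
	return DM_ENDIO_DONE;
}
#endif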
static bool bio_is_flush_with_data(struct bio *bio)
{
	return ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size);
}

static void dm_io_acct(struct dm_io *io, bool end)
{
	struct dm_stats_aux *stats_aux = &io->stats_aux;
	unsigned long start_time = io->start_time;
	struct mapped_device *md = io->md;
	struct bio *bio = io->orig_bio;
	unsigned int sectors;

	/*
	 * If REQ_PREFLUSH set, don't account payload, it will be
	 * submitted (and accounted) after this flush completes.
	 */
	if (bio_is_flush_with_data(bio))
		sectors = 0;
	else if (likely(!(dm_io_flagged(io, DM_IO_WAS_SPLIT))))
		sectors = bio_sectors(bio);
	else
		sectors = io->sectors;

	if (!end)
		bdev_start_io_acct(bio->bi_bdev, sectors, bio_op(bio),
				   start_time);
	else
		bdev_end_io_acct(bio->bi_bdev, bio_op(bio), start_time);

	if (static_branch_unlikely(&stats_enabled) &&
	    unlikely(dm_stats_used(&md->stats))) {
		sector_t sector;

		if (likely(!dm_io_flagged(io, DM_IO_WAS_SPLIT)))
			sector = bio->bi_iter.bi_sector;
		else
			sector = bio_end_sector(bio) - io->sector_offset;

		dm_stats_account_io(&md->stats, bio_data_dir(bio),
				    sector, sectors,
				    end, start_time, stats_aux);
	}
}

static void __dm_start_io_acct(struct dm_io *io)
{
	dm_io_acct(io, false);
}

static void dm_start_io_acct(struct dm_io *io, struct bio *clone)
{
	/*
	 * Ensure IO accounting is only ever started once.
	 */
	if (dm_io_flagged(io, DM_IO_ACCOUNTED))
		return;

	/* Expect no possibility for race unless DM_TIO_IS_DUPLICATE_BIO */
	if (!clone || likely(dm_tio_is_normal(clone_to_tio(clone)))) {
		dm_io_set_flag(io, DM_IO_ACCOUNTED);
	} else {
		unsigned long flags;

		/* Can afford locking given DM_TIO_IS_DUPLICATE_BIO */
		spin_lock_irqsave(&io->lock, flags);
		if (dm_io_flagged(io, DM_IO_ACCOUNTED)) {
			spin_unlock_irqrestore(&io->lock, flags);
			return;
		}
		dm_io_set_flag(io, DM_IO_ACCOUNTED);
		spin_unlock_irqrestore(&io->lock, flags);
	}

	__dm_start_io_acct(io);
}

static void dm_end_io_acct(struct dm_io *io)
{
	dm_io_acct(io, true);
}

static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
{
	struct dm_io *io;
	struct dm_target_io *tio;
	struct bio *clone;

	clone = bio_alloc_clone(NULL, bio, GFP_NOIO, &md->mempools->io_bs);
	/* Set default bdev, but target must bio_set_dev() before issuing IO */
	clone->bi_bdev = md->disk->part0;

	tio = clone_to_tio(clone);
	tio->flags = 0;
	dm_tio_set_flag(tio, DM_TIO_INSIDE_DM_IO);
	tio->io = NULL;

	io = container_of(tio, struct dm_io, tio);
	io->magic = DM_IO_MAGIC;
	io->status = BLK_STS_OK;

	/* one ref is for submission, the other is for completion */
	atomic_set(&io->io_count, 2);
	this_cpu_inc(*md->pending_io);
	io->orig_bio = bio;
	io->split_bio = NULL;
	io->md = md;
	spin_lock_init(&io->lock);
	io->start_time = jiffies;
	io->flags = 0;

	if (static_branch_unlikely(&stats_enabled))
		dm_stats_record_start(&md->stats, &io->stats_aux);

	return io;
}

static void free_io(struct dm_io *io)
{
	bio_put(&io->tio.clone);
}

static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti,
			     unsigned target_bio_nr, unsigned *len, gfp_t gfp_mask)
{
	struct dm_target_io *tio;
	struct bio *clone;

	if (!ci->io->tio.io) {
		/* the dm_target_io embedded in ci->io is available */
		tio = &ci->io->tio;
		/* alloc_io() already initialized embedded clone */
		clone = &tio->clone;
	} else {
		struct mapped_device *md = ci->io->md;

		clone = bio_alloc_clone(NULL, ci->bio, gfp_mask,
					&md->mempools->bs);
		if (!clone)
			return NULL;
		/* Set default bdev, but target must bio_set_dev() before issuing IO */
		clone->bi_bdev = md->disk->part0;

		/* REQ_DM_POLL_LIST shouldn't be inherited */
		clone->bi_opf &= ~REQ_DM_POLL_LIST;

		tio = clone_to_tio(clone);
		tio->flags = 0; /* also clears DM_TIO_INSIDE_DM_IO */
	}

	tio->magic = DM_TIO_MAGIC;
	tio->io = ci->io;
	tio->ti = ti;
	tio->target_bio_nr = target_bio_nr;
	tio->len_ptr = len;
	tio->old_sector = 0;

	if (len) {
		clone->bi_iter.bi_size = to_bytes(*len);
		if (bio_integrity(clone))
			bio_integrity_trim(clone);
	}

	return clone;
}

static void free_tio(struct bio *clone)
{
	if (dm_tio_flagged(clone_to_tio(clone), DM_TIO_INSIDE_DM_IO))
		return;
	bio_put(clone);
}

/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&md->deferred_lock, flags);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irqrestore(&md->deferred_lock, flags);
	queue_work(md->wq, &md->work);
}

/*
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_put_live_table() when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md,
				   int *srcu_idx) __acquires(md->io_barrier)
{
	*srcu_idx = srcu_read_lock(&md->io_barrier);

	return srcu_dereference(md->map, &md->io_barrier);
}

void dm_put_live_table(struct mapped_device *md,
		       int srcu_idx) __releases(md->io_barrier)
{
	srcu_read_unlock(&md->io_barrier, srcu_idx);
}

void dm_sync_table(struct mapped_device *md)
{
	synchronize_srcu(&md->io_barrier);
	synchronize_rcu_expedited();
}

/*
 * A fast alternative to dm_get_live_table/dm_put_live_table.
 * The caller must not block between these two functions.
 */
static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
{
	rcu_read_lock();
	return rcu_dereference(md->map);
}

static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
{
	rcu_read_unlock();
}

static inline struct dm_table *dm_get_live_table_bio(struct mapped_device *md,
						     int *srcu_idx, unsigned bio_opf)
{
	if (bio_opf & REQ_NOWAIT)
		return dm_get_live_table_fast(md);
	else
		return dm_get_live_table(md, srcu_idx);
}

static inline void dm_put_live_table_bio(struct mapped_device *md, int srcu_idx,
					 unsigned bio_opf)
{
	if (bio_opf & REQ_NOWAIT)
		dm_put_live_table_fast(md);
	else
		dm_put_live_table(md, srcu_idx);
}

static char *_dm_claim_ptr = "I belong to device-mapper";
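/*
 * Illustrative sketch, not part of dm.c: the access pattern the comment
 * above prescribes. A reader takes the SRCU lock via dm_get_live_table(),
 * checks for a NULL map (no table loaded yet), and drops the lock with the
 * returned srcu_idx. "example_live_table_size" is a hypothetical helper.
 */
#if 0
static sector_t example_live_table_size(struct mapped_device *md)
{
	struct dm_table *map;
	sector_t size = 0;
	int srcu_idx;

	map = dm_get_live_table(md, &srcu_idx);
	if (map)
		size = dm_table_get_size(map);
	dm_put_live_table(md, srcu_idx);

	return size;
}
#endif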
/*
 * Open a table device so we can use it as a map destination.
 */
static int open_table_device(struct table_device *td, dev_t dev,
			     struct mapped_device *md)
{
	struct block_device *bdev;
	u64 part_off;
	int r;

	BUG_ON(td->dm_dev.bdev);

	bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _dm_claim_ptr);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	r = bd_link_disk_holder(bdev, dm_disk(md));
	if (r) {
		blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
		return r;
	}

	td->dm_dev.bdev = bdev;
	td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev, &part_off);
	return 0;
}

/*
 * Close a table device that we've been using.
 */
static void close_table_device(struct table_device *td, struct mapped_device *md)
{
	if (!td->dm_dev.bdev)
		return;

	bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
	blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
	put_dax(td->dm_dev.dax_dev);
	td->dm_dev.bdev = NULL;
	td->dm_dev.dax_dev = NULL;
}

static struct table_device *find_table_device(struct list_head *l, dev_t dev,
					      fmode_t mode)
{
	struct table_device *td;

	list_for_each_entry(td, l, list)
		if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
			return td;

	return NULL;
}

int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
			struct dm_dev **result)
{
	int r;
	struct table_device *td;

	mutex_lock(&md->table_devices_lock);
	td = find_table_device(&md->table_devices, dev, mode);
	if (!td) {
		td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
		if (!td) {
			mutex_unlock(&md->table_devices_lock);
			return -ENOMEM;
		}

		td->dm_dev.mode = mode;
		td->dm_dev.bdev = NULL;

		if ((r = open_table_device(td, dev, md))) {
			mutex_unlock(&md->table_devices_lock);
			kfree(td);
			return r;
		}

		format_dev_t(td->dm_dev.name, dev);

		refcount_set(&td->count, 1);
		list_add(&td->list, &md->table_devices);
	} else {
		refcount_inc(&td->count);
	}
	mutex_unlock(&md->table_devices_lock);

	*result = &td->dm_dev;
	return 0;
}

void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
{
	struct table_device *td = container_of(d, struct table_device, dm_dev);

	mutex_lock(&md->table_devices_lock);
	if (refcount_dec_and_test(&td->count)) {
		close_table_device(td, md);
		list_del(&td->list);
		kfree(td);
	}
	mutex_unlock(&md->table_devices_lock);
}

static void free_table_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct table_device *td = list_entry(tmp, struct table_device, list);

		DMWARN("dm_destroy: %s still exists with %d references",
		       td->dm_dev.name, refcount_read(&td->count));
		kfree(td);
	}
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}
	md->geometry = *geo;

	return 0;
}

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

static void dm_io_complete(struct dm_io *io)
{
	blk_status_t io_error;
	struct mapped_device *md = io->md;
	struct bio *bio = io->split_bio ? io->split_bio : io->orig_bio;

	if (io->status == BLK_STS_DM_REQUEUE) {
		unsigned long flags;
		/*
		 * Target requested pushing back the I/O.
		 */
		spin_lock_irqsave(&md->deferred_lock, flags);
		if (__noflush_suspending(md) &&
		    !WARN_ON_ONCE(dm_is_zone_write(md, bio))) {
			/* NOTE early return due to BLK_STS_DM_REQUEUE below */
			bio_list_add_head(&md->deferred, bio);
		} else {
			/*
			 * noflush suspend was interrupted or this is
			 * a write to a zoned target.
			 */
			io->status = BLK_STS_IOERR;
		}
		spin_unlock_irqrestore(&md->deferred_lock, flags);
	}

	io_error = io->status;
	if (dm_io_flagged(io, DM_IO_ACCOUNTED))
		dm_end_io_acct(io);
	else if (!io_error) {
		/*
		 * Must handle a target that returned DM_MAPIO_SUBMITTED only
		 * to then bio_endio() rather than use dm_submit_bio_remap().
		 */
		__dm_start_io_acct(io);
		dm_end_io_acct(io);
	}
	free_io(io);
	smp_wmb();
	this_cpu_dec(*md->pending_io);

	/* nudge anyone waiting on suspend queue */
	if (unlikely(wq_has_sleeper(&md->wait)))
		wake_up(&md->wait);

	if (io_error == BLK_STS_DM_REQUEUE || io_error == BLK_STS_AGAIN) {
		if (bio->bi_opf & REQ_POLLED) {
			/*
			 * Upper layer won't help us poll split bio (io->orig_bio
			 * may only reflect a subset of the pre-split original)
			 * so clear REQ_POLLED in case of requeue.
			 */
			bio_clear_polled(bio);
			if (io_error == BLK_STS_AGAIN) {
				/* io_uring doesn't handle BLK_STS_AGAIN (yet) */
				queue_io(md, bio);
				return;
			}
		}
		if (io_error == BLK_STS_DM_REQUEUE)
			return;
	}

	if (bio_is_flush_with_data(bio)) {
		/*
		 * Preflush done for flush with data, reissue
		 * without REQ_PREFLUSH.
		 */
		bio->bi_opf &= ~REQ_PREFLUSH;
		queue_io(md, bio);
	} else {
		/* done with normal IO or empty flush */
		if (io_error)
			bio->bi_status = io_error;
		bio_endio(bio);
	}
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static inline void __dm_io_dec_pending(struct dm_io *io)
{
	if (atomic_dec_and_test(&io->io_count))
		dm_io_complete(io);
}

static void dm_io_set_error(struct dm_io *io, blk_status_t error)
{
	unsigned long flags;

	/* Push-back supersedes any I/O errors */
	spin_lock_irqsave(&io->lock, flags);
	if (!(io->status == BLK_STS_DM_REQUEUE &&
	      __noflush_suspending(io->md))) {
		io->status = error;
	}
	spin_unlock_irqrestore(&io->lock, flags);
}

static void dm_io_dec_pending(struct dm_io *io, blk_status_t error)
{
	if (unlikely(error))
		dm_io_set_error(io, error);

	__dm_io_dec_pending(io);
}

void disable_discard(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support DISCARD, disable it */
	limits->max_discard_sectors = 0;
}

void disable_write_zeroes(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE ZEROES, disable it */
	limits->max_write_zeroes_sectors = 0;
}

static bool swap_bios_limit(struct dm_target *ti, struct bio *bio)
{
	return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios);
}

static void clone_endio(struct bio *bio)
{
	blk_status_t error = bio->bi_status;
	struct dm_target_io *tio = clone_to_tio(bio);
	struct dm_target *ti = tio->ti;
	dm_endio_fn endio = ti->type->end_io;
	struct dm_io *io = tio->io;
	struct mapped_device *md = io->md;

	if (unlikely(error == BLK_STS_TARGET)) {
		if (bio_op(bio) == REQ_OP_DISCARD &&
		    !bdev_max_discard_sectors(bio->bi_bdev))
			disable_discard(md);
		else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
			 !bdev_write_zeroes_sectors(bio->bi_bdev))
			disable_write_zeroes(md);
	}

	if (static_branch_unlikely(&zoned_enabled) &&
	    unlikely(bdev_is_zoned(bio->bi_bdev)))
		dm_zone_endio(io, bio);

	if (endio) {
		int r = endio(ti, bio, &error);
		switch (r) {
		case DM_ENDIO_REQUEUE:
			if (static_branch_unlikely(&zoned_enabled)) {
				/*
				 * Requeuing writes to a sequential zone of a zoned
				 * target will break the sequential write pattern:
				 * fail such IO.
				 */
				if (WARN_ON_ONCE(dm_is_zone_write(md, bio)))
					error = BLK_STS_IOERR;
				else
					error = BLK_STS_DM_REQUEUE;
			} else
				error = BLK_STS_DM_REQUEUE;
			fallthrough;
		case DM_ENDIO_DONE:
			break;
		case DM_ENDIO_INCOMPLETE:
			/* The target will handle the io */
			return;
		default:
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	if (static_branch_unlikely(&swap_bios_enabled) &&
	    unlikely(swap_bios_limit(ti, bio)))
		up(&md->swap_bios_semaphore);

	free_tio(bio);
	dm_io_dec_pending(io, error);
}

/*
 * Return maximum size of I/O possible at the supplied sector up to the current
 * target boundary.
 */
static inline sector_t max_io_len_target_boundary(struct dm_target *ti,
						  sector_t target_offset)
{
	return ti->len - target_offset;
}

static sector_t max_io_len(struct dm_target *ti, sector_t sector)
{
	sector_t target_offset = dm_target_offset(ti, sector);
	sector_t len = max_io_len_target_boundary(ti, target_offset);

	/*
	 * Does the target need to split IO even further?
	 * - varied (per target) IO splitting is a tenet of DM; this
	 *   explains why stacked chunk_sectors based splitting via
	 *   blk_queue_split() isn't possible here.
	 */
	if (!ti->max_io_len)
		return len;
	return min_t(sector_t, len,
		min(queue_max_sectors(ti->table->md->queue),
		    blk_chunk_sectors_left(target_offset, ti->max_io_len)));
}

int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
{
	if (len > UINT_MAX) {
		DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
		      (unsigned long long)len, UINT_MAX);
		ti->error = "Maximum size of target IO is too large";
		return -EINVAL;
	}

	ti->max_io_len = (uint32_t) len;

	return 0;
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
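/*
 * Illustrative sketch, not part of dm.c: a target constructor capping its
 * per-bio size so max_io_len() above splits IO on those boundaries. The
 * "example_ctr" name and the 256-sector (128KiB) chunk are hypothetical.
 */
#if 0
static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;

	/* never receive a bio spanning a 256-sector chunk boundary */
	r = dm_set_target_max_io_len(ti, 256);
	if (r)
		return r;	/* ti->error was set on failure */

	return 0;
}
#endif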
static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
		sector_t sector, int *srcu_idx)
	__acquires(md->io_barrier)
{
	struct dm_table *map;
	struct dm_target *ti;

	map = dm_get_live_table(md, srcu_idx);
	if (!map)
		return NULL;

	ti = dm_table_find_target(map, sector);
	if (!ti)
		return NULL;

	return ti;
}

static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
		long nr_pages, enum dax_access_mode mode, void **kaddr,
		pfn_t *pfn)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long len, ret = -EIO;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->direct_access)
		goto out;
	len = max_io_len(ti, sector) / PAGE_SECTORS;
	if (len < 1)
		goto out;
	nr_pages = min(len, nr_pages);
	ret = ti->type->direct_access(ti, pgoff, nr_pages, mode, kaddr, pfn);

 out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
				  size_t nr_pages)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	int ret = -EIO;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (WARN_ON(!ti->type->dax_zero_page_range)) {
		/*
		 * ->zero_page_range() is a mandatory dax operation. If we are
		 * here, something is wrong.
		 */
		goto out;
	}
	ret = ti->type->dax_zero_page_range(ti, pgoff, nr_pages);
 out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

static size_t dm_dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	int srcu_idx;
	long ret = 0;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);
	if (!ti || !ti->type->dax_recovery_write)
		goto out;

	ret = ti->type->dax_recovery_write(ti, pgoff, addr, bytes, i);
out:
	dm_put_live_table(md, srcu_idx);
	return ret;
}

/*
 * A target may call dm_accept_partial_bio only from the map routine. It is
 * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_* zone management
 * operations, REQ_OP_ZONE_APPEND (zone append writes) and any bio serviced by
 * __send_duplicate_bios().
 *
 * dm_accept_partial_bio informs the dm that the target only wants to process
 * additional n_sectors sectors of the bio and the rest of the data should be
 * sent in a next bio.
/*
 * A target may call dm_accept_partial_bio only from the map routine.  It is
 * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_* zone management
 * operations, REQ_OP_ZONE_APPEND (zone append writes) and any bio serviced by
 * __send_duplicate_bios().
 *
 * dm_accept_partial_bio informs DM that the target only wants to process an
 * additional n_sectors sectors of the bio, and that the rest of the data
 * should be sent in the next bio.
 *
 * A diagram that explains the arithmetic:
 * +--------------------+---------------+-------+
 * |         1          |       2       |   3   |
 * +--------------------+---------------+-------+
 *
 * <-------------- *tio->len_ptr --------------->
 *                      <----- bio_sectors ----->
 *                      <-- n_sectors -->
 *
 * Region 1 was already iterated over with bio_advance or a similar function.
 *	(it may be empty if the target doesn't use bio_advance)
 * Region 2 is the remaining bio size that the target wants to process.
 *	(it may be empty if region 1 is non-empty, although there is no reason
 *	 to make it empty)
 * The target requires that region 3 be sent in the next bio.
 *
 * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
 * the partially processed part (the sum of regions 1+2) must be the same for all
 * copies of the bio.
 */
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
{
	struct dm_target_io *tio = clone_to_tio(bio);
	unsigned bio_sectors = bio_sectors(bio);

	BUG_ON(dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO));
	BUG_ON(op_is_zone_mgmt(bio_op(bio)));
	BUG_ON(bio_op(bio) == REQ_OP_ZONE_APPEND);
	BUG_ON(bio_sectors > *tio->len_ptr);
	BUG_ON(n_sectors > bio_sectors);

	*tio->len_ptr -= bio_sectors - n_sectors;
	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;

	/*
	 * __split_and_process_bio() may have already saved the mapped part
	 * for accounting, but it is being reduced, so update accordingly.
	 */
	dm_io_set_flag(tio->io, DM_IO_WAS_SPLIT);
	tio->io->sectors = n_sectors;
}
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
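/*
 * Illustrative sketch (hypothetical target; example_map and example_bdev
 * are made up): a map method that only processes the first 8 sectors of
 * each bio and asks DM core to resubmit the rest, i.e. region 3 in the
 * diagram above:
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		if (bio_sectors(bio) > 8)
 *			dm_accept_partial_bio(bio, 8);
 *		bio_set_dev(bio, example_bdev(ti));
 *		return DM_MAPIO_REMAPPED;
 *	}
 */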
/*
 * @clone: clone bio that DM core passed to target's .map function
 * @tgt_clone: clone of @clone bio that target needs submitted
 *
 * Targets should use this interface to submit bios they take
 * ownership of when returning DM_MAPIO_SUBMITTED.
 *
 * Such targets should also enable ti->accounts_remapped_io.
 */
void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone)
{
	struct dm_target_io *tio = clone_to_tio(clone);
	struct dm_io *io = tio->io;

	/* establish bio that will get submitted */
	if (!tgt_clone)
		tgt_clone = clone;

	/*
	 * Account io->origin_bio to DM dev on behalf of target
	 * that took ownership of IO with DM_MAPIO_SUBMITTED.
	 */
	dm_start_io_acct(io, clone);

	trace_block_bio_remap(tgt_clone, disk_devt(io->md->disk),
			      tio->old_sector);
	submit_bio_noacct(tgt_clone);
}
EXPORT_SYMBOL_GPL(dm_submit_bio_remap);
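/*
 * Illustrative sketch (hypothetical target; all example_* names are made
 * up): a target that defers the clone to a workqueue, returns
 * DM_MAPIO_SUBMITTED from its map method, and later resubmits via
 * dm_submit_bio_remap().  Such a target sets ti->accounts_remapped_io
 * in its constructor:
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		example_queue_bio(ti, bio);
 *		return DM_MAPIO_SUBMITTED;
 *	}
 *
 *	static void example_worker(struct work_struct *work)
 *	{
 *		struct bio *bio = example_dequeue_bio(work);
 *
 *		dm_submit_bio_remap(bio, NULL);
 *	}
 */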
static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch)
{
	mutex_lock(&md->swap_bios_lock);
	while (latch < md->swap_bios) {
		cond_resched();
		down(&md->swap_bios_semaphore);
		md->swap_bios--;
	}
	while (latch > md->swap_bios) {
		cond_resched();
		up(&md->swap_bios_semaphore);
		md->swap_bios++;
	}
	mutex_unlock(&md->swap_bios_lock);
}

static void __map_bio(struct bio *clone)
{
	struct dm_target_io *tio = clone_to_tio(clone);
	struct dm_target *ti = tio->ti;
	struct dm_io *io = tio->io;
	struct mapped_device *md = io->md;
	int r;

	clone->bi_end_io = clone_endio;

	/*
	 * Map the clone.
	 */
	tio->old_sector = clone->bi_iter.bi_sector;

	if (static_branch_unlikely(&swap_bios_enabled) &&
	    unlikely(swap_bios_limit(ti, clone))) {
		int latch = get_swap_bios();

		if (unlikely(latch != md->swap_bios))
			__set_swap_bios_limit(md, latch);
		down(&md->swap_bios_semaphore);
	}

	if (static_branch_unlikely(&zoned_enabled)) {
		/*
		 * Check if the IO needs a special mapping due to zone append
		 * emulation on a zoned target.  In this case, dm_zone_map_bio()
		 * calls the target map operation.
		 */
		if (unlikely(dm_emulate_zone_append(md)))
			r = dm_zone_map_bio(tio);
		else
			r = ti->type->map(ti, clone);
	} else
		r = ti->type->map(ti, clone);

	switch (r) {
	case DM_MAPIO_SUBMITTED:
		/* target has assumed ownership of this io */
		if (!ti->accounts_remapped_io)
			dm_start_io_acct(io, clone);
		break;
	case DM_MAPIO_REMAPPED:
		dm_submit_bio_remap(clone, NULL);
		break;
	case DM_MAPIO_KILL:
	case DM_MAPIO_REQUEUE:
		if (static_branch_unlikely(&swap_bios_enabled) &&
		    unlikely(swap_bios_limit(ti, clone)))
			up(&md->swap_bios_semaphore);
		free_tio(clone);
		if (r == DM_MAPIO_KILL)
			dm_io_dec_pending(io, BLK_STS_IOERR);
		else
			dm_io_dec_pending(io, BLK_STS_DM_REQUEUE);
		break;
	default:
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}
}
static void setup_split_accounting(struct clone_info *ci, unsigned len)
{
	struct dm_io *io = ci->io;

	if (ci->sector_count > len) {
		/*
		 * Split needed, save the mapped part for accounting.
		 * NOTE: dm_accept_partial_bio() will update accordingly.
		 */
		dm_io_set_flag(io, DM_IO_WAS_SPLIT);
		io->sectors = len;
	}

	if (static_branch_unlikely(&stats_enabled) &&
	    unlikely(dm_stats_used(&io->md->stats))) {
		/*
		 * Save bi_sector in terms of its offset from end of
		 * original bio, only needed for DM-stats' benefit.
		 * - saved regardless of whether split needed so that
		 *   dm_accept_partial_bio() doesn't need to.
		 */
		io->sector_offset = bio_end_sector(ci->bio) - ci->sector;
	}
}

static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
				struct dm_target *ti, unsigned num_bios)
{
	struct bio *bio;
	int try;

	for (try = 0; try < 2; try++) {
		int bio_nr;

		if (try)
			mutex_lock(&ci->io->md->table_devices_lock);
		for (bio_nr = 0; bio_nr < num_bios; bio_nr++) {
			bio = alloc_tio(ci, ti, bio_nr, NULL,
					try ? GFP_NOIO : GFP_NOWAIT);
			if (!bio)
				break;

			bio_list_add(blist, bio);
		}
		if (try)
			mutex_unlock(&ci->io->md->table_devices_lock);
		if (bio_nr == num_bios)
			return;

		while ((bio = bio_list_pop(blist)))
			free_tio(bio);
	}
}

static int __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
				 unsigned num_bios, unsigned *len)
{
	struct bio_list blist = BIO_EMPTY_LIST;
	struct bio *clone;
	int ret = 0;

	switch (num_bios) {
	case 0:
		break;
	case 1:
		if (len)
			setup_split_accounting(ci, *len);
		clone = alloc_tio(ci, ti, 0, len, GFP_NOIO);
		__map_bio(clone);
		ret = 1;
		break;
	default:
		/* dm_accept_partial_bio() is not supported with shared tio->len_ptr */
		alloc_multiple_bios(&blist, ci, ti, num_bios);
		while ((clone = bio_list_pop(&blist))) {
			dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO);
			__map_bio(clone);
			ret += 1;
		}
		break;
	}

	return ret;
}
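/*
 * Note on alloc_multiple_bios() above: the first pass allocates with
 * GFP_NOWAIT so failure is cheap; only the retry pass may block
 * (GFP_NOIO) and runs under table_devices_lock.  Whenever a pass fails,
 * every clone allocated so far is freed and the whole set is retried,
 * so __send_duplicate_bios() ends up popping either all num_bios clones
 * or none.
 */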
static void __send_empty_flush(struct clone_info *ci)
{
	unsigned target_nr = 0;
	struct dm_target *ti;
	struct bio flush_bio;

	/*
	 * Use an on-stack bio for this, it's safe since we don't
	 * need to reference it after submit.  It's just used as
	 * the basis for the clone(s).
	 */
	bio_init(&flush_bio, ci->io->md->disk->part0, NULL, 0,
		 REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC);

	ci->bio = &flush_bio;
	ci->sector_count = 0;
	ci->io->tio.clone.bi_iter.bi_size = 0;

	while ((ti = dm_table_get_target(ci->map, target_nr++))) {
		int bios;

		atomic_add(ti->num_flush_bios, &ci->io->io_count);
		bios = __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
		atomic_sub(ti->num_flush_bios - bios, &ci->io->io_count);
	}

	/*
	 * alloc_io() takes one extra reference for submission, so the
	 * reference won't reach 0 without the following subtraction.
	 */
	atomic_sub(1, &ci->io->io_count);

	bio_uninit(ci->bio);
}

static void __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
					unsigned num_bios)
{
	unsigned len;
	int bios;

	len = min_t(sector_t, ci->sector_count,
		    max_io_len_target_boundary(ti, dm_target_offset(ti, ci->sector)));

	atomic_add(num_bios, &ci->io->io_count);
	bios = __send_duplicate_bios(ci, ti, num_bios, &len);
	/*
	 * alloc_io() takes one extra reference for submission, so the
	 * reference won't reach 0 without the following (+1) subtraction.
	 */
	atomic_sub(num_bios - bios + 1, &ci->io->io_count);

	ci->sector += len;
	ci->sector_count -= len;
}
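/*
 * Worked example of the accounting above (illustrative numbers): with
 * num_bios == 3 the caller first adds 3 to io_count.  If only 2 clones
 * end up being issued (bios == 2), it subtracts num_bios - bios + 1 == 2:
 * one for the clone that was never issued and one for the extra
 * submission reference taken by alloc_io().  What remains is one
 * reference per issued clone plus DM core's own.
 */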
static bool is_abnormal_io(struct bio *bio)
{
	unsigned int op = bio_op(bio);

	if (op != REQ_OP_READ && op != REQ_OP_WRITE && op != REQ_OP_FLUSH) {
		switch (op) {
		case REQ_OP_DISCARD:
		case REQ_OP_SECURE_ERASE:
		case REQ_OP_WRITE_ZEROES:
			return true;
		default:
			break;
		}
	}

	return false;
}

static blk_status_t __process_abnormal_io(struct clone_info *ci,
					  struct dm_target *ti)
{
	unsigned num_bios = 0;

	switch (bio_op(ci->bio)) {
	case REQ_OP_DISCARD:
		num_bios = ti->num_discard_bios;
		break;
	case REQ_OP_SECURE_ERASE:
		num_bios = ti->num_secure_erase_bios;
		break;
	case REQ_OP_WRITE_ZEROES:
		num_bios = ti->num_write_zeroes_bios;
		break;
	}

	/*
	 * Even though the device advertised support for this type of
	 * request, that does not mean every target supports it, and
	 * reconfiguration might also have changed that since the
	 * check was performed.
	 */
	if (unlikely(!num_bios))
		return BLK_STS_NOTSUPP;

	__send_changing_extent_only(ci, ti, num_bios);
	return BLK_STS_OK;
}

/*
 * Reuse ->bi_private as the dm_io list head for storing all dm_io instances
 * associated with this bio; this bio's original bi_private needs to be
 * stored in dm_io->data before the reuse.
 *
 * bio->bi_private is owned by the fs or upper layer, so the block layer
 * won't touch it after splitting.  Meanwhile it won't be changed by anyone
 * after the bio is submitted.  So this reuse is safe.
 */
static inline struct dm_io **dm_poll_list_head(struct bio *bio)
{
	return (struct dm_io **)&bio->bi_private;
}

static void dm_queue_poll_io(struct bio *bio, struct dm_io *io)
{
	struct dm_io **head = dm_poll_list_head(bio);

	if (!(bio->bi_opf & REQ_DM_POLL_LIST)) {
		bio->bi_opf |= REQ_DM_POLL_LIST;
		/*
		 * Save .bi_private into dm_io, so that we can reuse
		 * .bi_private as the head for storing the dm_io list.
		 */
		io->data = bio->bi_private;

		/* tell block layer to poll for completion */
		bio->bi_cookie = ~BLK_QC_T_NONE;

		io->next = NULL;
	} else {
		/*
		 * bio recursed due to split, reuse original poll list,
		 * and save bio->bi_private too.
		 */
		io->data = (*head)->data;
		io->next = *head;
	}

	*head = io;
}
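/*
 * Shape of the poll list built above, shown for a bio that was split
 * twice (illustrative):
 *
 *	bio->bi_private -> io2 -> io1 -> NULL
 *
 * io1->data and io2->data both hold the original bi_private, which
 * dm_poll_bio() restores before completing the dm_io instances.
 */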
/*
 * Select the correct strategy for processing a non-flush bio.
 */
static blk_status_t __split_and_process_bio(struct clone_info *ci)
{
	struct bio *clone;
	struct dm_target *ti;
	unsigned len;

	ti = dm_table_find_target(ci->map, ci->sector);
	if (unlikely(!ti))
		return BLK_STS_IOERR;

	if (unlikely((ci->bio->bi_opf & REQ_NOWAIT) != 0) &&
	    unlikely(!dm_target_supports_nowait(ti->type)))
		return BLK_STS_NOTSUPP;

	if (unlikely(ci->is_abnormal_io))
		return __process_abnormal_io(ci, ti);

	/*
	 * Bio polling is only supported for normal IO, where the target io
	 * sits exactly inside the dm_io instance (verified in dm_poll_dm_io).
	 */
	ci->submit_as_polled = ci->bio->bi_opf & REQ_POLLED;

	len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count);
	setup_split_accounting(ci, len);
	clone = alloc_tio(ci, ti, 0, &len, GFP_NOIO);
	__map_bio(clone);

	ci->sector += len;
	ci->sector_count -= len;

	return BLK_STS_OK;
}

static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
			    struct dm_table *map, struct bio *bio, bool is_abnormal)
{
	ci->map = map;
	ci->io = alloc_io(md, bio);
	ci->bio = bio;
	ci->is_abnormal_io = is_abnormal;
	ci->submit_as_polled = false;
	ci->sector = bio->bi_iter.bi_sector;
	ci->sector_count = bio_sectors(bio);

	/* Shouldn't happen but sector_count was being set to 0 so... */
	if (static_branch_unlikely(&zoned_enabled) &&
	    WARN_ON_ONCE(op_is_zone_mgmt(bio_op(bio)) && ci->sector_count))
		ci->sector_count = 0;
}
/*
 * Entry point to split a bio into clones and submit them to the targets.
 */
static void dm_split_and_process_bio(struct mapped_device *md,
				     struct dm_table *map, struct bio *bio)
{
	struct clone_info ci;
	struct dm_io *io;
	blk_status_t error = BLK_STS_OK;
	bool is_abnormal;

	is_abnormal = is_abnormal_io(bio);
	if (unlikely(is_abnormal)) {
		/*
		 * Use blk_queue_split() for abnormal IO (e.g. discard);
		 * otherwise the associated queue_limits won't be imposed.
		 */
		blk_queue_split(&bio);
	}

	init_clone_info(&ci, md, map, bio, is_abnormal);
	io = ci.io;

	if (bio->bi_opf & REQ_PREFLUSH) {
		__send_empty_flush(&ci);
		/* dm_io_complete submits any data associated with flush */
		goto out;
	}

	error = __split_and_process_bio(&ci);
	if (error || !ci.sector_count)
		goto out;
	/*
	 * Remainder must be passed to submit_bio_noacct() so it gets handled
	 * *after* bios already submitted have been completely processed.
	 */
	WARN_ON_ONCE(!dm_io_flagged(io, DM_IO_WAS_SPLIT));
	io->split_bio = bio_split(bio, io->sectors, GFP_NOIO,
				  &md->queue->bio_split);
	bio_chain(io->split_bio, bio);
	trace_block_split(io->split_bio, bio->bi_iter.bi_sector);
	submit_bio_noacct(bio);
out:
	/*
	 * Drop the extra reference count for a non-POLLED bio, and hold one
	 * reference for a POLLED bio, which will be released in dm_poll_bio.
	 *
	 * Add every dm_io instance into the dm_io list head which is stored
	 * in bio->bi_private, so that dm_poll_bio can poll them all.
	 */
	if (error || !ci.submit_as_polled) {
		/*
		 * In case of submission failure, the extra reference for
		 * submitting io isn't consumed yet.
		 */
		if (error)
			atomic_dec(&io->io_count);
		dm_io_dec_pending(io, error);
	} else
		dm_queue_poll_io(bio, io);
}
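/*
 * Worked example of the flow above (illustrative sizes): a 2048-sector
 * bio submitted to a table of two 1024-sector targets is mapped for its
 * first 1024 sectors; bio_split() then carves the mapped part into
 * io->split_bio, bio_chain() links it to the remainder, and the remaining
 * 1024 sectors are handed back to submit_bio_noacct() to be processed
 * after this submission unwinds.
 */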
static void dm_submit_bio(struct bio *bio)
{
	struct mapped_device *md = bio->bi_bdev->bd_disk->private_data;
	int srcu_idx;
	struct dm_table *map;
	unsigned bio_opf = bio->bi_opf;

	map = dm_get_live_table_bio(md, &srcu_idx, bio_opf);

	/* If suspended, or map not yet available, queue this IO for later */
	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) ||
	    unlikely(!map)) {
		if (bio->bi_opf & REQ_NOWAIT)
			bio_wouldblock_error(bio);
		else if (bio->bi_opf & REQ_RAHEAD)
			bio_io_error(bio);
		else
			queue_io(md, bio);
		goto out;
	}

	dm_split_and_process_bio(md, map, bio);
out:
	dm_put_live_table_bio(md, srcu_idx, bio_opf);
}

static bool dm_poll_dm_io(struct dm_io *io, struct io_comp_batch *iob,
			  unsigned int flags)
{
	WARN_ON_ONCE(!dm_tio_is_normal(&io->tio));

	/* don't poll if the mapped io is done */
	if (atomic_read(&io->io_count) > 1)
		bio_poll(&io->tio.clone, iob, flags);

	/* bio_poll holds the last reference */
	return atomic_read(&io->io_count) == 1;
}

static int dm_poll_bio(struct bio *bio, struct io_comp_batch *iob,
		       unsigned int flags)
{
	struct dm_io **head = dm_poll_list_head(bio);
	struct dm_io *list = *head;
	struct dm_io *tmp = NULL;
	struct dm_io *curr, *next;

	/* Only poll normal bio which was marked as REQ_DM_POLL_LIST */
	if (!(bio->bi_opf & REQ_DM_POLL_LIST))
		return 0;

	WARN_ON_ONCE(!list);

	/*
	 * Restore .bi_private before possibly completing dm_io.
	 *
	 * bio_poll() is only possible once @bio has been completely
	 * submitted via submit_bio_noacct()'s depth-first submission.
	 * So there is no dm_queue_poll_io() race associated with
	 * clearing REQ_DM_POLL_LIST here.
	 */
	bio->bi_opf &= ~REQ_DM_POLL_LIST;
	bio->bi_private = list->data;

	for (curr = list, next = curr->next; curr; curr = next, next =
			curr ? curr->next : NULL) {
		if (dm_poll_dm_io(curr, iob, flags)) {
			/*
			 * clone_endio() has already occurred, so no
			 * error handling is needed here.
			 */
			__dm_io_dec_pending(curr);
		} else {
			curr->next = tmp;
			tmp = curr;
		}
	}

	/* Not done? */
	if (tmp) {
		bio->bi_opf |= REQ_DM_POLL_LIST;
		/* Reset bio->bi_private to dm_io list head */
		*head = tmp;
		return 0;
	}
	return 1;
}
/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static void free_minor(int minor)
{
	spin_lock(&_minor_lock);
	idr_remove(&_minor_idr, minor);
	spin_unlock(&_minor_lock);
}

/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(int minor)
{
	int r;

	if (minor >= (1 << MINORBITS))
		return -EINVAL;

	idr_preload(GFP_KERNEL);
	spin_lock(&_minor_lock);

	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);

	spin_unlock(&_minor_lock);
	idr_preload_end();
	if (r < 0)
		return r == -ENOSPC ? -EBUSY : r;
	return 0;
}
static int next_free_minor(int *minor)
{
	int r;

	idr_preload(GFP_KERNEL);
	spin_lock(&_minor_lock);

	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);

	spin_unlock(&_minor_lock);
	idr_preload_end();
	if (r < 0)
		return r;
	*minor = r;
	return 0;
}

static const struct block_device_operations dm_blk_dops;
static const struct block_device_operations dm_rq_blk_dops;
static const struct dax_operations dm_dax_ops;

static void dm_wq_work(struct work_struct *work);

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
static void dm_queue_destroy_crypto_profile(struct request_queue *q)
{
	dm_destroy_crypto_profile(q->crypto_profile);
}

#else /* CONFIG_BLK_INLINE_ENCRYPTION */

static inline void dm_queue_destroy_crypto_profile(struct request_queue *q)
{
}
#endif /* !CONFIG_BLK_INLINE_ENCRYPTION */

static void cleanup_mapped_device(struct mapped_device *md)
{
	if (md->wq)
		destroy_workqueue(md->wq);
	dm_free_md_mempools(md->mempools);

	if (md->dax_dev) {
		dax_remove_host(md->disk);
		kill_dax(md->dax_dev);
		put_dax(md->dax_dev);
		md->dax_dev = NULL;
	}

	dm_cleanup_zoned_dev(md);
	if (md->disk) {
		spin_lock(&_minor_lock);
		md->disk->private_data = NULL;
		spin_unlock(&_minor_lock);
		if (dm_get_md_type(md) != DM_TYPE_NONE) {
			dm_sysfs_exit(md);
			del_gendisk(md->disk);
		}
		dm_queue_destroy_crypto_profile(md->queue);
		put_disk(md->disk);
	}

	if (md->pending_io) {
		free_percpu(md->pending_io);
		md->pending_io = NULL;
	}

	cleanup_srcu_struct(&md->io_barrier);

	mutex_destroy(&md->suspend_lock);
	mutex_destroy(&md->type_lock);
	mutex_destroy(&md->table_devices_lock);
	mutex_destroy(&md->swap_bios_lock);

	dm_mq_cleanup_mapped_device(md);
}
/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(int minor)
{
	int r, numa_node_id = dm_get_numa_node();
	struct mapped_device *md;
	void *old_md;

	md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	if (!try_module_get(THIS_MODULE))
		goto bad_module_get;

	/* get a minor number for the dev */
	if (minor == DM_ANY_MINOR)
		r = next_free_minor(&minor);
	else
		r = specific_minor(minor);
	if (r < 0)
		goto bad_minor;

	r = init_srcu_struct(&md->io_barrier);
	if (r < 0)
		goto bad_io_barrier;

	md->numa_node_id = numa_node_id;
	md->init_tio_pdu = false;
	md->type = DM_TYPE_NONE;
	mutex_init(&md->suspend_lock);
	mutex_init(&md->type_lock);
	mutex_init(&md->table_devices_lock);
	spin_lock_init(&md->deferred_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->open_count, 0);
	atomic_set(&md->event_nr, 0);
	atomic_set(&md->uevent_seq, 0);
	INIT_LIST_HEAD(&md->uevent_list);
	INIT_LIST_HEAD(&md->table_devices);
	spin_lock_init(&md->uevent_lock);

	/*
	 * Default to bio-based until the DM table is loaded and md->type
	 * is established.  If a request-based table is loaded, blk-mq will
	 * override accordingly.
	 */
	md->disk = blk_alloc_disk(md->numa_node_id);
	if (!md->disk)
		goto bad;
	md->queue = md->disk->queue;

	init_waitqueue_head(&md->wait);
	INIT_WORK(&md->work, dm_wq_work);
	init_waitqueue_head(&md->eventq);
	init_completion(&md->kobj_holder.completion);

	md->swap_bios = get_swap_bios();
	sema_init(&md->swap_bios_semaphore, md->swap_bios);
	mutex_init(&md->swap_bios_lock);

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->minors = 1;
	md->disk->flags |= GENHD_FL_NO_PART;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);

	if (IS_ENABLED(CONFIG_FS_DAX)) {
		md->dax_dev = alloc_dax(md, &dm_dax_ops);
		if (IS_ERR(md->dax_dev)) {
			md->dax_dev = NULL;
			goto bad;
		}
		set_dax_nocache(md->dax_dev);
		set_dax_nomc(md->dax_dev);
		if (dax_add_host(md->dax_dev, md->disk))
			goto bad;
	}

	format_dev_t(md->name, MKDEV(_major, minor));

	md->wq = alloc_workqueue("kdmflush/%s", WQ_MEM_RECLAIM, 0, md->name);
	if (!md->wq)
		goto bad;

	md->pending_io = alloc_percpu(unsigned long);
	if (!md->pending_io)
		goto bad;

	dm_stats_init(&md->stats);

	/* Populate the mapping, nobody knows we exist yet */
	spin_lock(&_minor_lock);
	old_md = idr_replace(&_minor_idr, md, minor);
	spin_unlock(&_minor_lock);

	BUG_ON(old_md != MINOR_ALLOCED);

	return md;

bad:
	cleanup_mapped_device(md);
bad_io_barrier:
	free_minor(minor);
bad_minor:
	module_put(THIS_MODULE);
bad_module_get:
	kvfree(md);
	return NULL;
}

static void unlock_fs(struct mapped_device *md);
static void free_dev(struct mapped_device *md)
{
	int minor = MINOR(disk_devt(md->disk));

	unlock_fs(md);

	cleanup_mapped_device(md);

	free_table_devices(&md->table_devices);
	dm_stats_cleanup(&md->stats);
	free_minor(minor);

	module_put(THIS_MODULE);
	kvfree(md);
}

/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
{
	unsigned long flags;
	LIST_HEAD(uevents);
	struct mapped_device *md = (struct mapped_device *) context;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_splice_init(&md->uevent_list, &uevents);
	spin_unlock_irqrestore(&md->uevent_lock, flags);

	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);

	atomic_inc(&md->event_nr);
	wake_up(&md->eventq);
	dm_issue_global_event();
}

/*
 * Returns old map, which caller must destroy.
 */
static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
			       struct queue_limits *limits)
{
	struct dm_table *old_map;
	sector_t size;
	int ret;

	lockdep_assert_held(&md->suspend_lock);

	size = dm_table_get_size(t);

	/*
	 * Wipe any geometry if the size of the table changed.
	 */
	if (size != dm_get_size(md))
		memset(&md->geometry, 0, sizeof(md->geometry));

	if (!get_capacity(md->disk))
		set_capacity(md->disk, size);
	else
		set_capacity_and_notify(md->disk, size);

	dm_table_event_callback(t, event_callback, md);
	if (dm_table_request_based(t)) {
		/*
		 * Leverage the fact that request-based DM targets are
		 * immutable singletons - used to optimize dm_mq_queue_rq.
		 */
		md->immutable_target = dm_table_get_immutable_target(t);

		/*
		 * There is no need to reload with request-based dm because
		 * the size of front_pad doesn't change.
		 *
		 * Note for future: if you were to reload the bioset,
		 * prepped requests in the queue may refer to bios from the
		 * old bioset, so you would have to walk through the queue
		 * and unprep them.
		 */
		if (!md->mempools) {
			md->mempools = t->mempools;
			t->mempools = NULL;
		}
	} else {
		/*
		 * The md may already have mempools that need changing.
		 * If so, reload the bioset because front_pad may have
		 * changed because a different table was loaded.
		 */
		dm_free_md_mempools(md->mempools);
		md->mempools = t->mempools;
		t->mempools = NULL;
	}

	ret = dm_table_set_restrictions(t, md->queue, limits);
	if (ret) {
		old_map = ERR_PTR(ret);
		goto out;
	}

	old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
	rcu_assign_pointer(md->map, (void *)t);
	md->immutable_target_type = dm_table_get_immutable_target_type(t);

	if (old_map)
		dm_sync_table(md);
out:
	return old_map;
}

/*
 * Returns unbound table for the caller to free.
 */
static struct dm_table *__unbind(struct mapped_device *md)
{
	struct dm_table *map = rcu_dereference_protected(md->map, 1);

	if (!map)
		return NULL;

	dm_table_event_callback(map, NULL, NULL);
	RCU_INIT_POINTER(md->map, NULL);
	dm_sync_table(md);

	return map;
}

/*
 * Constructor for a new device.
 */
int dm_create(int minor, struct mapped_device **result)
{
	struct mapped_device *md;

	md = alloc_dev(minor);
	if (!md)
		return -ENXIO;

	dm_ima_reset_data(md);

	*result = md;
	return 0;
}
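/*
 * Illustrative caller sketch (error handling trimmed; the ioctl layer is
 * the usual caller): create an anonymous device, then drop the initial
 * reference once done with it:
 *
 *	struct mapped_device *md;
 *	int r = dm_create(DM_ANY_MINOR, &md);
 *
 *	if (r)
 *		return r;
 *	...
 *	dm_put(md);
 */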
/*
 * Functions to manage md->type.
 * All are required to hold md->type_lock.
 */
void dm_lock_md_type(struct mapped_device *md)
{
	mutex_lock(&md->type_lock);
}

void dm_unlock_md_type(struct mapped_device *md)
{
	mutex_unlock(&md->type_lock);
}

void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type)
{
	BUG_ON(!mutex_is_locked(&md->type_lock));
	md->type = type;
}

enum dm_queue_mode dm_get_md_type(struct mapped_device *md)
{
	return md->type;
}

struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
{
	return md->immutable_target_type;
}

/*
 * The queue_limits are only valid as long as you have a reference
 * count on 'md'.
 */
struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
{
	BUG_ON(!atomic_read(&md->holders));
	return &md->queue->limits;
}
EXPORT_SYMBOL_GPL(dm_get_queue_limits);
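/*
 * Illustrative usage sketch (hypothetical target code): a target can
 * inspect the live limits of its owning device while it holds a
 * reference on the mapped_device, e.g. via its table:
 *
 *	struct queue_limits *limits =
 *		dm_get_queue_limits(dm_table_get_md(ti->table));
 *
 * As noted above, the returned limits are only valid for as long as
 * that 'md' reference is held.
 */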
/*
 * Set up the DM device's queue based on md's type.
 */
int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
{
	enum dm_queue_mode type = dm_table_get_type(t);
	struct queue_limits limits;
	int r;

	switch (type) {
	case DM_TYPE_REQUEST_BASED:
		md->disk->fops = &dm_rq_blk_dops;
		r = dm_mq_init_request_queue(md, t);
		if (r) {
			DMERR("Cannot initialize queue for request-based dm mapped device");
			return r;
		}
		break;
	case DM_TYPE_BIO_BASED:
	case DM_TYPE_DAX_BIO_BASED:
		break;
	case DM_TYPE_NONE:
		WARN_ON_ONCE(true);
		break;
	}

	r = dm_calculate_queue_limits(t, &limits);
	if (r) {
		DMERR("Cannot calculate initial queue limits");
		return r;
	}
	r = dm_table_set_restrictions(t, md->queue, &limits);
	if (r)
		return r;

	r = add_disk(md->disk);
	if (r)
		return r;

	r = dm_sysfs_init(md);
	if (r) {
		del_gendisk(md->disk);
		return r;
	}
	md->type = type;
	return 0;
}

struct mapped_device *dm_get_md(dev_t dev)
{
	struct mapped_device *md;
	unsigned minor = MINOR(dev);

	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
		return NULL;

	spin_lock(&_minor_lock);

	md = idr_find(&_minor_idr, minor);
	if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) ||
	    test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}
	dm_get(md);
out:
	spin_unlock(&_minor_lock);

	return md;
}
EXPORT_SYMBOL_GPL(dm_get_md);

void *dm_get_mdptr(struct mapped_device *md)
{
	return md->interface_ptr;
}

void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
	md->interface_ptr = ptr;
}

void dm_get(struct mapped_device *md)
{
	atomic_inc(&md->holders);
	BUG_ON(test_bit(DMF_FREEING, &md->flags));
}

int dm_hold(struct mapped_device *md)
{
	spin_lock(&_minor_lock);
	if (test_bit(DMF_FREEING, &md->flags)) {
		spin_unlock(&_minor_lock);
		return -EBUSY;
	}
	dm_get(md);
	spin_unlock(&_minor_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(dm_hold);

const char *dm_device_name(struct mapped_device *md)
{
	return md->name;
}
EXPORT_SYMBOL_GPL(dm_device_name);
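/*
 * Illustrative sketch (hypothetical caller): dm_hold() is the race-safe
 * way to take an extra reference when the device may concurrently be
 * freed, unlike a bare dm_get():
 *
 *	if (dm_hold(md))
 *		return -EBUSY;	(md was already DMF_FREEING)
 *	...
 *	dm_put(md);
 */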
static void __dm_destroy(struct mapped_device *md, bool wait)
{
	struct dm_table *map;
	int srcu_idx;

	might_sleep();

	spin_lock(&_minor_lock);
	idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
	set_bit(DMF_FREEING, &md->flags);
	spin_unlock(&_minor_lock);

	blk_mark_disk_dead(md->disk);

	/*
	 * Take suspend_lock so that presuspend and postsuspend methods
	 * do not race with internal suspend.
	 */
	mutex_lock(&md->suspend_lock);
	map = dm_get_live_table(md, &srcu_idx);
	if (!dm_suspended_md(md)) {
		dm_table_presuspend_targets(map);
		set_bit(DMF_SUSPENDED, &md->flags);
		set_bit(DMF_POST_SUSPENDING, &md->flags);
		dm_table_postsuspend_targets(map);
	}
	/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
	dm_put_live_table(md, srcu_idx);
	mutex_unlock(&md->suspend_lock);

	/*
	 * Rare, but there may be I/O requests still going to complete,
	 * for example.  Wait for all references to disappear.
	 * No one should increment the reference count of the mapped_device
	 * after its state becomes DMF_FREEING.
	 */
	if (wait)
		while (atomic_read(&md->holders))
			msleep(1);
	else if (atomic_read(&md->holders))
		DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
		       dm_device_name(md), atomic_read(&md->holders));

	dm_table_destroy(__unbind(md));
	free_dev(md);
}
(%d users)", 23663f77316dSKiyoshi Ueda dm_device_name(md), atomic_read(&md->holders)); 23673f77316dSKiyoshi Ueda 2368a7940155SAlasdair G Kergon dm_table_destroy(__unbind(md)); 23691da177e4SLinus Torvalds free_dev(md); 23701da177e4SLinus Torvalds } 23713f77316dSKiyoshi Ueda 23723f77316dSKiyoshi Ueda void dm_destroy(struct mapped_device *md) 23733f77316dSKiyoshi Ueda { 23743f77316dSKiyoshi Ueda __dm_destroy(md, true); 23753f77316dSKiyoshi Ueda } 23763f77316dSKiyoshi Ueda 23773f77316dSKiyoshi Ueda void dm_destroy_immediate(struct mapped_device *md) 23783f77316dSKiyoshi Ueda { 23793f77316dSKiyoshi Ueda __dm_destroy(md, false); 23803f77316dSKiyoshi Ueda } 23813f77316dSKiyoshi Ueda 23823f77316dSKiyoshi Ueda void dm_put(struct mapped_device *md) 23833f77316dSKiyoshi Ueda { 23843f77316dSKiyoshi Ueda atomic_dec(&md->holders); 23851da177e4SLinus Torvalds } 238679eb885cSEdward Goggin EXPORT_SYMBOL_GPL(dm_put); 23871da177e4SLinus Torvalds 23889f6dc633SMike Snitzer static bool dm_in_flight_bios(struct mapped_device *md) 238985067747SMing Lei { 239085067747SMing Lei int cpu; 23919f6dc633SMike Snitzer unsigned long sum = 0; 239285067747SMing Lei 23939f6dc633SMike Snitzer for_each_possible_cpu(cpu) 23949f6dc633SMike Snitzer sum += *per_cpu_ptr(md->pending_io, cpu); 239585067747SMing Lei 239685067747SMing Lei return sum != 0; 239785067747SMing Lei } 239885067747SMing Lei 23992f064a59SPeter Zijlstra static int dm_wait_for_bios_completion(struct mapped_device *md, unsigned int task_state) 240046125c1cSMilan Broz { 240146125c1cSMilan Broz int r = 0; 24029f4c3f87SBart Van Assche DEFINE_WAIT(wait); 240346125c1cSMilan Broz 240485067747SMing Lei while (true) { 24059f4c3f87SBart Van Assche prepare_to_wait(&md->wait, &wait, task_state); 240646125c1cSMilan Broz 24079f6dc633SMike Snitzer if (!dm_in_flight_bios(md)) 240846125c1cSMilan Broz break; 240946125c1cSMilan Broz 2410e3fabdfdSBart Van Assche if (signal_pending_state(task_state, current)) { 241146125c1cSMilan Broz r = -EINTR; 241246125c1cSMilan Broz break; 241346125c1cSMilan Broz } 241446125c1cSMilan Broz 241546125c1cSMilan Broz io_schedule(); 241646125c1cSMilan Broz } 24179f4c3f87SBart Van Assche finish_wait(&md->wait, &wait); 2418b44ebeb0SMikulas Patocka 24199f6dc633SMike Snitzer smp_rmb(); 24209f6dc633SMike Snitzer 242146125c1cSMilan Broz return r; 242246125c1cSMilan Broz } 242346125c1cSMilan Broz 24242f064a59SPeter Zijlstra static int dm_wait_for_completion(struct mapped_device *md, unsigned int task_state) 242585067747SMing Lei { 242685067747SMing Lei int r = 0; 242785067747SMing Lei 242885067747SMing Lei if (!queue_is_mq(md->queue)) 242985067747SMing Lei return dm_wait_for_bios_completion(md, task_state); 243085067747SMing Lei 243185067747SMing Lei while (true) { 243285067747SMing Lei if (!blk_mq_queue_inflight(md->queue)) 243385067747SMing Lei break; 243485067747SMing Lei 243585067747SMing Lei if (signal_pending_state(task_state, current)) { 243685067747SMing Lei r = -EINTR; 243785067747SMing Lei break; 243885067747SMing Lei } 243985067747SMing Lei 244085067747SMing Lei msleep(5); 244185067747SMing Lei } 244285067747SMing Lei 244385067747SMing Lei return r; 244485067747SMing Lei } 244585067747SMing Lei 24461da177e4SLinus Torvalds /* 24471da177e4SLinus Torvalds * Process the deferred bios 24481da177e4SLinus Torvalds */ 2449ef208587SMikulas Patocka static void dm_wq_work(struct work_struct *work) 24501da177e4SLinus Torvalds { 24510c2915b8SMike Snitzer struct mapped_device *md = container_of(work, struct mapped_device, work); 24520c2915b8SMike Snitzer struct 
bio *bio; 2453ef208587SMikulas Patocka 24543b00b203SMikulas Patocka while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 2455022c2611SMikulas Patocka spin_lock_irq(&md->deferred_lock); 24560c2915b8SMike Snitzer bio = bio_list_pop(&md->deferred); 2457022c2611SMikulas Patocka spin_unlock_irq(&md->deferred_lock); 2458022c2611SMikulas Patocka 24590c2915b8SMike Snitzer if (!bio) 2460df12ee99SAlasdair G Kergon break; 246173d410c0SMilan Broz 24620c2915b8SMike Snitzer submit_bio_noacct(bio); 2463e6ee8c0bSKiyoshi Ueda } 24641da177e4SLinus Torvalds } 24651da177e4SLinus Torvalds 24669a1fb464SMikulas Patocka static void dm_queue_flush(struct mapped_device *md) 2467304f3f6aSMilan Broz { 24683b00b203SMikulas Patocka clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 24694e857c58SPeter Zijlstra smp_mb__after_atomic(); 247053d5914fSMikulas Patocka queue_work(md->wq, &md->work); 2471304f3f6aSMilan Broz } 2472304f3f6aSMilan Broz 24731da177e4SLinus Torvalds /* 2474042d2a9bSAlasdair G Kergon * Swap in a new table, returning the old one for the caller to destroy. 24751da177e4SLinus Torvalds */ 2476042d2a9bSAlasdair G Kergon struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) 24771da177e4SLinus Torvalds { 247887eb5b21SMike Christie struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL); 2479754c5fc7SMike Snitzer struct queue_limits limits; 2480042d2a9bSAlasdair G Kergon int r; 24811da177e4SLinus Torvalds 2482e61290a4SDaniel Walker mutex_lock(&md->suspend_lock); 24831da177e4SLinus Torvalds 24841da177e4SLinus Torvalds /* device must be suspended */ 24854f186f8bSKiyoshi Ueda if (!dm_suspended_md(md)) 248693c534aeSAlasdair G Kergon goto out; 24871da177e4SLinus Torvalds 24883ae70656SMike Snitzer /* 24893ae70656SMike Snitzer * If the new table has no data devices, retain the existing limits. 24903ae70656SMike Snitzer * This helps multipath with queue_if_no_path if all paths disappear, 24913ae70656SMike Snitzer * then new I/O is queued based on these limits, and then some paths 24923ae70656SMike Snitzer * reappear. 24933ae70656SMike Snitzer */ 24943ae70656SMike Snitzer if (dm_table_has_no_data_devices(table)) { 249583d5e5b0SMikulas Patocka live_map = dm_get_live_table_fast(md); 24963ae70656SMike Snitzer if (live_map) 24973ae70656SMike Snitzer limits = md->queue->limits; 249883d5e5b0SMikulas Patocka dm_put_live_table_fast(md); 24993ae70656SMike Snitzer } 25003ae70656SMike Snitzer 250187eb5b21SMike Christie if (!live_map) { 2502754c5fc7SMike Snitzer r = dm_calculate_queue_limits(table, &limits); 2503042d2a9bSAlasdair G Kergon if (r) { 2504042d2a9bSAlasdair G Kergon map = ERR_PTR(r); 2505754c5fc7SMike Snitzer goto out; 2506042d2a9bSAlasdair G Kergon } 250787eb5b21SMike Christie } 2508754c5fc7SMike Snitzer 2509042d2a9bSAlasdair G Kergon map = __bind(md, table, &limits); 251062e08243SMikulas Patocka dm_issue_global_event(); 25111da177e4SLinus Torvalds 251293c534aeSAlasdair G Kergon out: 2513e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock); 2514042d2a9bSAlasdair G Kergon return map; 25151da177e4SLinus Torvalds } 25161da177e4SLinus Torvalds 25171da177e4SLinus Torvalds /* 25181da177e4SLinus Torvalds * Functions to lock and unlock any filesystem running on the 25191da177e4SLinus Torvalds * device. 
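*
* Editor's sketch (illustrative, not in the original source): the pair
* stays balanced via the DMF_FROZEN flag, so unlock_fs() is safe to call
* even when lock_fs() failed or was skipped:
*
*	r = lock_fs(md);	(sets DMF_FROZEN only on success)
*	if (r)
*		goto undo;	(nothing to thaw)
*	...
*	unlock_fs(md);		(thaws only if DMF_FROZEN is set)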
25201da177e4SLinus Torvalds */ 25212ca3310eSAlasdair G Kergon static int lock_fs(struct mapped_device *md) 25221da177e4SLinus Torvalds { 2523e39e2e95SAlasdair G Kergon int r; 25241da177e4SLinus Torvalds 2525040f04bdSChristoph Hellwig WARN_ON(test_bit(DMF_FROZEN, &md->flags)); 2526dfbe03f6SAlasdair G Kergon 2527977115c0SChristoph Hellwig r = freeze_bdev(md->disk->part0); 2528040f04bdSChristoph Hellwig if (!r) 2529aa8d7c2fSAlasdair G Kergon set_bit(DMF_FROZEN, &md->flags); 2530040f04bdSChristoph Hellwig return r; 25311da177e4SLinus Torvalds } 25321da177e4SLinus Torvalds 25332ca3310eSAlasdair G Kergon static void unlock_fs(struct mapped_device *md) 25341da177e4SLinus Torvalds { 2535aa8d7c2fSAlasdair G Kergon if (!test_bit(DMF_FROZEN, &md->flags)) 2536aa8d7c2fSAlasdair G Kergon return; 2537977115c0SChristoph Hellwig thaw_bdev(md->disk->part0); 2538aa8d7c2fSAlasdair G Kergon clear_bit(DMF_FROZEN, &md->flags); 25391da177e4SLinus Torvalds } 25401da177e4SLinus Torvalds 25411da177e4SLinus Torvalds /* 2542b48633f8SBart Van Assche * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG 2543b48633f8SBart Van Assche * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE 2544b48633f8SBart Van Assche * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY 2545b48633f8SBart Van Assche * 2546ffcc3936SMike Snitzer * If __dm_suspend returns 0, the device is completely quiescent 2547ffcc3936SMike Snitzer * now. There is no request-processing activity. All new requests 2548ffcc3936SMike Snitzer * are being added to md->deferred list. 2549cec47e3dSKiyoshi Ueda */ 2550ffcc3936SMike Snitzer static int __dm_suspend(struct mapped_device *md, struct dm_table *map, 25512f064a59SPeter Zijlstra unsigned suspend_flags, unsigned int task_state, 2552eaf9a736SMike Snitzer int dmf_suspended_flag) 25531da177e4SLinus Torvalds { 2554ffcc3936SMike Snitzer bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG; 2555ffcc3936SMike Snitzer bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG; 2556ffcc3936SMike Snitzer int r; 2557cf222b37SAlasdair G Kergon 25585a8f1f80SBart Van Assche lockdep_assert_held(&md->suspend_lock); 25595a8f1f80SBart Van Assche 25602e93ccc1SKiyoshi Ueda /* 25612e93ccc1SKiyoshi Ueda * DMF_NOFLUSH_SUSPENDING must be set before presuspend. 25622e93ccc1SKiyoshi Ueda * This flag is cleared before dm_suspend returns. 25632e93ccc1SKiyoshi Ueda */ 25642e93ccc1SKiyoshi Ueda if (noflush) 25652e93ccc1SKiyoshi Ueda set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 256686331f39SBart Van Assche else 2567ac75b09fSMike Snitzer DMDEBUG("%s: suspending with flush", dm_device_name(md)); 25682e93ccc1SKiyoshi Ueda 2569d67ee213SMike Snitzer /* 2570d67ee213SMike Snitzer * This gets reverted if there's an error later and the targets 2571d67ee213SMike Snitzer * provide the .presuspend_undo hook. 2572d67ee213SMike Snitzer */ 25731da177e4SLinus Torvalds dm_table_presuspend_targets(map); 25741da177e4SLinus Torvalds 25752e93ccc1SKiyoshi Ueda /* 25769f518b27SKiyoshi Ueda * Flush I/O to the device. 25779f518b27SKiyoshi Ueda * Any I/O submitted after lock_fs() may not be flushed. 25789f518b27SKiyoshi Ueda * noflush takes precedence over do_lockfs. 25799f518b27SKiyoshi Ueda * (lock_fs() flushes I/Os and waits for them to complete.) 
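*
* Worked example (editor's note, not in the original source): with
* suspend_flags == (DM_SUSPEND_LOCKFS_FLAG | DM_SUSPEND_NOFLUSH_FLAG)
* both do_lockfs and noflush are true, and the "!noflush && do_lockfs"
* test below still skips lock_fs(), i.e. noflush wins.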
25802e93ccc1SKiyoshi Ueda */ 258132a926daSMikulas Patocka if (!noflush && do_lockfs) { 25822ca3310eSAlasdair G Kergon r = lock_fs(md); 2583d67ee213SMike Snitzer if (r) { 2584d67ee213SMike Snitzer dm_table_presuspend_undo_targets(map); 2585ffcc3936SMike Snitzer return r; 2586aa8d7c2fSAlasdair G Kergon } 2587d67ee213SMike Snitzer } 25881da177e4SLinus Torvalds 25891da177e4SLinus Torvalds /* 25903b00b203SMikulas Patocka * Here we must make sure that no processes are submitting requests 25913b00b203SMikulas Patocka * to target drivers i.e. no one may be executing 259296c9865cSMike Snitzer * dm_split_and_process_bio from dm_submit_bio. 25933b00b203SMikulas Patocka * 259496c9865cSMike Snitzer * To get all processes out of dm_split_and_process_bio in dm_submit_bio, 25953b00b203SMikulas Patocka * we take the write lock. To prevent any process from reentering 259696c9865cSMike Snitzer * dm_split_and_process_bio from dm_submit_bio and quiesce the thread 25970cede372SMike Snitzer * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call 25986a8736d1STejun Heo * flush_workqueue(md->wq). 25991da177e4SLinus Torvalds */ 26001eb787ecSAlasdair G Kergon set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 260141abc4e1SHannes Reinecke if (map) 260283d5e5b0SMikulas Patocka synchronize_srcu(&md->io_barrier); 26031da177e4SLinus Torvalds 2604d0bcb878SKiyoshi Ueda /* 260529e4013dSTejun Heo * Stop md->queue before flushing md->wq in case request-based 260629e4013dSTejun Heo * dm defers requests to md->wq from md->queue. 2607d0bcb878SKiyoshi Ueda */ 26086a23e05cSJens Axboe if (dm_request_based(md)) 2609eca7ee6dSMike Snitzer dm_stop_queue(md->queue); 2610cec47e3dSKiyoshi Ueda 2611d0bcb878SKiyoshi Ueda flush_workqueue(md->wq); 2612d0bcb878SKiyoshi Ueda 26131da177e4SLinus Torvalds /* 26143b00b203SMikulas Patocka * At this point no more requests are entering target request routines. 26153b00b203SMikulas Patocka * We call dm_wait_for_completion to wait for all existing requests 26163b00b203SMikulas Patocka * to finish. 26171da177e4SLinus Torvalds */ 2618b48633f8SBart Van Assche r = dm_wait_for_completion(md, task_state); 2619eaf9a736SMike Snitzer if (!r) 2620eaf9a736SMike Snitzer set_bit(dmf_suspended_flag, &md->flags); 26211da177e4SLinus Torvalds 26226d6f10dfSMilan Broz if (noflush) 2623022c2611SMikulas Patocka clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 262441abc4e1SHannes Reinecke if (map) 262583d5e5b0SMikulas Patocka synchronize_srcu(&md->io_barrier); 26262e93ccc1SKiyoshi Ueda 26271da177e4SLinus Torvalds /* were we interrupted ? */ 262846125c1cSMilan Broz if (r < 0) { 26299a1fb464SMikulas Patocka dm_queue_flush(md); 263073d410c0SMilan Broz 2631cec47e3dSKiyoshi Ueda if (dm_request_based(md)) 2632eca7ee6dSMike Snitzer dm_start_queue(md->queue); 2633cec47e3dSKiyoshi Ueda 26342ca3310eSAlasdair G Kergon unlock_fs(md); 2635d67ee213SMike Snitzer dm_table_presuspend_undo_targets(map); 2636ffcc3936SMike Snitzer /* pushback list is already flushed, so skip flush */ 2637ffcc3936SMike Snitzer } 2638ffcc3936SMike Snitzer 2639ffcc3936SMike Snitzer return r; 26402ca3310eSAlasdair G Kergon } 26412ca3310eSAlasdair G Kergon 26423b00b203SMikulas Patocka /* 2643ffcc3936SMike Snitzer * We need to be able to change a mapping table under a mounted 2644ffcc3936SMike Snitzer * filesystem. For example we might want to move some data in 2645ffcc3936SMike Snitzer * the background. 
Before the table can be swapped with 2646ffcc3936SMike Snitzer * dm_bind_table, dm_suspend must be called to flush any in 2647ffcc3936SMike Snitzer * flight bios and ensure that any further io gets deferred. 26483b00b203SMikulas Patocka */ 2649ffcc3936SMike Snitzer /* 2650ffcc3936SMike Snitzer * Suspend mechanism in request-based dm. 2651ffcc3936SMike Snitzer * 2652ffcc3936SMike Snitzer * 1. Flush all I/Os by lock_fs() if needed. 2653ffcc3936SMike Snitzer * 2. Stop dispatching any I/O by stopping the request_queue. 2654ffcc3936SMike Snitzer * 3. Wait for all in-flight I/Os to be completed or requeued. 2655ffcc3936SMike Snitzer * 2656ffcc3936SMike Snitzer * To abort suspend, start the request_queue. 2657ffcc3936SMike Snitzer */ 2658ffcc3936SMike Snitzer int dm_suspend(struct mapped_device *md, unsigned suspend_flags) 2659ffcc3936SMike Snitzer { 2660ffcc3936SMike Snitzer struct dm_table *map = NULL; 2661ffcc3936SMike Snitzer int r = 0; 2662ffcc3936SMike Snitzer 2663ffcc3936SMike Snitzer retry: 2664ffcc3936SMike Snitzer mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); 2665ffcc3936SMike Snitzer 2666ffcc3936SMike Snitzer if (dm_suspended_md(md)) { 2667ffcc3936SMike Snitzer r = -EINVAL; 2668ffcc3936SMike Snitzer goto out_unlock; 2669ffcc3936SMike Snitzer } 2670ffcc3936SMike Snitzer 2671ffcc3936SMike Snitzer if (dm_suspended_internally_md(md)) { 2672ffcc3936SMike Snitzer /* already internally suspended, wait for internal resume */ 2673ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 2674ffcc3936SMike Snitzer r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); 2675ffcc3936SMike Snitzer if (r) 2676ffcc3936SMike Snitzer return r; 2677ffcc3936SMike Snitzer goto retry; 2678ffcc3936SMike Snitzer } 2679ffcc3936SMike Snitzer 2680a12f5d48SEric Dumazet map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2681ffcc3936SMike Snitzer 2682eaf9a736SMike Snitzer r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED); 2683ffcc3936SMike Snitzer if (r) 2684ffcc3936SMike Snitzer goto out_unlock; 26853b00b203SMikulas Patocka 26865df96f2bSMikulas Patocka set_bit(DMF_POST_SUSPENDING, &md->flags); 26874d4471cbSKiyoshi Ueda dm_table_postsuspend_targets(map); 26885df96f2bSMikulas Patocka clear_bit(DMF_POST_SUSPENDING, &md->flags); 26894d4471cbSKiyoshi Ueda 2690d287483dSAlasdair G Kergon out_unlock: 2691e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock); 2692cf222b37SAlasdair G Kergon return r; 26931da177e4SLinus Torvalds } 26941da177e4SLinus Torvalds 2695ffcc3936SMike Snitzer static int __dm_resume(struct mapped_device *md, struct dm_table *map) 26961da177e4SLinus Torvalds { 2697ffcc3936SMike Snitzer if (map) { 2698ffcc3936SMike Snitzer int r = dm_table_resume_targets(map); 26998757b776SMilan Broz if (r) 2700ffcc3936SMike Snitzer return r; 2701ffcc3936SMike Snitzer } 27022ca3310eSAlasdair G Kergon 27039a1fb464SMikulas Patocka dm_queue_flush(md); 27042ca3310eSAlasdair G Kergon 2705cec47e3dSKiyoshi Ueda /* 2706cec47e3dSKiyoshi Ueda * Flushing deferred I/Os must be done after targets are resumed 2707cec47e3dSKiyoshi Ueda * so that mapping of targets can work correctly. 2708cec47e3dSKiyoshi Ueda * Request-based dm is queueing the deferred I/Os in its request_queue. 
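* Bio-based dm, by contrast, re-submits md->deferred from dm_wq_work()
* once dm_queue_flush() above has cleared DMF_BLOCK_IO_FOR_SUSPEND
* (editor's note, not in the original source).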
2709cec47e3dSKiyoshi Ueda */ 2710cec47e3dSKiyoshi Ueda if (dm_request_based(md)) 2711eca7ee6dSMike Snitzer dm_start_queue(md->queue); 2712cec47e3dSKiyoshi Ueda 27132ca3310eSAlasdair G Kergon unlock_fs(md); 27142ca3310eSAlasdair G Kergon 2715ffcc3936SMike Snitzer return 0; 2716ffcc3936SMike Snitzer } 2717ffcc3936SMike Snitzer 2718ffcc3936SMike Snitzer int dm_resume(struct mapped_device *md) 2719ffcc3936SMike Snitzer { 27208dc23658SMinfei Huang int r; 2721ffcc3936SMike Snitzer struct dm_table *map = NULL; 2722ffcc3936SMike Snitzer 2723ffcc3936SMike Snitzer retry: 27248dc23658SMinfei Huang r = -EINVAL; 2725ffcc3936SMike Snitzer mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); 2726ffcc3936SMike Snitzer 2727ffcc3936SMike Snitzer if (!dm_suspended_md(md)) 2728ffcc3936SMike Snitzer goto out; 2729ffcc3936SMike Snitzer 2730ffcc3936SMike Snitzer if (dm_suspended_internally_md(md)) { 2731ffcc3936SMike Snitzer /* already internally suspended, wait for internal resume */ 2732ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 2733ffcc3936SMike Snitzer r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); 2734ffcc3936SMike Snitzer if (r) 2735ffcc3936SMike Snitzer return r; 2736ffcc3936SMike Snitzer goto retry; 2737ffcc3936SMike Snitzer } 2738ffcc3936SMike Snitzer 2739a12f5d48SEric Dumazet map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2740ffcc3936SMike Snitzer if (!map || !dm_table_get_size(map)) 2741ffcc3936SMike Snitzer goto out; 2742ffcc3936SMike Snitzer 2743ffcc3936SMike Snitzer r = __dm_resume(md, map); 2744ffcc3936SMike Snitzer if (r) 2745ffcc3936SMike Snitzer goto out; 2746ffcc3936SMike Snitzer 27472ca3310eSAlasdair G Kergon clear_bit(DMF_SUSPENDED, &md->flags); 2748cf222b37SAlasdair G Kergon out: 2749e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock); 27502ca3310eSAlasdair G Kergon 2751cf222b37SAlasdair G Kergon return r; 27521da177e4SLinus Torvalds } 27531da177e4SLinus Torvalds 2754fd2ed4d2SMikulas Patocka /* 2755fd2ed4d2SMikulas Patocka * Internal suspend/resume works like userspace-driven suspend. It waits 2756fd2ed4d2SMikulas Patocka * until all bios finish and prevents issuing new bios to the target drivers. 2757fd2ed4d2SMikulas Patocka * It may be used only from the kernel. 2758fd2ed4d2SMikulas Patocka */ 2759fd2ed4d2SMikulas Patocka 2760ffcc3936SMike Snitzer static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags) 2761ffcc3936SMike Snitzer { 2762ffcc3936SMike Snitzer struct dm_table *map = NULL; 2763ffcc3936SMike Snitzer 27641ea0654eSBart Van Assche lockdep_assert_held(&md->suspend_lock); 27651ea0654eSBart Van Assche 276696b26c8cSMikulas Patocka if (md->internal_suspend_count++) 2767ffcc3936SMike Snitzer return; /* nested internal suspend */ 2768ffcc3936SMike Snitzer 2769ffcc3936SMike Snitzer if (dm_suspended_md(md)) { 2770ffcc3936SMike Snitzer set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 2771ffcc3936SMike Snitzer return; /* nest suspend */ 2772ffcc3936SMike Snitzer } 2773ffcc3936SMike Snitzer 2774a12f5d48SEric Dumazet map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2775ffcc3936SMike Snitzer 2776ffcc3936SMike Snitzer /* 2777ffcc3936SMike Snitzer * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is 2778ffcc3936SMike Snitzer * supported. 
Properly supporting a TASK_INTERRUPTIBLE internal suspend 2779ffcc3936SMike Snitzer * would require changing .presuspend to return an error -- avoid this 2780ffcc3936SMike Snitzer * until there is a need for more elaborate variants of internal suspend. 2781ffcc3936SMike Snitzer */ 2782eaf9a736SMike Snitzer (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE, 2783eaf9a736SMike Snitzer DMF_SUSPENDED_INTERNALLY); 2784ffcc3936SMike Snitzer 27855df96f2bSMikulas Patocka set_bit(DMF_POST_SUSPENDING, &md->flags); 2786ffcc3936SMike Snitzer dm_table_postsuspend_targets(map); 27875df96f2bSMikulas Patocka clear_bit(DMF_POST_SUSPENDING, &md->flags); 2788ffcc3936SMike Snitzer } 2789ffcc3936SMike Snitzer 2790ffcc3936SMike Snitzer static void __dm_internal_resume(struct mapped_device *md) 2791ffcc3936SMike Snitzer { 279296b26c8cSMikulas Patocka BUG_ON(!md->internal_suspend_count); 279396b26c8cSMikulas Patocka 279496b26c8cSMikulas Patocka if (--md->internal_suspend_count) 2795ffcc3936SMike Snitzer return; /* resume from nested internal suspend */ 2796ffcc3936SMike Snitzer 2797ffcc3936SMike Snitzer if (dm_suspended_md(md)) 2798ffcc3936SMike Snitzer goto done; /* resume from nested suspend */ 2799ffcc3936SMike Snitzer 2800ffcc3936SMike Snitzer /* 2801ffcc3936SMike Snitzer * NOTE: existing callers don't need to call dm_table_resume_targets 2802ffcc3936SMike Snitzer * (which may fail -- so best to avoid it for now by passing NULL map) 2803ffcc3936SMike Snitzer */ 2804ffcc3936SMike Snitzer (void) __dm_resume(md, NULL); 2805ffcc3936SMike Snitzer 2806ffcc3936SMike Snitzer done: 2807ffcc3936SMike Snitzer clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 2808ffcc3936SMike Snitzer smp_mb__after_atomic(); 2809ffcc3936SMike Snitzer wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY); 2810ffcc3936SMike Snitzer } 2811ffcc3936SMike Snitzer 2812ffcc3936SMike Snitzer void dm_internal_suspend_noflush(struct mapped_device *md) 2813fd2ed4d2SMikulas Patocka { 2814fd2ed4d2SMikulas Patocka mutex_lock(&md->suspend_lock); 2815ffcc3936SMike Snitzer __dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG); 2816ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 2817ffcc3936SMike Snitzer } 2818ffcc3936SMike Snitzer EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush); 2819ffcc3936SMike Snitzer 2820ffcc3936SMike Snitzer void dm_internal_resume(struct mapped_device *md) 2821ffcc3936SMike Snitzer { 2822ffcc3936SMike Snitzer mutex_lock(&md->suspend_lock); 2823ffcc3936SMike Snitzer __dm_internal_resume(md); 2824ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 2825ffcc3936SMike Snitzer } 2826ffcc3936SMike Snitzer EXPORT_SYMBOL_GPL(dm_internal_resume); 2827ffcc3936SMike Snitzer 2828ffcc3936SMike Snitzer /* 2829ffcc3936SMike Snitzer * Fast variants of internal suspend/resume hold md->suspend_lock, 2830ffcc3936SMike Snitzer * which prevents interaction with userspace-driven suspend. 
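*
* Illustrative pairing (editor's sketch, not in the original source);
* note the lock is taken here and only dropped by the matching resume:
*
*	dm_internal_suspend_fast(md);
*	... inspect or modify quiesced state ...
*	dm_internal_resume_fast(md);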
2831ffcc3936SMike Snitzer */ 2832ffcc3936SMike Snitzer 2833ffcc3936SMike Snitzer void dm_internal_suspend_fast(struct mapped_device *md) 2834ffcc3936SMike Snitzer { 2835ffcc3936SMike Snitzer mutex_lock(&md->suspend_lock); 2836ffcc3936SMike Snitzer if (dm_suspended_md(md) || dm_suspended_internally_md(md)) 2837fd2ed4d2SMikulas Patocka return; 2838fd2ed4d2SMikulas Patocka 2839fd2ed4d2SMikulas Patocka set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 2840fd2ed4d2SMikulas Patocka synchronize_srcu(&md->io_barrier); 2841fd2ed4d2SMikulas Patocka flush_workqueue(md->wq); 2842fd2ed4d2SMikulas Patocka dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); 2843fd2ed4d2SMikulas Patocka } 2844b735fedeSMikulas Patocka EXPORT_SYMBOL_GPL(dm_internal_suspend_fast); 2845fd2ed4d2SMikulas Patocka 2846ffcc3936SMike Snitzer void dm_internal_resume_fast(struct mapped_device *md) 2847fd2ed4d2SMikulas Patocka { 2848ffcc3936SMike Snitzer if (dm_suspended_md(md) || dm_suspended_internally_md(md)) 2849fd2ed4d2SMikulas Patocka goto done; 2850fd2ed4d2SMikulas Patocka 2851fd2ed4d2SMikulas Patocka dm_queue_flush(md); 2852fd2ed4d2SMikulas Patocka 2853fd2ed4d2SMikulas Patocka done: 2854fd2ed4d2SMikulas Patocka mutex_unlock(&md->suspend_lock); 2855fd2ed4d2SMikulas Patocka } 2856b735fedeSMikulas Patocka EXPORT_SYMBOL_GPL(dm_internal_resume_fast); 2857fd2ed4d2SMikulas Patocka 28581da177e4SLinus Torvalds /*----------------------------------------------------------------- 28591da177e4SLinus Torvalds * Event notification. 28601da177e4SLinus Torvalds *---------------------------------------------------------------*/ 28613abf85b5SPeter Rajnoha int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, 286260935eb2SMilan Broz unsigned cookie) 286369267a30SAlasdair G Kergon { 28646958c1c6SMikulas Patocka int r; 28656958c1c6SMikulas Patocka unsigned noio_flag; 286660935eb2SMilan Broz char udev_cookie[DM_COOKIE_LENGTH]; 286760935eb2SMilan Broz char *envp[] = { udev_cookie, NULL }; 286860935eb2SMilan Broz 28696958c1c6SMikulas Patocka noio_flag = memalloc_noio_save(); 28706958c1c6SMikulas Patocka 287160935eb2SMilan Broz if (!cookie) 28726958c1c6SMikulas Patocka r = kobject_uevent(&disk_to_dev(md->disk)->kobj, action); 287360935eb2SMilan Broz else { 287460935eb2SMilan Broz snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u", 287560935eb2SMilan Broz DM_COOKIE_ENV_VAR_NAME, cookie); 28766958c1c6SMikulas Patocka r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj, 28773abf85b5SPeter Rajnoha action, envp); 287860935eb2SMilan Broz } 28796958c1c6SMikulas Patocka 28806958c1c6SMikulas Patocka memalloc_noio_restore(noio_flag); 28816958c1c6SMikulas Patocka 28826958c1c6SMikulas Patocka return r; 288369267a30SAlasdair G Kergon } 288469267a30SAlasdair G Kergon 28857a8c3d3bSMike Anderson uint32_t dm_next_uevent_seq(struct mapped_device *md) 28867a8c3d3bSMike Anderson { 28877a8c3d3bSMike Anderson return atomic_add_return(1, &md->uevent_seq); 28887a8c3d3bSMike Anderson } 28897a8c3d3bSMike Anderson 28901da177e4SLinus Torvalds uint32_t dm_get_event_nr(struct mapped_device *md) 28911da177e4SLinus Torvalds { 28921da177e4SLinus Torvalds return atomic_read(&md->event_nr); 28931da177e4SLinus Torvalds } 28941da177e4SLinus Torvalds 28951da177e4SLinus Torvalds int dm_wait_event(struct mapped_device *md, int event_nr) 28961da177e4SLinus Torvalds { 28971da177e4SLinus Torvalds return wait_event_interruptible(md->eventq, 28981da177e4SLinus Torvalds (event_nr != atomic_read(&md->event_nr))); 28991da177e4SLinus Torvalds } 29001da177e4SLinus Torvalds 
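/*
 * Editor's sketch (illustrative, not part of the original source): a
 * kernel-side caller could block until the next device event like this,
 * where dm_wait_event() returns nonzero only when interrupted:
 *
 *	int nr = dm_get_event_nr(md);
 *
 *	if (dm_wait_event(md, nr))
 *		return -ERESTARTSYS;	(interrupted by a signal)
 *	(md->event_nr has now advanced past nr)
 */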
29017a8c3d3bSMike Anderson void dm_uevent_add(struct mapped_device *md, struct list_head *elist) 29027a8c3d3bSMike Anderson { 29037a8c3d3bSMike Anderson unsigned long flags; 29047a8c3d3bSMike Anderson 29057a8c3d3bSMike Anderson spin_lock_irqsave(&md->uevent_lock, flags); 29067a8c3d3bSMike Anderson list_add(elist, &md->uevent_list); 29077a8c3d3bSMike Anderson spin_unlock_irqrestore(&md->uevent_lock, flags); 29087a8c3d3bSMike Anderson } 29097a8c3d3bSMike Anderson 29101da177e4SLinus Torvalds /* 29111da177e4SLinus Torvalds * The gendisk is only valid as long as you have a reference 29121da177e4SLinus Torvalds * count on 'md'. 29131da177e4SLinus Torvalds */ 29141da177e4SLinus Torvalds struct gendisk *dm_disk(struct mapped_device *md) 29151da177e4SLinus Torvalds { 29161da177e4SLinus Torvalds return md->disk; 29171da177e4SLinus Torvalds } 291865ff5b7dSSami Tolvanen EXPORT_SYMBOL_GPL(dm_disk); 29191da177e4SLinus Torvalds 2920784aae73SMilan Broz struct kobject *dm_kobject(struct mapped_device *md) 2921784aae73SMilan Broz { 29222995fa78SMikulas Patocka return &md->kobj_holder.kobj; 2923784aae73SMilan Broz } 2924784aae73SMilan Broz 2925784aae73SMilan Broz struct mapped_device *dm_get_from_kobject(struct kobject *kobj) 2926784aae73SMilan Broz { 2927784aae73SMilan Broz struct mapped_device *md; 2928784aae73SMilan Broz 29292995fa78SMikulas Patocka md = container_of(kobj, struct mapped_device, kobj_holder.kobj); 2930784aae73SMilan Broz 2931b9a41d21SHou Tao spin_lock(&_minor_lock); 2932b9a41d21SHou Tao if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { 2933b9a41d21SHou Tao md = NULL; 2934b9a41d21SHou Tao goto out; 2935b9a41d21SHou Tao } 2936784aae73SMilan Broz dm_get(md); 2937b9a41d21SHou Tao out: 2938b9a41d21SHou Tao spin_unlock(&_minor_lock); 2939b9a41d21SHou Tao 2940784aae73SMilan Broz return md; 2941784aae73SMilan Broz } 2942784aae73SMilan Broz 29434f186f8bSKiyoshi Ueda int dm_suspended_md(struct mapped_device *md) 29441da177e4SLinus Torvalds { 29451da177e4SLinus Torvalds return test_bit(DMF_SUSPENDED, &md->flags); 29461da177e4SLinus Torvalds } 29471da177e4SLinus Torvalds 29485df96f2bSMikulas Patocka static int dm_post_suspending_md(struct mapped_device *md) 29495df96f2bSMikulas Patocka { 29505df96f2bSMikulas Patocka return test_bit(DMF_POST_SUSPENDING, &md->flags); 29515df96f2bSMikulas Patocka } 29525df96f2bSMikulas Patocka 2953ffcc3936SMike Snitzer int dm_suspended_internally_md(struct mapped_device *md) 2954ffcc3936SMike Snitzer { 2955ffcc3936SMike Snitzer return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 2956ffcc3936SMike Snitzer } 2957ffcc3936SMike Snitzer 29582c140a24SMikulas Patocka int dm_test_deferred_remove_flag(struct mapped_device *md) 29592c140a24SMikulas Patocka { 29602c140a24SMikulas Patocka return test_bit(DMF_DEFERRED_REMOVE, &md->flags); 29612c140a24SMikulas Patocka } 29622c140a24SMikulas Patocka 296364dbce58SKiyoshi Ueda int dm_suspended(struct dm_target *ti) 296464dbce58SKiyoshi Ueda { 296533bd6f06SMike Snitzer return dm_suspended_md(ti->table->md); 296664dbce58SKiyoshi Ueda } 296764dbce58SKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_suspended); 296864dbce58SKiyoshi Ueda 29695df96f2bSMikulas Patocka int dm_post_suspending(struct dm_target *ti) 29705df96f2bSMikulas Patocka { 297133bd6f06SMike Snitzer return dm_post_suspending_md(ti->table->md); 29725df96f2bSMikulas Patocka } 29735df96f2bSMikulas Patocka EXPORT_SYMBOL_GPL(dm_post_suspending); 29745df96f2bSMikulas Patocka 29752e93ccc1SKiyoshi Ueda int dm_noflush_suspending(struct dm_target *ti) 29762e93ccc1SKiyoshi Ueda { 
297733bd6f06SMike Snitzer return __noflush_suspending(ti->table->md); 29782e93ccc1SKiyoshi Ueda } 29792e93ccc1SKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_noflush_suspending); 29802e93ccc1SKiyoshi Ueda 29817e0d574fSBart Van Assche struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type, 2982cfc97abcSMike Snitzer unsigned per_io_data_size, unsigned min_pool_size, 2983cfc97abcSMike Snitzer bool integrity, bool poll) 2984e6ee8c0bSKiyoshi Ueda { 2985115485e8SMike Snitzer struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id); 298678d8e58aSMike Snitzer unsigned int pool_size = 0; 298764f52b0eSMike Snitzer unsigned int front_pad, io_front_pad; 29886f1c819cSKent Overstreet int ret; 2989e6ee8c0bSKiyoshi Ueda 2990e6ee8c0bSKiyoshi Ueda if (!pools) 29914e6e36c3SMike Snitzer return NULL; 2992e6ee8c0bSKiyoshi Ueda 299378d8e58aSMike Snitzer switch (type) { 299478d8e58aSMike Snitzer case DM_TYPE_BIO_BASED: 2995545ed20eSToshi Kani case DM_TYPE_DAX_BIO_BASED: 29960776aa0eSMike Snitzer pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size); 299762f26317SJeffle Xu front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + DM_TARGET_IO_BIO_OFFSET; 299862f26317SJeffle Xu io_front_pad = roundup(per_io_data_size, __alignof__(struct dm_io)) + DM_IO_BIO_OFFSET; 2999cfc97abcSMike Snitzer ret = bioset_init(&pools->io_bs, pool_size, io_front_pad, poll ? BIOSET_PERCPU_CACHE : 0); 30006f1c819cSKent Overstreet if (ret) 300164f52b0eSMike Snitzer goto out; 30026f1c819cSKent Overstreet if (integrity && bioset_integrity_create(&pools->io_bs, pool_size)) 3003eb8db831SChristoph Hellwig goto out; 300478d8e58aSMike Snitzer break; 300578d8e58aSMike Snitzer case DM_TYPE_REQUEST_BASED: 30060776aa0eSMike Snitzer pool_size = max(dm_get_reserved_rq_based_ios(), min_pool_size); 300778d8e58aSMike Snitzer front_pad = offsetof(struct dm_rq_clone_bio_info, clone); 3008591ddcfcSMike Snitzer /* per_io_data_size is used for blk-mq pdu at queue allocation */ 300978d8e58aSMike Snitzer break; 301078d8e58aSMike Snitzer default: 301178d8e58aSMike Snitzer BUG(); 301278d8e58aSMike Snitzer } 301378d8e58aSMike Snitzer 30146f1c819cSKent Overstreet ret = bioset_init(&pools->bs, pool_size, front_pad, 0); 30156f1c819cSKent Overstreet if (ret) 30165f015204SJun'ichi Nomura goto out; 3017e6ee8c0bSKiyoshi Ueda 30186f1c819cSKent Overstreet if (integrity && bioset_integrity_create(&pools->bs, pool_size)) 30195f015204SJun'ichi Nomura goto out; 3020a91a2785SMartin K. 
Petersen 3021e6ee8c0bSKiyoshi Ueda return pools; 302278d8e58aSMike Snitzer 30235f015204SJun'ichi Nomura out: 30245f015204SJun'ichi Nomura dm_free_md_mempools(pools); 3025e6ee8c0bSKiyoshi Ueda 30264e6e36c3SMike Snitzer return NULL; 3027e6ee8c0bSKiyoshi Ueda } 3028e6ee8c0bSKiyoshi Ueda 3029e6ee8c0bSKiyoshi Ueda void dm_free_md_mempools(struct dm_md_mempools *pools) 3030e6ee8c0bSKiyoshi Ueda { 3031e6ee8c0bSKiyoshi Ueda if (!pools) 3032e6ee8c0bSKiyoshi Ueda return; 3033e6ee8c0bSKiyoshi Ueda 30346f1c819cSKent Overstreet bioset_exit(&pools->bs); 30356f1c819cSKent Overstreet bioset_exit(&pools->io_bs); 3036e6ee8c0bSKiyoshi Ueda 3037e6ee8c0bSKiyoshi Ueda kfree(pools); 3038e6ee8c0bSKiyoshi Ueda } 3039e6ee8c0bSKiyoshi Ueda 30409c72bad1SChristoph Hellwig struct dm_pr { 30419c72bad1SChristoph Hellwig u64 old_key; 30429c72bad1SChristoph Hellwig u64 new_key; 30439c72bad1SChristoph Hellwig u32 flags; 30449c72bad1SChristoph Hellwig bool fail_early; 30459c72bad1SChristoph Hellwig }; 30469c72bad1SChristoph Hellwig 30479c72bad1SChristoph Hellwig static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn, 30489c72bad1SChristoph Hellwig void *data) 30499c72bad1SChristoph Hellwig { 30509c72bad1SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data; 30519c72bad1SChristoph Hellwig struct dm_table *table; 30529c72bad1SChristoph Hellwig struct dm_target *ti; 30539c72bad1SChristoph Hellwig int ret = -ENOTTY, srcu_idx; 30549c72bad1SChristoph Hellwig 30559c72bad1SChristoph Hellwig table = dm_get_live_table(md, &srcu_idx); 30569c72bad1SChristoph Hellwig if (!table || !dm_table_get_size(table)) 30579c72bad1SChristoph Hellwig goto out; 30589c72bad1SChristoph Hellwig 30599c72bad1SChristoph Hellwig /* We only support devices that have a single target */ 30609c72bad1SChristoph Hellwig if (dm_table_get_num_targets(table) != 1) 30619c72bad1SChristoph Hellwig goto out; 30629c72bad1SChristoph Hellwig ti = dm_table_get_target(table, 0); 30639c72bad1SChristoph Hellwig 30649c72bad1SChristoph Hellwig ret = -EINVAL; 30659c72bad1SChristoph Hellwig if (!ti->type->iterate_devices) 30669c72bad1SChristoph Hellwig goto out; 30679c72bad1SChristoph Hellwig 30689c72bad1SChristoph Hellwig ret = ti->type->iterate_devices(ti, fn, data); 30699c72bad1SChristoph Hellwig out: 30709c72bad1SChristoph Hellwig dm_put_live_table(md, srcu_idx); 30719c72bad1SChristoph Hellwig return ret; 30729c72bad1SChristoph Hellwig } 30739c72bad1SChristoph Hellwig 30749c72bad1SChristoph Hellwig /* 30759c72bad1SChristoph Hellwig * For register / unregister we need to manually call out to every path. 
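*
* dm_pr_register() below shows the pattern (editor's note): it packs its
* arguments into struct dm_pr, lets dm_call_pr() apply __dm_pr_register()
* to each underlying device through the target's ->iterate_devices hook,
* and on partial failure walks every path again with new_key == 0 to roll
* the registration back.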
30769c72bad1SChristoph Hellwig */ 30779c72bad1SChristoph Hellwig static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev, 30789c72bad1SChristoph Hellwig sector_t start, sector_t len, void *data) 30799c72bad1SChristoph Hellwig { 30809c72bad1SChristoph Hellwig struct dm_pr *pr = data; 30819c72bad1SChristoph Hellwig const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; 30829c72bad1SChristoph Hellwig 30839c72bad1SChristoph Hellwig if (!ops || !ops->pr_register) 30849c72bad1SChristoph Hellwig return -EOPNOTSUPP; 30859c72bad1SChristoph Hellwig return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags); 30869c72bad1SChristoph Hellwig } 30879c72bad1SChristoph Hellwig 308871cdb697SChristoph Hellwig static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key, 308971cdb697SChristoph Hellwig u32 flags) 309071cdb697SChristoph Hellwig { 30919c72bad1SChristoph Hellwig struct dm_pr pr = { 30929c72bad1SChristoph Hellwig .old_key = old_key, 30939c72bad1SChristoph Hellwig .new_key = new_key, 30949c72bad1SChristoph Hellwig .flags = flags, 30959c72bad1SChristoph Hellwig .fail_early = true, 30969c72bad1SChristoph Hellwig }; 30979c72bad1SChristoph Hellwig int ret; 309871cdb697SChristoph Hellwig 30999c72bad1SChristoph Hellwig ret = dm_call_pr(bdev, __dm_pr_register, &pr); 31009c72bad1SChristoph Hellwig if (ret && new_key) { 31019c72bad1SChristoph Hellwig /* unregister all paths if we failed to register any path */ 31029c72bad1SChristoph Hellwig pr.old_key = new_key; 31039c72bad1SChristoph Hellwig pr.new_key = 0; 31049c72bad1SChristoph Hellwig pr.flags = 0; 31059c72bad1SChristoph Hellwig pr.fail_early = false; 31069c72bad1SChristoph Hellwig dm_call_pr(bdev, __dm_pr_register, &pr); 31079c72bad1SChristoph Hellwig } 310871cdb697SChristoph Hellwig 31099c72bad1SChristoph Hellwig return ret; 311071cdb697SChristoph Hellwig } 311171cdb697SChristoph Hellwig 311271cdb697SChristoph Hellwig static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type, 311371cdb697SChristoph Hellwig u32 flags) 311471cdb697SChristoph Hellwig { 311571cdb697SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data; 311671cdb697SChristoph Hellwig const struct pr_ops *ops; 3117971888c4SMike Snitzer int r, srcu_idx; 311871cdb697SChristoph Hellwig 31195bd5e8d8SMike Snitzer r = dm_prepare_ioctl(md, &srcu_idx, &bdev); 312071cdb697SChristoph Hellwig if (r < 0) 3121971888c4SMike Snitzer goto out; 312271cdb697SChristoph Hellwig 312371cdb697SChristoph Hellwig ops = bdev->bd_disk->fops->pr_ops; 312471cdb697SChristoph Hellwig if (ops && ops->pr_reserve) 312571cdb697SChristoph Hellwig r = ops->pr_reserve(bdev, key, type, flags); 312671cdb697SChristoph Hellwig else 312771cdb697SChristoph Hellwig r = -EOPNOTSUPP; 3128971888c4SMike Snitzer out: 3129971888c4SMike Snitzer dm_unprepare_ioctl(md, srcu_idx); 313071cdb697SChristoph Hellwig return r; 313171cdb697SChristoph Hellwig } 313271cdb697SChristoph Hellwig 313371cdb697SChristoph Hellwig static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type) 313471cdb697SChristoph Hellwig { 313571cdb697SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data; 313671cdb697SChristoph Hellwig const struct pr_ops *ops; 3137971888c4SMike Snitzer int r, srcu_idx; 313871cdb697SChristoph Hellwig 31395bd5e8d8SMike Snitzer r = dm_prepare_ioctl(md, &srcu_idx, &bdev); 314071cdb697SChristoph Hellwig if (r < 0) 3141971888c4SMike Snitzer goto out; 314271cdb697SChristoph Hellwig 314371cdb697SChristoph Hellwig ops = 
bdev->bd_disk->fops->pr_ops; 314471cdb697SChristoph Hellwig if (ops && ops->pr_release) 314571cdb697SChristoph Hellwig r = ops->pr_release(bdev, key, type); 314671cdb697SChristoph Hellwig else 314771cdb697SChristoph Hellwig r = -EOPNOTSUPP; 3148971888c4SMike Snitzer out: 3149971888c4SMike Snitzer dm_unprepare_ioctl(md, srcu_idx); 315071cdb697SChristoph Hellwig return r; 315171cdb697SChristoph Hellwig } 315271cdb697SChristoph Hellwig 315371cdb697SChristoph Hellwig static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key, 315471cdb697SChristoph Hellwig enum pr_type type, bool abort) 315571cdb697SChristoph Hellwig { 315671cdb697SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data; 315771cdb697SChristoph Hellwig const struct pr_ops *ops; 3158971888c4SMike Snitzer int r, srcu_idx; 315971cdb697SChristoph Hellwig 31605bd5e8d8SMike Snitzer r = dm_prepare_ioctl(md, &srcu_idx, &bdev); 316171cdb697SChristoph Hellwig if (r < 0) 3162971888c4SMike Snitzer goto out; 316371cdb697SChristoph Hellwig 316471cdb697SChristoph Hellwig ops = bdev->bd_disk->fops->pr_ops; 316571cdb697SChristoph Hellwig if (ops && ops->pr_preempt) 316671cdb697SChristoph Hellwig r = ops->pr_preempt(bdev, old_key, new_key, type, abort); 316771cdb697SChristoph Hellwig else 316871cdb697SChristoph Hellwig r = -EOPNOTSUPP; 3169971888c4SMike Snitzer out: 3170971888c4SMike Snitzer dm_unprepare_ioctl(md, srcu_idx); 317171cdb697SChristoph Hellwig return r; 317271cdb697SChristoph Hellwig } 317371cdb697SChristoph Hellwig 317471cdb697SChristoph Hellwig static int dm_pr_clear(struct block_device *bdev, u64 key) 317571cdb697SChristoph Hellwig { 317671cdb697SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data; 317771cdb697SChristoph Hellwig const struct pr_ops *ops; 3178971888c4SMike Snitzer int r, srcu_idx; 317971cdb697SChristoph Hellwig 31805bd5e8d8SMike Snitzer r = dm_prepare_ioctl(md, &srcu_idx, &bdev); 318171cdb697SChristoph Hellwig if (r < 0) 3182971888c4SMike Snitzer goto out; 318371cdb697SChristoph Hellwig 318471cdb697SChristoph Hellwig ops = bdev->bd_disk->fops->pr_ops; 318571cdb697SChristoph Hellwig if (ops && ops->pr_clear) 318671cdb697SChristoph Hellwig r = ops->pr_clear(bdev, key); 318771cdb697SChristoph Hellwig else 318871cdb697SChristoph Hellwig r = -EOPNOTSUPP; 3189971888c4SMike Snitzer out: 3190971888c4SMike Snitzer dm_unprepare_ioctl(md, srcu_idx); 319171cdb697SChristoph Hellwig return r; 319271cdb697SChristoph Hellwig } 319371cdb697SChristoph Hellwig 319471cdb697SChristoph Hellwig static const struct pr_ops dm_pr_ops = { 319571cdb697SChristoph Hellwig .pr_register = dm_pr_register, 319671cdb697SChristoph Hellwig .pr_reserve = dm_pr_reserve, 319771cdb697SChristoph Hellwig .pr_release = dm_pr_release, 319871cdb697SChristoph Hellwig .pr_preempt = dm_pr_preempt, 319971cdb697SChristoph Hellwig .pr_clear = dm_pr_clear, 320071cdb697SChristoph Hellwig }; 320171cdb697SChristoph Hellwig 320283d5cde4SAlexey Dobriyan static const struct block_device_operations dm_blk_dops = { 3203c62b37d9SChristoph Hellwig .submit_bio = dm_submit_bio, 3204b99fdcdcSMing Lei .poll_bio = dm_poll_bio, 32051da177e4SLinus Torvalds .open = dm_blk_open, 32061da177e4SLinus Torvalds .release = dm_blk_close, 3207aa129a22SMilan Broz .ioctl = dm_blk_ioctl, 32083ac51e74SDarrick J. 
Wong .getgeo = dm_blk_getgeo,
3209e76239a3SChristoph Hellwig .report_zones = dm_blk_report_zones,
321071cdb697SChristoph Hellwig .pr_ops = &dm_pr_ops,
32111da177e4SLinus Torvalds .owner = THIS_MODULE
32121da177e4SLinus Torvalds };
32131da177e4SLinus Torvalds
3214681cc5e8SMike Snitzer static const struct block_device_operations dm_rq_blk_dops = {
3215681cc5e8SMike Snitzer .open = dm_blk_open,
3216681cc5e8SMike Snitzer .release = dm_blk_close,
3217681cc5e8SMike Snitzer .ioctl = dm_blk_ioctl,
3218681cc5e8SMike Snitzer .getgeo = dm_blk_getgeo,
3219681cc5e8SMike Snitzer .pr_ops = &dm_pr_ops,
3220681cc5e8SMike Snitzer .owner = THIS_MODULE
3221681cc5e8SMike Snitzer };
3222681cc5e8SMike Snitzer
3223f26c5719SDan Williams static const struct dax_operations dm_dax_ops = {
3224f26c5719SDan Williams .direct_access = dm_dax_direct_access,
3225cdf6cdcdSVivek Goyal .zero_page_range = dm_dax_zero_page_range,
3226047218ecSJane Chu .recovery_write = dm_dax_recovery_write,
3227f26c5719SDan Williams };
3228f26c5719SDan Williams
32291da177e4SLinus Torvalds /*
32301da177e4SLinus Torvalds * module hooks
32311da177e4SLinus Torvalds */
32321da177e4SLinus Torvalds module_init(dm_init);
32331da177e4SLinus Torvalds module_exit(dm_exit);
32341da177e4SLinus Torvalds
32351da177e4SLinus Torvalds module_param(major, uint, 0);
32361da177e4SLinus Torvalds MODULE_PARM_DESC(major, "The major number of the device mapper");
3237f4790826SMike Snitzer
3238e8603136SMike Snitzer module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
3239e8603136SMike Snitzer MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
3240e8603136SMike Snitzer
3241115485e8SMike Snitzer module_param(dm_numa_node, int, S_IRUGO | S_IWUSR);
3242115485e8SMike Snitzer MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");
3243115485e8SMike Snitzer
3244a666e5c0SMikulas Patocka module_param(swap_bios, int, S_IRUGO | S_IWUSR);
3245a666e5c0SMikulas Patocka MODULE_PARM_DESC(swap_bios, "Maximum allowed inflight swap IOs");
32471da177e4SLinus Torvalds MODULE_DESCRIPTION(DM_NAME " driver");
32481da177e4SLinus Torvalds MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
32491da177e4SLinus Torvalds MODULE_LICENSE("GPL");
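/*
 * Editor's note (illustrative, not in the original source): parameters
 * declared with S_IWUSR above are writable at runtime, e.g.
 *
 *	echo 32 > /sys/module/dm_mod/parameters/reserved_bio_based_ios
 *
 * while "major" (permissions 0) can only be given at load time, e.g.
 * "modprobe dm-mod major=240".
 */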