/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"
#include "dm-uevent.h"
#include "dm-ima.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/uio.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/pr.h>
#include <linux/refcount.h>
#include <linux/part_stat.h>
#include <linux/blk-crypto.h>
#include <linux/blk-crypto-profile.h>

#define DM_MSG_PREFIX "core"

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

/*
 * For a REQ_POLLED fs bio, this flag is set if the mapped underlying dm_io
 * structures are linked into one list, reusing bio->bi_private as the list
 * head. Before ending the fs bio, its ->bi_private is restored.
 */
#define REQ_DM_POLL_LIST REQ_DRV

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_IDR(_minor_idr);

static DEFINE_SPINLOCK(_minor_lock);

static void do_deferred_remove(struct work_struct *w);

static DECLARE_WORK(deferred_remove_work, do_deferred_remove);

static struct workqueue_struct *deferred_remove_workqueue;

atomic_t dm_global_event_nr = ATOMIC_INIT(0);
DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);

void dm_issue_global_event(void)
{
	atomic_inc(&dm_global_event_nr);
	wake_up(&dm_global_eventq);
}

/*
 * One of these is allocated (on-stack) per original bio.
 */
struct clone_info {
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	unsigned sector_count;
	bool submit_as_polled;
};

#define DM_TARGET_IO_BIO_OFFSET (offsetof(struct dm_target_io, clone))
#define DM_IO_BIO_OFFSET \
	(offsetof(struct dm_target_io, clone) + offsetof(struct dm_io, tio))

static inline struct dm_target_io *clone_to_tio(struct bio *clone)
{
	return container_of(clone, struct dm_target_io, clone);
}

void *dm_per_bio_data(struct bio *bio, size_t data_size)
{
	if (!dm_tio_flagged(clone_to_tio(bio), DM_TIO_INSIDE_DM_IO))
		return (char *)bio - DM_TARGET_IO_BIO_OFFSET - data_size;
	return (char *)bio - DM_IO_BIO_OFFSET - data_size;
}
EXPORT_SYMBOL_GPL(dm_per_bio_data);

struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
{
	struct dm_io *io = (struct dm_io *)((char *)data + data_size);
	if (io->magic == DM_IO_MAGIC)
		return (struct bio *)((char *)io + DM_IO_BIO_OFFSET);
	BUG_ON(io->magic != DM_TIO_MAGIC);
	return (struct bio *)((char *)io + DM_TARGET_IO_BIO_OFFSET);
}
EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data);

unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
{
	return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
}
EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);
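/*
 * Usage sketch (illustrative, not part of this file): a target whose ctr
 * set ti->per_io_data_size = sizeof(struct my_ctx) can convert between the
 * clone bio passed to its ->map method and its private per-bio context;
 * "struct my_ctx" is a hypothetical target-defined type.
 *
 *	struct my_ctx *ctx = dm_per_bio_data(bio, sizeof(struct my_ctx));
 *	struct bio *clone = dm_bio_from_per_bio_data(ctx, sizeof(struct my_ctx));
 */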
#define MINOR_ALLOCED ((void *)-1)

#define DM_NUMA_NODE NUMA_NO_NODE
static int dm_numa_node = DM_NUMA_NODE;

#define DEFAULT_SWAP_BIOS	(8 * 1048576 / PAGE_SIZE)
static int swap_bios = DEFAULT_SWAP_BIOS;
static int get_swap_bios(void)
{
	int latch = READ_ONCE(swap_bios);
	if (unlikely(latch <= 0))
		latch = DEFAULT_SWAP_BIOS;
	return latch;
}

/*
 * For mempool pre-allocation at table load time.
 */
struct dm_md_mempools {
	struct bio_set bs;
	struct bio_set io_bs;
};

struct table_device {
	struct list_head list;
	refcount_t count;
	struct dm_dev dm_dev;
};

/*
 * Reserved IO count for bio-based DM's mempools, set by the user.
 */
#define RESERVED_BIO_BASED_IOS		16
static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;

static int __dm_get_module_param_int(int *module_param, int min, int max)
{
	int param = READ_ONCE(*module_param);
	int modified_param = 0;
	bool modified = true;

	if (param < min)
		modified_param = min;
	else if (param > max)
		modified_param = max;
	else
		modified = false;

	if (modified) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned __dm_get_module_param(unsigned *module_param,
			       unsigned def, unsigned max)
{
	unsigned param = READ_ONCE(*module_param);
	unsigned modified_param = 0;

	if (!param)
		modified_param = def;
	else if (param > max)
		modified_param = max;

	if (modified_param) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}
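/*
 * Both helpers above clamp a writable module parameter each time it is
 * read, so a racing sysfs write can never be observed out of range. For
 * example, assuming reserved_bio_based_ios is registered via module_param()
 * as a dm_mod parameter (as done later in this file), it can be tuned with
 * dm_mod.reserved_bio_based_ios=<N> on the kernel command line or via
 * /sys/module/dm_mod/parameters/reserved_bio_based_ios, while readers go
 * through the clamped accessor below:
 *
 *	unsigned min_ios = dm_get_reserved_bio_based_ios();
 */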
unsigned dm_get_reserved_bio_based_ios(void)
{
	return __dm_get_module_param(&reserved_bio_based_ios,
				     RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);

static unsigned dm_get_numa_node(void)
{
	return __dm_get_module_param_int(&dm_numa_node,
					 DM_NUMA_NODE, num_online_nodes() - 1);
}

static int __init local_init(void)
{
	int r;

	r = dm_uevent_init();
	if (r)
		return r;

	deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
	if (!deferred_remove_workqueue) {
		r = -ENOMEM;
		goto out_uevent_exit;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_free_workqueue;

	if (!_major)
		_major = r;

	return 0;

out_free_workqueue:
	destroy_workqueue(deferred_remove_workqueue);
out_uevent_exit:
	dm_uevent_exit();

	return r;
}

static void local_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(deferred_remove_workqueue);

	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_io_init,
	dm_kcopyd_init,
	dm_interface_init,
	dm_statistics_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_io_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
	dm_statistics_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);
	int r, i;

#if (IS_ENABLED(CONFIG_IMA) && !IS_ENABLED(CONFIG_IMA_DISABLE_HTABLE))
	DMWARN("CONFIG_IMA_DISABLE_HTABLE is disabled."
	       " Duplicate IMA measurements will not be recorded in the IMA log.");
#endif

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;
bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();

	/*
	 * Should be empty by this point.
	 */
	idr_destroy(&_minor_idr);
}

/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{
	return test_bit(DMF_DELETING, &md->flags);
}

static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);
out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}

static void dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = disk->private_data;
	if (WARN_ON(!md))
		goto out;

	if (atomic_dec_and_test(&md->open_count) &&
	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
		queue_work(deferred_remove_workqueue, &deferred_remove_work);

	dm_put(md);
out:
	spin_unlock(&_minor_lock);
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md)) {
		r = -EBUSY;
		if (mark_deferred)
			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
		r = -EEXIST;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

int dm_cancel_deferred_remove(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (test_bit(DMF_DELETING, &md->flags))
		r = -EBUSY;
	else
		clear_bit(DMF_DEFERRED_REMOVE, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static void do_deferred_remove(struct work_struct *w)
{
	dm_deferred_remove();
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
			    struct block_device **bdev)
{
	struct dm_target *tgt;
	struct dm_table *map;
	int r;

retry:
	r = -ENOTTY;
	map = dm_get_live_table(md, srcu_idx);
	if (!map || !dm_table_get_size(map))
		return r;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		return r;

	tgt = dm_table_get_target(map, 0);
	if (!tgt->type->prepare_ioctl)
		return r;

	if (dm_suspended_md(md))
		return -EAGAIN;

	r = tgt->type->prepare_ioctl(tgt, bdev);
	if (r == -ENOTCONN && !fatal_signal_pending(current)) {
		dm_put_live_table(md, *srcu_idx);
		msleep(10);
		goto retry;
	}

	return r;
}

static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
{
	dm_put_live_table(md, srcu_idx);
}

static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	if (r > 0) {
		/*
		 * Target determined this ioctl is being issued against a
		 * subset of the parent bdev; require extra privileges.
		 */
		if (!capable(CAP_SYS_RAWIO)) {
			DMDEBUG_LIMIT(
				"%s: sending ioctl %x to DM device without required privilege.",
				current->comm, cmd);
			r = -ENOIOCTLCMD;
			goto out;
		}
	}

	if (!bdev->bd_disk->fops->ioctl)
		r = -ENOTTY;
	else
		r = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}

u64 dm_start_time_ns_from_clone(struct bio *bio)
{
	return jiffies_to_nsecs(clone_to_tio(bio)->io->start_time);
}
EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone);
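/*
 * End-of-IO sketch (hypothetical, not from this file): a target's ->end_io
 * can use the helper above to compute per-bio latency. Both values below
 * derive from jiffies, so the resolution is limited to the clock tick:
 *
 *	u64 start_ns = dm_start_time_ns_from_clone(bio);
 *	u64 elapsed_ns = jiffies_to_nsecs(jiffies) - start_ns;
 */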
static bool bio_is_flush_with_data(struct bio *bio)
{
	return ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size);
}

static void dm_io_acct(bool end, struct mapped_device *md, struct bio *bio,
		       unsigned long start_time, struct dm_stats_aux *stats_aux)
{
	bool is_flush_with_data;
	unsigned int bi_size;

	/* If REQ_PREFLUSH is set, save any payload but do not account it */
	is_flush_with_data = bio_is_flush_with_data(bio);
	if (is_flush_with_data) {
		bi_size = bio->bi_iter.bi_size;
		bio->bi_iter.bi_size = 0;
	}

	if (!end)
		bio_start_io_acct_time(bio, start_time);
	else
		bio_end_io_acct(bio, start_time);

	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio_data_dir(bio),
				    bio->bi_iter.bi_sector, bio_sectors(bio),
				    end, start_time, stats_aux);

	/* Restore bio's payload so it does get accounted upon requeue */
	if (is_flush_with_data)
		bio->bi_iter.bi_size = bi_size;
}

static void __dm_start_io_acct(struct dm_io *io, struct bio *bio)
{
	dm_io_acct(false, io->md, bio, io->start_time, &io->stats_aux);
}

static void dm_start_io_acct(struct dm_io *io, struct bio *clone)
{
	/* Must account IO to DM device in terms of orig_bio */
	struct bio *bio = io->orig_bio;

	/*
	 * Ensure IO accounting is only ever started once.
	 * Expect no possibility for race unless DM_TIO_IS_DUPLICATE_BIO.
	 */
	if (!clone ||
	    likely(!dm_tio_flagged(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO))) {
		if (WARN_ON_ONCE(dm_io_flagged(io, DM_IO_ACCOUNTED)))
			return;
		dm_io_set_flag(io, DM_IO_ACCOUNTED);
	} else {
		unsigned long flags;
		if (dm_io_flagged(io, DM_IO_ACCOUNTED))
			return;
		/* Can afford locking given DM_TIO_IS_DUPLICATE_BIO */
		spin_lock_irqsave(&io->lock, flags);
		dm_io_set_flag(io, DM_IO_ACCOUNTED);
		spin_unlock_irqrestore(&io->lock, flags);
	}

	__dm_start_io_acct(io, bio);
}

static void dm_end_io_acct(struct dm_io *io, struct bio *bio)
{
	dm_io_acct(true, io->md, bio, io->start_time, &io->stats_aux);
}

static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
{
	struct dm_io *io;
	struct dm_target_io *tio;
	struct bio *clone;

	clone = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO, &md->io_bs);

	tio = clone_to_tio(clone);
	tio->flags = 0;
	dm_tio_set_flag(tio, DM_TIO_INSIDE_DM_IO);
	tio->io = NULL;

	io = container_of(tio, struct dm_io, tio);
	io->magic = DM_IO_MAGIC;
	io->status = 0;
	atomic_set(&io->io_count, 1);
	this_cpu_inc(*md->pending_io);
	io->orig_bio = NULL;
	io->md = md;
	io->map_task = current;
	spin_lock_init(&io->lock);
	io->start_time = jiffies;
	io->flags = 0;

	dm_stats_record_start(&md->stats, &io->stats_aux);

	return io;
}

static void free_io(struct dm_io *io)
{
	bio_put(&io->tio.clone);
}

static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti,
			     unsigned target_bio_nr, unsigned *len, gfp_t gfp_mask)
{
	struct dm_target_io *tio;
	struct bio *clone;

	if (!ci->io->tio.io) {
		/* the dm_target_io embedded in ci->io is available */
		tio = &ci->io->tio;
		/* alloc_io() already initialized embedded clone */
		clone = &tio->clone;
	} else {
		clone = bio_alloc_clone(ci->bio->bi_bdev, ci->bio,
					gfp_mask, &ci->io->md->bs);
		if (!clone)
			return NULL;

		/* REQ_DM_POLL_LIST shouldn't be inherited */
		clone->bi_opf &= ~REQ_DM_POLL_LIST;

		tio = clone_to_tio(clone);
		tio->flags = 0; /* also clears DM_TIO_INSIDE_DM_IO */
	}

	tio->magic = DM_TIO_MAGIC;
	tio->io = ci->io;
	tio->ti = ti;
	tio->target_bio_nr = target_bio_nr;
	tio->len_ptr = len;
	tio->old_sector = 0;

	if (len) {
		clone->bi_iter.bi_size = to_bytes(*len);
		if (bio_integrity(clone))
			bio_integrity_trim(clone);
	}

	return clone;
}

static void free_tio(struct bio *clone)
{
	if (dm_tio_flagged(clone_to_tio(clone), DM_TIO_INSIDE_DM_IO))
		return;
	bio_put(clone);
}

/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&md->deferred_lock, flags);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irqrestore(&md->deferred_lock, flags);
	queue_work(md->wq, &md->work);
}

/*
 * Everyone (including functions in this file) should use this function to
 * access the md->map field, and make sure they call dm_put_live_table()
 * when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
{
	*srcu_idx = srcu_read_lock(&md->io_barrier);

	return srcu_dereference(md->map, &md->io_barrier);
}

void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
{
	srcu_read_unlock(&md->io_barrier, srcu_idx);
}

void dm_sync_table(struct mapped_device *md)
{
	synchronize_srcu(&md->io_barrier);
	synchronize_rcu_expedited();
}

/*
 * A fast alternative to dm_get_live_table/dm_put_live_table.
 * The caller must not block between these two functions.
 */
static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
{
	rcu_read_lock();
	return rcu_dereference(md->map);
}

static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
{
	rcu_read_unlock();
}
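/*
 * Typical reader pattern (sketch): every consumer of md->map brackets its
 * use with the SRCU pair above, for example:
 *
 *	int srcu_idx;
 *	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 *
 *	if (map) {
 *		... look up and use targets; sleeping is allowed ...
 *	}
 *	dm_put_live_table(md, srcu_idx);
 *
 * The _fast variants just above use plain RCU instead of SRCU, so callers
 * of those must not block inside the critical section.
 */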
static char *_dm_claim_ptr = "I belong to device-mapper";

/*
 * Open a table device so we can use it as a map destination.
 */
static int open_table_device(struct table_device *td, dev_t dev,
			     struct mapped_device *md)
{
	struct block_device *bdev;
	u64 part_off;
	int r;

	BUG_ON(td->dm_dev.bdev);

	bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _dm_claim_ptr);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	r = bd_link_disk_holder(bdev, dm_disk(md));
	if (r) {
		blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
		return r;
	}

	td->dm_dev.bdev = bdev;
	td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev, &part_off);
	return 0;
}

/*
 * Close a table device that we've been using.
 */
static void close_table_device(struct table_device *td, struct mapped_device *md)
{
	if (!td->dm_dev.bdev)
		return;

	bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
	blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
	put_dax(td->dm_dev.dax_dev);
	td->dm_dev.bdev = NULL;
	td->dm_dev.dax_dev = NULL;
}

static struct table_device *find_table_device(struct list_head *l, dev_t dev,
					      fmode_t mode)
{
	struct table_device *td;

	list_for_each_entry(td, l, list)
		if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
			return td;

	return NULL;
}

int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
			struct dm_dev **result)
{
	int r;
	struct table_device *td;

	mutex_lock(&md->table_devices_lock);
	td = find_table_device(&md->table_devices, dev, mode);
	if (!td) {
		td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
		if (!td) {
			mutex_unlock(&md->table_devices_lock);
			return -ENOMEM;
		}

		td->dm_dev.mode = mode;
		td->dm_dev.bdev = NULL;

		if ((r = open_table_device(td, dev, md))) {
			mutex_unlock(&md->table_devices_lock);
			kfree(td);
			return r;
		}

		format_dev_t(td->dm_dev.name, dev);

		refcount_set(&td->count, 1);
		list_add(&td->list, &md->table_devices);
	} else {
		refcount_inc(&td->count);
	}
	mutex_unlock(&md->table_devices_lock);

	*result = &td->dm_dev;
	return 0;
}

void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
{
	struct table_device *td = container_of(d, struct table_device, dm_dev);

	mutex_lock(&md->table_devices_lock);
	if (refcount_dec_and_test(&td->count)) {
		close_table_device(td, md);
		list_del(&td->list);
		kfree(td);
	}
	mutex_unlock(&md->table_devices_lock);
}

static void free_table_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct table_device *td = list_entry(tmp, struct table_device, list);

		DMWARN("dm_destroy: %s still exists with %d references",
		       td->dm_dev.name, refcount_read(&td->count));
		kfree(td);
	}
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

static void dm_io_complete(struct dm_io *io)
{
	blk_status_t io_error;
	struct mapped_device *md = io->md;
	struct bio *bio = io->orig_bio;

	if (io->status == BLK_STS_DM_REQUEUE) {
		unsigned long flags;
		/*
		 * Target requested pushing back the I/O.
		 */
		spin_lock_irqsave(&md->deferred_lock, flags);
		if (__noflush_suspending(md) &&
		    !WARN_ON_ONCE(dm_is_zone_write(md, bio))) {
			/* NOTE early return due to BLK_STS_DM_REQUEUE below */
			bio_list_add_head(&md->deferred, bio);
		} else {
			/*
			 * noflush suspend was interrupted or this is
			 * a write to a zoned target.
			 */
			io->status = BLK_STS_IOERR;
		}
		spin_unlock_irqrestore(&md->deferred_lock, flags);
	}

	io_error = io->status;
	if (dm_io_flagged(io, DM_IO_ACCOUNTED))
		dm_end_io_acct(io, bio);
	else if (!io_error) {
		/*
		 * Must handle target that DM_MAPIO_SUBMITTED only to
		 * then bio_endio() rather than dm_submit_bio_remap()
		 */
		__dm_start_io_acct(io, bio);
		dm_end_io_acct(io, bio);
	}
	free_io(io);
	smp_wmb();
	this_cpu_dec(*md->pending_io);

	/* nudge anyone waiting on suspend queue */
	if (unlikely(wq_has_sleeper(&md->wait)))
		wake_up(&md->wait);

	if (io_error == BLK_STS_DM_REQUEUE || io_error == BLK_STS_AGAIN) {
		if (bio->bi_opf & REQ_POLLED) {
			/*
			 * Upper layer won't help us poll split bio (io->orig_bio
			 * may only reflect a subset of the pre-split original)
			 * so clear REQ_POLLED in case of requeue.
			 */
			bio->bi_opf &= ~REQ_POLLED;
			if (io_error == BLK_STS_AGAIN) {
				/* io_uring doesn't handle BLK_STS_AGAIN (yet) */
				queue_io(md, bio);
			}
		}
		return;
	}

	if (bio_is_flush_with_data(bio)) {
		/*
		 * Preflush done for flush with data, reissue
		 * without REQ_PREFLUSH.
		 */
		bio->bi_opf &= ~REQ_PREFLUSH;
		queue_io(md, bio);
	} else {
		/* done with normal IO or empty flush */
		if (io_error)
			bio->bi_status = io_error;
		bio_endio(bio);
	}
}

static inline bool dm_tio_is_normal(struct dm_target_io *tio)
{
	return (dm_tio_flagged(tio, DM_TIO_INSIDE_DM_IO) &&
		!dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO));
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
void dm_io_dec_pending(struct dm_io *io, blk_status_t error)
{
	/* Push-back supersedes any I/O errors */
	if (unlikely(error)) {
		unsigned long flags;
		spin_lock_irqsave(&io->lock, flags);
		if (!(io->status == BLK_STS_DM_REQUEUE &&
		      __noflush_suspending(io->md)))
			io->status = error;
		spin_unlock_irqrestore(&io->lock, flags);
	}

	if (atomic_dec_and_test(&io->io_count))
		dm_io_complete(io);
}

void disable_discard(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support DISCARD, disable it */
	limits->max_discard_sectors = 0;
	blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue);
}

void disable_write_zeroes(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE ZEROES, disable it */
	limits->max_write_zeroes_sectors = 0;
}

static bool swap_bios_limit(struct dm_target *ti, struct bio *bio)
{
	return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios);
}

static void clone_endio(struct bio *bio)
{
	blk_status_t error = bio->bi_status;
	struct dm_target_io *tio = clone_to_tio(bio);
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;
	struct request_queue *q = bio->bi_bdev->bd_disk->queue;

	if (unlikely(error == BLK_STS_TARGET)) {
		if (bio_op(bio) == REQ_OP_DISCARD &&
		    !q->limits.max_discard_sectors)
			disable_discard(md);
		else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
			 !q->limits.max_write_zeroes_sectors)
			disable_write_zeroes(md);
	}

	if (blk_queue_is_zoned(q))
		dm_zone_endio(io, bio);

	if (endio) {
		int r = endio(tio->ti, bio, &error);
		switch (r) {
		case DM_ENDIO_REQUEUE:
			/*
			 * Requeuing writes to a sequential zone of a zoned
			 * target will break the sequential write pattern:
			 * fail such IO.
			 */
			if (WARN_ON_ONCE(dm_is_zone_write(md, bio)))
				error = BLK_STS_IOERR;
			else
				error = BLK_STS_DM_REQUEUE;
			fallthrough;
		case DM_ENDIO_DONE:
			break;
		case DM_ENDIO_INCOMPLETE:
			/* The target will handle the io */
			return;
		default:
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	if (unlikely(swap_bios_limit(tio->ti, bio))) {
		struct mapped_device *md = io->md;
		up(&md->swap_bios_semaphore);
	}

	free_tio(bio);
	dm_io_dec_pending(io, error);
}

/*
 * Return maximum size of I/O possible at the supplied sector up to the current
 * target boundary.
 */
static inline sector_t max_io_len_target_boundary(struct dm_target *ti,
						  sector_t target_offset)
{
	return ti->len - target_offset;
}

static sector_t max_io_len(struct dm_target *ti, sector_t sector)
{
	sector_t target_offset = dm_target_offset(ti, sector);
	sector_t len = max_io_len_target_boundary(ti, target_offset);
	sector_t max_len;

	/*
	 * Does the target need to split IO even further?
	 * - varied (per target) IO splitting is a tenet of DM; this
	 *   explains why stacked chunk_sectors based splitting via
	 *   blk_max_size_offset() isn't possible here. So pass in
	 *   ti->max_io_len to override stacked chunk_sectors.
	 */
	if (ti->max_io_len) {
		max_len = blk_max_size_offset(ti->table->md->queue,
					      target_offset, ti->max_io_len);
		if (len > max_len)
			len = max_len;
	}

	return len;
}

int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
{
	if (len > UINT_MAX) {
		DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
		      (unsigned long long)len, UINT_MAX);
		ti->error = "Maximum size of target IO is too large";
		return -EINVAL;
	}

	ti->max_io_len = (uint32_t) len;

	return 0;
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
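/*
 * Illustrative use (sketch): a striping-style target would call this from
 * its constructor so that DM core never hands its ->map method an IO that
 * crosses a chunk boundary; "chunk_size" is a hypothetical target parameter
 * expressed in sectors.
 *
 *	r = dm_set_target_max_io_len(ti, chunk_size);
 *	if (r)
 *		return r;
 */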
1125cdf6cdcdSVivek Goyal { 1126cdf6cdcdSVivek Goyal struct mapped_device *md = dax_get_private(dax_dev); 1127cdf6cdcdSVivek Goyal sector_t sector = pgoff * PAGE_SECTORS; 1128cdf6cdcdSVivek Goyal struct dm_target *ti; 1129cdf6cdcdSVivek Goyal int ret = -EIO; 1130cdf6cdcdSVivek Goyal int srcu_idx; 1131cdf6cdcdSVivek Goyal 1132cdf6cdcdSVivek Goyal ti = dm_dax_get_live_target(md, sector, &srcu_idx); 1133cdf6cdcdSVivek Goyal 1134cdf6cdcdSVivek Goyal if (!ti) 1135cdf6cdcdSVivek Goyal goto out; 1136cdf6cdcdSVivek Goyal if (WARN_ON(!ti->type->dax_zero_page_range)) { 1137cdf6cdcdSVivek Goyal /* 1138cdf6cdcdSVivek Goyal * ->zero_page_range() is mandatory dax operation. If we are 1139cdf6cdcdSVivek Goyal * here, something is wrong. 1140cdf6cdcdSVivek Goyal */ 1141cdf6cdcdSVivek Goyal goto out; 1142cdf6cdcdSVivek Goyal } 1143cdf6cdcdSVivek Goyal ret = ti->type->dax_zero_page_range(ti, pgoff, nr_pages); 1144cdf6cdcdSVivek Goyal out: 1145cdf6cdcdSVivek Goyal dm_put_live_table(md, srcu_idx); 1146cdf6cdcdSVivek Goyal 1147cdf6cdcdSVivek Goyal return ret; 1148cdf6cdcdSVivek Goyal } 1149cdf6cdcdSVivek Goyal 1150*047218ecSJane Chu static size_t dm_dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff, 1151*047218ecSJane Chu void *addr, size_t bytes, struct iov_iter *i) 1152*047218ecSJane Chu { 1153*047218ecSJane Chu struct mapped_device *md = dax_get_private(dax_dev); 1154*047218ecSJane Chu sector_t sector = pgoff * PAGE_SECTORS; 1155*047218ecSJane Chu struct dm_target *ti; 1156*047218ecSJane Chu int srcu_idx; 1157*047218ecSJane Chu long ret = 0; 1158*047218ecSJane Chu 1159*047218ecSJane Chu ti = dm_dax_get_live_target(md, sector, &srcu_idx); 1160*047218ecSJane Chu if (!ti || !ti->type->dax_recovery_write) 1161*047218ecSJane Chu goto out; 1162*047218ecSJane Chu 1163*047218ecSJane Chu ret = ti->type->dax_recovery_write(ti, pgoff, addr, bytes, i); 1164*047218ecSJane Chu out: 1165*047218ecSJane Chu dm_put_live_table(md, srcu_idx); 1166*047218ecSJane Chu return ret; 1167*047218ecSJane Chu } 1168*047218ecSJane Chu 11691dd40c3eSMikulas Patocka /* 11701dd40c3eSMikulas Patocka * A target may call dm_accept_partial_bio only from the map routine. It is 11716842d264SDamien Le Moal * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_* zone management 1172e6fc9f62SMike Snitzer * operations, REQ_OP_ZONE_APPEND (zone append writes) and any bio serviced by 1173e6fc9f62SMike Snitzer * __send_duplicate_bios(). 11741dd40c3eSMikulas Patocka * 11751dd40c3eSMikulas Patocka * dm_accept_partial_bio informs the dm that the target only wants to process 11761dd40c3eSMikulas Patocka * additional n_sectors sectors of the bio and the rest of the data should be 11771dd40c3eSMikulas Patocka * sent in a next bio. 11781dd40c3eSMikulas Patocka * 11791dd40c3eSMikulas Patocka * A diagram that explains the arithmetics: 11801dd40c3eSMikulas Patocka * +--------------------+---------------+-------+ 11811dd40c3eSMikulas Patocka * | 1 | 2 | 3 | 11821dd40c3eSMikulas Patocka * +--------------------+---------------+-------+ 11831dd40c3eSMikulas Patocka * 11841dd40c3eSMikulas Patocka * <-------------- *tio->len_ptr ---------------> 11851dd40c3eSMikulas Patocka * <------- bi_size -------> 11861dd40c3eSMikulas Patocka * <-- n_sectors --> 11871dd40c3eSMikulas Patocka * 11881dd40c3eSMikulas Patocka * Region 1 was already iterated over with bio_advance or similar function. 
11891dd40c3eSMikulas Patocka  * (it may be empty if the target doesn't use bio_advance).
11901dd40c3eSMikulas Patocka  * Region 2 is the remaining bio size that the target wants to process.
11911dd40c3eSMikulas Patocka  * (it may be empty if region 1 is non-empty, although there is no reason
11921dd40c3eSMikulas Patocka  * to make it empty)
11931dd40c3eSMikulas Patocka  * The target requires that region 3 be sent in the next bio.
11941dd40c3eSMikulas Patocka  *
11951dd40c3eSMikulas Patocka  * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
11961dd40c3eSMikulas Patocka  * the partially processed part (the sum of regions 1+2) must be the same for all
11971dd40c3eSMikulas Patocka  * copies of the bio.
11981dd40c3eSMikulas Patocka  */
11991dd40c3eSMikulas Patocka void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
12001dd40c3eSMikulas Patocka {
12016c23f0bdSChristoph Hellwig 	struct dm_target_io *tio = clone_to_tio(bio);
12021dd40c3eSMikulas Patocka 	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
12036842d264SDamien Le Moal 
1204655f3aadSMike Snitzer 	BUG_ON(dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO));
12056842d264SDamien Le Moal 	BUG_ON(op_is_zone_mgmt(bio_op(bio)));
12066842d264SDamien Le Moal 	BUG_ON(bio_op(bio) == REQ_OP_ZONE_APPEND);
12071dd40c3eSMikulas Patocka 	BUG_ON(bi_size > *tio->len_ptr);
12081dd40c3eSMikulas Patocka 	BUG_ON(n_sectors > bi_size);
12096842d264SDamien Le Moal 
12101dd40c3eSMikulas Patocka 	*tio->len_ptr -= bi_size - n_sectors;
12111dd40c3eSMikulas Patocka 	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
12121dd40c3eSMikulas Patocka }
12131dd40c3eSMikulas Patocka EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
12141dd40c3eSMikulas Patocka 
12150fbb4d93SMike Snitzer static inline void __dm_submit_bio_remap(struct bio *clone,
12160fbb4d93SMike Snitzer 					 dev_t dev, sector_t old_sector)
12170fbb4d93SMike Snitzer {
12180fbb4d93SMike Snitzer 	trace_block_bio_remap(clone, dev, old_sector);
12190fbb4d93SMike Snitzer 	submit_bio_noacct(clone);
12200fbb4d93SMike Snitzer }
12210fbb4d93SMike Snitzer 
12220fbb4d93SMike Snitzer /*
12230fbb4d93SMike Snitzer  * @clone: clone bio that DM core passed to target's .map function
12240fbb4d93SMike Snitzer  * @tgt_clone: clone of @clone bio that target needs submitted
12250fbb4d93SMike Snitzer  *
12260fbb4d93SMike Snitzer  * Targets should use this interface to submit bios they take
12270fbb4d93SMike Snitzer  * ownership of when returning DM_MAPIO_SUBMITTED.
12280fbb4d93SMike Snitzer  *
12290fbb4d93SMike Snitzer  * Targets should also set ti->accounts_remapped_io.
12300fbb4d93SMike Snitzer  */
1231b7f8dff0SMike Snitzer void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone)
12320fbb4d93SMike Snitzer {
12330fbb4d93SMike Snitzer 	struct dm_target_io *tio = clone_to_tio(clone);
12340fbb4d93SMike Snitzer 	struct dm_io *io = tio->io;
12350fbb4d93SMike Snitzer 
12360a8e9599SMike Snitzer 	WARN_ON_ONCE(!tio->ti->accounts_remapped_io);
12370a8e9599SMike Snitzer 
12380fbb4d93SMike Snitzer 	/* establish bio that will get submitted */
12390fbb4d93SMike Snitzer 	if (!tgt_clone)
12400fbb4d93SMike Snitzer 		tgt_clone = clone;
12410fbb4d93SMike Snitzer 
12420fbb4d93SMike Snitzer 	/*
12430fbb4d93SMike Snitzer 	 * Account io->orig_bio to DM dev on behalf of target
12440fbb4d93SMike Snitzer 	 * that took ownership of IO with DM_MAPIO_SUBMITTED.
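	 *
	 * If we are still inside the target's ->map function, only flag the
	 * io (DM_IO_START_ACCT) so dm_split_and_process_bio() starts the
	 * accounting once any split has settled; otherwise this is a
	 * deferred submission from a target-managed thread, so wait for
	 * ->orig_bio to be published and start accounting here.
	 *
	 * Illustrative target-side usage (a sketch, not code from this
	 * file; my_map(), my_bs and my_dev are invented names, and the
	 * target's ctr is assumed to have set ti->accounts_remapped_io):
	 *
	 *	static int my_map(struct dm_target *ti, struct bio *bio)
	 *	{
	 *		struct bio *b = bio_alloc_clone(bio->bi_bdev, bio,
	 *						GFP_NOIO, &my_bs);
	 *		bio_set_dev(b, my_dev->bdev);
	 *		dm_submit_bio_remap(bio, b);
	 *		return DM_MAPIO_SUBMITTED;
	 *	}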
12450fbb4d93SMike Snitzer */ 1246b7f8dff0SMike Snitzer if (io->map_task == current) { 12470fbb4d93SMike Snitzer /* Still in target's map function */ 124882f6cdccSMike Snitzer dm_io_set_flag(io, DM_IO_START_ACCT); 12490fbb4d93SMike Snitzer } else { 12500fbb4d93SMike Snitzer /* 12510fbb4d93SMike Snitzer * Called by another thread, managed by DM target, 12520fbb4d93SMike Snitzer * wait for dm_split_and_process_bio() to store 12530fbb4d93SMike Snitzer * io->orig_bio 12540fbb4d93SMike Snitzer */ 12550fbb4d93SMike Snitzer while (unlikely(!smp_load_acquire(&io->orig_bio))) 12560fbb4d93SMike Snitzer msleep(1); 12570fbb4d93SMike Snitzer dm_start_io_acct(io, clone); 12580fbb4d93SMike Snitzer } 12590fbb4d93SMike Snitzer 12600fbb4d93SMike Snitzer __dm_submit_bio_remap(tgt_clone, disk_devt(io->md->disk), 12610fbb4d93SMike Snitzer tio->old_sector); 12620fbb4d93SMike Snitzer } 12630fbb4d93SMike Snitzer EXPORT_SYMBOL_GPL(dm_submit_bio_remap); 12640fbb4d93SMike Snitzer 1265a666e5c0SMikulas Patocka static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch) 1266a666e5c0SMikulas Patocka { 1267a666e5c0SMikulas Patocka mutex_lock(&md->swap_bios_lock); 1268a666e5c0SMikulas Patocka while (latch < md->swap_bios) { 1269a666e5c0SMikulas Patocka cond_resched(); 1270a666e5c0SMikulas Patocka down(&md->swap_bios_semaphore); 1271a666e5c0SMikulas Patocka md->swap_bios--; 1272a666e5c0SMikulas Patocka } 1273a666e5c0SMikulas Patocka while (latch > md->swap_bios) { 1274a666e5c0SMikulas Patocka cond_resched(); 1275a666e5c0SMikulas Patocka up(&md->swap_bios_semaphore); 1276a666e5c0SMikulas Patocka md->swap_bios++; 1277a666e5c0SMikulas Patocka } 1278a666e5c0SMikulas Patocka mutex_unlock(&md->swap_bios_lock); 1279a666e5c0SMikulas Patocka } 1280a666e5c0SMikulas Patocka 12811561b396SChristoph Hellwig static void __map_bio(struct bio *clone) 12821da177e4SLinus Torvalds { 12831561b396SChristoph Hellwig struct dm_target_io *tio = clone_to_tio(clone); 12841da177e4SLinus Torvalds int r; 128564f52b0eSMike Snitzer struct dm_io *io = tio->io; 1286bd2a49b8SAlasdair G Kergon struct dm_target *ti = tio->ti; 12871da177e4SLinus Torvalds 12881da177e4SLinus Torvalds clone->bi_end_io = clone_endio; 12891da177e4SLinus Torvalds 12901da177e4SLinus Torvalds /* 12910fbb4d93SMike Snitzer * Map the clone. 12921da177e4SLinus Torvalds */ 1293e2118b3cSDamien Le Moal dm_io_inc_pending(io); 1294743598f0SMike Snitzer tio->old_sector = clone->bi_iter.bi_sector; 1295d67a5f4bSMikulas Patocka 1296a666e5c0SMikulas Patocka if (unlikely(swap_bios_limit(ti, clone))) { 1297a666e5c0SMikulas Patocka struct mapped_device *md = io->md; 1298a666e5c0SMikulas Patocka int latch = get_swap_bios(); 1299a666e5c0SMikulas Patocka if (unlikely(latch != md->swap_bios)) 1300a666e5c0SMikulas Patocka __set_swap_bios_limit(md, latch); 1301a666e5c0SMikulas Patocka down(&md->swap_bios_semaphore); 1302a666e5c0SMikulas Patocka } 1303a666e5c0SMikulas Patocka 1304bb37d772SDamien Le Moal /* 1305bb37d772SDamien Le Moal * Check if the IO needs a special mapping due to zone append emulation 1306bb37d772SDamien Le Moal * on zoned target. In this case, dm_zone_map_bio() calls the target 1307bb37d772SDamien Le Moal * map operation. 
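	 *
	 * Concretely (mirroring the branch below): if
	 * dm_emulate_zone_append(io->md) is true the clone is routed
	 * through dm_zone_map_bio(), which wraps the target's ->map with
	 * the zone append emulation; otherwise ti->type->map(ti, clone)
	 * is called directly.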
1308bb37d772SDamien Le Moal */ 1309bb37d772SDamien Le Moal if (dm_emulate_zone_append(io->md)) 1310bb37d772SDamien Le Moal r = dm_zone_map_bio(tio); 1311bb37d772SDamien Le Moal else 13127de3ee57SMikulas Patocka r = ti->type->map(ti, clone); 1313bb37d772SDamien Le Moal 1314846785e6SChristoph Hellwig switch (r) { 1315846785e6SChristoph Hellwig case DM_MAPIO_SUBMITTED: 13160fbb4d93SMike Snitzer /* target has assumed ownership of this io */ 13170fbb4d93SMike Snitzer if (!ti->accounts_remapped_io) 131882f6cdccSMike Snitzer dm_io_set_flag(io, DM_IO_START_ACCT); 1319846785e6SChristoph Hellwig break; 1320846785e6SChristoph Hellwig case DM_MAPIO_REMAPPED: 13210fbb4d93SMike Snitzer /* 13220fbb4d93SMike Snitzer * the bio has been remapped so dispatch it, but defer 13230fbb4d93SMike Snitzer * dm_start_io_acct() until after possible bio_split(). 13240fbb4d93SMike Snitzer */ 13250fbb4d93SMike Snitzer __dm_submit_bio_remap(clone, disk_devt(io->md->disk), 1326743598f0SMike Snitzer tio->old_sector); 132782f6cdccSMike Snitzer dm_io_set_flag(io, DM_IO_START_ACCT); 1328846785e6SChristoph Hellwig break; 1329846785e6SChristoph Hellwig case DM_MAPIO_KILL: 1330846785e6SChristoph Hellwig case DM_MAPIO_REQUEUE: 133190a2326eSMike Snitzer if (unlikely(swap_bios_limit(ti, clone))) 133290a2326eSMike Snitzer up(&io->md->swap_bios_semaphore); 13331d1068ceSChristoph Hellwig free_tio(clone); 133490a2326eSMike Snitzer if (r == DM_MAPIO_KILL) 133590a2326eSMike Snitzer dm_io_dec_pending(io, BLK_STS_IOERR); 133690a2326eSMike Snitzer else 1337e2118b3cSDamien Le Moal dm_io_dec_pending(io, BLK_STS_DM_REQUEUE); 1338846785e6SChristoph Hellwig break; 1339846785e6SChristoph Hellwig default: 134045cbcd79SKiyoshi Ueda DMWARN("unimplemented target map return value: %d", r); 134145cbcd79SKiyoshi Ueda BUG(); 13421da177e4SLinus Torvalds } 13431da177e4SLinus Torvalds } 13441da177e4SLinus Torvalds 1345318716ddSMike Snitzer static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci, 13467dd06a25SMike Snitzer struct dm_target *ti, unsigned num_bios) 1347f9ab94ceSMikulas Patocka { 13481d1068ceSChristoph Hellwig struct bio *bio; 1349318716ddSMike Snitzer int try; 1350dba14160SMikulas Patocka 1351318716ddSMike Snitzer for (try = 0; try < 2; try++) { 1352318716ddSMike Snitzer int bio_nr; 1353318716ddSMike Snitzer 1354318716ddSMike Snitzer if (try) 1355bc02cdbeSMike Snitzer mutex_lock(&ci->io->md->table_devices_lock); 1356318716ddSMike Snitzer for (bio_nr = 0; bio_nr < num_bios; bio_nr++) { 13577dd06a25SMike Snitzer bio = alloc_tio(ci, ti, bio_nr, NULL, 1358dc8e2021SChristoph Hellwig try ? 
GFP_NOIO : GFP_NOWAIT); 13591d1068ceSChristoph Hellwig if (!bio) 1360318716ddSMike Snitzer break; 1361318716ddSMike Snitzer 13621d1068ceSChristoph Hellwig bio_list_add(blist, bio); 1363318716ddSMike Snitzer } 1364318716ddSMike Snitzer if (try) 1365bc02cdbeSMike Snitzer mutex_unlock(&ci->io->md->table_devices_lock); 1366318716ddSMike Snitzer if (bio_nr == num_bios) 1367318716ddSMike Snitzer return; 1368318716ddSMike Snitzer 13696c23f0bdSChristoph Hellwig while ((bio = bio_list_pop(blist))) 13701d1068ceSChristoph Hellwig free_tio(bio); 1371318716ddSMike Snitzer } 1372318716ddSMike Snitzer } 1373f9ab94ceSMikulas Patocka 137414fe594dSAlasdair G Kergon static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti, 13751dd40c3eSMikulas Patocka unsigned num_bios, unsigned *len) 137606a426ceSMike Snitzer { 1377318716ddSMike Snitzer struct bio_list blist = BIO_EMPTY_LIST; 13788eabf5d0SChristoph Hellwig struct bio *clone; 137906a426ceSMike Snitzer 1380891fced6SChristoph Hellwig switch (num_bios) { 1381891fced6SChristoph Hellwig case 0: 1382891fced6SChristoph Hellwig break; 1383891fced6SChristoph Hellwig case 1: 1384891fced6SChristoph Hellwig clone = alloc_tio(ci, ti, 0, len, GFP_NOIO); 1385891fced6SChristoph Hellwig __map_bio(clone); 1386891fced6SChristoph Hellwig break; 1387891fced6SChristoph Hellwig default: 13887dd06a25SMike Snitzer /* dm_accept_partial_bio() is not supported with shared tio->len_ptr */ 13897dd06a25SMike Snitzer alloc_multiple_bios(&blist, ci, ti, num_bios); 13908eabf5d0SChristoph Hellwig while ((clone = bio_list_pop(&blist))) { 1391655f3aadSMike Snitzer dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO); 13921561b396SChristoph Hellwig __map_bio(clone); 1393f9ab94ceSMikulas Patocka } 1394891fced6SChristoph Hellwig break; 1395318716ddSMike Snitzer } 139606a426ceSMike Snitzer } 139706a426ceSMike Snitzer 1398332f2b1eSMike Snitzer static void __send_empty_flush(struct clone_info *ci) 1399f9ab94ceSMikulas Patocka { 140006a426ceSMike Snitzer unsigned target_nr = 0; 1401f9ab94ceSMikulas Patocka struct dm_target *ti; 1402828678b8SMike Snitzer struct bio flush_bio; 1403828678b8SMike Snitzer 1404828678b8SMike Snitzer /* 1405828678b8SMike Snitzer * Use an on-stack bio for this, it's safe since we don't 1406828678b8SMike Snitzer * need to reference it after submit. It's just used as 1407828678b8SMike Snitzer * the basis for the clone(s). 
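	 *
	 * For reference, the sequence below is: bio_init(&flush_bio) with
	 * REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC, point ci->bio at it, fan
	 * ti->num_flush_bios duplicates out to every target via
	 * __send_duplicate_bios(), then bio_uninit() it - no allocation
	 * and no bio_put() needed.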
1408828678b8SMike Snitzer */ 140949add496SChristoph Hellwig bio_init(&flush_bio, ci->io->md->disk->part0, NULL, 0, 141049add496SChristoph Hellwig REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC); 141147d95102SChristoph Hellwig 1412828678b8SMike Snitzer ci->bio = &flush_bio; 1413828678b8SMike Snitzer ci->sector_count = 0; 141492b914e2SShin'ichiro Kawasaki ci->io->tio.clone.bi_iter.bi_size = 0; 1415f9ab94ceSMikulas Patocka 1416f9ab94ceSMikulas Patocka while ((ti = dm_table_get_target(ci->map, target_nr++))) 14171dd40c3eSMikulas Patocka __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL); 1418828678b8SMike Snitzer 1419828678b8SMike Snitzer bio_uninit(ci->bio); 1420f9ab94ceSMikulas Patocka } 1421f9ab94ceSMikulas Patocka 1422e6fc9f62SMike Snitzer static void __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti, 142361697a6aSMike Snitzer unsigned num_bios) 14245ae89a87SMike Snitzer { 142551b86f9aSMichael Lass unsigned len; 14265ae89a87SMike Snitzer 14273720281dSMike Snitzer len = min_t(sector_t, ci->sector_count, 14283720281dSMike Snitzer max_io_len_target_boundary(ti, dm_target_offset(ti, ci->sector))); 142951b86f9aSMichael Lass 14307dd06a25SMike Snitzer __send_duplicate_bios(ci, ti, num_bios, &len); 14317dd06a25SMike Snitzer 1432a79245b3SMike Snitzer ci->sector += len; 14333d7f4562SMike Snitzer ci->sector_count -= len; 14345ae89a87SMike Snitzer } 14355ae89a87SMike Snitzer 1436568c73a3SMike Snitzer static bool is_abnormal_io(struct bio *bio) 1437568c73a3SMike Snitzer { 1438568c73a3SMike Snitzer bool r = false; 1439568c73a3SMike Snitzer 1440568c73a3SMike Snitzer switch (bio_op(bio)) { 1441568c73a3SMike Snitzer case REQ_OP_DISCARD: 1442568c73a3SMike Snitzer case REQ_OP_SECURE_ERASE: 1443568c73a3SMike Snitzer case REQ_OP_WRITE_ZEROES: 1444568c73a3SMike Snitzer r = true; 1445568c73a3SMike Snitzer break; 1446568c73a3SMike Snitzer } 1447568c73a3SMike Snitzer 1448568c73a3SMike Snitzer return r; 1449568c73a3SMike Snitzer } 1450568c73a3SMike Snitzer 14510519c71eSMike Snitzer static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti, 14520519c71eSMike Snitzer int *result) 14530519c71eSMike Snitzer { 14549679b5a7SMike Snitzer unsigned num_bios = 0; 14550519c71eSMike Snitzer 1456e6fc9f62SMike Snitzer switch (bio_op(ci->bio)) { 14579679b5a7SMike Snitzer case REQ_OP_DISCARD: 14589679b5a7SMike Snitzer num_bios = ti->num_discard_bios; 14599679b5a7SMike Snitzer break; 14609679b5a7SMike Snitzer case REQ_OP_SECURE_ERASE: 14619679b5a7SMike Snitzer num_bios = ti->num_secure_erase_bios; 14629679b5a7SMike Snitzer break; 14639679b5a7SMike Snitzer case REQ_OP_WRITE_ZEROES: 14649679b5a7SMike Snitzer num_bios = ti->num_write_zeroes_bios; 14659679b5a7SMike Snitzer break; 14669679b5a7SMike Snitzer default: 14670519c71eSMike Snitzer return false; 14689679b5a7SMike Snitzer } 14690519c71eSMike Snitzer 1470e6fc9f62SMike Snitzer /* 1471e6fc9f62SMike Snitzer * Even though the device advertised support for this type of 1472e6fc9f62SMike Snitzer * request, that does not mean every target supports it, and 1473e6fc9f62SMike Snitzer * reconfiguration might also have changed that since the 1474e6fc9f62SMike Snitzer * check was performed. 
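	 *
	 * Example (hypothetical scenario): a table reload may have swapped
	 * in a target that left num_discard_bios at zero even though the
	 * device once advertised discard support; a REQ_OP_DISCARD arriving
	 * here is then failed with -EOPNOTSUPP below instead of being mapped.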
1475e6fc9f62SMike Snitzer */ 1476e6fc9f62SMike Snitzer if (!num_bios) 1477e6fc9f62SMike Snitzer *result = -EOPNOTSUPP; 1478e6fc9f62SMike Snitzer else { 1479e6fc9f62SMike Snitzer __send_changing_extent_only(ci, ti, num_bios); 1480e6fc9f62SMike Snitzer *result = 0; 1481e6fc9f62SMike Snitzer } 14820519c71eSMike Snitzer return true; 14830519c71eSMike Snitzer } 14840519c71eSMike Snitzer 1485e4c93811SAlasdair G Kergon /* 1486b99fdcdcSMing Lei * Reuse ->bi_private as hlist head for storing all dm_io instances 1487b99fdcdcSMing Lei * associated with this bio, and this bio's bi_private needs to be 1488b99fdcdcSMing Lei * stored in dm_io->data before the reuse. 1489b99fdcdcSMing Lei * 1490b99fdcdcSMing Lei * bio->bi_private is owned by fs or upper layer, so block layer won't 1491b99fdcdcSMing Lei * touch it after splitting. Meantime it won't be changed by anyone after 1492b99fdcdcSMing Lei * bio is submitted. So this reuse is safe. 1493b99fdcdcSMing Lei */ 1494b99fdcdcSMing Lei static inline struct hlist_head *dm_get_bio_hlist_head(struct bio *bio) 1495b99fdcdcSMing Lei { 1496b99fdcdcSMing Lei return (struct hlist_head *)&bio->bi_private; 1497b99fdcdcSMing Lei } 1498b99fdcdcSMing Lei 1499b99fdcdcSMing Lei static void dm_queue_poll_io(struct bio *bio, struct dm_io *io) 1500b99fdcdcSMing Lei { 1501b99fdcdcSMing Lei struct hlist_head *head = dm_get_bio_hlist_head(bio); 1502b99fdcdcSMing Lei 1503b99fdcdcSMing Lei if (!(bio->bi_opf & REQ_DM_POLL_LIST)) { 1504b99fdcdcSMing Lei bio->bi_opf |= REQ_DM_POLL_LIST; 1505b99fdcdcSMing Lei /* 1506b99fdcdcSMing Lei * Save .bi_private into dm_io, so that we can reuse 1507b99fdcdcSMing Lei * .bi_private as hlist head for storing dm_io list 1508b99fdcdcSMing Lei */ 1509b99fdcdcSMing Lei io->data = bio->bi_private; 1510b99fdcdcSMing Lei 1511b99fdcdcSMing Lei INIT_HLIST_HEAD(head); 1512b99fdcdcSMing Lei 1513b99fdcdcSMing Lei /* tell block layer to poll for completion */ 1514b99fdcdcSMing Lei bio->bi_cookie = ~BLK_QC_T_NONE; 1515b99fdcdcSMing Lei } else { 1516b99fdcdcSMing Lei /* 1517b99fdcdcSMing Lei * bio recursed due to split, reuse original poll list, 1518b99fdcdcSMing Lei * and save bio->bi_private too. 1519b99fdcdcSMing Lei */ 1520b99fdcdcSMing Lei io->data = hlist_entry(head->first, struct dm_io, node)->data; 1521b99fdcdcSMing Lei } 1522b99fdcdcSMing Lei 1523b99fdcdcSMing Lei hlist_add_head(&io->node, head); 1524b99fdcdcSMing Lei } 1525b99fdcdcSMing Lei 1526b99fdcdcSMing Lei /* 1527e4c93811SAlasdair G Kergon * Select the correct strategy for processing a non-flush bio. 
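 *
 * In the order tried below:
 *  1. abnormal IO (discard, secure erase, write zeroes) is fanned out
 *     via __process_abnormal_io() -> __send_changing_extent_only();
 *  2. otherwise a clone covering at most max_io_len(ti, sector) sectors
 *     is allocated and mapped, and the caller re-submits any remainder.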
1528e4c93811SAlasdair G Kergon */ 152996c9865cSMike Snitzer static int __split_and_process_bio(struct clone_info *ci) 1530e4c93811SAlasdair G Kergon { 153166bdaa43SMike Snitzer struct bio *clone; 1532e4c93811SAlasdair G Kergon struct dm_target *ti; 15331c3b13e6SKent Overstreet unsigned len; 1534c80914e8SMike Snitzer int r; 1535e4c93811SAlasdair G Kergon 1536e4c93811SAlasdair G Kergon ti = dm_table_find_target(ci->map, ci->sector); 1537123d87d5SMikulas Patocka if (!ti) 1538e4c93811SAlasdair G Kergon return -EIO; 1539e4c93811SAlasdair G Kergon 1540568c73a3SMike Snitzer if (__process_abnormal_io(ci, ti, &r)) 15410519c71eSMike Snitzer return r; 15423d7f4562SMike Snitzer 1543b99fdcdcSMing Lei /* 1544b99fdcdcSMing Lei * Only support bio polling for normal IO, and the target io is 1545b99fdcdcSMing Lei * exactly inside the dm_io instance (verified in dm_poll_dm_io) 1546b99fdcdcSMing Lei */ 1547b99fdcdcSMing Lei ci->submit_as_polled = ci->bio->bi_opf & REQ_POLLED; 1548e4c93811SAlasdair G Kergon 1549e4c93811SAlasdair G Kergon len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count); 155066bdaa43SMike Snitzer clone = alloc_tio(ci, ti, 0, &len, GFP_NOIO); 155166bdaa43SMike Snitzer __map_bio(clone); 1552e4c93811SAlasdair G Kergon 1553e4c93811SAlasdair G Kergon ci->sector += len; 1554e4c93811SAlasdair G Kergon ci->sector_count -= len; 1555e4c93811SAlasdair G Kergon 1556e4c93811SAlasdair G Kergon return 0; 1557e4c93811SAlasdair G Kergon } 1558e4c93811SAlasdair G Kergon 1559978e51baSMike Snitzer static void init_clone_info(struct clone_info *ci, struct mapped_device *md, 1560978e51baSMike Snitzer struct dm_table *map, struct bio *bio) 1561978e51baSMike Snitzer { 1562978e51baSMike Snitzer ci->map = map; 1563978e51baSMike Snitzer ci->io = alloc_io(md, bio); 1564d41e077aSMike Snitzer ci->bio = bio; 1565b99fdcdcSMing Lei ci->submit_as_polled = false; 1566978e51baSMike Snitzer ci->sector = bio->bi_iter.bi_sector; 1567d41e077aSMike Snitzer ci->sector_count = bio_sectors(bio); 1568d41e077aSMike Snitzer 1569d41e077aSMike Snitzer /* Shouldn't happen but sector_count was being set to 0 so... */ 1570d41e077aSMike Snitzer if (WARN_ON_ONCE(op_is_zone_mgmt(bio_op(bio)) && ci->sector_count)) 1571d41e077aSMike Snitzer ci->sector_count = 0; 1572978e51baSMike Snitzer } 1573978e51baSMike Snitzer 1574e4c93811SAlasdair G Kergon /* 157514fe594dSAlasdair G Kergon * Entry point to split a bio into clones and submit them to the targets. 
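 *
 * Summary of the flow below: REQ_PREFLUSH bios go through
 * __send_empty_flush(); everything else is handled by
 * __split_and_process_bio(), and any unprocessed remainder is split off
 * with bio_split() + bio_chain() and fed back to submit_bio_noacct() so
 * it is processed only after the bios already submitted.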
15761da177e4SLinus Torvalds */ 157796c9865cSMike Snitzer static void dm_split_and_process_bio(struct mapped_device *md, 157883d5e5b0SMikulas Patocka struct dm_table *map, struct bio *bio) 15791da177e4SLinus Torvalds { 15801da177e4SLinus Torvalds struct clone_info ci; 15810fbb4d93SMike Snitzer struct bio *orig_bio = NULL; 1582512875bdSJun'ichi Nomura int error = 0; 15831da177e4SLinus Torvalds 1584978e51baSMike Snitzer init_clone_info(&ci, md, map, bio); 1585bd2a49b8SAlasdair G Kergon 15861eff9d32SJens Axboe if (bio->bi_opf & REQ_PREFLUSH) { 1587332f2b1eSMike Snitzer __send_empty_flush(&ci); 1588e2736347SMike Snitzer /* dm_io_complete submits any data associated with flush */ 1589d41e077aSMike Snitzer goto out; 1590d41e077aSMike Snitzer } 1591d41e077aSMike Snitzer 159296c9865cSMike Snitzer error = __split_and_process_bio(&ci); 1593b7f8dff0SMike Snitzer ci.io->map_task = NULL; 1594d41e077aSMike Snitzer if (error || !ci.sector_count) 1595d41e077aSMike Snitzer goto out; 1596d41e077aSMike Snitzer 159718a25da8SNeilBrown /* 1598d41e077aSMike Snitzer * Remainder must be passed to submit_bio_noacct() so it gets handled 1599d41e077aSMike Snitzer * *after* bios already submitted have been completely processed. 1600d41e077aSMike Snitzer * We take a clone of the original to store in ci.io->orig_bio to be 1601e2736347SMike Snitzer * used by dm_end_io_acct() and for dm_io_complete() to use for 1602d41e077aSMike Snitzer * completion handling. 160318a25da8SNeilBrown */ 16040fbb4d93SMike Snitzer orig_bio = bio_split(bio, bio_sectors(bio) - ci.sector_count, 1605f21c601aSMike Snitzer GFP_NOIO, &md->queue->bio_split); 16060fbb4d93SMike Snitzer bio_chain(orig_bio, bio); 16070fbb4d93SMike Snitzer trace_block_split(orig_bio, bio->bi_iter.bi_sector); 16083e08773cSChristoph Hellwig submit_bio_noacct(bio); 1609d41e077aSMike Snitzer out: 16100fbb4d93SMike Snitzer if (!orig_bio) 16110fbb4d93SMike Snitzer orig_bio = bio; 16120fbb4d93SMike Snitzer smp_store_release(&ci.io->orig_bio, orig_bio); 161382f6cdccSMike Snitzer if (dm_io_flagged(ci.io, DM_IO_START_ACCT)) 16140fbb4d93SMike Snitzer dm_start_io_acct(ci.io, NULL); 16151da177e4SLinus Torvalds 1616b99fdcdcSMing Lei /* 1617b99fdcdcSMing Lei * Drop the extra reference count for non-POLLED bio, and hold one 1618b99fdcdcSMing Lei * reference for POLLED bio, which will be released in dm_poll_bio 1619b99fdcdcSMing Lei * 1620b99fdcdcSMing Lei * Add every dm_io instance into the hlist_head which is stored in 1621b99fdcdcSMing Lei * bio->bi_private, so that dm_poll_bio can poll them all. 
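	 *
	 * Sketch of the bi_private reuse (the field names are real, the
	 * picture is only illustrative):
	 *
	 *	bio->bi_private --> hlist_head --> dm_io --> dm_io --> ...
	 *
	 * Every dm_io on the list carries the bio's original ->bi_private
	 * in io->data, so dm_poll_bio() can restore it before completion.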
1622b99fdcdcSMing Lei */ 1623b99fdcdcSMing Lei if (error || !ci.submit_as_polled) 1624e2118b3cSDamien Le Moal dm_io_dec_pending(ci.io, errno_to_blk_status(error)); 1625b99fdcdcSMing Lei else 1626b99fdcdcSMing Lei dm_queue_poll_io(bio, ci.io); 16271da177e4SLinus Torvalds } 16281da177e4SLinus Torvalds 16293e08773cSChristoph Hellwig static void dm_submit_bio(struct bio *bio) 16301da177e4SLinus Torvalds { 1631309dca30SChristoph Hellwig struct mapped_device *md = bio->bi_bdev->bd_disk->private_data; 163283d5e5b0SMikulas Patocka int srcu_idx; 163383d5e5b0SMikulas Patocka struct dm_table *map; 16341da177e4SLinus Torvalds 163583d5e5b0SMikulas Patocka map = dm_get_live_table(md, &srcu_idx); 16366a8736d1STejun Heo 1637fa247089SMike Snitzer /* If suspended, or map not yet available, queue this IO for later */ 1638fa247089SMike Snitzer if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) || 1639fa247089SMike Snitzer unlikely(!map)) { 16406abc4946SKonstantin Khlebnikov if (bio->bi_opf & REQ_NOWAIT) 16416abc4946SKonstantin Khlebnikov bio_wouldblock_error(bio); 1642b2abdb1bSMike Snitzer else if (bio->bi_opf & REQ_RAHEAD) 16436a8736d1STejun Heo bio_io_error(bio); 1644b2abdb1bSMike Snitzer else 1645b2abdb1bSMike Snitzer queue_io(md, bio); 1646b2abdb1bSMike Snitzer goto out; 16471da177e4SLinus Torvalds } 16481da177e4SLinus Torvalds 1649b2abdb1bSMike Snitzer /* 1650b2abdb1bSMike Snitzer * Use blk_queue_split() for abnormal IO (e.g. discard, writesame, etc) 1651b2abdb1bSMike Snitzer * otherwise associated queue_limits won't be imposed. 1652b2abdb1bSMike Snitzer */ 1653b2abdb1bSMike Snitzer if (is_abnormal_io(bio)) 1654b2abdb1bSMike Snitzer blk_queue_split(&bio); 1655978e51baSMike Snitzer 165696c9865cSMike Snitzer dm_split_and_process_bio(md, map, bio); 1657b2abdb1bSMike Snitzer out: 165883d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 1659978e51baSMike Snitzer } 1660978e51baSMike Snitzer 1661b99fdcdcSMing Lei static bool dm_poll_dm_io(struct dm_io *io, struct io_comp_batch *iob, 1662b99fdcdcSMing Lei unsigned int flags) 1663b99fdcdcSMing Lei { 1664655f3aadSMike Snitzer WARN_ON_ONCE(!dm_tio_is_normal(&io->tio)); 1665b99fdcdcSMing Lei 1666b99fdcdcSMing Lei /* don't poll if the mapped io is done */ 1667b99fdcdcSMing Lei if (atomic_read(&io->io_count) > 1) 1668b99fdcdcSMing Lei bio_poll(&io->tio.clone, iob, flags); 1669b99fdcdcSMing Lei 1670b99fdcdcSMing Lei /* bio_poll holds the last reference */ 1671b99fdcdcSMing Lei return atomic_read(&io->io_count) == 1; 1672b99fdcdcSMing Lei } 1673b99fdcdcSMing Lei 1674b99fdcdcSMing Lei static int dm_poll_bio(struct bio *bio, struct io_comp_batch *iob, 1675b99fdcdcSMing Lei unsigned int flags) 1676b99fdcdcSMing Lei { 1677b99fdcdcSMing Lei struct hlist_head *head = dm_get_bio_hlist_head(bio); 1678b99fdcdcSMing Lei struct hlist_head tmp = HLIST_HEAD_INIT; 1679b99fdcdcSMing Lei struct hlist_node *next; 1680b99fdcdcSMing Lei struct dm_io *io; 1681b99fdcdcSMing Lei 1682b99fdcdcSMing Lei /* Only poll normal bio which was marked as REQ_DM_POLL_LIST */ 1683b99fdcdcSMing Lei if (!(bio->bi_opf & REQ_DM_POLL_LIST)) 1684b99fdcdcSMing Lei return 0; 1685b99fdcdcSMing Lei 1686b99fdcdcSMing Lei WARN_ON_ONCE(hlist_empty(head)); 1687b99fdcdcSMing Lei 1688b99fdcdcSMing Lei hlist_move_list(head, &tmp); 1689b99fdcdcSMing Lei 1690b99fdcdcSMing Lei /* 1691b99fdcdcSMing Lei * Restore .bi_private before possibly completing dm_io. 
1692b99fdcdcSMing Lei * 1693b99fdcdcSMing Lei * bio_poll() is only possible once @bio has been completely 1694b99fdcdcSMing Lei * submitted via submit_bio_noacct()'s depth-first submission. 1695b99fdcdcSMing Lei * So there is no dm_queue_poll_io() race associated with 1696b99fdcdcSMing Lei * clearing REQ_DM_POLL_LIST here. 1697b99fdcdcSMing Lei */ 1698b99fdcdcSMing Lei bio->bi_opf &= ~REQ_DM_POLL_LIST; 1699b99fdcdcSMing Lei bio->bi_private = hlist_entry(tmp.first, struct dm_io, node)->data; 1700b99fdcdcSMing Lei 1701b99fdcdcSMing Lei hlist_for_each_entry_safe(io, next, &tmp, node) { 1702b99fdcdcSMing Lei if (dm_poll_dm_io(io, iob, flags)) { 1703b99fdcdcSMing Lei hlist_del_init(&io->node); 1704b99fdcdcSMing Lei /* 1705b99fdcdcSMing Lei * clone_endio() has already occurred, so passing 1706b99fdcdcSMing Lei * error as 0 here doesn't override io->status 1707b99fdcdcSMing Lei */ 1708b99fdcdcSMing Lei dm_io_dec_pending(io, 0); 1709b99fdcdcSMing Lei } 1710b99fdcdcSMing Lei } 1711b99fdcdcSMing Lei 1712b99fdcdcSMing Lei /* Not done? */ 1713b99fdcdcSMing Lei if (!hlist_empty(&tmp)) { 1714b99fdcdcSMing Lei bio->bi_opf |= REQ_DM_POLL_LIST; 1715b99fdcdcSMing Lei /* Reset bio->bi_private to dm_io list head */ 1716b99fdcdcSMing Lei hlist_move_list(&tmp, head); 1717b99fdcdcSMing Lei return 0; 1718b99fdcdcSMing Lei } 1719b99fdcdcSMing Lei return 1; 1720b99fdcdcSMing Lei } 1721b99fdcdcSMing Lei 17221da177e4SLinus Torvalds /*----------------------------------------------------------------- 17231da177e4SLinus Torvalds * An IDR is used to keep track of allocated minor numbers. 17241da177e4SLinus Torvalds *---------------------------------------------------------------*/ 17252b06cfffSAlasdair G Kergon static void free_minor(int minor) 17261da177e4SLinus Torvalds { 1727f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 17281da177e4SLinus Torvalds idr_remove(&_minor_idr, minor); 1729f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 17301da177e4SLinus Torvalds } 17311da177e4SLinus Torvalds 17321da177e4SLinus Torvalds /* 17331da177e4SLinus Torvalds * See if the device with a specific minor # is free. 17341da177e4SLinus Torvalds */ 1735cf13ab8eSFrederik Deweerdt static int specific_minor(int minor) 17361da177e4SLinus Torvalds { 1737c9d76be6STejun Heo int r; 17381da177e4SLinus Torvalds 17391da177e4SLinus Torvalds if (minor >= (1 << MINORBITS)) 17401da177e4SLinus Torvalds return -EINVAL; 17411da177e4SLinus Torvalds 1742c9d76be6STejun Heo idr_preload(GFP_KERNEL); 1743f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 17441da177e4SLinus Torvalds 1745c9d76be6STejun Heo r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT); 17461da177e4SLinus Torvalds 1747f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 1748c9d76be6STejun Heo idr_preload_end(); 1749c9d76be6STejun Heo if (r < 0) 1750c9d76be6STejun Heo return r == -ENOSPC ? 
-EBUSY : r; 1751c9d76be6STejun Heo return 0; 17521da177e4SLinus Torvalds } 17531da177e4SLinus Torvalds 1754cf13ab8eSFrederik Deweerdt static int next_free_minor(int *minor) 17551da177e4SLinus Torvalds { 1756c9d76be6STejun Heo int r; 17571da177e4SLinus Torvalds 1758c9d76be6STejun Heo idr_preload(GFP_KERNEL); 1759f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 17601da177e4SLinus Torvalds 1761c9d76be6STejun Heo r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT); 17621da177e4SLinus Torvalds 1763f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 1764c9d76be6STejun Heo idr_preload_end(); 1765c9d76be6STejun Heo if (r < 0) 17661da177e4SLinus Torvalds return r; 1767c9d76be6STejun Heo *minor = r; 1768c9d76be6STejun Heo return 0; 17691da177e4SLinus Torvalds } 17701da177e4SLinus Torvalds 177183d5cde4SAlexey Dobriyan static const struct block_device_operations dm_blk_dops; 1772681cc5e8SMike Snitzer static const struct block_device_operations dm_rq_blk_dops; 1773f26c5719SDan Williams static const struct dax_operations dm_dax_ops; 17741da177e4SLinus Torvalds 177553d5914fSMikulas Patocka static void dm_wq_work(struct work_struct *work); 177653d5914fSMikulas Patocka 1777aa6ce87aSSatya Tangirala #ifdef CONFIG_BLK_INLINE_ENCRYPTION 1778cb77cb5aSEric Biggers static void dm_queue_destroy_crypto_profile(struct request_queue *q) 1779aa6ce87aSSatya Tangirala { 1780cb77cb5aSEric Biggers dm_destroy_crypto_profile(q->crypto_profile); 1781aa6ce87aSSatya Tangirala } 1782aa6ce87aSSatya Tangirala 1783aa6ce87aSSatya Tangirala #else /* CONFIG_BLK_INLINE_ENCRYPTION */ 1784aa6ce87aSSatya Tangirala 1785cb77cb5aSEric Biggers static inline void dm_queue_destroy_crypto_profile(struct request_queue *q) 1786aa6ce87aSSatya Tangirala { 1787aa6ce87aSSatya Tangirala } 1788aa6ce87aSSatya Tangirala #endif /* !CONFIG_BLK_INLINE_ENCRYPTION */ 1789aa6ce87aSSatya Tangirala 17900f20972fSMike Snitzer static void cleanup_mapped_device(struct mapped_device *md) 17910f20972fSMike Snitzer { 17920f20972fSMike Snitzer if (md->wq) 17930f20972fSMike Snitzer destroy_workqueue(md->wq); 17946f1c819cSKent Overstreet bioset_exit(&md->bs); 17956f1c819cSKent Overstreet bioset_exit(&md->io_bs); 17960f20972fSMike Snitzer 1797f26c5719SDan Williams if (md->dax_dev) { 1798fb08a190SChristoph Hellwig dax_remove_host(md->disk); 1799f26c5719SDan Williams kill_dax(md->dax_dev); 1800f26c5719SDan Williams put_dax(md->dax_dev); 1801f26c5719SDan Williams md->dax_dev = NULL; 1802f26c5719SDan Williams } 1803f26c5719SDan Williams 1804588b7f5dSKirill Tkhai dm_cleanup_zoned_dev(md); 18050f20972fSMike Snitzer if (md->disk) { 18060f20972fSMike Snitzer spin_lock(&_minor_lock); 18070f20972fSMike Snitzer md->disk->private_data = NULL; 18080f20972fSMike Snitzer spin_unlock(&_minor_lock); 180989f871afSChristoph Hellwig if (dm_get_md_type(md) != DM_TYPE_NONE) { 181089f871afSChristoph Hellwig dm_sysfs_exit(md); 18110f20972fSMike Snitzer del_gendisk(md->disk); 181289f871afSChristoph Hellwig } 1813cb77cb5aSEric Biggers dm_queue_destroy_crypto_profile(md->queue); 181474fe6ba9SChristoph Hellwig blk_cleanup_disk(md->disk); 181574a2b6ecSChristoph Hellwig } 18160f20972fSMike Snitzer 18179f6dc633SMike Snitzer if (md->pending_io) { 18189f6dc633SMike Snitzer free_percpu(md->pending_io); 18199f6dc633SMike Snitzer md->pending_io = NULL; 18209f6dc633SMike Snitzer } 18219f6dc633SMike Snitzer 1822d09960b0STahsin Erdogan cleanup_srcu_struct(&md->io_barrier); 1823d09960b0STahsin Erdogan 1824d5ffebddSMike Snitzer mutex_destroy(&md->suspend_lock); 1825d5ffebddSMike Snitzer 
mutex_destroy(&md->type_lock); 1826d5ffebddSMike Snitzer mutex_destroy(&md->table_devices_lock); 1827a666e5c0SMikulas Patocka mutex_destroy(&md->swap_bios_lock); 1828d5ffebddSMike Snitzer 18294cc96131SMike Snitzer dm_mq_cleanup_mapped_device(md); 18300f20972fSMike Snitzer } 18310f20972fSMike Snitzer 18321da177e4SLinus Torvalds /* 18331da177e4SLinus Torvalds * Allocate and initialise a blank device with a given minor. 18341da177e4SLinus Torvalds */ 18352b06cfffSAlasdair G Kergon static struct mapped_device *alloc_dev(int minor) 18361da177e4SLinus Torvalds { 1837115485e8SMike Snitzer int r, numa_node_id = dm_get_numa_node(); 1838115485e8SMike Snitzer struct mapped_device *md; 1839ba61fdd1SJeff Mahoney void *old_md; 18401da177e4SLinus Torvalds 1841856eb091SMikulas Patocka md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id); 18421da177e4SLinus Torvalds if (!md) { 18431da177e4SLinus Torvalds DMWARN("unable to allocate device, out of memory."); 18441da177e4SLinus Torvalds return NULL; 18451da177e4SLinus Torvalds } 18461da177e4SLinus Torvalds 184710da4f79SJeff Mahoney if (!try_module_get(THIS_MODULE)) 18486ed7ade8SMilan Broz goto bad_module_get; 184910da4f79SJeff Mahoney 18501da177e4SLinus Torvalds /* get a minor number for the dev */ 18512b06cfffSAlasdair G Kergon if (minor == DM_ANY_MINOR) 1852cf13ab8eSFrederik Deweerdt r = next_free_minor(&minor); 18532b06cfffSAlasdair G Kergon else 1854cf13ab8eSFrederik Deweerdt r = specific_minor(minor); 18551da177e4SLinus Torvalds if (r < 0) 18566ed7ade8SMilan Broz goto bad_minor; 18571da177e4SLinus Torvalds 185883d5e5b0SMikulas Patocka r = init_srcu_struct(&md->io_barrier); 185983d5e5b0SMikulas Patocka if (r < 0) 186083d5e5b0SMikulas Patocka goto bad_io_barrier; 186183d5e5b0SMikulas Patocka 1862115485e8SMike Snitzer md->numa_node_id = numa_node_id; 1863591ddcfcSMike Snitzer md->init_tio_pdu = false; 1864a5664dadSMike Snitzer md->type = DM_TYPE_NONE; 1865e61290a4SDaniel Walker mutex_init(&md->suspend_lock); 1866a5664dadSMike Snitzer mutex_init(&md->type_lock); 186786f1152bSBenjamin Marzinski mutex_init(&md->table_devices_lock); 1868022c2611SMikulas Patocka spin_lock_init(&md->deferred_lock); 18691da177e4SLinus Torvalds atomic_set(&md->holders, 1); 18705c6bd75dSAlasdair G Kergon atomic_set(&md->open_count, 0); 18711da177e4SLinus Torvalds atomic_set(&md->event_nr, 0); 18727a8c3d3bSMike Anderson atomic_set(&md->uevent_seq, 0); 18737a8c3d3bSMike Anderson INIT_LIST_HEAD(&md->uevent_list); 187486f1152bSBenjamin Marzinski INIT_LIST_HEAD(&md->table_devices); 18757a8c3d3bSMike Anderson spin_lock_init(&md->uevent_lock); 18761da177e4SLinus Torvalds 187747ace7e0SMike Snitzer /* 1878c62b37d9SChristoph Hellwig * default to bio-based until DM table is loaded and md->type 1879c62b37d9SChristoph Hellwig * established. If request-based table is loaded: blk-mq will 1880c62b37d9SChristoph Hellwig * override accordingly. 
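	 * (dm_setup_md_queue() below switches md->disk->fops to
	 * dm_rq_blk_dops once a DM_TYPE_REQUEST_BASED table is bound.)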
188147ace7e0SMike Snitzer */ 188274fe6ba9SChristoph Hellwig md->disk = blk_alloc_disk(md->numa_node_id); 18831da177e4SLinus Torvalds if (!md->disk) 18840f20972fSMike Snitzer goto bad; 188574fe6ba9SChristoph Hellwig md->queue = md->disk->queue; 18861da177e4SLinus Torvalds 1887f0b04115SJeff Mahoney init_waitqueue_head(&md->wait); 188853d5914fSMikulas Patocka INIT_WORK(&md->work, dm_wq_work); 1889f0b04115SJeff Mahoney init_waitqueue_head(&md->eventq); 18902995fa78SMikulas Patocka init_completion(&md->kobj_holder.completion); 1891f0b04115SJeff Mahoney 1892a666e5c0SMikulas Patocka md->swap_bios = get_swap_bios(); 1893a666e5c0SMikulas Patocka sema_init(&md->swap_bios_semaphore, md->swap_bios); 1894a666e5c0SMikulas Patocka mutex_init(&md->swap_bios_lock); 1895a666e5c0SMikulas Patocka 18961da177e4SLinus Torvalds md->disk->major = _major; 18971da177e4SLinus Torvalds md->disk->first_minor = minor; 189874fe6ba9SChristoph Hellwig md->disk->minors = 1; 18991ebe2e5fSChristoph Hellwig md->disk->flags |= GENHD_FL_NO_PART; 19001da177e4SLinus Torvalds md->disk->fops = &dm_blk_dops; 19011da177e4SLinus Torvalds md->disk->queue = md->queue; 19021da177e4SLinus Torvalds md->disk->private_data = md; 19031da177e4SLinus Torvalds sprintf(md->disk->disk_name, "dm-%d", minor); 1904f26c5719SDan Williams 19055d2a228bSChristoph Hellwig if (IS_ENABLED(CONFIG_FS_DAX)) { 190630c6828aSChristoph Hellwig md->dax_dev = alloc_dax(md, &dm_dax_ops); 1907d7519392SChristoph Hellwig if (IS_ERR(md->dax_dev)) { 1908d7519392SChristoph Hellwig md->dax_dev = NULL; 1909f26c5719SDan Williams goto bad; 1910976431b0SDan Williams } 19117ac5360cSChristoph Hellwig set_dax_nocache(md->dax_dev); 19127ac5360cSChristoph Hellwig set_dax_nomc(md->dax_dev); 1913fb08a190SChristoph Hellwig if (dax_add_host(md->dax_dev, md->disk)) 1914f26c5719SDan Williams goto bad; 1915f26c5719SDan Williams } 19161da177e4SLinus Torvalds 19177e51f257SMike Anderson format_dev_t(md->name, MKDEV(_major, minor)); 19181da177e4SLinus Torvalds 1919c7c879eeSMichał Mirosław md->wq = alloc_workqueue("kdmflush/%s", WQ_MEM_RECLAIM, 0, md->name); 1920304f3f6aSMilan Broz if (!md->wq) 19210f20972fSMike Snitzer goto bad; 1922304f3f6aSMilan Broz 19239f6dc633SMike Snitzer md->pending_io = alloc_percpu(unsigned long); 19249f6dc633SMike Snitzer if (!md->pending_io) 19259f6dc633SMike Snitzer goto bad; 19269f6dc633SMike Snitzer 1927fd2ed4d2SMikulas Patocka dm_stats_init(&md->stats); 1928fd2ed4d2SMikulas Patocka 1929ba61fdd1SJeff Mahoney /* Populate the mapping, nobody knows we exist yet */ 1930f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 1931ba61fdd1SJeff Mahoney old_md = idr_replace(&_minor_idr, md, minor); 1932f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 1933ba61fdd1SJeff Mahoney 1934ba61fdd1SJeff Mahoney BUG_ON(old_md != MINOR_ALLOCED); 1935ba61fdd1SJeff Mahoney 19361da177e4SLinus Torvalds return md; 19371da177e4SLinus Torvalds 19380f20972fSMike Snitzer bad: 19390f20972fSMike Snitzer cleanup_mapped_device(md); 194083d5e5b0SMikulas Patocka bad_io_barrier: 19411da177e4SLinus Torvalds free_minor(minor); 19426ed7ade8SMilan Broz bad_minor: 194310da4f79SJeff Mahoney module_put(THIS_MODULE); 19446ed7ade8SMilan Broz bad_module_get: 1945856eb091SMikulas Patocka kvfree(md); 19461da177e4SLinus Torvalds return NULL; 19471da177e4SLinus Torvalds } 19481da177e4SLinus Torvalds 1949ae9da83fSJun'ichi Nomura static void unlock_fs(struct mapped_device *md); 1950ae9da83fSJun'ichi Nomura 19511da177e4SLinus Torvalds static void free_dev(struct mapped_device *md) 19521da177e4SLinus Torvalds { 
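	/*
	 * Teardown order (inferred from the calls below) matters: the minor
	 * is sampled first because cleanup_mapped_device() releases
	 * md->disk; any frozen filesystem is unlocked before teardown; and
	 * the module reference taken in alloc_dev() is dropped last, just
	 * before the structure itself is freed.
	 */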
1953f331c029STejun Heo int minor = MINOR(disk_devt(md->disk)); 195463d94e48SJun'ichi Nomura 1955ae9da83fSJun'ichi Nomura unlock_fs(md); 19562eb6e1e3SKeith Busch 19570f20972fSMike Snitzer cleanup_mapped_device(md); 19580f20972fSMike Snitzer 19590f20972fSMike Snitzer free_table_devices(&md->table_devices); 19600f20972fSMike Snitzer dm_stats_cleanup(&md->stats); 196163a4f065SMike Snitzer free_minor(minor); 196263a4f065SMike Snitzer 196310da4f79SJeff Mahoney module_put(THIS_MODULE); 1964856eb091SMikulas Patocka kvfree(md); 19651da177e4SLinus Torvalds } 19661da177e4SLinus Torvalds 19672a2a4c51SJens Axboe static int __bind_mempools(struct mapped_device *md, struct dm_table *t) 1968e6ee8c0bSKiyoshi Ueda { 1969c0820cf5SMikulas Patocka struct dm_md_mempools *p = dm_table_get_md_mempools(t); 19702a2a4c51SJens Axboe int ret = 0; 1971e6ee8c0bSKiyoshi Ueda 1972545ed20eSToshi Kani if (dm_table_bio_based(t)) { 1973c0820cf5SMikulas Patocka /* 197464f52b0eSMike Snitzer * The md may already have mempools that need changing. 197564f52b0eSMike Snitzer * If so, reload bioset because front_pad may have changed 197616245bdcSJun'ichi Nomura * because a different table was loaded. 1977c0820cf5SMikulas Patocka */ 19786f1c819cSKent Overstreet bioset_exit(&md->bs); 19796f1c819cSKent Overstreet bioset_exit(&md->io_bs); 19800776aa0eSMike Snitzer 19816f1c819cSKent Overstreet } else if (bioset_initialized(&md->bs)) { 1982cbc4e3c1SMike Snitzer /* 19834e6e36c3SMike Snitzer * There's no need to reload with request-based dm 19844e6e36c3SMike Snitzer * because the size of front_pad doesn't change. 19854e6e36c3SMike Snitzer * Note for future: If you are to reload bioset, 19864e6e36c3SMike Snitzer * prep-ed requests in the queue may refer 19874e6e36c3SMike Snitzer * to bio from the old bioset, so you must walk 19884e6e36c3SMike Snitzer * through the queue to unprep. 1989cbc4e3c1SMike Snitzer */ 1990cbc4e3c1SMike Snitzer goto out; 1991cbc4e3c1SMike Snitzer } 1992cbc4e3c1SMike Snitzer 19936f1c819cSKent Overstreet BUG_ON(!p || 19946f1c819cSKent Overstreet bioset_initialized(&md->bs) || 19956f1c819cSKent Overstreet bioset_initialized(&md->io_bs)); 1996e6ee8c0bSKiyoshi Ueda 19972a2a4c51SJens Axboe ret = bioset_init_from_src(&md->bs, &p->bs); 19982a2a4c51SJens Axboe if (ret) 19992a2a4c51SJens Axboe goto out; 20002a2a4c51SJens Axboe ret = bioset_init_from_src(&md->io_bs, &p->io_bs); 20012a2a4c51SJens Axboe if (ret) 20022a2a4c51SJens Axboe bioset_exit(&md->bs); 2003e6ee8c0bSKiyoshi Ueda out: 200402233342SMike Snitzer /* mempool bind completed, no longer need any mempools in the table */ 2005e6ee8c0bSKiyoshi Ueda dm_table_free_md_mempools(t); 20062a2a4c51SJens Axboe return ret; 2007e6ee8c0bSKiyoshi Ueda } 2008e6ee8c0bSKiyoshi Ueda 20091da177e4SLinus Torvalds /* 20101da177e4SLinus Torvalds * Bind a table to the device. 
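 * (The binding itself happens in __bind() below, which wires this
 * event_callback up via dm_table_event_callback() so that table events
 * bump md->event_nr and wake md->eventq waiters.)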
20111da177e4SLinus Torvalds */ 20121da177e4SLinus Torvalds static void event_callback(void *context) 20131da177e4SLinus Torvalds { 20147a8c3d3bSMike Anderson unsigned long flags; 20157a8c3d3bSMike Anderson LIST_HEAD(uevents); 20161da177e4SLinus Torvalds struct mapped_device *md = (struct mapped_device *) context; 20171da177e4SLinus Torvalds 20187a8c3d3bSMike Anderson spin_lock_irqsave(&md->uevent_lock, flags); 20197a8c3d3bSMike Anderson list_splice_init(&md->uevent_list, &uevents); 20207a8c3d3bSMike Anderson spin_unlock_irqrestore(&md->uevent_lock, flags); 20217a8c3d3bSMike Anderson 2022ed9e1982STejun Heo dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); 20237a8c3d3bSMike Anderson 20241da177e4SLinus Torvalds atomic_inc(&md->event_nr); 20251da177e4SLinus Torvalds wake_up(&md->eventq); 202662e08243SMikulas Patocka dm_issue_global_event(); 20271da177e4SLinus Torvalds } 20281da177e4SLinus Torvalds 2029c217649bSMike Snitzer /* 2030042d2a9bSAlasdair G Kergon * Returns old map, which caller must destroy. 2031042d2a9bSAlasdair G Kergon */ 2032042d2a9bSAlasdair G Kergon static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, 2033754c5fc7SMike Snitzer struct queue_limits *limits) 20341da177e4SLinus Torvalds { 2035042d2a9bSAlasdair G Kergon struct dm_table *old_map; 20361da177e4SLinus Torvalds sector_t size; 20372a2a4c51SJens Axboe int ret; 20381da177e4SLinus Torvalds 20395a8f1f80SBart Van Assche lockdep_assert_held(&md->suspend_lock); 20405a8f1f80SBart Van Assche 20411da177e4SLinus Torvalds size = dm_table_get_size(t); 20423ac51e74SDarrick J. Wong 20433ac51e74SDarrick J. Wong /* 20443ac51e74SDarrick J. Wong * Wipe any geometry if the size of the table changed. 20453ac51e74SDarrick J. Wong */ 2046fd2ed4d2SMikulas Patocka if (size != dm_get_size(md)) 20473ac51e74SDarrick J. Wong memset(&md->geometry, 0, sizeof(md->geometry)); 20483ac51e74SDarrick J. Wong 20495424a0b8SMikulas Patocka if (!get_capacity(md->disk)) 20505424a0b8SMikulas Patocka set_capacity(md->disk, size); 20515424a0b8SMikulas Patocka else 2052f64d9b2eSChristoph Hellwig set_capacity_and_notify(md->disk, size); 20531da177e4SLinus Torvalds 2054cf222b37SAlasdair G Kergon dm_table_event_callback(t, event_callback, md); 20552ca3310eSAlasdair G Kergon 2056f5b4aee1SMike Snitzer if (dm_table_request_based(t)) { 205716f12266SMike Snitzer /* 20589c37de29SMike Snitzer * Leverage the fact that request-based DM targets are 20599c37de29SMike Snitzer * immutable singletons - used to optimize dm_mq_queue_rq. 
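		 * (md->immutable_target, set below, hands the request path
		 * a direct pointer to that single target, avoiding a
		 * per-request table lookup.)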
206016f12266SMike Snitzer */ 206116f12266SMike Snitzer md->immutable_target = dm_table_get_immutable_target(t); 206216f12266SMike Snitzer } 2063e6ee8c0bSKiyoshi Ueda 20642a2a4c51SJens Axboe ret = __bind_mempools(md, t); 20652a2a4c51SJens Axboe if (ret) { 20662a2a4c51SJens Axboe old_map = ERR_PTR(ret); 20672a2a4c51SJens Axboe goto out; 20682a2a4c51SJens Axboe } 2069e6ee8c0bSKiyoshi Ueda 2070f5b4aee1SMike Snitzer ret = dm_table_set_restrictions(t, md->queue, limits); 2071bb37d772SDamien Le Moal if (ret) { 2072bb37d772SDamien Le Moal old_map = ERR_PTR(ret); 2073bb37d772SDamien Le Moal goto out; 2074bb37d772SDamien Le Moal } 2075bb37d772SDamien Le Moal 2076a12f5d48SEric Dumazet old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 20771d3aa6f6SMike Snitzer rcu_assign_pointer(md->map, (void *)t); 207836a0456fSAlasdair G Kergon md->immutable_target_type = dm_table_get_immutable_target_type(t); 207936a0456fSAlasdair G Kergon 208041abc4e1SHannes Reinecke if (old_map) 208183d5e5b0SMikulas Patocka dm_sync_table(md); 20822a2a4c51SJens Axboe out: 2083042d2a9bSAlasdair G Kergon return old_map; 20841da177e4SLinus Torvalds } 20851da177e4SLinus Torvalds 2086a7940155SAlasdair G Kergon /* 2087a7940155SAlasdair G Kergon * Returns unbound table for the caller to free. 2088a7940155SAlasdair G Kergon */ 2089a7940155SAlasdair G Kergon static struct dm_table *__unbind(struct mapped_device *md) 20901da177e4SLinus Torvalds { 2091a12f5d48SEric Dumazet struct dm_table *map = rcu_dereference_protected(md->map, 1); 20921da177e4SLinus Torvalds 20931da177e4SLinus Torvalds if (!map) 2094a7940155SAlasdair G Kergon return NULL; 20951da177e4SLinus Torvalds 20961da177e4SLinus Torvalds dm_table_event_callback(map, NULL, NULL); 20979cdb8520SMonam Agarwal RCU_INIT_POINTER(md->map, NULL); 209883d5e5b0SMikulas Patocka dm_sync_table(md); 2099a7940155SAlasdair G Kergon 2100a7940155SAlasdair G Kergon return map; 21011da177e4SLinus Torvalds } 21021da177e4SLinus Torvalds 21031da177e4SLinus Torvalds /* 21041da177e4SLinus Torvalds * Constructor for a new device. 21051da177e4SLinus Torvalds */ 21062b06cfffSAlasdair G Kergon int dm_create(int minor, struct mapped_device **result) 21071da177e4SLinus Torvalds { 21081da177e4SLinus Torvalds struct mapped_device *md; 21091da177e4SLinus Torvalds 21102b06cfffSAlasdair G Kergon md = alloc_dev(minor); 21111da177e4SLinus Torvalds if (!md) 21121da177e4SLinus Torvalds return -ENXIO; 21131da177e4SLinus Torvalds 211491ccbbacSTushar Sugandhi dm_ima_reset_data(md); 211591ccbbacSTushar Sugandhi 21161da177e4SLinus Torvalds *result = md; 21171da177e4SLinus Torvalds return 0; 21181da177e4SLinus Torvalds } 21191da177e4SLinus Torvalds 2120a5664dadSMike Snitzer /* 2121a5664dadSMike Snitzer * Functions to manage md->type. 2122a5664dadSMike Snitzer * All are required to hold md->type_lock. 
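 *
 * Hypothetical caller pattern (a sketch; only the locking rule is
 * mandated here):
 *
 *	dm_lock_md_type(md);
 *	if (dm_get_md_type(md) == DM_TYPE_NONE)
 *		dm_set_md_type(md, dm_table_get_type(t));
 *	dm_unlock_md_type(md);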
2123a5664dadSMike Snitzer */ 2124a5664dadSMike Snitzer void dm_lock_md_type(struct mapped_device *md) 2125a5664dadSMike Snitzer { 2126a5664dadSMike Snitzer mutex_lock(&md->type_lock); 2127a5664dadSMike Snitzer } 2128a5664dadSMike Snitzer 2129a5664dadSMike Snitzer void dm_unlock_md_type(struct mapped_device *md) 2130a5664dadSMike Snitzer { 2131a5664dadSMike Snitzer mutex_unlock(&md->type_lock); 2132a5664dadSMike Snitzer } 2133a5664dadSMike Snitzer 21347e0d574fSBart Van Assche void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type) 2135a5664dadSMike Snitzer { 213600c4fc3bSMike Snitzer BUG_ON(!mutex_is_locked(&md->type_lock)); 2137a5664dadSMike Snitzer md->type = type; 2138a5664dadSMike Snitzer } 2139a5664dadSMike Snitzer 21407e0d574fSBart Van Assche enum dm_queue_mode dm_get_md_type(struct mapped_device *md) 2141a5664dadSMike Snitzer { 2142a5664dadSMike Snitzer return md->type; 2143a5664dadSMike Snitzer } 2144a5664dadSMike Snitzer 214536a0456fSAlasdair G Kergon struct target_type *dm_get_immutable_target_type(struct mapped_device *md) 214636a0456fSAlasdair G Kergon { 214736a0456fSAlasdair G Kergon return md->immutable_target_type; 214836a0456fSAlasdair G Kergon } 214936a0456fSAlasdair G Kergon 21504a0b4ddfSMike Snitzer /* 2151f84cb8a4SMike Snitzer * The queue_limits are only valid as long as you have a reference 2152f84cb8a4SMike Snitzer * count on 'md'. 2153f84cb8a4SMike Snitzer */ 2154f84cb8a4SMike Snitzer struct queue_limits *dm_get_queue_limits(struct mapped_device *md) 2155f84cb8a4SMike Snitzer { 2156f84cb8a4SMike Snitzer BUG_ON(!atomic_read(&md->holders)); 2157f84cb8a4SMike Snitzer return &md->queue->limits; 2158f84cb8a4SMike Snitzer } 2159f84cb8a4SMike Snitzer EXPORT_SYMBOL_GPL(dm_get_queue_limits); 2160f84cb8a4SMike Snitzer 21614a0b4ddfSMike Snitzer /* 21624a0b4ddfSMike Snitzer * Setup the DM device's queue based on md's type 21634a0b4ddfSMike Snitzer */ 2164591ddcfcSMike Snitzer int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) 21654a0b4ddfSMike Snitzer { 2166ba305859SChristoph Hellwig enum dm_queue_mode type = dm_table_get_type(t); 2167c100ec49SMike Snitzer struct queue_limits limits; 2168ba305859SChristoph Hellwig int r; 2169bfebd1cdSMike Snitzer 2170545ed20eSToshi Kani switch (type) { 2171bfebd1cdSMike Snitzer case DM_TYPE_REQUEST_BASED: 2172681cc5e8SMike Snitzer md->disk->fops = &dm_rq_blk_dops; 2173e83068a5SMike Snitzer r = dm_mq_init_request_queue(md, t); 2174bfebd1cdSMike Snitzer if (r) { 2175681cc5e8SMike Snitzer DMERR("Cannot initialize queue for request-based dm mapped device"); 2176bfebd1cdSMike Snitzer return r; 2177bfebd1cdSMike Snitzer } 2178bfebd1cdSMike Snitzer break; 2179bfebd1cdSMike Snitzer case DM_TYPE_BIO_BASED: 2180545ed20eSToshi Kani case DM_TYPE_DAX_BIO_BASED: 2181bfebd1cdSMike Snitzer break; 21827e0d574fSBart Van Assche case DM_TYPE_NONE: 21837e0d574fSBart Van Assche WARN_ON_ONCE(true); 21847e0d574fSBart Van Assche break; 2185ff36ab34SMike Snitzer } 21864a0b4ddfSMike Snitzer 2187c100ec49SMike Snitzer r = dm_calculate_queue_limits(t, &limits); 2188c100ec49SMike Snitzer if (r) { 2189c100ec49SMike Snitzer DMERR("Cannot calculate initial queue limits"); 2190c100ec49SMike Snitzer return r; 2191c100ec49SMike Snitzer } 2192bb37d772SDamien Le Moal r = dm_table_set_restrictions(t, md->queue, &limits); 2193bb37d772SDamien Le Moal if (r) 2194bb37d772SDamien Le Moal return r; 219589f871afSChristoph Hellwig 2196e7089f65SLuis Chamberlain r = add_disk(md->disk); 2197e7089f65SLuis Chamberlain if (r) 2198e7089f65SLuis Chamberlain return r; 
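	/* sysfs setup must follow add_disk(); the error path below unwinds the disk */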
219989f871afSChristoph Hellwig 220089f871afSChristoph Hellwig r = dm_sysfs_init(md); 220189f871afSChristoph Hellwig if (r) { 220289f871afSChristoph Hellwig del_gendisk(md->disk); 220389f871afSChristoph Hellwig return r; 220489f871afSChristoph Hellwig } 2205ba305859SChristoph Hellwig md->type = type; 22064a0b4ddfSMike Snitzer return 0; 22074a0b4ddfSMike Snitzer } 22084a0b4ddfSMike Snitzer 22092bec1f4aSMikulas Patocka struct mapped_device *dm_get_md(dev_t dev) 22101da177e4SLinus Torvalds { 22111da177e4SLinus Torvalds struct mapped_device *md; 22121da177e4SLinus Torvalds unsigned minor = MINOR(dev); 22131da177e4SLinus Torvalds 22141da177e4SLinus Torvalds if (MAJOR(dev) != _major || minor >= (1 << MINORBITS)) 22151da177e4SLinus Torvalds return NULL; 22161da177e4SLinus Torvalds 2217f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 22181da177e4SLinus Torvalds 22191da177e4SLinus Torvalds md = idr_find(&_minor_idr, minor); 222049de5769SMike Snitzer if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) || 222149de5769SMike Snitzer test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { 2222637842cfSDavid Teigland md = NULL; 2223fba9f90eSJeff Mahoney goto out; 2224fba9f90eSJeff Mahoney } 22252bec1f4aSMikulas Patocka dm_get(md); 2226fba9f90eSJeff Mahoney out: 2227f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 22281da177e4SLinus Torvalds 2229637842cfSDavid Teigland return md; 2230637842cfSDavid Teigland } 22313cf2e4baSAlasdair G Kergon EXPORT_SYMBOL_GPL(dm_get_md); 2232d229a958SDavid Teigland 22339ade92a9SAlasdair G Kergon void *dm_get_mdptr(struct mapped_device *md) 2234637842cfSDavid Teigland { 22359ade92a9SAlasdair G Kergon return md->interface_ptr; 22361da177e4SLinus Torvalds } 22371da177e4SLinus Torvalds 22381da177e4SLinus Torvalds void dm_set_mdptr(struct mapped_device *md, void *ptr) 22391da177e4SLinus Torvalds { 22401da177e4SLinus Torvalds md->interface_ptr = ptr; 22411da177e4SLinus Torvalds } 22421da177e4SLinus Torvalds 22431da177e4SLinus Torvalds void dm_get(struct mapped_device *md) 22441da177e4SLinus Torvalds { 22451da177e4SLinus Torvalds atomic_inc(&md->holders); 22463f77316dSKiyoshi Ueda BUG_ON(test_bit(DMF_FREEING, &md->flags)); 22471da177e4SLinus Torvalds } 22481da177e4SLinus Torvalds 224909ee96b2SMikulas Patocka int dm_hold(struct mapped_device *md) 225009ee96b2SMikulas Patocka { 225109ee96b2SMikulas Patocka spin_lock(&_minor_lock); 225209ee96b2SMikulas Patocka if (test_bit(DMF_FREEING, &md->flags)) { 225309ee96b2SMikulas Patocka spin_unlock(&_minor_lock); 225409ee96b2SMikulas Patocka return -EBUSY; 225509ee96b2SMikulas Patocka } 225609ee96b2SMikulas Patocka dm_get(md); 225709ee96b2SMikulas Patocka spin_unlock(&_minor_lock); 225809ee96b2SMikulas Patocka return 0; 225909ee96b2SMikulas Patocka } 226009ee96b2SMikulas Patocka EXPORT_SYMBOL_GPL(dm_hold); 226109ee96b2SMikulas Patocka 226272d94861SAlasdair G Kergon const char *dm_device_name(struct mapped_device *md) 226372d94861SAlasdair G Kergon { 226472d94861SAlasdair G Kergon return md->name; 226572d94861SAlasdair G Kergon } 226672d94861SAlasdair G Kergon EXPORT_SYMBOL_GPL(dm_device_name); 226772d94861SAlasdair G Kergon 22683f77316dSKiyoshi Ueda static void __dm_destroy(struct mapped_device *md, bool wait) 22691da177e4SLinus Torvalds { 22701134e5aeSMike Anderson struct dm_table *map; 227183d5e5b0SMikulas Patocka int srcu_idx; 22721da177e4SLinus Torvalds 22733f77316dSKiyoshi Ueda might_sleep(); 2274fba9f90eSJeff Mahoney 227563a4f065SMike Snitzer spin_lock(&_minor_lock); 22763f77316dSKiyoshi Ueda 
	idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
2277fba9f90eSJeff Mahoney 	set_bit(DMF_FREEING, &md->flags);
2278f32c10b0SJeff Mahoney 	spin_unlock(&_minor_lock);
22793f77316dSKiyoshi Ueda 
22807a5428dcSChristoph Hellwig 	blk_mark_disk_dead(md->disk);
22813b785fbcSBart Van Assche 
2282ab7c7bb6SMikulas Patocka 	/*
2283ab7c7bb6SMikulas Patocka 	 * Take suspend_lock so that presuspend and postsuspend methods
2284ab7c7bb6SMikulas Patocka 	 * do not race with internal suspend.
2285ab7c7bb6SMikulas Patocka 	 */
2286ab7c7bb6SMikulas Patocka 	mutex_lock(&md->suspend_lock);
22872a708cffSJunichi Nomura 	map = dm_get_live_table(md, &srcu_idx);
22884f186f8bSKiyoshi Ueda 	if (!dm_suspended_md(md)) {
22891da177e4SLinus Torvalds 		dm_table_presuspend_targets(map);
2290adc0daadSMikulas Patocka 		set_bit(DMF_SUSPENDED, &md->flags);
22915df96f2bSMikulas Patocka 		set_bit(DMF_POST_SUSPENDING, &md->flags);
22921da177e4SLinus Torvalds 		dm_table_postsuspend_targets(map);
22931da177e4SLinus Torvalds 	}
229483d5e5b0SMikulas Patocka 	/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
229583d5e5b0SMikulas Patocka 	dm_put_live_table(md, srcu_idx);
22962a708cffSJunichi Nomura 	mutex_unlock(&md->suspend_lock);
229783d5e5b0SMikulas Patocka 
22983f77316dSKiyoshi Ueda 	/*
22993f77316dSKiyoshi Ueda 	 * Rare, but there may be I/O requests still going to complete,
23003f77316dSKiyoshi Ueda 	 * for example.  Wait for all references to disappear.
23013f77316dSKiyoshi Ueda 	 * No one should increment the reference count of the mapped_device,
23023f77316dSKiyoshi Ueda 	 * after the mapped_device state becomes DMF_FREEING.
23033f77316dSKiyoshi Ueda 	 */
23043f77316dSKiyoshi Ueda 	if (wait)
23053f77316dSKiyoshi Ueda 		while (atomic_read(&md->holders))
23063f77316dSKiyoshi Ueda 			msleep(1);
23073f77316dSKiyoshi Ueda 	else if (atomic_read(&md->holders))
23083f77316dSKiyoshi Ueda 		DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
23093f77316dSKiyoshi Ueda 		       dm_device_name(md), atomic_read(&md->holders));
23103f77316dSKiyoshi Ueda 
2311a7940155SAlasdair G Kergon 	dm_table_destroy(__unbind(md));
23121da177e4SLinus Torvalds 	free_dev(md);
23131da177e4SLinus Torvalds }
23143f77316dSKiyoshi Ueda 
23153f77316dSKiyoshi Ueda void dm_destroy(struct mapped_device *md)
23163f77316dSKiyoshi Ueda {
23173f77316dSKiyoshi Ueda 	__dm_destroy(md, true);
23183f77316dSKiyoshi Ueda }
23193f77316dSKiyoshi Ueda 
23203f77316dSKiyoshi Ueda void dm_destroy_immediate(struct mapped_device *md)
23213f77316dSKiyoshi Ueda {
23223f77316dSKiyoshi Ueda 	__dm_destroy(md, false);
23233f77316dSKiyoshi Ueda }
23243f77316dSKiyoshi Ueda 
23253f77316dSKiyoshi Ueda void dm_put(struct mapped_device *md)
23263f77316dSKiyoshi Ueda {
23273f77316dSKiyoshi Ueda 	atomic_dec(&md->holders);
23281da177e4SLinus Torvalds }
232979eb885cSEdward Goggin EXPORT_SYMBOL_GPL(dm_put);
23301da177e4SLinus Torvalds 
23319f6dc633SMike Snitzer static bool dm_in_flight_bios(struct mapped_device *md)
233285067747SMing Lei {
233385067747SMing Lei 	int cpu;
23349f6dc633SMike Snitzer 	unsigned long sum = 0;
233585067747SMing Lei 
23369f6dc633SMike Snitzer 	for_each_possible_cpu(cpu)
23379f6dc633SMike Snitzer 		sum += *per_cpu_ptr(md->pending_io, cpu);
233885067747SMing Lei 
233985067747SMing Lei 	return sum != 0;
234085067747SMing Lei }
234185067747SMing Lei 
23422f064a59SPeter Zijlstra static int dm_wait_for_bios_completion(struct mapped_device *md, unsigned int task_state)
234346125c1cSMilan Broz {
234346125c1cSMilan Broz 	int r = 0;
23459f4c3f87SBart Van Assche 	DEFINE_WAIT(wait);
234646125c1cSMilan Broz 
234785067747SMing Lei 	while (true) {
23489f4c3f87SBart Van Assche 		prepare_to_wait(&md->wait, &wait, task_state);
234946125c1cSMilan Broz 
23509f6dc633SMike Snitzer 		if (!dm_in_flight_bios(md))
235146125c1cSMilan Broz 			break;
235246125c1cSMilan Broz 
2353e3fabdfdSBart Van Assche 		if (signal_pending_state(task_state, current)) {
235446125c1cSMilan Broz 			r = -EINTR;
235546125c1cSMilan Broz 			break;
235646125c1cSMilan Broz 		}
235746125c1cSMilan Broz 
235846125c1cSMilan Broz 		io_schedule();
235946125c1cSMilan Broz 	}
23609f4c3f87SBart Van Assche 	finish_wait(&md->wait, &wait);
2361b44ebeb0SMikulas Patocka 
23629f6dc633SMike Snitzer 	smp_rmb();
23639f6dc633SMike Snitzer 
236446125c1cSMilan Broz 	return r;
236546125c1cSMilan Broz }
236646125c1cSMilan Broz 
23672f064a59SPeter Zijlstra static int dm_wait_for_completion(struct mapped_device *md, unsigned int task_state)
236885067747SMing Lei {
236985067747SMing Lei 	int r = 0;
237085067747SMing Lei 
237185067747SMing Lei 	if (!queue_is_mq(md->queue))
237285067747SMing Lei 		return dm_wait_for_bios_completion(md, task_state);
237385067747SMing Lei 
237485067747SMing Lei 	while (true) {
237585067747SMing Lei 		if (!blk_mq_queue_inflight(md->queue))
237685067747SMing Lei 			break;
237785067747SMing Lei 
237885067747SMing Lei 		if (signal_pending_state(task_state, current)) {
237985067747SMing Lei 			r = -EINTR;
238085067747SMing Lei 			break;
238185067747SMing Lei 		}
238285067747SMing Lei 
238385067747SMing Lei 		msleep(5);
238485067747SMing Lei 	}
238585067747SMing Lei 
238685067747SMing Lei 	return r;
238785067747SMing Lei }
238885067747SMing Lei 
23891da177e4SLinus Torvalds /*
23901da177e4SLinus Torvalds  * Process the deferred bios
23911da177e4SLinus Torvalds  */
2392ef208587SMikulas Patocka static void dm_wq_work(struct work_struct *work)
23931da177e4SLinus Torvalds {
23940c2915b8SMike Snitzer 	struct mapped_device *md = container_of(work, struct mapped_device, work);
23950c2915b8SMike Snitzer 	struct
bio *bio; 2396ef208587SMikulas Patocka 23973b00b203SMikulas Patocka while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 2398022c2611SMikulas Patocka spin_lock_irq(&md->deferred_lock); 23990c2915b8SMike Snitzer bio = bio_list_pop(&md->deferred); 2400022c2611SMikulas Patocka spin_unlock_irq(&md->deferred_lock); 2401022c2611SMikulas Patocka 24020c2915b8SMike Snitzer if (!bio) 2403df12ee99SAlasdair G Kergon break; 240473d410c0SMilan Broz 24050c2915b8SMike Snitzer submit_bio_noacct(bio); 2406e6ee8c0bSKiyoshi Ueda } 24071da177e4SLinus Torvalds } 24081da177e4SLinus Torvalds 24099a1fb464SMikulas Patocka static void dm_queue_flush(struct mapped_device *md) 2410304f3f6aSMilan Broz { 24113b00b203SMikulas Patocka clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 24124e857c58SPeter Zijlstra smp_mb__after_atomic(); 241353d5914fSMikulas Patocka queue_work(md->wq, &md->work); 2414304f3f6aSMilan Broz } 2415304f3f6aSMilan Broz 24161da177e4SLinus Torvalds /* 2417042d2a9bSAlasdair G Kergon * Swap in a new table, returning the old one for the caller to destroy. 24181da177e4SLinus Torvalds */ 2419042d2a9bSAlasdair G Kergon struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) 24201da177e4SLinus Torvalds { 242187eb5b21SMike Christie struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL); 2422754c5fc7SMike Snitzer struct queue_limits limits; 2423042d2a9bSAlasdair G Kergon int r; 24241da177e4SLinus Torvalds 2425e61290a4SDaniel Walker mutex_lock(&md->suspend_lock); 24261da177e4SLinus Torvalds 24271da177e4SLinus Torvalds /* device must be suspended */ 24284f186f8bSKiyoshi Ueda if (!dm_suspended_md(md)) 242993c534aeSAlasdair G Kergon goto out; 24301da177e4SLinus Torvalds 24313ae70656SMike Snitzer /* 24323ae70656SMike Snitzer * If the new table has no data devices, retain the existing limits. 24333ae70656SMike Snitzer * This helps multipath with queue_if_no_path if all paths disappear, 24343ae70656SMike Snitzer * then new I/O is queued based on these limits, and then some paths 24353ae70656SMike Snitzer * reappear. 24363ae70656SMike Snitzer */ 24373ae70656SMike Snitzer if (dm_table_has_no_data_devices(table)) { 243883d5e5b0SMikulas Patocka live_map = dm_get_live_table_fast(md); 24393ae70656SMike Snitzer if (live_map) 24403ae70656SMike Snitzer limits = md->queue->limits; 244183d5e5b0SMikulas Patocka dm_put_live_table_fast(md); 24423ae70656SMike Snitzer } 24433ae70656SMike Snitzer 244487eb5b21SMike Christie if (!live_map) { 2445754c5fc7SMike Snitzer r = dm_calculate_queue_limits(table, &limits); 2446042d2a9bSAlasdair G Kergon if (r) { 2447042d2a9bSAlasdair G Kergon map = ERR_PTR(r); 2448754c5fc7SMike Snitzer goto out; 2449042d2a9bSAlasdair G Kergon } 245087eb5b21SMike Christie } 2451754c5fc7SMike Snitzer 2452042d2a9bSAlasdair G Kergon map = __bind(md, table, &limits); 245362e08243SMikulas Patocka dm_issue_global_event(); 24541da177e4SLinus Torvalds 245593c534aeSAlasdair G Kergon out: 2456e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock); 2457042d2a9bSAlasdair G Kergon return map; 24581da177e4SLinus Torvalds } 24591da177e4SLinus Torvalds 24601da177e4SLinus Torvalds /* 24611da177e4SLinus Torvalds * Functions to lock and unlock any filesystem running on the 24621da177e4SLinus Torvalds * device. 
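 *
 * Illustrative sketch of a hypothetical in-file caller (not part of the
 * upstream code), showing the intended pairing of the two static helpers
 * defined below; the name do_work_on_frozen_fs is invented here:
 *
 *	static int do_work_on_frozen_fs(struct mapped_device *md)
 *	{
 *		int r = lock_fs(md);	// freeze_bdev() + set DMF_FROZEN
 *		if (r)
 *			return r;	// filesystem could not be frozen
 *		// ... no new fs writes reach the device here ...
 *		unlock_fs(md);		// thaw_bdev() + clear DMF_FROZEN
 *		return 0;
 *	}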
24631da177e4SLinus Torvalds */ 24642ca3310eSAlasdair G Kergon static int lock_fs(struct mapped_device *md) 24651da177e4SLinus Torvalds { 2466e39e2e95SAlasdair G Kergon int r; 24671da177e4SLinus Torvalds 2468040f04bdSChristoph Hellwig WARN_ON(test_bit(DMF_FROZEN, &md->flags)); 2469dfbe03f6SAlasdair G Kergon 2470977115c0SChristoph Hellwig r = freeze_bdev(md->disk->part0); 2471040f04bdSChristoph Hellwig if (!r) 2472aa8d7c2fSAlasdair G Kergon set_bit(DMF_FROZEN, &md->flags); 2473040f04bdSChristoph Hellwig return r; 24741da177e4SLinus Torvalds } 24751da177e4SLinus Torvalds 24762ca3310eSAlasdair G Kergon static void unlock_fs(struct mapped_device *md) 24771da177e4SLinus Torvalds { 2478aa8d7c2fSAlasdair G Kergon if (!test_bit(DMF_FROZEN, &md->flags)) 2479aa8d7c2fSAlasdair G Kergon return; 2480977115c0SChristoph Hellwig thaw_bdev(md->disk->part0); 2481aa8d7c2fSAlasdair G Kergon clear_bit(DMF_FROZEN, &md->flags); 24821da177e4SLinus Torvalds } 24831da177e4SLinus Torvalds 24841da177e4SLinus Torvalds /* 2485b48633f8SBart Van Assche * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG 2486b48633f8SBart Van Assche * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE 2487b48633f8SBart Van Assche * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY 2488b48633f8SBart Van Assche * 2489ffcc3936SMike Snitzer * If __dm_suspend returns 0, the device is completely quiescent 2490ffcc3936SMike Snitzer * now. There is no request-processing activity. All new requests 2491ffcc3936SMike Snitzer * are being added to md->deferred list. 2492cec47e3dSKiyoshi Ueda */ 2493ffcc3936SMike Snitzer static int __dm_suspend(struct mapped_device *md, struct dm_table *map, 24942f064a59SPeter Zijlstra unsigned suspend_flags, unsigned int task_state, 2495eaf9a736SMike Snitzer int dmf_suspended_flag) 24961da177e4SLinus Torvalds { 2497ffcc3936SMike Snitzer bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG; 2498ffcc3936SMike Snitzer bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG; 2499ffcc3936SMike Snitzer int r; 2500cf222b37SAlasdair G Kergon 25015a8f1f80SBart Van Assche lockdep_assert_held(&md->suspend_lock); 25025a8f1f80SBart Van Assche 25032e93ccc1SKiyoshi Ueda /* 25042e93ccc1SKiyoshi Ueda * DMF_NOFLUSH_SUSPENDING must be set before presuspend. 25052e93ccc1SKiyoshi Ueda * This flag is cleared before dm_suspend returns. 25062e93ccc1SKiyoshi Ueda */ 25072e93ccc1SKiyoshi Ueda if (noflush) 25082e93ccc1SKiyoshi Ueda set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 250986331f39SBart Van Assche else 2510ac75b09fSMike Snitzer DMDEBUG("%s: suspending with flush", dm_device_name(md)); 25112e93ccc1SKiyoshi Ueda 2512d67ee213SMike Snitzer /* 2513d67ee213SMike Snitzer * This gets reverted if there's an error later and the targets 2514d67ee213SMike Snitzer * provide the .presuspend_undo hook. 2515d67ee213SMike Snitzer */ 25161da177e4SLinus Torvalds dm_table_presuspend_targets(map); 25171da177e4SLinus Torvalds 25182e93ccc1SKiyoshi Ueda /* 25199f518b27SKiyoshi Ueda * Flush I/O to the device. 25209f518b27SKiyoshi Ueda * Any I/O submitted after lock_fs() may not be flushed. 25219f518b27SKiyoshi Ueda * noflush takes precedence over do_lockfs. 25229f518b27SKiyoshi Ueda * (lock_fs() flushes I/Os and waits for them to complete.) 
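 *
 * The same rules as a decision table (illustrative summary of the code
 * below, not new behaviour):
 *
 *	noflush	do_lockfs	effect
 *	yes	any		no flush, no freeze; outstanding I/O is
 *				pushed back by the targets
 *	no	yes		lock_fs() flushes I/O and freezes the fs
 *	no	no		no freeze; in-flight I/O is only waited on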
25232e93ccc1SKiyoshi Ueda */ 252432a926daSMikulas Patocka if (!noflush && do_lockfs) { 25252ca3310eSAlasdair G Kergon r = lock_fs(md); 2526d67ee213SMike Snitzer if (r) { 2527d67ee213SMike Snitzer dm_table_presuspend_undo_targets(map); 2528ffcc3936SMike Snitzer return r; 2529aa8d7c2fSAlasdair G Kergon } 2530d67ee213SMike Snitzer } 25311da177e4SLinus Torvalds 25321da177e4SLinus Torvalds /* 25333b00b203SMikulas Patocka * Here we must make sure that no processes are submitting requests 25343b00b203SMikulas Patocka * to target drivers i.e. no one may be executing 253596c9865cSMike Snitzer * dm_split_and_process_bio from dm_submit_bio. 25363b00b203SMikulas Patocka * 253796c9865cSMike Snitzer * To get all processes out of dm_split_and_process_bio in dm_submit_bio, 25383b00b203SMikulas Patocka * we take the write lock. To prevent any process from reentering 253996c9865cSMike Snitzer * dm_split_and_process_bio from dm_submit_bio and quiesce the thread 25400cede372SMike Snitzer * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call 25416a8736d1STejun Heo * flush_workqueue(md->wq). 25421da177e4SLinus Torvalds */ 25431eb787ecSAlasdair G Kergon set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 254441abc4e1SHannes Reinecke if (map) 254583d5e5b0SMikulas Patocka synchronize_srcu(&md->io_barrier); 25461da177e4SLinus Torvalds 2547d0bcb878SKiyoshi Ueda /* 254829e4013dSTejun Heo * Stop md->queue before flushing md->wq in case request-based 254929e4013dSTejun Heo * dm defers requests to md->wq from md->queue. 2550d0bcb878SKiyoshi Ueda */ 25516a23e05cSJens Axboe if (dm_request_based(md)) 2552eca7ee6dSMike Snitzer dm_stop_queue(md->queue); 2553cec47e3dSKiyoshi Ueda 2554d0bcb878SKiyoshi Ueda flush_workqueue(md->wq); 2555d0bcb878SKiyoshi Ueda 25561da177e4SLinus Torvalds /* 25573b00b203SMikulas Patocka * At this point no more requests are entering target request routines. 25583b00b203SMikulas Patocka * We call dm_wait_for_completion to wait for all existing requests 25593b00b203SMikulas Patocka * to finish. 25601da177e4SLinus Torvalds */ 2561b48633f8SBart Van Assche r = dm_wait_for_completion(md, task_state); 2562eaf9a736SMike Snitzer if (!r) 2563eaf9a736SMike Snitzer set_bit(dmf_suspended_flag, &md->flags); 25641da177e4SLinus Torvalds 25656d6f10dfSMilan Broz if (noflush) 2566022c2611SMikulas Patocka clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 256741abc4e1SHannes Reinecke if (map) 256883d5e5b0SMikulas Patocka synchronize_srcu(&md->io_barrier); 25692e93ccc1SKiyoshi Ueda 25701da177e4SLinus Torvalds /* were we interrupted ? */ 257146125c1cSMilan Broz if (r < 0) { 25729a1fb464SMikulas Patocka dm_queue_flush(md); 257373d410c0SMilan Broz 2574cec47e3dSKiyoshi Ueda if (dm_request_based(md)) 2575eca7ee6dSMike Snitzer dm_start_queue(md->queue); 2576cec47e3dSKiyoshi Ueda 25772ca3310eSAlasdair G Kergon unlock_fs(md); 2578d67ee213SMike Snitzer dm_table_presuspend_undo_targets(map); 2579ffcc3936SMike Snitzer /* pushback list is already flushed, so skip flush */ 2580ffcc3936SMike Snitzer } 2581ffcc3936SMike Snitzer 2582ffcc3936SMike Snitzer return r; 25832ca3310eSAlasdair G Kergon } 25842ca3310eSAlasdair G Kergon 25853b00b203SMikulas Patocka /* 2586ffcc3936SMike Snitzer * We need to be able to change a mapping table under a mounted 2587ffcc3936SMike Snitzer * filesystem. For example we might want to move some data in 2588ffcc3936SMike Snitzer * the background. 
Before the table can be swapped with 2589ffcc3936SMike Snitzer * dm_bind_table, dm_suspend must be called to flush any in 2590ffcc3936SMike Snitzer * flight bios and ensure that any further io gets deferred. 25913b00b203SMikulas Patocka */ 2592ffcc3936SMike Snitzer /* 2593ffcc3936SMike Snitzer * Suspend mechanism in request-based dm. 2594ffcc3936SMike Snitzer * 2595ffcc3936SMike Snitzer * 1. Flush all I/Os by lock_fs() if needed. 2596ffcc3936SMike Snitzer * 2. Stop dispatching any I/O by stopping the request_queue. 2597ffcc3936SMike Snitzer * 3. Wait for all in-flight I/Os to be completed or requeued. 2598ffcc3936SMike Snitzer * 2599ffcc3936SMike Snitzer * To abort suspend, start the request_queue. 2600ffcc3936SMike Snitzer */ 2601ffcc3936SMike Snitzer int dm_suspend(struct mapped_device *md, unsigned suspend_flags) 2602ffcc3936SMike Snitzer { 2603ffcc3936SMike Snitzer struct dm_table *map = NULL; 2604ffcc3936SMike Snitzer int r = 0; 2605ffcc3936SMike Snitzer 2606ffcc3936SMike Snitzer retry: 2607ffcc3936SMike Snitzer mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); 2608ffcc3936SMike Snitzer 2609ffcc3936SMike Snitzer if (dm_suspended_md(md)) { 2610ffcc3936SMike Snitzer r = -EINVAL; 2611ffcc3936SMike Snitzer goto out_unlock; 2612ffcc3936SMike Snitzer } 2613ffcc3936SMike Snitzer 2614ffcc3936SMike Snitzer if (dm_suspended_internally_md(md)) { 2615ffcc3936SMike Snitzer /* already internally suspended, wait for internal resume */ 2616ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 2617ffcc3936SMike Snitzer r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); 2618ffcc3936SMike Snitzer if (r) 2619ffcc3936SMike Snitzer return r; 2620ffcc3936SMike Snitzer goto retry; 2621ffcc3936SMike Snitzer } 2622ffcc3936SMike Snitzer 2623a12f5d48SEric Dumazet map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2624ffcc3936SMike Snitzer 2625eaf9a736SMike Snitzer r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED); 2626ffcc3936SMike Snitzer if (r) 2627ffcc3936SMike Snitzer goto out_unlock; 26283b00b203SMikulas Patocka 26295df96f2bSMikulas Patocka set_bit(DMF_POST_SUSPENDING, &md->flags); 26304d4471cbSKiyoshi Ueda dm_table_postsuspend_targets(map); 26315df96f2bSMikulas Patocka clear_bit(DMF_POST_SUSPENDING, &md->flags); 26324d4471cbSKiyoshi Ueda 2633d287483dSAlasdair G Kergon out_unlock: 2634e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock); 2635cf222b37SAlasdair G Kergon return r; 26361da177e4SLinus Torvalds } 26371da177e4SLinus Torvalds 2638ffcc3936SMike Snitzer static int __dm_resume(struct mapped_device *md, struct dm_table *map) 26391da177e4SLinus Torvalds { 2640ffcc3936SMike Snitzer if (map) { 2641ffcc3936SMike Snitzer int r = dm_table_resume_targets(map); 26428757b776SMilan Broz if (r) 2643ffcc3936SMike Snitzer return r; 2644ffcc3936SMike Snitzer } 26452ca3310eSAlasdair G Kergon 26469a1fb464SMikulas Patocka dm_queue_flush(md); 26472ca3310eSAlasdair G Kergon 2648cec47e3dSKiyoshi Ueda /* 2649cec47e3dSKiyoshi Ueda * Flushing deferred I/Os must be done after targets are resumed 2650cec47e3dSKiyoshi Ueda * so that mapping of targets can work correctly. 2651cec47e3dSKiyoshi Ueda * Request-based dm is queueing the deferred I/Os in its request_queue. 
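 *
 * Ordering recap (descriptive only): dm_table_resume_targets() ran first,
 * dm_queue_flush() above re-enabled bio submission, and only now is the
 * request_queue restarted for request-based dm.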
2652cec47e3dSKiyoshi Ueda */ 2653cec47e3dSKiyoshi Ueda if (dm_request_based(md)) 2654eca7ee6dSMike Snitzer dm_start_queue(md->queue); 2655cec47e3dSKiyoshi Ueda 26562ca3310eSAlasdair G Kergon unlock_fs(md); 26572ca3310eSAlasdair G Kergon 2658ffcc3936SMike Snitzer return 0; 2659ffcc3936SMike Snitzer } 2660ffcc3936SMike Snitzer 2661ffcc3936SMike Snitzer int dm_resume(struct mapped_device *md) 2662ffcc3936SMike Snitzer { 26638dc23658SMinfei Huang int r; 2664ffcc3936SMike Snitzer struct dm_table *map = NULL; 2665ffcc3936SMike Snitzer 2666ffcc3936SMike Snitzer retry: 26678dc23658SMinfei Huang r = -EINVAL; 2668ffcc3936SMike Snitzer mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); 2669ffcc3936SMike Snitzer 2670ffcc3936SMike Snitzer if (!dm_suspended_md(md)) 2671ffcc3936SMike Snitzer goto out; 2672ffcc3936SMike Snitzer 2673ffcc3936SMike Snitzer if (dm_suspended_internally_md(md)) { 2674ffcc3936SMike Snitzer /* already internally suspended, wait for internal resume */ 2675ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 2676ffcc3936SMike Snitzer r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); 2677ffcc3936SMike Snitzer if (r) 2678ffcc3936SMike Snitzer return r; 2679ffcc3936SMike Snitzer goto retry; 2680ffcc3936SMike Snitzer } 2681ffcc3936SMike Snitzer 2682a12f5d48SEric Dumazet map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2683ffcc3936SMike Snitzer if (!map || !dm_table_get_size(map)) 2684ffcc3936SMike Snitzer goto out; 2685ffcc3936SMike Snitzer 2686ffcc3936SMike Snitzer r = __dm_resume(md, map); 2687ffcc3936SMike Snitzer if (r) 2688ffcc3936SMike Snitzer goto out; 2689ffcc3936SMike Snitzer 26902ca3310eSAlasdair G Kergon clear_bit(DMF_SUSPENDED, &md->flags); 2691cf222b37SAlasdair G Kergon out: 2692e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock); 26932ca3310eSAlasdair G Kergon 2694cf222b37SAlasdair G Kergon return r; 26951da177e4SLinus Torvalds } 26961da177e4SLinus Torvalds 2697fd2ed4d2SMikulas Patocka /* 2698fd2ed4d2SMikulas Patocka * Internal suspend/resume works like userspace-driven suspend. It waits 2699fd2ed4d2SMikulas Patocka * until all bios finish and prevents issuing new bios to the target drivers. 2700fd2ed4d2SMikulas Patocka * It may be used only from the kernel. 2701fd2ed4d2SMikulas Patocka */ 2702fd2ed4d2SMikulas Patocka 2703ffcc3936SMike Snitzer static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags) 2704ffcc3936SMike Snitzer { 2705ffcc3936SMike Snitzer struct dm_table *map = NULL; 2706ffcc3936SMike Snitzer 27071ea0654eSBart Van Assche lockdep_assert_held(&md->suspend_lock); 27081ea0654eSBart Van Assche 270996b26c8cSMikulas Patocka if (md->internal_suspend_count++) 2710ffcc3936SMike Snitzer return; /* nested internal suspend */ 2711ffcc3936SMike Snitzer 2712ffcc3936SMike Snitzer if (dm_suspended_md(md)) { 2713ffcc3936SMike Snitzer set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 2714ffcc3936SMike Snitzer return; /* nest suspend */ 2715ffcc3936SMike Snitzer } 2716ffcc3936SMike Snitzer 2717a12f5d48SEric Dumazet map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2718ffcc3936SMike Snitzer 2719ffcc3936SMike Snitzer /* 2720ffcc3936SMike Snitzer * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is 2721ffcc3936SMike Snitzer * supported. 
Properly supporting a TASK_INTERRUPTIBLE internal suspend 2722ffcc3936SMike Snitzer * would require changing .presuspend to return an error -- avoid this 2723ffcc3936SMike Snitzer * until there is a need for more elaborate variants of internal suspend. 2724ffcc3936SMike Snitzer */ 2725eaf9a736SMike Snitzer (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE, 2726eaf9a736SMike Snitzer DMF_SUSPENDED_INTERNALLY); 2727ffcc3936SMike Snitzer 27285df96f2bSMikulas Patocka set_bit(DMF_POST_SUSPENDING, &md->flags); 2729ffcc3936SMike Snitzer dm_table_postsuspend_targets(map); 27305df96f2bSMikulas Patocka clear_bit(DMF_POST_SUSPENDING, &md->flags); 2731ffcc3936SMike Snitzer } 2732ffcc3936SMike Snitzer 2733ffcc3936SMike Snitzer static void __dm_internal_resume(struct mapped_device *md) 2734ffcc3936SMike Snitzer { 273596b26c8cSMikulas Patocka BUG_ON(!md->internal_suspend_count); 273696b26c8cSMikulas Patocka 273796b26c8cSMikulas Patocka if (--md->internal_suspend_count) 2738ffcc3936SMike Snitzer return; /* resume from nested internal suspend */ 2739ffcc3936SMike Snitzer 2740ffcc3936SMike Snitzer if (dm_suspended_md(md)) 2741ffcc3936SMike Snitzer goto done; /* resume from nested suspend */ 2742ffcc3936SMike Snitzer 2743ffcc3936SMike Snitzer /* 2744ffcc3936SMike Snitzer * NOTE: existing callers don't need to call dm_table_resume_targets 2745ffcc3936SMike Snitzer * (which may fail -- so best to avoid it for now by passing NULL map) 2746ffcc3936SMike Snitzer */ 2747ffcc3936SMike Snitzer (void) __dm_resume(md, NULL); 2748ffcc3936SMike Snitzer 2749ffcc3936SMike Snitzer done: 2750ffcc3936SMike Snitzer clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 2751ffcc3936SMike Snitzer smp_mb__after_atomic(); 2752ffcc3936SMike Snitzer wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY); 2753ffcc3936SMike Snitzer } 2754ffcc3936SMike Snitzer 2755ffcc3936SMike Snitzer void dm_internal_suspend_noflush(struct mapped_device *md) 2756fd2ed4d2SMikulas Patocka { 2757fd2ed4d2SMikulas Patocka mutex_lock(&md->suspend_lock); 2758ffcc3936SMike Snitzer __dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG); 2759ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 2760ffcc3936SMike Snitzer } 2761ffcc3936SMike Snitzer EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush); 2762ffcc3936SMike Snitzer 2763ffcc3936SMike Snitzer void dm_internal_resume(struct mapped_device *md) 2764ffcc3936SMike Snitzer { 2765ffcc3936SMike Snitzer mutex_lock(&md->suspend_lock); 2766ffcc3936SMike Snitzer __dm_internal_resume(md); 2767ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 2768ffcc3936SMike Snitzer } 2769ffcc3936SMike Snitzer EXPORT_SYMBOL_GPL(dm_internal_resume); 2770ffcc3936SMike Snitzer 2771ffcc3936SMike Snitzer /* 2772ffcc3936SMike Snitzer * Fast variants of internal suspend/resume hold md->suspend_lock, 2773ffcc3936SMike Snitzer * which prevents interaction with userspace-driven suspend. 
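 *
 * Illustrative pairing for a hypothetical caller (not part of the
 * upstream code):
 *
 *	dm_internal_suspend_fast(md);	// acquires md->suspend_lock
 *	// ... inspect or modify the quiesced device ...
 *	dm_internal_resume_fast(md);	// releases md->suspend_lock
 *
 * The lock is taken in the suspend call and only dropped in the resume
 * call, so the two must always be used as a matched pair.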
2774ffcc3936SMike Snitzer */ 2775ffcc3936SMike Snitzer 2776ffcc3936SMike Snitzer void dm_internal_suspend_fast(struct mapped_device *md) 2777ffcc3936SMike Snitzer { 2778ffcc3936SMike Snitzer mutex_lock(&md->suspend_lock); 2779ffcc3936SMike Snitzer if (dm_suspended_md(md) || dm_suspended_internally_md(md)) 2780fd2ed4d2SMikulas Patocka return; 2781fd2ed4d2SMikulas Patocka 2782fd2ed4d2SMikulas Patocka set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 2783fd2ed4d2SMikulas Patocka synchronize_srcu(&md->io_barrier); 2784fd2ed4d2SMikulas Patocka flush_workqueue(md->wq); 2785fd2ed4d2SMikulas Patocka dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); 2786fd2ed4d2SMikulas Patocka } 2787b735fedeSMikulas Patocka EXPORT_SYMBOL_GPL(dm_internal_suspend_fast); 2788fd2ed4d2SMikulas Patocka 2789ffcc3936SMike Snitzer void dm_internal_resume_fast(struct mapped_device *md) 2790fd2ed4d2SMikulas Patocka { 2791ffcc3936SMike Snitzer if (dm_suspended_md(md) || dm_suspended_internally_md(md)) 2792fd2ed4d2SMikulas Patocka goto done; 2793fd2ed4d2SMikulas Patocka 2794fd2ed4d2SMikulas Patocka dm_queue_flush(md); 2795fd2ed4d2SMikulas Patocka 2796fd2ed4d2SMikulas Patocka done: 2797fd2ed4d2SMikulas Patocka mutex_unlock(&md->suspend_lock); 2798fd2ed4d2SMikulas Patocka } 2799b735fedeSMikulas Patocka EXPORT_SYMBOL_GPL(dm_internal_resume_fast); 2800fd2ed4d2SMikulas Patocka 28011da177e4SLinus Torvalds /*----------------------------------------------------------------- 28021da177e4SLinus Torvalds * Event notification. 28031da177e4SLinus Torvalds *---------------------------------------------------------------*/ 28043abf85b5SPeter Rajnoha int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, 280560935eb2SMilan Broz unsigned cookie) 280669267a30SAlasdair G Kergon { 28076958c1c6SMikulas Patocka int r; 28086958c1c6SMikulas Patocka unsigned noio_flag; 280960935eb2SMilan Broz char udev_cookie[DM_COOKIE_LENGTH]; 281060935eb2SMilan Broz char *envp[] = { udev_cookie, NULL }; 281160935eb2SMilan Broz 28126958c1c6SMikulas Patocka noio_flag = memalloc_noio_save(); 28136958c1c6SMikulas Patocka 281460935eb2SMilan Broz if (!cookie) 28156958c1c6SMikulas Patocka r = kobject_uevent(&disk_to_dev(md->disk)->kobj, action); 281660935eb2SMilan Broz else { 281760935eb2SMilan Broz snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u", 281860935eb2SMilan Broz DM_COOKIE_ENV_VAR_NAME, cookie); 28196958c1c6SMikulas Patocka r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj, 28203abf85b5SPeter Rajnoha action, envp); 282160935eb2SMilan Broz } 28226958c1c6SMikulas Patocka 28236958c1c6SMikulas Patocka memalloc_noio_restore(noio_flag); 28246958c1c6SMikulas Patocka 28256958c1c6SMikulas Patocka return r; 282669267a30SAlasdair G Kergon } 282769267a30SAlasdair G Kergon 28287a8c3d3bSMike Anderson uint32_t dm_next_uevent_seq(struct mapped_device *md) 28297a8c3d3bSMike Anderson { 28307a8c3d3bSMike Anderson return atomic_add_return(1, &md->uevent_seq); 28317a8c3d3bSMike Anderson } 28327a8c3d3bSMike Anderson 28331da177e4SLinus Torvalds uint32_t dm_get_event_nr(struct mapped_device *md) 28341da177e4SLinus Torvalds { 28351da177e4SLinus Torvalds return atomic_read(&md->event_nr); 28361da177e4SLinus Torvalds } 28371da177e4SLinus Torvalds 28381da177e4SLinus Torvalds int dm_wait_event(struct mapped_device *md, int event_nr) 28391da177e4SLinus Torvalds { 28401da177e4SLinus Torvalds return wait_event_interruptible(md->eventq, 28411da177e4SLinus Torvalds (event_nr != atomic_read(&md->event_nr))); 28421da177e4SLinus Torvalds } 28431da177e4SLinus Torvalds 
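/*
 * Illustrative sketch of the event-wait pattern served by the helpers
 * above (a hypothetical kernel-side caller; userspace reaches the same
 * logic through the DM_DEV_WAIT ioctl):
 *
 *	int ev = dm_get_event_nr(md);
 *	// ... a table swap or similar bumps md->event_nr ...
 *	if (dm_wait_event(md, ev))
 *		return -EINTR;	// woken by a signal, not by an event
 */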
28447a8c3d3bSMike Anderson void dm_uevent_add(struct mapped_device *md, struct list_head *elist) 28457a8c3d3bSMike Anderson { 28467a8c3d3bSMike Anderson unsigned long flags; 28477a8c3d3bSMike Anderson 28487a8c3d3bSMike Anderson spin_lock_irqsave(&md->uevent_lock, flags); 28497a8c3d3bSMike Anderson list_add(elist, &md->uevent_list); 28507a8c3d3bSMike Anderson spin_unlock_irqrestore(&md->uevent_lock, flags); 28517a8c3d3bSMike Anderson } 28527a8c3d3bSMike Anderson 28531da177e4SLinus Torvalds /* 28541da177e4SLinus Torvalds * The gendisk is only valid as long as you have a reference 28551da177e4SLinus Torvalds * count on 'md'. 28561da177e4SLinus Torvalds */ 28571da177e4SLinus Torvalds struct gendisk *dm_disk(struct mapped_device *md) 28581da177e4SLinus Torvalds { 28591da177e4SLinus Torvalds return md->disk; 28601da177e4SLinus Torvalds } 286165ff5b7dSSami Tolvanen EXPORT_SYMBOL_GPL(dm_disk); 28621da177e4SLinus Torvalds 2863784aae73SMilan Broz struct kobject *dm_kobject(struct mapped_device *md) 2864784aae73SMilan Broz { 28652995fa78SMikulas Patocka return &md->kobj_holder.kobj; 2866784aae73SMilan Broz } 2867784aae73SMilan Broz 2868784aae73SMilan Broz struct mapped_device *dm_get_from_kobject(struct kobject *kobj) 2869784aae73SMilan Broz { 2870784aae73SMilan Broz struct mapped_device *md; 2871784aae73SMilan Broz 28722995fa78SMikulas Patocka md = container_of(kobj, struct mapped_device, kobj_holder.kobj); 2873784aae73SMilan Broz 2874b9a41d21SHou Tao spin_lock(&_minor_lock); 2875b9a41d21SHou Tao if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { 2876b9a41d21SHou Tao md = NULL; 2877b9a41d21SHou Tao goto out; 2878b9a41d21SHou Tao } 2879784aae73SMilan Broz dm_get(md); 2880b9a41d21SHou Tao out: 2881b9a41d21SHou Tao spin_unlock(&_minor_lock); 2882b9a41d21SHou Tao 2883784aae73SMilan Broz return md; 2884784aae73SMilan Broz } 2885784aae73SMilan Broz 28864f186f8bSKiyoshi Ueda int dm_suspended_md(struct mapped_device *md) 28871da177e4SLinus Torvalds { 28881da177e4SLinus Torvalds return test_bit(DMF_SUSPENDED, &md->flags); 28891da177e4SLinus Torvalds } 28901da177e4SLinus Torvalds 28915df96f2bSMikulas Patocka static int dm_post_suspending_md(struct mapped_device *md) 28925df96f2bSMikulas Patocka { 28935df96f2bSMikulas Patocka return test_bit(DMF_POST_SUSPENDING, &md->flags); 28945df96f2bSMikulas Patocka } 28955df96f2bSMikulas Patocka 2896ffcc3936SMike Snitzer int dm_suspended_internally_md(struct mapped_device *md) 2897ffcc3936SMike Snitzer { 2898ffcc3936SMike Snitzer return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 2899ffcc3936SMike Snitzer } 2900ffcc3936SMike Snitzer 29012c140a24SMikulas Patocka int dm_test_deferred_remove_flag(struct mapped_device *md) 29022c140a24SMikulas Patocka { 29032c140a24SMikulas Patocka return test_bit(DMF_DEFERRED_REMOVE, &md->flags); 29042c140a24SMikulas Patocka } 29052c140a24SMikulas Patocka 290664dbce58SKiyoshi Ueda int dm_suspended(struct dm_target *ti) 290764dbce58SKiyoshi Ueda { 290833bd6f06SMike Snitzer return dm_suspended_md(ti->table->md); 290964dbce58SKiyoshi Ueda } 291064dbce58SKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_suspended); 291164dbce58SKiyoshi Ueda 29125df96f2bSMikulas Patocka int dm_post_suspending(struct dm_target *ti) 29135df96f2bSMikulas Patocka { 291433bd6f06SMike Snitzer return dm_post_suspending_md(ti->table->md); 29155df96f2bSMikulas Patocka } 29165df96f2bSMikulas Patocka EXPORT_SYMBOL_GPL(dm_post_suspending); 29175df96f2bSMikulas Patocka 29182e93ccc1SKiyoshi Ueda int dm_noflush_suspending(struct dm_target *ti) 29192e93ccc1SKiyoshi Ueda { 
292033bd6f06SMike Snitzer return __noflush_suspending(ti->table->md); 29212e93ccc1SKiyoshi Ueda } 29222e93ccc1SKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_noflush_suspending); 29232e93ccc1SKiyoshi Ueda 29247e0d574fSBart Van Assche struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type, 29250776aa0eSMike Snitzer unsigned integrity, unsigned per_io_data_size, 29260776aa0eSMike Snitzer unsigned min_pool_size) 2927e6ee8c0bSKiyoshi Ueda { 2928115485e8SMike Snitzer struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id); 292978d8e58aSMike Snitzer unsigned int pool_size = 0; 293064f52b0eSMike Snitzer unsigned int front_pad, io_front_pad; 29316f1c819cSKent Overstreet int ret; 2932e6ee8c0bSKiyoshi Ueda 2933e6ee8c0bSKiyoshi Ueda if (!pools) 29344e6e36c3SMike Snitzer return NULL; 2935e6ee8c0bSKiyoshi Ueda 293678d8e58aSMike Snitzer switch (type) { 293778d8e58aSMike Snitzer case DM_TYPE_BIO_BASED: 2938545ed20eSToshi Kani case DM_TYPE_DAX_BIO_BASED: 29390776aa0eSMike Snitzer pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size); 294062f26317SJeffle Xu front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + DM_TARGET_IO_BIO_OFFSET; 294162f26317SJeffle Xu io_front_pad = roundup(per_io_data_size, __alignof__(struct dm_io)) + DM_IO_BIO_OFFSET; 29426f1c819cSKent Overstreet ret = bioset_init(&pools->io_bs, pool_size, io_front_pad, 0); 29436f1c819cSKent Overstreet if (ret) 294464f52b0eSMike Snitzer goto out; 29456f1c819cSKent Overstreet if (integrity && bioset_integrity_create(&pools->io_bs, pool_size)) 2946eb8db831SChristoph Hellwig goto out; 294778d8e58aSMike Snitzer break; 294878d8e58aSMike Snitzer case DM_TYPE_REQUEST_BASED: 29490776aa0eSMike Snitzer pool_size = max(dm_get_reserved_rq_based_ios(), min_pool_size); 295078d8e58aSMike Snitzer front_pad = offsetof(struct dm_rq_clone_bio_info, clone); 2951591ddcfcSMike Snitzer /* per_io_data_size is used for blk-mq pdu at queue allocation */ 295278d8e58aSMike Snitzer break; 295378d8e58aSMike Snitzer default: 295478d8e58aSMike Snitzer BUG(); 295578d8e58aSMike Snitzer } 295678d8e58aSMike Snitzer 29576f1c819cSKent Overstreet ret = bioset_init(&pools->bs, pool_size, front_pad, 0); 29586f1c819cSKent Overstreet if (ret) 29595f015204SJun'ichi Nomura goto out; 2960e6ee8c0bSKiyoshi Ueda 29616f1c819cSKent Overstreet if (integrity && bioset_integrity_create(&pools->bs, pool_size)) 29625f015204SJun'ichi Nomura goto out; 2963a91a2785SMartin K. 
Petersen 2964e6ee8c0bSKiyoshi Ueda return pools; 296578d8e58aSMike Snitzer 29665f015204SJun'ichi Nomura out: 29675f015204SJun'ichi Nomura dm_free_md_mempools(pools); 2968e6ee8c0bSKiyoshi Ueda 29694e6e36c3SMike Snitzer return NULL; 2970e6ee8c0bSKiyoshi Ueda } 2971e6ee8c0bSKiyoshi Ueda 2972e6ee8c0bSKiyoshi Ueda void dm_free_md_mempools(struct dm_md_mempools *pools) 2973e6ee8c0bSKiyoshi Ueda { 2974e6ee8c0bSKiyoshi Ueda if (!pools) 2975e6ee8c0bSKiyoshi Ueda return; 2976e6ee8c0bSKiyoshi Ueda 29776f1c819cSKent Overstreet bioset_exit(&pools->bs); 29786f1c819cSKent Overstreet bioset_exit(&pools->io_bs); 2979e6ee8c0bSKiyoshi Ueda 2980e6ee8c0bSKiyoshi Ueda kfree(pools); 2981e6ee8c0bSKiyoshi Ueda } 2982e6ee8c0bSKiyoshi Ueda 29839c72bad1SChristoph Hellwig struct dm_pr { 29849c72bad1SChristoph Hellwig u64 old_key; 29859c72bad1SChristoph Hellwig u64 new_key; 29869c72bad1SChristoph Hellwig u32 flags; 29879c72bad1SChristoph Hellwig bool fail_early; 29889c72bad1SChristoph Hellwig }; 29899c72bad1SChristoph Hellwig 29909c72bad1SChristoph Hellwig static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn, 29919c72bad1SChristoph Hellwig void *data) 29929c72bad1SChristoph Hellwig { 29939c72bad1SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data; 29949c72bad1SChristoph Hellwig struct dm_table *table; 29959c72bad1SChristoph Hellwig struct dm_target *ti; 29969c72bad1SChristoph Hellwig int ret = -ENOTTY, srcu_idx; 29979c72bad1SChristoph Hellwig 29989c72bad1SChristoph Hellwig table = dm_get_live_table(md, &srcu_idx); 29999c72bad1SChristoph Hellwig if (!table || !dm_table_get_size(table)) 30009c72bad1SChristoph Hellwig goto out; 30019c72bad1SChristoph Hellwig 30029c72bad1SChristoph Hellwig /* We only support devices that have a single target */ 30039c72bad1SChristoph Hellwig if (dm_table_get_num_targets(table) != 1) 30049c72bad1SChristoph Hellwig goto out; 30059c72bad1SChristoph Hellwig ti = dm_table_get_target(table, 0); 30069c72bad1SChristoph Hellwig 30079c72bad1SChristoph Hellwig ret = -EINVAL; 30089c72bad1SChristoph Hellwig if (!ti->type->iterate_devices) 30099c72bad1SChristoph Hellwig goto out; 30109c72bad1SChristoph Hellwig 30119c72bad1SChristoph Hellwig ret = ti->type->iterate_devices(ti, fn, data); 30129c72bad1SChristoph Hellwig out: 30139c72bad1SChristoph Hellwig dm_put_live_table(md, srcu_idx); 30149c72bad1SChristoph Hellwig return ret; 30159c72bad1SChristoph Hellwig } 30169c72bad1SChristoph Hellwig 30179c72bad1SChristoph Hellwig /* 30189c72bad1SChristoph Hellwig * For register / unregister we need to manually call out to every path. 
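 *
 * The callout below is invoked once per underlying device via
 * dm_call_pr() above, so e.g. a multipath device registers the key on
 * every path rather than only on whichever path a plain ioctl would
 * happen to reach.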
30199c72bad1SChristoph Hellwig */ 30209c72bad1SChristoph Hellwig static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev, 30219c72bad1SChristoph Hellwig sector_t start, sector_t len, void *data) 30229c72bad1SChristoph Hellwig { 30239c72bad1SChristoph Hellwig struct dm_pr *pr = data; 30249c72bad1SChristoph Hellwig const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; 30259c72bad1SChristoph Hellwig 30269c72bad1SChristoph Hellwig if (!ops || !ops->pr_register) 30279c72bad1SChristoph Hellwig return -EOPNOTSUPP; 30289c72bad1SChristoph Hellwig return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags); 30299c72bad1SChristoph Hellwig } 30309c72bad1SChristoph Hellwig 303171cdb697SChristoph Hellwig static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key, 303271cdb697SChristoph Hellwig u32 flags) 303371cdb697SChristoph Hellwig { 30349c72bad1SChristoph Hellwig struct dm_pr pr = { 30359c72bad1SChristoph Hellwig .old_key = old_key, 30369c72bad1SChristoph Hellwig .new_key = new_key, 30379c72bad1SChristoph Hellwig .flags = flags, 30389c72bad1SChristoph Hellwig .fail_early = true, 30399c72bad1SChristoph Hellwig }; 30409c72bad1SChristoph Hellwig int ret; 304171cdb697SChristoph Hellwig 30429c72bad1SChristoph Hellwig ret = dm_call_pr(bdev, __dm_pr_register, &pr); 30439c72bad1SChristoph Hellwig if (ret && new_key) { 30449c72bad1SChristoph Hellwig /* unregister all paths if we failed to register any path */ 30459c72bad1SChristoph Hellwig pr.old_key = new_key; 30469c72bad1SChristoph Hellwig pr.new_key = 0; 30479c72bad1SChristoph Hellwig pr.flags = 0; 30489c72bad1SChristoph Hellwig pr.fail_early = false; 30499c72bad1SChristoph Hellwig dm_call_pr(bdev, __dm_pr_register, &pr); 30509c72bad1SChristoph Hellwig } 305171cdb697SChristoph Hellwig 30529c72bad1SChristoph Hellwig return ret; 305371cdb697SChristoph Hellwig } 305471cdb697SChristoph Hellwig 305571cdb697SChristoph Hellwig static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type, 305671cdb697SChristoph Hellwig u32 flags) 305771cdb697SChristoph Hellwig { 305871cdb697SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data; 305971cdb697SChristoph Hellwig const struct pr_ops *ops; 3060971888c4SMike Snitzer int r, srcu_idx; 306171cdb697SChristoph Hellwig 30625bd5e8d8SMike Snitzer r = dm_prepare_ioctl(md, &srcu_idx, &bdev); 306371cdb697SChristoph Hellwig if (r < 0) 3064971888c4SMike Snitzer goto out; 306571cdb697SChristoph Hellwig 306671cdb697SChristoph Hellwig ops = bdev->bd_disk->fops->pr_ops; 306771cdb697SChristoph Hellwig if (ops && ops->pr_reserve) 306871cdb697SChristoph Hellwig r = ops->pr_reserve(bdev, key, type, flags); 306971cdb697SChristoph Hellwig else 307071cdb697SChristoph Hellwig r = -EOPNOTSUPP; 3071971888c4SMike Snitzer out: 3072971888c4SMike Snitzer dm_unprepare_ioctl(md, srcu_idx); 307371cdb697SChristoph Hellwig return r; 307471cdb697SChristoph Hellwig } 307571cdb697SChristoph Hellwig 307671cdb697SChristoph Hellwig static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type) 307771cdb697SChristoph Hellwig { 307871cdb697SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data; 307971cdb697SChristoph Hellwig const struct pr_ops *ops; 3080971888c4SMike Snitzer int r, srcu_idx; 308171cdb697SChristoph Hellwig 30825bd5e8d8SMike Snitzer r = dm_prepare_ioctl(md, &srcu_idx, &bdev); 308371cdb697SChristoph Hellwig if (r < 0) 3084971888c4SMike Snitzer goto out; 308571cdb697SChristoph Hellwig 308671cdb697SChristoph Hellwig ops = 
bdev->bd_disk->fops->pr_ops; 308771cdb697SChristoph Hellwig if (ops && ops->pr_release) 308871cdb697SChristoph Hellwig r = ops->pr_release(bdev, key, type); 308971cdb697SChristoph Hellwig else 309071cdb697SChristoph Hellwig r = -EOPNOTSUPP; 3091971888c4SMike Snitzer out: 3092971888c4SMike Snitzer dm_unprepare_ioctl(md, srcu_idx); 309371cdb697SChristoph Hellwig return r; 309471cdb697SChristoph Hellwig } 309571cdb697SChristoph Hellwig 309671cdb697SChristoph Hellwig static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key, 309771cdb697SChristoph Hellwig enum pr_type type, bool abort) 309871cdb697SChristoph Hellwig { 309971cdb697SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data; 310071cdb697SChristoph Hellwig const struct pr_ops *ops; 3101971888c4SMike Snitzer int r, srcu_idx; 310271cdb697SChristoph Hellwig 31035bd5e8d8SMike Snitzer r = dm_prepare_ioctl(md, &srcu_idx, &bdev); 310471cdb697SChristoph Hellwig if (r < 0) 3105971888c4SMike Snitzer goto out; 310671cdb697SChristoph Hellwig 310771cdb697SChristoph Hellwig ops = bdev->bd_disk->fops->pr_ops; 310871cdb697SChristoph Hellwig if (ops && ops->pr_preempt) 310971cdb697SChristoph Hellwig r = ops->pr_preempt(bdev, old_key, new_key, type, abort); 311071cdb697SChristoph Hellwig else 311171cdb697SChristoph Hellwig r = -EOPNOTSUPP; 3112971888c4SMike Snitzer out: 3113971888c4SMike Snitzer dm_unprepare_ioctl(md, srcu_idx); 311471cdb697SChristoph Hellwig return r; 311571cdb697SChristoph Hellwig } 311671cdb697SChristoph Hellwig 311771cdb697SChristoph Hellwig static int dm_pr_clear(struct block_device *bdev, u64 key) 311871cdb697SChristoph Hellwig { 311971cdb697SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data; 312071cdb697SChristoph Hellwig const struct pr_ops *ops; 3121971888c4SMike Snitzer int r, srcu_idx; 312271cdb697SChristoph Hellwig 31235bd5e8d8SMike Snitzer r = dm_prepare_ioctl(md, &srcu_idx, &bdev); 312471cdb697SChristoph Hellwig if (r < 0) 3125971888c4SMike Snitzer goto out; 312671cdb697SChristoph Hellwig 312771cdb697SChristoph Hellwig ops = bdev->bd_disk->fops->pr_ops; 312871cdb697SChristoph Hellwig if (ops && ops->pr_clear) 312971cdb697SChristoph Hellwig r = ops->pr_clear(bdev, key); 313071cdb697SChristoph Hellwig else 313171cdb697SChristoph Hellwig r = -EOPNOTSUPP; 3132971888c4SMike Snitzer out: 3133971888c4SMike Snitzer dm_unprepare_ioctl(md, srcu_idx); 313471cdb697SChristoph Hellwig return r; 313571cdb697SChristoph Hellwig } 313671cdb697SChristoph Hellwig 313771cdb697SChristoph Hellwig static const struct pr_ops dm_pr_ops = { 313871cdb697SChristoph Hellwig .pr_register = dm_pr_register, 313971cdb697SChristoph Hellwig .pr_reserve = dm_pr_reserve, 314071cdb697SChristoph Hellwig .pr_release = dm_pr_release, 314171cdb697SChristoph Hellwig .pr_preempt = dm_pr_preempt, 314271cdb697SChristoph Hellwig .pr_clear = dm_pr_clear, 314371cdb697SChristoph Hellwig }; 314471cdb697SChristoph Hellwig 314583d5cde4SAlexey Dobriyan static const struct block_device_operations dm_blk_dops = { 3146c62b37d9SChristoph Hellwig .submit_bio = dm_submit_bio, 3147b99fdcdcSMing Lei .poll_bio = dm_poll_bio, 31481da177e4SLinus Torvalds .open = dm_blk_open, 31491da177e4SLinus Torvalds .release = dm_blk_close, 3150aa129a22SMilan Broz .ioctl = dm_blk_ioctl, 31513ac51e74SDarrick J. 
Wong .getgeo = dm_blk_getgeo, 3152e76239a3SChristoph Hellwig .report_zones = dm_blk_report_zones, 315371cdb697SChristoph Hellwig .pr_ops = &dm_pr_ops, 31541da177e4SLinus Torvalds .owner = THIS_MODULE 31551da177e4SLinus Torvalds }; 31561da177e4SLinus Torvalds 3157681cc5e8SMike Snitzer static const struct block_device_operations dm_rq_blk_dops = { 3158681cc5e8SMike Snitzer .open = dm_blk_open, 3159681cc5e8SMike Snitzer .release = dm_blk_close, 3160681cc5e8SMike Snitzer .ioctl = dm_blk_ioctl, 3161681cc5e8SMike Snitzer .getgeo = dm_blk_getgeo, 3162681cc5e8SMike Snitzer .pr_ops = &dm_pr_ops, 3163681cc5e8SMike Snitzer .owner = THIS_MODULE 3164681cc5e8SMike Snitzer }; 3165681cc5e8SMike Snitzer 3166f26c5719SDan Williams static const struct dax_operations dm_dax_ops = { 3167f26c5719SDan Williams .direct_access = dm_dax_direct_access, 3168cdf6cdcdSVivek Goyal .zero_page_range = dm_dax_zero_page_range, 3169047218ecSJane Chu .recovery_write = dm_dax_recovery_write, 3170f26c5719SDan Williams }; 3171f26c5719SDan Williams 31721da177e4SLinus Torvalds /* 31731da177e4SLinus Torvalds * module hooks 31741da177e4SLinus Torvalds */ 31751da177e4SLinus Torvalds module_init(dm_init); 31761da177e4SLinus Torvalds module_exit(dm_exit); 31771da177e4SLinus Torvalds 31781da177e4SLinus Torvalds module_param(major, uint, 0); 31791da177e4SLinus Torvalds MODULE_PARM_DESC(major, "The major number of the device mapper"); 3180f4790826SMike Snitzer 3181e8603136SMike Snitzer module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR); 3182e8603136SMike Snitzer MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools"); 3183e8603136SMike Snitzer 3184115485e8SMike Snitzer module_param(dm_numa_node, int, S_IRUGO | S_IWUSR); 3185115485e8SMike Snitzer MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations"); 3186115485e8SMike Snitzer 3187a666e5c0SMikulas Patocka module_param(swap_bios, int, S_IRUGO | S_IWUSR); 3188a666e5c0SMikulas Patocka MODULE_PARM_DESC(swap_bios, "Maximum allowed inflight swap IOs"); 3189a666e5c0SMikulas Patocka 31901da177e4SLinus Torvalds MODULE_DESCRIPTION(DM_NAME " driver"); 31911da177e4SLinus Torvalds MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); 31921da177e4SLinus Torvalds MODULE_LICENSE("GPL"); 3193
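/*
 * Illustrative module load using the parameters declared above
 * (hypothetical values; -1 assumed to mean "no NUMA preference"):
 *
 *	modprobe dm_mod major=0 reserved_bio_based_ios=32 dm_numa_node=-1
 *
 * major=0 (the default) asks the block layer to allocate a major number
 * dynamically instead of using a fixed one.
 */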