/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched/signal.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/uio.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/pr.h>
#include <linux/refcount.h>

#define DM_MSG_PREFIX "core"

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_IDR(_minor_idr);

static DEFINE_SPINLOCK(_minor_lock);

static void do_deferred_remove(struct work_struct *w);

static DECLARE_WORK(deferred_remove_work, do_deferred_remove);

static struct workqueue_struct *deferred_remove_workqueue;

atomic_t dm_global_event_nr = ATOMIC_INIT(0);
DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);

void dm_issue_global_event(void)
{
	atomic_inc(&dm_global_event_nr);
	wake_up(&dm_global_eventq);
}

/*
 * One of these is allocated (on-stack) per original bio.
 */
struct clone_info {
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	unsigned sector_count;
};

/*
 * One of these is allocated per clone bio.
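 * The first clone of an original bio is embedded in struct dm_io
 * (inside_dm_io is true); additional clones are allocated from the bio_set.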
 */
#define DM_TIO_MAGIC 7282014
struct dm_target_io {
	unsigned magic;
	struct dm_io *io;
	struct dm_target *ti;
	unsigned target_bio_nr;
	unsigned *len_ptr;
	bool inside_dm_io;
	struct bio clone;
};

/*
 * One of these is allocated per original bio.
 * It contains the first clone used for that original.
 */
#define DM_IO_MAGIC 5191977
struct dm_io {
	unsigned magic;
	struct mapped_device *md;
	blk_status_t status;
	atomic_t io_count;
	struct bio *orig_bio;
	unsigned long start_time;
	spinlock_t endio_lock;
	struct dm_stats_aux stats_aux;
	/* last member of dm_target_io is 'struct bio' */
	struct dm_target_io tio;
};

void *dm_per_bio_data(struct bio *bio, size_t data_size)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	if (!tio->inside_dm_io)
		return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
	return (char *)bio - offsetof(struct dm_target_io, clone) - offsetof(struct dm_io, tio) - data_size;
}
EXPORT_SYMBOL_GPL(dm_per_bio_data);

struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
{
	struct dm_io *io = (struct dm_io *)((char *)data + data_size);
	if (io->magic == DM_IO_MAGIC)
		return (struct bio *)((char *)io + offsetof(struct dm_io, tio) + offsetof(struct dm_target_io, clone));
	BUG_ON(io->magic != DM_TIO_MAGIC);
	return (struct bio *)((char *)io + offsetof(struct dm_target_io, clone));
}
EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data);

unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
{
	return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
}
EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);

#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
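 * They are manipulated atomically with set_bit()/clear_bit()/test_bit();
 * see e.g. dm_deleting_md() and dm_lock_for_deletion() below.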
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7

#define DM_NUMA_NODE NUMA_NO_NODE
static int dm_numa_node = DM_NUMA_NODE;

/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
	struct bio_set bs;
	struct bio_set io_bs;
};

struct table_device {
	struct list_head list;
	refcount_t count;
	struct dm_dev dm_dev;
};

static struct kmem_cache *_rq_tio_cache;
static struct kmem_cache *_rq_cache;

/*
 * Bio-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_BIO_BASED_IOS 16
static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;

static int __dm_get_module_param_int(int *module_param, int min, int max)
{
	int param = READ_ONCE(*module_param);
	int modified_param = 0;
	bool modified = true;

	if (param < min)
		modified_param = min;
	else if (param > max)
		modified_param = max;
	else
		modified = false;

	if (modified) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned __dm_get_module_param(unsigned *module_param,
			       unsigned def, unsigned max)
{
	unsigned param = READ_ONCE(*module_param);
	unsigned modified_param = 0;

	if (!param)
		modified_param = def;
	else if (param > max)
		modified_param = max;

	if (modified_param) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned dm_get_reserved_bio_based_ios(void)
{
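	/*
	 * Fall back to RESERVED_BIO_BASED_IOS when the module parameter is
	 * zero and cap it at DM_RESERVED_MAX_IOS (see __dm_get_module_param).
	 */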
	return __dm_get_module_param(&reserved_bio_based_ios,
				     RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);

static unsigned dm_get_numa_node(void)
{
	return __dm_get_module_param_int(&dm_numa_node,
					 DM_NUMA_NODE, num_online_nodes() - 1);
}

static int __init local_init(void)
{
	int r = -ENOMEM;

	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
	if (!_rq_tio_cache)
		return r;

	_rq_cache = kmem_cache_create("dm_old_clone_request", sizeof(struct request),
				      __alignof__(struct request), 0, NULL);
	if (!_rq_cache)
		goto out_free_rq_tio_cache;

	r = dm_uevent_init();
	if (r)
		goto out_free_rq_cache;

	deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
	if (!deferred_remove_workqueue) {
		r = -ENOMEM;
		goto out_uevent_exit;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_free_workqueue;

	if (!_major)
		_major = r;

	return 0;

out_free_workqueue:
	destroy_workqueue(deferred_remove_workqueue);
out_uevent_exit:
	dm_uevent_exit();
out_free_rq_cache:
	kmem_cache_destroy(_rq_cache);
out_free_rq_tio_cache:
	kmem_cache_destroy(_rq_tio_cache);

	return r;
}

static void local_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(deferred_remove_workqueue);

	kmem_cache_destroy(_rq_cache);
	kmem_cache_destroy(_rq_tio_cache);
	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_io_init,
	dm_kcopyd_init,
	dm_interface_init,
	dm_statistics_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_io_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
	dm_statistics_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();

	/*
	 * Should be empty by this point.
	 */
	idr_destroy(&_minor_idr);
}

/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{
	return test_bit(DMF_DELETING, &md->flags);
}

static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);
out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}

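/*
 * Drop the reference taken in dm_blk_open(); the last closer kicks off
 * any device removal that was deferred while the device was still open.
 */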
static void dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = disk->private_data;
	if (WARN_ON(!md))
		goto out;

	if (atomic_dec_and_test(&md->open_count) &&
	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
		queue_work(deferred_remove_workqueue, &deferred_remove_work);

	dm_put(md);
out:
	spin_unlock(&_minor_lock);
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md)) {
		r = -EBUSY;
		if (mark_deferred)
			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
		r = -EEXIST;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

int dm_cancel_deferred_remove(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (test_bit(DMF_DELETING, &md->flags))
		r = -EBUSY;
	else
		clear_bit(DMF_DEFERRED_REMOVE, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static void do_deferred_remove(struct work_struct *w)
{
	dm_deferred_remove();
}

sector_t dm_get_size(struct mapped_device *md)
{
	return get_capacity(md->disk);
}

struct request_queue *dm_get_md_queue(struct mapped_device *md)
{
	return md->queue;
}

struct dm_stats *dm_get_stats(struct mapped_device *md)
{
	return &md->stats;
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

static int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
			       struct blk_zone *zones, unsigned int *nr_zones,
			       gfp_t gfp_mask)
{
#ifdef CONFIG_BLK_DEV_ZONED
	struct mapped_device *md = disk->private_data;
	struct dm_target *tgt;
	struct dm_table *map;
	int srcu_idx, ret;

	if (dm_suspended_md(md))
		return -EAGAIN;

	map = dm_get_live_table(md, &srcu_idx);
	if (!map)
		return -EIO;

	tgt = dm_table_find_target(map, sector);
	if (!dm_target_is_valid(tgt)) {
		ret = -EIO;
		goto out;
	}

	/*
	 * If we are executing this, we already know that the block device
	 * is a zoned device and so each target should have support for that
	 * type of drive. A missing report_zones method means that the target
	 * driver has a problem.
	 */
	if (WARN_ON(!tgt->type->report_zones)) {
		ret = -EIO;
		goto out;
	}

	/*
	 * blkdev_report_zones() will loop and call this again to cover all the
	 * zones of the target, eventually moving on to the next target.
	 * So there is no need to loop here trying to fill the entire array
	 * of zones.
	 */
	ret = tgt->type->report_zones(tgt, sector, zones,
				      nr_zones, gfp_mask);

out:
	dm_put_live_table(md, srcu_idx);
	return ret;
#else
	return -ENOTSUPP;
#endif
}

static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
			    struct block_device **bdev)
	__acquires(md->io_barrier)
{
	struct dm_target *tgt;
	struct dm_table *map;
	int r;

retry:
	r = -ENOTTY;
	map = dm_get_live_table(md, srcu_idx);
	if (!map || !dm_table_get_size(map))
		return r;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		return r;

	tgt = dm_table_get_target(map, 0);
	if (!tgt->type->prepare_ioctl)
		return r;

	if (dm_suspended_md(md))
		return -EAGAIN;

	r = tgt->type->prepare_ioctl(tgt, bdev);
	if (r == -ENOTCONN && !fatal_signal_pending(current)) {
		dm_put_live_table(md, *srcu_idx);
		msleep(10);
		goto retry;
	}

	return r;
}

static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
	__releases(md->io_barrier)
{
	dm_put_live_table(md, srcu_idx);
}

static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	if (r > 0) {
		/*
		 * Target determined this ioctl is being issued against a
		 * subset of the parent bdev; require extra privileges.
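		 * Forwarding the ioctl would act on the whole underlying
		 * device, beyond the region this dm device maps, so insist
		 * on CAP_SYS_RAWIO.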
		 */
		if (!capable(CAP_SYS_RAWIO)) {
			DMWARN_LIMIT(
				"%s: sending ioctl %x to DM device without required privilege.",
				current->comm, cmd);
			r = -ENOIOCTLCMD;
			goto out;
		}
	}

	r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}

static void start_io_acct(struct dm_io *io);

static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
{
	struct dm_io *io;
	struct dm_target_io *tio;
	struct bio *clone;

	clone = bio_alloc_bioset(GFP_NOIO, 0, &md->io_bs);
	if (!clone)
		return NULL;

	tio = container_of(clone, struct dm_target_io, clone);
	tio->inside_dm_io = true;
	tio->io = NULL;

	io = container_of(tio, struct dm_io, tio);
	io->magic = DM_IO_MAGIC;
	io->status = 0;
	atomic_set(&io->io_count, 1);
	io->orig_bio = bio;
	io->md = md;
	spin_lock_init(&io->endio_lock);

	start_io_acct(io);

	return io;
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	bio_put(&io->tio.clone);
}

static struct dm_target_io *alloc_tio(struct clone_info *ci, struct dm_target *ti,
				      unsigned target_bio_nr, gfp_t gfp_mask)
{
	struct dm_target_io *tio;

	if (!ci->io->tio.io) {
		/* the dm_target_io embedded in ci->io is available */
		tio = &ci->io->tio;
	} else {
		struct bio *clone = bio_alloc_bioset(gfp_mask, 0, &ci->io->md->bs);
		if (!clone)
			return NULL;

		tio = container_of(clone, struct dm_target_io, clone);
		tio->inside_dm_io = false;
	}

	tio->magic = DM_TIO_MAGIC;
	tio->io = ci->io;
	tio->ti = ti;
	tio->target_bio_nr = target_bio_nr;

	return tio;
}

static void free_tio(struct dm_target_io *tio)
{
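	/* A tio embedded in its dm_io is released with the dm_io in free_io() */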
	if (tio->inside_dm_io)
		return;
	bio_put(&tio->clone);
}

static bool md_in_flight(struct mapped_device *md)
{
	int cpu;
	struct hd_struct *part = &dm_disk(md)->part0;

	for_each_possible_cpu(cpu) {
		if (part_stat_local_read_cpu(part, in_flight[0], cpu) ||
		    part_stat_local_read_cpu(part, in_flight[1], cpu))
			return true;
	}

	return false;
}

static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->orig_bio;

	io->start_time = jiffies;

	generic_start_io_acct(md->queue, bio_op(bio), bio_sectors(bio),
			      &dm_disk(md)->part0);

	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio_data_dir(bio),
				    bio->bi_iter.bi_sector, bio_sectors(bio),
				    false, 0, &io->stats_aux);
}

static void end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->orig_bio;
	unsigned long duration = jiffies - io->start_time;

	generic_end_io_acct(md->queue, bio_op(bio), &dm_disk(md)->part0,
			    io->start_time);

	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio_data_dir(bio),
				    bio->bi_iter.bi_sector, bio_sectors(bio),
				    true, duration, &io->stats_aux);

	/* nudge anyone waiting on suspend queue */
	if (unlikely(waitqueue_active(&md->wait))) {
		if (!md_in_flight(md))
			wake_up(&md->wait);
	}
}

/*
 * Add the bio to the list of deferred io.
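 * Bios on this list are later resubmitted by the md->work worker once the
 * device is no longer blocked for suspend.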
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&md->deferred_lock, flags);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irqrestore(&md->deferred_lock, flags);
	queue_work(md->wq, &md->work);
}

/*
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_put_live_table() when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
{
	*srcu_idx = srcu_read_lock(&md->io_barrier);

	return srcu_dereference(md->map, &md->io_barrier);
}

void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
{
	srcu_read_unlock(&md->io_barrier, srcu_idx);
}

void dm_sync_table(struct mapped_device *md)
{
	synchronize_srcu(&md->io_barrier);
	synchronize_rcu_expedited();
}

/*
 * A fast alternative to dm_get_live_table/dm_put_live_table.
 * The caller must not block between these two functions.
 */
static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
{
	rcu_read_lock();
	return rcu_dereference(md->map);
}

static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
{
	rcu_read_unlock();
}

static char *_dm_claim_ptr = "I belong to device-mapper";

/*
 * Open a table device so we can use it as a map destination.
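 * The device is claimed exclusively (FMODE_EXCL) with _dm_claim_ptr as the
 * holder, and is linked to the mapped device's gendisk as a holder disk.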
 */
static int open_table_device(struct table_device *td, dev_t dev,
			     struct mapped_device *md)
{
	struct block_device *bdev;

	int r;

	BUG_ON(td->dm_dev.bdev);

	bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _dm_claim_ptr);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	r = bd_link_disk_holder(bdev, dm_disk(md));
	if (r) {
		blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
		return r;
	}

	td->dm_dev.bdev = bdev;
	td->dm_dev.dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	return 0;
}

/*
 * Close a table device that we've been using.
 */
static void close_table_device(struct table_device *td, struct mapped_device *md)
{
	if (!td->dm_dev.bdev)
		return;

	bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
	blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
	put_dax(td->dm_dev.dax_dev);
	td->dm_dev.bdev = NULL;
	td->dm_dev.dax_dev = NULL;
}

static struct table_device *find_table_device(struct list_head *l, dev_t dev,
					      fmode_t mode) {
	struct table_device *td;

	list_for_each_entry(td, l, list)
		if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
			return td;

	return NULL;
}

int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
			struct dm_dev **result) {
	int r;
	struct table_device *td;

	mutex_lock(&md->table_devices_lock);
	td = find_table_device(&md->table_devices, dev, mode);
	if (!td) {
		td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
		if (!td) {
			mutex_unlock(&md->table_devices_lock);
			return -ENOMEM;
		}

		td->dm_dev.mode = mode;
		td->dm_dev.bdev = NULL;

		if ((r = open_table_device(td, dev, md))) {
			mutex_unlock(&md->table_devices_lock);
			kfree(td);
			return r;
		}

		format_dev_t(td->dm_dev.name, dev);

		refcount_set(&td->count, 1);
		list_add(&td->list, &md->table_devices);
	} else {
		refcount_inc(&td->count);
	}
	mutex_unlock(&md->table_devices_lock);

	*result = &td->dm_dev;
	return 0;
}
EXPORT_SYMBOL_GPL(dm_get_table_device);

void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
{
	struct table_device *td = container_of(d, struct table_device, dm_dev);

	mutex_lock(&md->table_devices_lock);
	if (refcount_dec_and_test(&td->count)) {
		close_table_device(td, md);
		list_del(&td->list);
		kfree(td);
	}
	mutex_unlock(&md->table_devices_lock);
}
EXPORT_SYMBOL(dm_put_table_device);

static void free_table_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct table_device *td = list_entry(tmp, struct table_device, list);

		DMWARN("dm_destroy: %s still exists with %d references",
		       td->dm_dev.name, refcount_read(&td->count));
		kfree(td);
	}
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, blk_status_t error)
{
	unsigned long flags;
	blk_status_t io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;

	/* Push-back supersedes any I/O errors */
	if (unlikely(error)) {
		spin_lock_irqsave(&io->endio_lock, flags);
		if (!(io->status == BLK_STS_DM_REQUEUE && __noflush_suspending(md)))
			io->status = error;
		spin_unlock_irqrestore(&io->endio_lock, flags);
	}

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->status == BLK_STS_DM_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md))
				/* NOTE early return due to BLK_STS_DM_REQUEUE below */
				bio_list_add_head(&md->deferred, io->orig_bio);
			else
				/* noflush suspend was interrupted. */
				io->status = BLK_STS_IOERR;
			spin_unlock_irqrestore(&md->deferred_lock, flags);
		}

		io_error = io->status;
		bio = io->orig_bio;
		end_io_acct(io);
		free_io(md, io);

		if (io_error == BLK_STS_DM_REQUEUE)
			return;

		if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
			/*
			 * Preflush done for flush with data, reissue
			 * without REQ_PREFLUSH.
			 */
			bio->bi_opf &= ~REQ_PREFLUSH;
			queue_io(md, bio);
		} else {
			/* done with normal IO or empty flush */
			if (io_error)
				bio->bi_status = io_error;
			bio_endio(bio);
		}
	}
}

void disable_write_same(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE SAME, disable it */
	limits->max_write_same_sectors = 0;
}

void disable_write_zeroes(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE ZEROES, disable it */
	limits->max_write_zeroes_sectors = 0;
}

static void clone_endio(struct bio *bio)
{
	blk_status_t error = bio->bi_status;
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
		if (bio_op(bio) == REQ_OP_WRITE_SAME &&
		    !bio->bi_disk->queue->limits.max_write_same_sectors)
			disable_write_same(md);
		if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
		    !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
			disable_write_zeroes(md);
	}

	if (endio) {
		int r = endio(tio->ti, bio, &error);
		switch (r) {
		case DM_ENDIO_REQUEUE:
			error = BLK_STS_DM_REQUEUE;
			/*FALLTHRU*/
		case DM_ENDIO_DONE:
			break;
		case DM_ENDIO_INCOMPLETE:
			/* The target will handle the io */
			return;
		default:
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	free_tio(tio);
	dec_pending(io, error);
}

/*
 * Return maximum size of I/O possible at the supplied sector up to the current
 * target boundary.
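 * That is, the number of sectors remaining before the end of the target:
 * ti->len - dm_target_offset(ti, sector).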
 */
static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
{
	sector_t target_offset = dm_target_offset(ti, sector);

	return ti->len - target_offset;
}

static sector_t max_io_len(sector_t sector, struct dm_target *ti)
{
	sector_t len = max_io_len_target_boundary(sector, ti);
	sector_t offset, max_len;

	/*
	 * Does the target need to split even further?
	 */
	if (ti->max_io_len) {
		offset = dm_target_offset(ti, sector);
		if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
			max_len = sector_div(offset, ti->max_io_len);
		else
			max_len = offset & (ti->max_io_len - 1);
		max_len = ti->max_io_len - max_len;

		if (len > max_len)
			len = max_len;
	}

	return len;
}

int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
{
	if (len > UINT_MAX) {
		DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
		      (unsigned long long)len, UINT_MAX);
		ti->error = "Maximum size of target IO is too large";
		return -EINVAL;
	}

	/*
	 * BIO based queue uses its own splitting. When multipage bvecs
	 * are switched on, the size of the incoming bio may be too big to
	 * be handled in some targets, such as crypt.
	 *
	 * When these targets are ready for the big bio, we can remove
	 * the limit.
	 */
	ti->max_io_len = min_t(uint32_t, len, BIO_MAX_PAGES * PAGE_SIZE);

	return 0;
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);

static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
						sector_t sector, int *srcu_idx)
	__acquires(md->io_barrier)
{
	struct dm_table *map;
	struct dm_target *ti;

	map = dm_get_live_table(md, srcu_idx);
	if (!map)
		return NULL;

	ti = dm_table_find_target(map, sector);
	if (!dm_target_is_valid(ti))
		return NULL;

	return ti;
}

static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
				 long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long len, ret = -EIO;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->direct_access)
		goto out;
	len = max_io_len(sector, ti) / PAGE_SECTORS;
	if (len < 1)
		goto out;
	nr_pages = min(len, nr_pages);
	ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);

 out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
				    void *addr, size_t bytes, struct iov_iter *i)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long ret = 0;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->dax_copy_from_iter) {
		ret = copy_from_iter(addr, bytes, i);
		goto out;
	}
	ret = ti->type->dax_copy_from_iter(ti, pgoff, addr, bytes, i);
 out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

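/* Counterpart of dm_dax_copy_from_iter() for copying data out to an iov_iter */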
static size_t dm_dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
				  void *addr, size_t bytes, struct iov_iter *i)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long ret = 0;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->dax_copy_to_iter) {
		ret = copy_to_iter(addr, bytes, i);
		goto out;
	}
	ret = ti->type->dax_copy_to_iter(ti, pgoff, addr, bytes, i);
 out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

/*
 * A target may call dm_accept_partial_bio only from the map routine. It is
 * allowed for all bio types except REQ_PREFLUSH and REQ_OP_ZONE_RESET.
 *
 * dm_accept_partial_bio informs the dm that the target only wants to process
 * additional n_sectors sectors of the bio and the rest of the data should be
 * sent in a next bio.
 *
 * A diagram that explains the arithmetic:
 * +--------------------+---------------+-------+
 * |         1          |       2       |   3   |
 * +--------------------+---------------+-------+
 *
 * <-------------- *tio->len_ptr --------------->
 *                      <------- bi_size ------->
 *                      <-- n_sectors -->
 *
 * Region 1 was already iterated over with bio_advance or similar function.
 *	(it may be empty if the target doesn't use bio_advance)
 * Region 2 is the remaining bio size that the target wants to process.
 *	(it may be empty if region 1 is non-empty, although there is no reason
 *	 to make it empty)
 * The target requires that region 3 is to be sent in the next bio.
 *
 * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
 * the partially processed part (the sum of regions 1+2) must be the same for all
 * copies of the bio.
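 *
 * An illustrative example: with *tio->len_ptr == 100 sectors, bi_size == 60
 * and n_sectors == 20, region 1 covers 40 sectors; the call shrinks the
 * clone to 20 sectors and *tio->len_ptr to 60, leaving region 3 (40 sectors)
 * to be sent in a next bio.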
11911dd40c3eSMikulas Patocka */ 11921dd40c3eSMikulas Patocka void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors) 11931dd40c3eSMikulas Patocka { 11941dd40c3eSMikulas Patocka struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); 11951dd40c3eSMikulas Patocka unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT; 11961eff9d32SJens Axboe BUG_ON(bio->bi_opf & REQ_PREFLUSH); 11971dd40c3eSMikulas Patocka BUG_ON(bi_size > *tio->len_ptr); 11981dd40c3eSMikulas Patocka BUG_ON(n_sectors > bi_size); 11991dd40c3eSMikulas Patocka *tio->len_ptr -= bi_size - n_sectors; 12001dd40c3eSMikulas Patocka bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT; 12011dd40c3eSMikulas Patocka } 12021dd40c3eSMikulas Patocka EXPORT_SYMBOL_GPL(dm_accept_partial_bio); 12031dd40c3eSMikulas Patocka 1204d67a5f4bSMikulas Patocka /* 1205e76239a3SChristoph Hellwig * The zone descriptors obtained with a zone report indicate 1206e76239a3SChristoph Hellwig * zone positions within the underlying device of the target. The zone 1207e76239a3SChristoph Hellwig * descriptors must be remapped to match their position within the dm device. 1208e76239a3SChristoph Hellwig * The calling target should obtain the zone information using 1209e76239a3SChristoph Hellwig * blkdev_report_zones() to ensure that remapping for partition offset is 1210e76239a3SChristoph Hellwig * already handled. 121110999307SDamien Le Moal */ 1212e76239a3SChristoph Hellwig void dm_remap_zone_report(struct dm_target *ti, sector_t start, 1213e76239a3SChristoph Hellwig struct blk_zone *zones, unsigned int *nr_zones) 121410999307SDamien Le Moal { 121510999307SDamien Le Moal #ifdef CONFIG_BLK_DEV_ZONED 121610999307SDamien Le Moal struct blk_zone *zone; 1217e76239a3SChristoph Hellwig unsigned int nrz = *nr_zones; 1218e76239a3SChristoph Hellwig int i; 121910999307SDamien Le Moal 122010999307SDamien Le Moal /* 1221e76239a3SChristoph Hellwig * Remap the start sector and write pointer position of the zones in 1222e76239a3SChristoph Hellwig * the array. Since we may have obtained from the target's underlying 1223e76239a3SChristoph Hellwig * device more zones than fit within the target size, also adjust the number 1224e76239a3SChristoph Hellwig * of zones.
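 * Zones that begin at or beyond the end of the target are cleared and
 * trimmed from the reported count (*nr_zones is set to the number of
 * zones actually kept).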
12259864cd5dSDamien Le Moal */ 1226e76239a3SChristoph Hellwig for (i = 0; i < nrz; i++) { 1227e76239a3SChristoph Hellwig zone = zones + i; 122810999307SDamien Le Moal if (zone->start >= start + ti->len) { 1229e76239a3SChristoph Hellwig memset(zone, 0, sizeof(struct blk_zone) * (nrz - i)); 123010999307SDamien Le Moal break; 123110999307SDamien Le Moal } 1232e76239a3SChristoph Hellwig 123310999307SDamien Le Moal zone->start = zone->start + ti->begin - start; 1234e76239a3SChristoph Hellwig if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) 1235e76239a3SChristoph Hellwig continue; 1236e76239a3SChristoph Hellwig 123710999307SDamien Le Moal if (zone->cond == BLK_ZONE_COND_FULL) 123810999307SDamien Le Moal zone->wp = zone->start + zone->len; 123910999307SDamien Le Moal else if (zone->cond == BLK_ZONE_COND_EMPTY) 124010999307SDamien Le Moal zone->wp = zone->start; 124110999307SDamien Le Moal else 1242e76239a3SChristoph Hellwig zone->wp = zone->wp + ti->begin - start; 124310999307SDamien Le Moal } 124410999307SDamien Le Moal 1245e76239a3SChristoph Hellwig *nr_zones = i; 124610999307SDamien Le Moal #else /* !CONFIG_BLK_DEV_ZONED */ 1247e76239a3SChristoph Hellwig *nr_zones = 0; 124810999307SDamien Le Moal #endif 124910999307SDamien Le Moal } 125010999307SDamien Le Moal EXPORT_SYMBOL_GPL(dm_remap_zone_report); 125110999307SDamien Le Moal 1252978e51baSMike Snitzer static blk_qc_t __map_bio(struct dm_target_io *tio) 12531da177e4SLinus Torvalds { 12541da177e4SLinus Torvalds int r; 12552056a782SJens Axboe sector_t sector; 1256dba14160SMikulas Patocka struct bio *clone = &tio->clone; 125764f52b0eSMike Snitzer struct dm_io *io = tio->io; 1258978e51baSMike Snitzer struct mapped_device *md = io->md; 1259bd2a49b8SAlasdair G Kergon struct dm_target *ti = tio->ti; 1260978e51baSMike Snitzer blk_qc_t ret = BLK_QC_T_NONE; 12611da177e4SLinus Torvalds 12621da177e4SLinus Torvalds clone->bi_end_io = clone_endio; 12631da177e4SLinus Torvalds 12641da177e4SLinus Torvalds /* 12651da177e4SLinus Torvalds * Map the clone. If r == 0 we don't need to do 12661da177e4SLinus Torvalds * anything, the target has assumed ownership of 12671da177e4SLinus Torvalds * this io. 
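 * For DM_MAPIO_REMAPPED the clone is dispatched to its new destination;
 * DM_MAPIO_KILL and DM_MAPIO_REQUEUE free the clone and complete the
 * original io with BLK_STS_IOERR or BLK_STS_DM_REQUEUE respectively.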
12681da177e4SLinus Torvalds */ 126964f52b0eSMike Snitzer atomic_inc(&io->io_count); 12704f024f37SKent Overstreet sector = clone->bi_iter.bi_sector; 1271d67a5f4bSMikulas Patocka 12727de3ee57SMikulas Patocka r = ti->type->map(ti, clone); 1273846785e6SChristoph Hellwig switch (r) { 1274846785e6SChristoph Hellwig case DM_MAPIO_SUBMITTED: 1275846785e6SChristoph Hellwig break; 1276846785e6SChristoph Hellwig case DM_MAPIO_REMAPPED: 12771da177e4SLinus Torvalds /* the bio has been remapped so dispatch it */ 127874d46992SChristoph Hellwig trace_block_bio_remap(clone->bi_disk->queue, clone, 127964f52b0eSMike Snitzer bio_dev(io->orig_bio), sector); 1280978e51baSMike Snitzer if (md->type == DM_TYPE_NVME_BIO_BASED) 1281978e51baSMike Snitzer ret = direct_make_request(clone); 1282978e51baSMike Snitzer else 1283978e51baSMike Snitzer ret = generic_make_request(clone); 1284846785e6SChristoph Hellwig break; 1285846785e6SChristoph Hellwig case DM_MAPIO_KILL: 12864e4cbee9SChristoph Hellwig free_tio(tio); 128764f52b0eSMike Snitzer dec_pending(io, BLK_STS_IOERR); 12884e4cbee9SChristoph Hellwig break; 1289846785e6SChristoph Hellwig case DM_MAPIO_REQUEUE: 1290cfae7529SMike Snitzer free_tio(tio); 129164f52b0eSMike Snitzer dec_pending(io, BLK_STS_DM_REQUEUE); 1292846785e6SChristoph Hellwig break; 1293846785e6SChristoph Hellwig default: 129445cbcd79SKiyoshi Ueda DMWARN("unimplemented target map return value: %d", r); 129545cbcd79SKiyoshi Ueda BUG(); 12961da177e4SLinus Torvalds } 12971da177e4SLinus Torvalds 1298978e51baSMike Snitzer return ret; 12991da177e4SLinus Torvalds } 13001da177e4SLinus Torvalds 1301e0d6609aSMikulas Patocka static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len) 1302bd2a49b8SAlasdair G Kergon { 13034f024f37SKent Overstreet bio->bi_iter.bi_sector = sector; 13044f024f37SKent Overstreet bio->bi_iter.bi_size = to_bytes(len); 13051da177e4SLinus Torvalds } 13061da177e4SLinus Torvalds 13071da177e4SLinus Torvalds /* 13081da177e4SLinus Torvalds * Creates a bio that consists of a range of complete bvecs. 13091da177e4SLinus Torvalds */ 1310c80914e8SMike Snitzer static int clone_bio(struct dm_target_io *tio, struct bio *bio, 13111c3b13e6SKent Overstreet sector_t sector, unsigned len) 13121da177e4SLinus Torvalds { 1313dba14160SMikulas Patocka struct bio *clone = &tio->clone; 13141da177e4SLinus Torvalds 13151c3b13e6SKent Overstreet __bio_clone_fast(clone, bio); 13169c47008dSMartin K.
Petersen 1317e2460f2aSMikulas Patocka if (unlikely(bio_integrity(bio) != NULL)) { 1318e2460f2aSMikulas Patocka int r; 1319e2460f2aSMikulas Patocka 1320e2460f2aSMikulas Patocka if (unlikely(!dm_target_has_integrity(tio->ti->type) && 1321e2460f2aSMikulas Patocka !dm_target_passes_integrity(tio->ti->type))) { 1322e2460f2aSMikulas Patocka DMWARN("%s: the target %s doesn't support integrity data.", 1323e2460f2aSMikulas Patocka dm_device_name(tio->io->md), 1324e2460f2aSMikulas Patocka tio->ti->type->name); 1325e2460f2aSMikulas Patocka return -EIO; 1326e2460f2aSMikulas Patocka } 1327e2460f2aSMikulas Patocka 1328e2460f2aSMikulas Patocka r = bio_integrity_clone(clone, bio, GFP_NOIO); 1329c80914e8SMike Snitzer if (r < 0) 1330c80914e8SMike Snitzer return r; 1331c80914e8SMike Snitzer } 13321c3b13e6SKent Overstreet 13331c3b13e6SKent Overstreet bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector)); 13341c3b13e6SKent Overstreet clone->bi_iter.bi_size = to_bytes(len); 13351c3b13e6SKent Overstreet 1336e2460f2aSMikulas Patocka if (unlikely(bio_integrity(bio) != NULL)) 1337fbd08e76SDmitry Monakhov bio_integrity_trim(clone); 1338c80914e8SMike Snitzer 1339c80914e8SMike Snitzer return 0; 13401da177e4SLinus Torvalds } 13411da177e4SLinus Torvalds 1342318716ddSMike Snitzer static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci, 1343318716ddSMike Snitzer struct dm_target *ti, unsigned num_bios) 1344f9ab94ceSMikulas Patocka { 1345dba14160SMikulas Patocka struct dm_target_io *tio; 1346318716ddSMike Snitzer int try; 1347dba14160SMikulas Patocka 1348318716ddSMike Snitzer if (!num_bios) 1349318716ddSMike Snitzer return; 1350f9ab94ceSMikulas Patocka 1351318716ddSMike Snitzer if (num_bios == 1) { 1352318716ddSMike Snitzer tio = alloc_tio(ci, ti, 0, GFP_NOIO); 1353318716ddSMike Snitzer bio_list_add(blist, &tio->clone); 1354318716ddSMike Snitzer return; 13559015df24SAlasdair G Kergon } 13569015df24SAlasdair G Kergon 1357318716ddSMike Snitzer for (try = 0; try < 2; try++) { 1358318716ddSMike Snitzer int bio_nr; 1359318716ddSMike Snitzer struct bio *bio; 1360318716ddSMike Snitzer 1361318716ddSMike Snitzer if (try) 1362bc02cdbeSMike Snitzer mutex_lock(&ci->io->md->table_devices_lock); 1363318716ddSMike Snitzer for (bio_nr = 0; bio_nr < num_bios; bio_nr++) { 1364318716ddSMike Snitzer tio = alloc_tio(ci, ti, bio_nr, try ? 
GFP_NOIO : GFP_NOWAIT); 1365318716ddSMike Snitzer if (!tio) 1366318716ddSMike Snitzer break; 1367318716ddSMike Snitzer 1368318716ddSMike Snitzer bio_list_add(blist, &tio->clone); 1369318716ddSMike Snitzer } 1370318716ddSMike Snitzer if (try) 1371bc02cdbeSMike Snitzer mutex_unlock(&ci->io->md->table_devices_lock); 1372318716ddSMike Snitzer if (bio_nr == num_bios) 1373318716ddSMike Snitzer return; 1374318716ddSMike Snitzer 1375318716ddSMike Snitzer while ((bio = bio_list_pop(blist))) { 1376318716ddSMike Snitzer tio = container_of(bio, struct dm_target_io, clone); 1377318716ddSMike Snitzer free_tio(tio); 1378318716ddSMike Snitzer } 1379318716ddSMike Snitzer } 1380318716ddSMike Snitzer } 1381318716ddSMike Snitzer 1382978e51baSMike Snitzer static blk_qc_t __clone_and_map_simple_bio(struct clone_info *ci, 1383318716ddSMike Snitzer struct dm_target_io *tio, unsigned *len) 13849015df24SAlasdair G Kergon { 1385dba14160SMikulas Patocka struct bio *clone = &tio->clone; 13869015df24SAlasdair G Kergon 13871dd40c3eSMikulas Patocka tio->len_ptr = len; 13881dd40c3eSMikulas Patocka 13891c3b13e6SKent Overstreet __bio_clone_fast(clone, ci->bio); 1390bd2a49b8SAlasdair G Kergon if (len) 13911dd40c3eSMikulas Patocka bio_setup_sector(clone, ci->sector, *len); 1392f9ab94ceSMikulas Patocka 1393978e51baSMike Snitzer return __map_bio(tio); 1394f9ab94ceSMikulas Patocka } 1395f9ab94ceSMikulas Patocka 139614fe594dSAlasdair G Kergon static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti, 13971dd40c3eSMikulas Patocka unsigned num_bios, unsigned *len) 139806a426ceSMike Snitzer { 1399318716ddSMike Snitzer struct bio_list blist = BIO_EMPTY_LIST; 1400318716ddSMike Snitzer struct bio *bio; 1401318716ddSMike Snitzer struct dm_target_io *tio; 140206a426ceSMike Snitzer 1403318716ddSMike Snitzer alloc_multiple_bios(&blist, ci, ti, num_bios); 1404318716ddSMike Snitzer 1405318716ddSMike Snitzer while ((bio = bio_list_pop(&blist))) { 1406318716ddSMike Snitzer tio = container_of(bio, struct dm_target_io, clone); 1407978e51baSMike Snitzer (void) __clone_and_map_simple_bio(ci, tio, len); 1408318716ddSMike Snitzer } 140906a426ceSMike Snitzer } 141006a426ceSMike Snitzer 141114fe594dSAlasdair G Kergon static int __send_empty_flush(struct clone_info *ci) 1412f9ab94ceSMikulas Patocka { 141306a426ceSMike Snitzer unsigned target_nr = 0; 1414f9ab94ceSMikulas Patocka struct dm_target *ti; 1415f9ab94ceSMikulas Patocka 1416892ad71fSDennis Zhou /* 1417892ad71fSDennis Zhou * Empty flush uses a statically initialized bio, &md->flush_bio, as 1418892ad71fSDennis Zhou * the base for cloning. However, blkg association requires that a 1419892ad71fSDennis Zhou * bdev is associated with a gendisk, which doesn't happen until the 1420892ad71fSDennis Zhou * bdev is opened. So, blkg association is done at issue time of the 1421892ad71fSDennis Zhou * flush rather than when the device is created in alloc_dev(). 
1422892ad71fSDennis Zhou */ 1423892ad71fSDennis Zhou bio_set_dev(ci->bio, ci->io->md->bdev); 1424892ad71fSDennis Zhou 1425b372d360SMike Snitzer BUG_ON(bio_has_data(ci->bio)); 1426f9ab94ceSMikulas Patocka while ((ti = dm_table_get_target(ci->map, target_nr++))) 14271dd40c3eSMikulas Patocka __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL); 1428f9ab94ceSMikulas Patocka 1429892ad71fSDennis Zhou bio_disassociate_blkg(ci->bio); 1430892ad71fSDennis Zhou 1431f9ab94ceSMikulas Patocka return 0; 1432f9ab94ceSMikulas Patocka } 1433f9ab94ceSMikulas Patocka 1434c80914e8SMike Snitzer static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti, 14351dd40c3eSMikulas Patocka sector_t sector, unsigned *len) 14365ae89a87SMike Snitzer { 1437dba14160SMikulas Patocka struct bio *bio = ci->bio; 14385ae89a87SMike Snitzer struct dm_target_io *tio; 1439f31c21e4SNeilBrown int r; 14405ae89a87SMike Snitzer 1441318716ddSMike Snitzer tio = alloc_tio(ci, ti, 0, GFP_NOIO); 14421dd40c3eSMikulas Patocka tio->len_ptr = len; 1443c80914e8SMike Snitzer r = clone_bio(tio, bio, sector, *len); 1444072623deSMikulas Patocka if (r < 0) { 1445cfae7529SMike Snitzer free_tio(tio); 1446c80914e8SMike Snitzer return r; 1447b0d8ed4dSAlasdair G Kergon } 1448978e51baSMike Snitzer (void) __map_bio(tio); 144955a62eefSAlasdair G Kergon 1450f31c21e4SNeilBrown return 0; 145123508a96SMike Snitzer } 145255a62eefSAlasdair G Kergon 145323508a96SMike Snitzer typedef unsigned (*get_num_bios_fn)(struct dm_target *ti); 145455a62eefSAlasdair G Kergon 145523508a96SMike Snitzer static unsigned get_num_discard_bios(struct dm_target *ti) 145623508a96SMike Snitzer { 145723508a96SMike Snitzer return ti->num_discard_bios; 145823508a96SMike Snitzer } 145923508a96SMike Snitzer 146000716545SDenis Semakin static unsigned get_num_secure_erase_bios(struct dm_target *ti) 146100716545SDenis Semakin { 146200716545SDenis Semakin return ti->num_secure_erase_bios; 146300716545SDenis Semakin } 146400716545SDenis Semakin 146523508a96SMike Snitzer static unsigned get_num_write_same_bios(struct dm_target *ti) 146623508a96SMike Snitzer { 146723508a96SMike Snitzer return ti->num_write_same_bios; 146823508a96SMike Snitzer } 146923508a96SMike Snitzer 1470ac62d620SChristoph Hellwig static unsigned get_num_write_zeroes_bios(struct dm_target *ti) 1471ac62d620SChristoph Hellwig { 1472ac62d620SChristoph Hellwig return ti->num_write_zeroes_bios; 1473ac62d620SChristoph Hellwig } 1474ac62d620SChristoph Hellwig 147523508a96SMike Snitzer typedef bool (*is_split_required_fn)(struct dm_target *ti); 147623508a96SMike Snitzer 147723508a96SMike Snitzer static bool is_split_required_for_discard(struct dm_target *ti) 147823508a96SMike Snitzer { 147955a62eefSAlasdair G Kergon return ti->split_discard_bios; 148023508a96SMike Snitzer } 148123508a96SMike Snitzer 14823d7f4562SMike Snitzer static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti, 148355a62eefSAlasdair G Kergon get_num_bios_fn get_num_bios, 148423508a96SMike Snitzer is_split_required_fn is_split_required) 14855ae89a87SMike Snitzer { 1486e0d6609aSMikulas Patocka unsigned len; 148755a62eefSAlasdair G Kergon unsigned num_bios; 14885ae89a87SMike Snitzer 14895ae89a87SMike Snitzer /* 149023508a96SMike Snitzer * Even though the device advertised support for this type of 149123508a96SMike Snitzer * request, that does not mean every target supports it, and 1492936688d7SMike Snitzer * reconfiguration might also have changed that since the 14935ae89a87SMike Snitzer * check was performed. 
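 * Hence a zero num_bios is reported back to the caller as -EOPNOTSUPP
 * rather than being treated as a bug.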
14945ae89a87SMike Snitzer */ 149555a62eefSAlasdair G Kergon num_bios = get_num_bios ? get_num_bios(ti) : 0; 149655a62eefSAlasdair G Kergon if (!num_bios) 14975ae89a87SMike Snitzer return -EOPNOTSUPP; 14985ae89a87SMike Snitzer 149923508a96SMike Snitzer if (is_split_required && !is_split_required(ti)) 1500e0d6609aSMikulas Patocka len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti)); 15017acf0277SMikulas Patocka else 1502e0d6609aSMikulas Patocka len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti)); 15035ae89a87SMike Snitzer 15041dd40c3eSMikulas Patocka __send_duplicate_bios(ci, ti, num_bios, &len); 15055ae89a87SMike Snitzer 1506a79245b3SMike Snitzer ci->sector += len; 15073d7f4562SMike Snitzer ci->sector_count -= len; 15085ae89a87SMike Snitzer 15095ae89a87SMike Snitzer return 0; 15105ae89a87SMike Snitzer } 15115ae89a87SMike Snitzer 15123d7f4562SMike Snitzer static int __send_discard(struct clone_info *ci, struct dm_target *ti) 151323508a96SMike Snitzer { 15143d7f4562SMike Snitzer return __send_changing_extent_only(ci, ti, get_num_discard_bios, 151523508a96SMike Snitzer is_split_required_for_discard); 151623508a96SMike Snitzer } 151723508a96SMike Snitzer 151800716545SDenis Semakin static int __send_secure_erase(struct clone_info *ci, struct dm_target *ti) 151900716545SDenis Semakin { 152000716545SDenis Semakin return __send_changing_extent_only(ci, ti, get_num_secure_erase_bios, NULL); 152100716545SDenis Semakin } 152200716545SDenis Semakin 15233d7f4562SMike Snitzer static int __send_write_same(struct clone_info *ci, struct dm_target *ti) 152423508a96SMike Snitzer { 15253d7f4562SMike Snitzer return __send_changing_extent_only(ci, ti, get_num_write_same_bios, NULL); 152623508a96SMike Snitzer } 152723508a96SMike Snitzer 15283d7f4562SMike Snitzer static int __send_write_zeroes(struct clone_info *ci, struct dm_target *ti) 1529ac62d620SChristoph Hellwig { 15303d7f4562SMike Snitzer return __send_changing_extent_only(ci, ti, get_num_write_zeroes_bios, NULL); 1531ac62d620SChristoph Hellwig } 1532ac62d620SChristoph Hellwig 15330519c71eSMike Snitzer static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti, 15340519c71eSMike Snitzer int *result) 15350519c71eSMike Snitzer { 15360519c71eSMike Snitzer struct bio *bio = ci->bio; 15370519c71eSMike Snitzer 15380519c71eSMike Snitzer if (bio_op(bio) == REQ_OP_DISCARD) 15390519c71eSMike Snitzer *result = __send_discard(ci, ti); 154000716545SDenis Semakin else if (bio_op(bio) == REQ_OP_SECURE_ERASE) 154100716545SDenis Semakin *result = __send_secure_erase(ci, ti); 15420519c71eSMike Snitzer else if (bio_op(bio) == REQ_OP_WRITE_SAME) 15430519c71eSMike Snitzer *result = __send_write_same(ci, ti); 15440519c71eSMike Snitzer else if (bio_op(bio) == REQ_OP_WRITE_ZEROES) 15450519c71eSMike Snitzer *result = __send_write_zeroes(ci, ti); 15460519c71eSMike Snitzer else 15470519c71eSMike Snitzer return false; 15480519c71eSMike Snitzer 15490519c71eSMike Snitzer return true; 15500519c71eSMike Snitzer } 15510519c71eSMike Snitzer 1552e4c93811SAlasdair G Kergon /* 1553e4c93811SAlasdair G Kergon * Select the correct strategy for processing a non-flush bio. 
1554e4c93811SAlasdair G Kergon */ 1555e4c93811SAlasdair G Kergon static int __split_and_process_non_flush(struct clone_info *ci) 1556e4c93811SAlasdair G Kergon { 1557e4c93811SAlasdair G Kergon struct dm_target *ti; 15581c3b13e6SKent Overstreet unsigned len; 1559c80914e8SMike Snitzer int r; 1560e4c93811SAlasdair G Kergon 1561e4c93811SAlasdair G Kergon ti = dm_table_find_target(ci->map, ci->sector); 1562e4c93811SAlasdair G Kergon if (!dm_target_is_valid(ti)) 1563e4c93811SAlasdair G Kergon return -EIO; 1564e4c93811SAlasdair G Kergon 15650519c71eSMike Snitzer if (unlikely(__process_abnormal_io(ci, ti, &r))) 15660519c71eSMike Snitzer return r; 15673d7f4562SMike Snitzer 1568e76239a3SChristoph Hellwig len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count); 1569e4c93811SAlasdair G Kergon 1570c80914e8SMike Snitzer r = __clone_and_map_data_bio(ci, ti, ci->sector, &len); 1571c80914e8SMike Snitzer if (r < 0) 1572c80914e8SMike Snitzer return r; 1573e4c93811SAlasdair G Kergon 1574e4c93811SAlasdair G Kergon ci->sector += len; 1575e4c93811SAlasdair G Kergon ci->sector_count -= len; 1576e4c93811SAlasdair G Kergon 1577e4c93811SAlasdair G Kergon return 0; 1578e4c93811SAlasdair G Kergon } 1579e4c93811SAlasdair G Kergon 1580978e51baSMike Snitzer static void init_clone_info(struct clone_info *ci, struct mapped_device *md, 1581978e51baSMike Snitzer struct dm_table *map, struct bio *bio) 1582978e51baSMike Snitzer { 1583978e51baSMike Snitzer ci->map = map; 1584978e51baSMike Snitzer ci->io = alloc_io(md, bio); 1585978e51baSMike Snitzer ci->sector = bio->bi_iter.bi_sector; 1586978e51baSMike Snitzer } 1587978e51baSMike Snitzer 1588e4c93811SAlasdair G Kergon /* 158914fe594dSAlasdair G Kergon * Entry point to split a bio into clones and submit them to the targets. 15901da177e4SLinus Torvalds */ 1591978e51baSMike Snitzer static blk_qc_t __split_and_process_bio(struct mapped_device *md, 159283d5e5b0SMikulas Patocka struct dm_table *map, struct bio *bio) 15931da177e4SLinus Torvalds { 15941da177e4SLinus Torvalds struct clone_info ci; 1595978e51baSMike Snitzer blk_qc_t ret = BLK_QC_T_NONE; 1596512875bdSJun'ichi Nomura int error = 0; 15971da177e4SLinus Torvalds 159883d5e5b0SMikulas Patocka if (unlikely(!map)) { 1599f0b9a450SMikulas Patocka bio_io_error(bio); 1600978e51baSMike Snitzer return ret; 1601f0b9a450SMikulas Patocka } 1602692d0eb9SMikulas Patocka 1603978e51baSMike Snitzer init_clone_info(&ci, md, map, bio); 1604bd2a49b8SAlasdair G Kergon 16051eff9d32SJens Axboe if (bio->bi_opf & REQ_PREFLUSH) { 1606bc02cdbeSMike Snitzer ci.bio = &ci.io->md->flush_bio; 1607b372d360SMike Snitzer ci.sector_count = 0; 160814fe594dSAlasdair G Kergon error = __send_empty_flush(&ci); 1609b372d360SMike Snitzer /* dec_pending submits any data associated with flush */ 1610a4aa5e56SDamien Le Moal } else if (bio_op(bio) == REQ_OP_ZONE_RESET) { 1611a4aa5e56SDamien Le Moal ci.bio = bio; 1612a4aa5e56SDamien Le Moal ci.sector_count = 0; 1613a4aa5e56SDamien Le Moal error = __split_and_process_non_flush(&ci); 1614b372d360SMike Snitzer } else { 16156a8736d1STejun Heo ci.bio = bio; 16161da177e4SLinus Torvalds ci.sector_count = bio_sectors(bio); 161718a25da8SNeilBrown while (ci.sector_count && !error) { 161814fe594dSAlasdair G Kergon error = __split_and_process_non_flush(&ci); 161918a25da8SNeilBrown if (current->bio_list && ci.sector_count && !error) { 162018a25da8SNeilBrown /* 162118a25da8SNeilBrown * Remainder must be passed to generic_make_request() 162218a25da8SNeilBrown * so that it gets handled *after* bios already submitted 
162318a25da8SNeilBrown * have been completely processed. 162418a25da8SNeilBrown * We take a clone of the original to store in 1625745dc570SMike Snitzer * ci.io->orig_bio to be used by end_io_acct() and 162618a25da8SNeilBrown * for dec_pending to use for completion handling. 162718a25da8SNeilBrown */ 1628f21c601aSMike Snitzer struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count, 1629f21c601aSMike Snitzer GFP_NOIO, &md->queue->bio_split); 1630745dc570SMike Snitzer ci.io->orig_bio = b; 163118a25da8SNeilBrown bio_chain(b, bio); 1632978e51baSMike Snitzer ret = generic_make_request(bio); 163318a25da8SNeilBrown break; 163418a25da8SNeilBrown } 163518a25da8SNeilBrown } 1636d87f4c14STejun Heo } 16371da177e4SLinus Torvalds 16381da177e4SLinus Torvalds /* drop the extra reference count */ 163954385bf7SBart Van Assche dec_pending(ci.io, errno_to_blk_status(error)); 1640978e51baSMike Snitzer return ret; 16411da177e4SLinus Torvalds } 16421da177e4SLinus Torvalds 16431da177e4SLinus Torvalds /* 1644978e51baSMike Snitzer * Optimized variant of __split_and_process_bio that leverages the 1645978e51baSMike Snitzer * fact that targets that use it do _not_ have a need to split bios. 16461da177e4SLinus Torvalds */ 1647978e51baSMike Snitzer static blk_qc_t __process_bio(struct mapped_device *md, 1648978e51baSMike Snitzer struct dm_table *map, struct bio *bio) 16491da177e4SLinus Torvalds { 1650978e51baSMike Snitzer struct clone_info ci; 1651978e51baSMike Snitzer blk_qc_t ret = BLK_QC_T_NONE; 1652978e51baSMike Snitzer int error = 0; 1653978e51baSMike Snitzer 1654978e51baSMike Snitzer if (unlikely(!map)) { 1655978e51baSMike Snitzer bio_io_error(bio); 1656978e51baSMike Snitzer return ret; 1657978e51baSMike Snitzer } 1658978e51baSMike Snitzer 1659978e51baSMike Snitzer init_clone_info(&ci, md, map, bio); 1660978e51baSMike Snitzer 1661978e51baSMike Snitzer if (bio->bi_opf & REQ_PREFLUSH) { 1662978e51baSMike Snitzer ci.bio = &ci.io->md->flush_bio; 1663978e51baSMike Snitzer ci.sector_count = 0; 1664978e51baSMike Snitzer error = __send_empty_flush(&ci); 1665978e51baSMike Snitzer /* dec_pending submits any data associated with flush */ 1666978e51baSMike Snitzer } else { 1667978e51baSMike Snitzer struct dm_target *ti = md->immutable_target; 1668978e51baSMike Snitzer struct dm_target_io *tio; 1669978e51baSMike Snitzer 1670978e51baSMike Snitzer /* 1671978e51baSMike Snitzer * Defend against IO still getting in during teardown 1672978e51baSMike Snitzer * - as was seen for a time with nvme-fcloop 1673978e51baSMike Snitzer */ 1674bab5d988SIgor Stoppa if (WARN_ON_ONCE(!ti || !dm_target_is_valid(ti))) { 1675978e51baSMike Snitzer error = -EIO; 1676978e51baSMike Snitzer goto out; 1677978e51baSMike Snitzer } 1678978e51baSMike Snitzer 1679978e51baSMike Snitzer ci.bio = bio; 1680978e51baSMike Snitzer ci.sector_count = bio_sectors(bio); 16810519c71eSMike Snitzer if (unlikely(__process_abnormal_io(&ci, ti, &error))) 16820519c71eSMike Snitzer goto out; 16830519c71eSMike Snitzer 16840519c71eSMike Snitzer tio = alloc_tio(&ci, ti, 0, GFP_NOIO); 1685978e51baSMike Snitzer ret = __clone_and_map_simple_bio(&ci, tio, NULL); 1686978e51baSMike Snitzer } 1687978e51baSMike Snitzer out: 1688978e51baSMike Snitzer /* drop the extra reference count */ 1689978e51baSMike Snitzer dec_pending(ci.io, errno_to_blk_status(error)); 1690978e51baSMike Snitzer return ret; 1691978e51baSMike Snitzer } 1692978e51baSMike Snitzer 1693978e51baSMike Snitzer typedef blk_qc_t (process_bio_fn)(struct mapped_device *, struct dm_table *, struct bio *); 
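/*
 * Both bio-based request functions below funnel through __dm_make_request();
 * the process_bio_fn argument selects either the fully splitting path
 * (__split_and_process_bio) or the simplified single-immutable-target path
 * (__process_bio) used for DM_TYPE_NVME_BIO_BASED devices.
 */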
1694978e51baSMike Snitzer 1695978e51baSMike Snitzer static blk_qc_t __dm_make_request(struct request_queue *q, struct bio *bio, 1696978e51baSMike Snitzer process_bio_fn process_bio) 16971da177e4SLinus Torvalds { 16981da177e4SLinus Torvalds struct mapped_device *md = q->queuedata; 1699978e51baSMike Snitzer blk_qc_t ret = BLK_QC_T_NONE; 170083d5e5b0SMikulas Patocka int srcu_idx; 170183d5e5b0SMikulas Patocka struct dm_table *map; 17021da177e4SLinus Torvalds 170383d5e5b0SMikulas Patocka map = dm_get_live_table(md, &srcu_idx); 17041da177e4SLinus Torvalds 17056a8736d1STejun Heo /* if we're suspended, we have to queue this io for later */ 17066a8736d1STejun Heo if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { 170783d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 17081da177e4SLinus Torvalds 17091eff9d32SJens Axboe if (!(bio->bi_opf & REQ_RAHEAD)) 171092c63902SMikulas Patocka queue_io(md, bio); 17116a8736d1STejun Heo else 17126a8736d1STejun Heo bio_io_error(bio); 1713978e51baSMike Snitzer return ret; 17141da177e4SLinus Torvalds } 17151da177e4SLinus Torvalds 1716978e51baSMike Snitzer ret = process_bio(md, map, bio); 1717978e51baSMike Snitzer 171883d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 1719978e51baSMike Snitzer return ret; 1720978e51baSMike Snitzer } 1721978e51baSMike Snitzer 1722978e51baSMike Snitzer /* 1723978e51baSMike Snitzer * The request function that remaps the bio to one target and 1724978e51baSMike Snitzer * splits off any remainder. 1725978e51baSMike Snitzer */ 1726978e51baSMike Snitzer static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio) 1727978e51baSMike Snitzer { 1728978e51baSMike Snitzer return __dm_make_request(q, bio, __split_and_process_bio); 1729978e51baSMike Snitzer } 1730978e51baSMike Snitzer 1731978e51baSMike Snitzer static blk_qc_t dm_make_request_nvme(struct request_queue *q, struct bio *bio) 1732978e51baSMike Snitzer { 1733978e51baSMike Snitzer return __dm_make_request(q, bio, __process_bio); 1734cec47e3dSKiyoshi Ueda } 1735cec47e3dSKiyoshi Ueda 17361da177e4SLinus Torvalds static int dm_any_congested(void *congested_data, int bdi_bits) 17371da177e4SLinus Torvalds { 17388a57dfc6SChandra Seetharaman int r = bdi_bits; 17398a57dfc6SChandra Seetharaman struct mapped_device *md = congested_data; 17408a57dfc6SChandra Seetharaman struct dm_table *map; 17411da177e4SLinus Torvalds 17421eb787ecSAlasdair G Kergon if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 1743e522c039SMike Snitzer if (dm_request_based(md)) { 1744cec47e3dSKiyoshi Ueda /* 1745e522c039SMike Snitzer * With request-based DM we only need to check the 1746e522c039SMike Snitzer * top-level queue for congestion. 1747cec47e3dSKiyoshi Ueda */ 1748dc3b17ccSJan Kara r = md->queue->backing_dev_info->wb.state & bdi_bits; 1749e522c039SMike Snitzer } else { 1750e522c039SMike Snitzer map = dm_get_live_table_fast(md); 1751e522c039SMike Snitzer if (map) 17521da177e4SLinus Torvalds r = dm_table_any_congested(map, bdi_bits); 175383d5e5b0SMikulas Patocka dm_put_live_table_fast(md); 17548a57dfc6SChandra Seetharaman } 1755e522c039SMike Snitzer } 17568a57dfc6SChandra Seetharaman 17571da177e4SLinus Torvalds return r; 17581da177e4SLinus Torvalds } 17591da177e4SLinus Torvalds 17601da177e4SLinus Torvalds /*----------------------------------------------------------------- 17611da177e4SLinus Torvalds * An IDR is used to keep track of allocated minor numbers. 
17621da177e4SLinus Torvalds *---------------------------------------------------------------*/ 17632b06cfffSAlasdair G Kergon static void free_minor(int minor) 17641da177e4SLinus Torvalds { 1765f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 17661da177e4SLinus Torvalds idr_remove(&_minor_idr, minor); 1767f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 17681da177e4SLinus Torvalds } 17691da177e4SLinus Torvalds 17701da177e4SLinus Torvalds /* 17711da177e4SLinus Torvalds * See if the device with a specific minor # is free. 17721da177e4SLinus Torvalds */ 1773cf13ab8eSFrederik Deweerdt static int specific_minor(int minor) 17741da177e4SLinus Torvalds { 1775c9d76be6STejun Heo int r; 17761da177e4SLinus Torvalds 17771da177e4SLinus Torvalds if (minor >= (1 << MINORBITS)) 17781da177e4SLinus Torvalds return -EINVAL; 17791da177e4SLinus Torvalds 1780c9d76be6STejun Heo idr_preload(GFP_KERNEL); 1781f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 17821da177e4SLinus Torvalds 1783c9d76be6STejun Heo r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT); 17841da177e4SLinus Torvalds 1785f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 1786c9d76be6STejun Heo idr_preload_end(); 1787c9d76be6STejun Heo if (r < 0) 1788c9d76be6STejun Heo return r == -ENOSPC ? -EBUSY : r; 1789c9d76be6STejun Heo return 0; 17901da177e4SLinus Torvalds } 17911da177e4SLinus Torvalds 1792cf13ab8eSFrederik Deweerdt static int next_free_minor(int *minor) 17931da177e4SLinus Torvalds { 1794c9d76be6STejun Heo int r; 17951da177e4SLinus Torvalds 1796c9d76be6STejun Heo idr_preload(GFP_KERNEL); 1797f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 17981da177e4SLinus Torvalds 1799c9d76be6STejun Heo r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT); 18001da177e4SLinus Torvalds 1801f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 1802c9d76be6STejun Heo idr_preload_end(); 1803c9d76be6STejun Heo if (r < 0) 18041da177e4SLinus Torvalds return r; 1805c9d76be6STejun Heo *minor = r; 1806c9d76be6STejun Heo return 0; 18071da177e4SLinus Torvalds } 18081da177e4SLinus Torvalds 180983d5cde4SAlexey Dobriyan static const struct block_device_operations dm_blk_dops; 1810f26c5719SDan Williams static const struct dax_operations dm_dax_ops; 18111da177e4SLinus Torvalds 181253d5914fSMikulas Patocka static void dm_wq_work(struct work_struct *work); 181353d5914fSMikulas Patocka 1814c12c9a3cSMike Snitzer static void dm_init_normal_md_queue(struct mapped_device *md) 1815bfebd1cdSMike Snitzer { 1816bfebd1cdSMike Snitzer /* 1817bfebd1cdSMike Snitzer * Initialize aspects of queue that aren't relevant for blk-mq 1818bfebd1cdSMike Snitzer */ 1819dc3b17ccSJan Kara md->queue->backing_dev_info->congested_fn = dm_any_congested; 18204a0b4ddfSMike Snitzer } 18214a0b4ddfSMike Snitzer 18220f20972fSMike Snitzer static void cleanup_mapped_device(struct mapped_device *md) 18230f20972fSMike Snitzer { 18240f20972fSMike Snitzer if (md->wq) 18250f20972fSMike Snitzer destroy_workqueue(md->wq); 18266f1c819cSKent Overstreet bioset_exit(&md->bs); 18276f1c819cSKent Overstreet bioset_exit(&md->io_bs); 18280f20972fSMike Snitzer 1829f26c5719SDan Williams if (md->dax_dev) { 1830f26c5719SDan Williams kill_dax(md->dax_dev); 1831f26c5719SDan Williams put_dax(md->dax_dev); 1832f26c5719SDan Williams md->dax_dev = NULL; 1833f26c5719SDan Williams } 1834f26c5719SDan Williams 18350f20972fSMike Snitzer if (md->disk) { 18360f20972fSMike Snitzer spin_lock(&_minor_lock); 18370f20972fSMike Snitzer md->disk->private_data = NULL; 18380f20972fSMike Snitzer spin_unlock(&_minor_lock); 
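/*
 * private_data was cleared under _minor_lock above, so opens racing with
 * teardown can no longer look up this md; it is now safe to tear down
 * the gendisk itself.
 */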
18390f20972fSMike Snitzer del_gendisk(md->disk); 18400f20972fSMike Snitzer put_disk(md->disk); 18410f20972fSMike Snitzer } 18420f20972fSMike Snitzer 18430f20972fSMike Snitzer if (md->queue) 18440f20972fSMike Snitzer blk_cleanup_queue(md->queue); 18450f20972fSMike Snitzer 1846d09960b0STahsin Erdogan cleanup_srcu_struct(&md->io_barrier); 1847d09960b0STahsin Erdogan 18480f20972fSMike Snitzer if (md->bdev) { 18490f20972fSMike Snitzer bdput(md->bdev); 18500f20972fSMike Snitzer md->bdev = NULL; 18510f20972fSMike Snitzer } 18524cc96131SMike Snitzer 1853d5ffebddSMike Snitzer mutex_destroy(&md->suspend_lock); 1854d5ffebddSMike Snitzer mutex_destroy(&md->type_lock); 1855d5ffebddSMike Snitzer mutex_destroy(&md->table_devices_lock); 1856d5ffebddSMike Snitzer 18574cc96131SMike Snitzer dm_mq_cleanup_mapped_device(md); 18580f20972fSMike Snitzer } 18590f20972fSMike Snitzer 18601da177e4SLinus Torvalds /* 18611da177e4SLinus Torvalds * Allocate and initialise a blank device with a given minor. 18621da177e4SLinus Torvalds */ 18632b06cfffSAlasdair G Kergon static struct mapped_device *alloc_dev(int minor) 18641da177e4SLinus Torvalds { 1865115485e8SMike Snitzer int r, numa_node_id = dm_get_numa_node(); 1866976431b0SDan Williams struct dax_device *dax_dev = NULL; 1867115485e8SMike Snitzer struct mapped_device *md; 1868ba61fdd1SJeff Mahoney void *old_md; 18691da177e4SLinus Torvalds 1870856eb091SMikulas Patocka md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id); 18711da177e4SLinus Torvalds if (!md) { 18721da177e4SLinus Torvalds DMWARN("unable to allocate device, out of memory."); 18731da177e4SLinus Torvalds return NULL; 18741da177e4SLinus Torvalds } 18751da177e4SLinus Torvalds 187610da4f79SJeff Mahoney if (!try_module_get(THIS_MODULE)) 18776ed7ade8SMilan Broz goto bad_module_get; 187810da4f79SJeff Mahoney 18791da177e4SLinus Torvalds /* get a minor number for the dev */ 18802b06cfffSAlasdair G Kergon if (minor == DM_ANY_MINOR) 1881cf13ab8eSFrederik Deweerdt r = next_free_minor(&minor); 18822b06cfffSAlasdair G Kergon else 1883cf13ab8eSFrederik Deweerdt r = specific_minor(minor); 18841da177e4SLinus Torvalds if (r < 0) 18856ed7ade8SMilan Broz goto bad_minor; 18861da177e4SLinus Torvalds 188783d5e5b0SMikulas Patocka r = init_srcu_struct(&md->io_barrier); 188883d5e5b0SMikulas Patocka if (r < 0) 188983d5e5b0SMikulas Patocka goto bad_io_barrier; 189083d5e5b0SMikulas Patocka 1891115485e8SMike Snitzer md->numa_node_id = numa_node_id; 1892591ddcfcSMike Snitzer md->init_tio_pdu = false; 1893a5664dadSMike Snitzer md->type = DM_TYPE_NONE; 1894e61290a4SDaniel Walker mutex_init(&md->suspend_lock); 1895a5664dadSMike Snitzer mutex_init(&md->type_lock); 189686f1152bSBenjamin Marzinski mutex_init(&md->table_devices_lock); 1897022c2611SMikulas Patocka spin_lock_init(&md->deferred_lock); 18981da177e4SLinus Torvalds atomic_set(&md->holders, 1); 18995c6bd75dSAlasdair G Kergon atomic_set(&md->open_count, 0); 19001da177e4SLinus Torvalds atomic_set(&md->event_nr, 0); 19017a8c3d3bSMike Anderson atomic_set(&md->uevent_seq, 0); 19027a8c3d3bSMike Anderson INIT_LIST_HEAD(&md->uevent_list); 190386f1152bSBenjamin Marzinski INIT_LIST_HEAD(&md->table_devices); 19047a8c3d3bSMike Anderson spin_lock_init(&md->uevent_lock); 19051da177e4SLinus Torvalds 19066d469642SChristoph Hellwig md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id); 19071da177e4SLinus Torvalds if (!md->queue) 19080f20972fSMike Snitzer goto bad; 1909c12c9a3cSMike Snitzer md->queue->queuedata = md; 1910c12c9a3cSMike Snitzer md->queue->backing_dev_info->congested_data = md; 
19111da177e4SLinus Torvalds 1912c12c9a3cSMike Snitzer md->disk = alloc_disk_node(1, md->numa_node_id); 19131da177e4SLinus Torvalds if (!md->disk) 19140f20972fSMike Snitzer goto bad; 19151da177e4SLinus Torvalds 1916f0b04115SJeff Mahoney init_waitqueue_head(&md->wait); 191753d5914fSMikulas Patocka INIT_WORK(&md->work, dm_wq_work); 1918f0b04115SJeff Mahoney init_waitqueue_head(&md->eventq); 19192995fa78SMikulas Patocka init_completion(&md->kobj_holder.completion); 1920f0b04115SJeff Mahoney 19211da177e4SLinus Torvalds md->disk->major = _major; 19221da177e4SLinus Torvalds md->disk->first_minor = minor; 19231da177e4SLinus Torvalds md->disk->fops = &dm_blk_dops; 19241da177e4SLinus Torvalds md->disk->queue = md->queue; 19251da177e4SLinus Torvalds md->disk->private_data = md; 19261da177e4SLinus Torvalds sprintf(md->disk->disk_name, "dm-%d", minor); 1927f26c5719SDan Williams 1928976431b0SDan Williams if (IS_ENABLED(CONFIG_DAX_DRIVER)) { 1929f26c5719SDan Williams dax_dev = alloc_dax(md, md->disk->disk_name, &dm_dax_ops); 1930f26c5719SDan Williams if (!dax_dev) 1931f26c5719SDan Williams goto bad; 1932976431b0SDan Williams } 1933f26c5719SDan Williams md->dax_dev = dax_dev; 1934f26c5719SDan Williams 1935c100ec49SMike Snitzer add_disk_no_queue_reg(md->disk); 19367e51f257SMike Anderson format_dev_t(md->name, MKDEV(_major, minor)); 19371da177e4SLinus Torvalds 1938670368a8STejun Heo md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0); 1939304f3f6aSMilan Broz if (!md->wq) 19400f20972fSMike Snitzer goto bad; 1941304f3f6aSMilan Broz 194232a926daSMikulas Patocka md->bdev = bdget_disk(md->disk, 0); 194332a926daSMikulas Patocka if (!md->bdev) 19440f20972fSMike Snitzer goto bad; 194532a926daSMikulas Patocka 19463a83f467SMing Lei bio_init(&md->flush_bio, NULL, 0); 1947ff0361b3SJan Kara md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC; 19486a8736d1STejun Heo 1949fd2ed4d2SMikulas Patocka dm_stats_init(&md->stats); 1950fd2ed4d2SMikulas Patocka 1951ba61fdd1SJeff Mahoney /* Populate the mapping, nobody knows we exist yet */ 1952f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 1953ba61fdd1SJeff Mahoney old_md = idr_replace(&_minor_idr, md, minor); 1954f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 1955ba61fdd1SJeff Mahoney 1956ba61fdd1SJeff Mahoney BUG_ON(old_md != MINOR_ALLOCED); 1957ba61fdd1SJeff Mahoney 19581da177e4SLinus Torvalds return md; 19591da177e4SLinus Torvalds 19600f20972fSMike Snitzer bad: 19610f20972fSMike Snitzer cleanup_mapped_device(md); 196283d5e5b0SMikulas Patocka bad_io_barrier: 19631da177e4SLinus Torvalds free_minor(minor); 19646ed7ade8SMilan Broz bad_minor: 196510da4f79SJeff Mahoney module_put(THIS_MODULE); 19666ed7ade8SMilan Broz bad_module_get: 1967856eb091SMikulas Patocka kvfree(md); 19681da177e4SLinus Torvalds return NULL; 19691da177e4SLinus Torvalds } 19701da177e4SLinus Torvalds 1971ae9da83fSJun'ichi Nomura static void unlock_fs(struct mapped_device *md); 1972ae9da83fSJun'ichi Nomura 19731da177e4SLinus Torvalds static void free_dev(struct mapped_device *md) 19741da177e4SLinus Torvalds { 1975f331c029STejun Heo int minor = MINOR(disk_devt(md->disk)); 197663d94e48SJun'ichi Nomura 1977ae9da83fSJun'ichi Nomura unlock_fs(md); 19782eb6e1e3SKeith Busch 19790f20972fSMike Snitzer cleanup_mapped_device(md); 19800f20972fSMike Snitzer 19810f20972fSMike Snitzer free_table_devices(&md->table_devices); 19820f20972fSMike Snitzer dm_stats_cleanup(&md->stats); 198363a4f065SMike Snitzer free_minor(minor); 198463a4f065SMike Snitzer 198510da4f79SJeff Mahoney module_put(THIS_MODULE); 
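/* md itself was allocated with kvzalloc_node() in alloc_dev(), hence kvfree() */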
1986856eb091SMikulas Patocka kvfree(md); 19871da177e4SLinus Torvalds } 19881da177e4SLinus Torvalds 19892a2a4c51SJens Axboe static int __bind_mempools(struct mapped_device *md, struct dm_table *t) 1990e6ee8c0bSKiyoshi Ueda { 1991c0820cf5SMikulas Patocka struct dm_md_mempools *p = dm_table_get_md_mempools(t); 19922a2a4c51SJens Axboe int ret = 0; 1993e6ee8c0bSKiyoshi Ueda 1994545ed20eSToshi Kani if (dm_table_bio_based(t)) { 1995c0820cf5SMikulas Patocka /* 199664f52b0eSMike Snitzer * The md may already have mempools that need changing. 199764f52b0eSMike Snitzer * If so, reload the bioset because front_pad may have changed 199816245bdcSJun'ichi Nomura * when a different table was loaded. 1999c0820cf5SMikulas Patocka */ 20006f1c819cSKent Overstreet bioset_exit(&md->bs); 20016f1c819cSKent Overstreet bioset_exit(&md->io_bs); 20020776aa0eSMike Snitzer 20036f1c819cSKent Overstreet } else if (bioset_initialized(&md->bs)) { 2004cbc4e3c1SMike Snitzer /* 20054e6e36c3SMike Snitzer * There's no need to reload with request-based dm 20064e6e36c3SMike Snitzer * because the size of front_pad doesn't change. 20074e6e36c3SMike Snitzer * Note for the future: if you ever reload the bioset, 20084e6e36c3SMike Snitzer * prep-ed requests in the queue may refer 20094e6e36c3SMike Snitzer * to bios from the old bioset, so you must walk 20104e6e36c3SMike Snitzer * through the queue to unprep them. 2011cbc4e3c1SMike Snitzer */ 2012cbc4e3c1SMike Snitzer goto out; 2013cbc4e3c1SMike Snitzer } 2014cbc4e3c1SMike Snitzer 20156f1c819cSKent Overstreet BUG_ON(!p || 20166f1c819cSKent Overstreet bioset_initialized(&md->bs) || 20176f1c819cSKent Overstreet bioset_initialized(&md->io_bs)); 2018e6ee8c0bSKiyoshi Ueda 20192a2a4c51SJens Axboe ret = bioset_init_from_src(&md->bs, &p->bs); 20202a2a4c51SJens Axboe if (ret) 20212a2a4c51SJens Axboe goto out; 20222a2a4c51SJens Axboe ret = bioset_init_from_src(&md->io_bs, &p->io_bs); 20232a2a4c51SJens Axboe if (ret) 20242a2a4c51SJens Axboe bioset_exit(&md->bs); 2025e6ee8c0bSKiyoshi Ueda out: 202602233342SMike Snitzer /* mempool bind completed, no longer need any mempools in the table */ 2027e6ee8c0bSKiyoshi Ueda dm_table_free_md_mempools(t); 20282a2a4c51SJens Axboe return ret; 2029e6ee8c0bSKiyoshi Ueda } 2030e6ee8c0bSKiyoshi Ueda 20311da177e4SLinus Torvalds /* 20321da177e4SLinus Torvalds * Bind a table to the device. 20331da177e4SLinus Torvalds */ 20341da177e4SLinus Torvalds static void event_callback(void *context) 20351da177e4SLinus Torvalds { 20367a8c3d3bSMike Anderson unsigned long flags; 20377a8c3d3bSMike Anderson LIST_HEAD(uevents); 20381da177e4SLinus Torvalds struct mapped_device *md = (struct mapped_device *) context; 20391da177e4SLinus Torvalds 20407a8c3d3bSMike Anderson spin_lock_irqsave(&md->uevent_lock, flags); 20417a8c3d3bSMike Anderson list_splice_init(&md->uevent_list, &uevents); 20427a8c3d3bSMike Anderson spin_unlock_irqrestore(&md->uevent_lock, flags); 20437a8c3d3bSMike Anderson 2044ed9e1982STejun Heo dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); 20457a8c3d3bSMike Anderson 20461da177e4SLinus Torvalds atomic_inc(&md->event_nr); 20471da177e4SLinus Torvalds wake_up(&md->eventq); 204862e08243SMikulas Patocka dm_issue_global_event(); 20491da177e4SLinus Torvalds } 20501da177e4SLinus Torvalds 2051c217649bSMike Snitzer /* 2052c217649bSMike Snitzer * Protected by md->suspend_lock obtained by dm_swap_table().
2053c217649bSMike Snitzer */ 20544e90188bSAlasdair G Kergon static void __set_size(struct mapped_device *md, sector_t size) 20551da177e4SLinus Torvalds { 20561ea0654eSBart Van Assche lockdep_assert_held(&md->suspend_lock); 20571ea0654eSBart Van Assche 20584e90188bSAlasdair G Kergon set_capacity(md->disk, size); 20591da177e4SLinus Torvalds 2060db8fef4fSMikulas Patocka i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT); 20611da177e4SLinus Torvalds } 20621da177e4SLinus Torvalds 2063042d2a9bSAlasdair G Kergon /* 2064042d2a9bSAlasdair G Kergon * Returns old map, which caller must destroy. 2065042d2a9bSAlasdair G Kergon */ 2066042d2a9bSAlasdair G Kergon static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, 2067754c5fc7SMike Snitzer struct queue_limits *limits) 20681da177e4SLinus Torvalds { 2069042d2a9bSAlasdair G Kergon struct dm_table *old_map; 2070165125e1SJens Axboe struct request_queue *q = md->queue; 2071978e51baSMike Snitzer bool request_based = dm_table_request_based(t); 20721da177e4SLinus Torvalds sector_t size; 20732a2a4c51SJens Axboe int ret; 20741da177e4SLinus Torvalds 20755a8f1f80SBart Van Assche lockdep_assert_held(&md->suspend_lock); 20765a8f1f80SBart Van Assche 20771da177e4SLinus Torvalds size = dm_table_get_size(t); 20783ac51e74SDarrick J. Wong 20793ac51e74SDarrick J. Wong /* 20803ac51e74SDarrick J. Wong * Wipe any geometry if the size of the table changed. 20813ac51e74SDarrick J. Wong */ 2082fd2ed4d2SMikulas Patocka if (size != dm_get_size(md)) 20833ac51e74SDarrick J. Wong memset(&md->geometry, 0, sizeof(md->geometry)); 20843ac51e74SDarrick J. Wong 20854e90188bSAlasdair G Kergon __set_size(md, size); 20861da177e4SLinus Torvalds 2087cf222b37SAlasdair G Kergon dm_table_event_callback(t, event_callback, md); 20882ca3310eSAlasdair G Kergon 2089e6ee8c0bSKiyoshi Ueda /* 2090e6ee8c0bSKiyoshi Ueda * If the old table type wasn't request-based, the queue hasn't been 2091e6ee8c0bSKiyoshi Ueda * stopped during suspension yet. Stop it now to prevent 2092e6ee8c0bSKiyoshi Ueda * I/O from being mapped before resume. 2093e6ee8c0bSKiyoshi Ueda * This must be done before setting the queue restrictions, 2094e6ee8c0bSKiyoshi Ueda * because request-based dm may start running right after they are set. 2095e6ee8c0bSKiyoshi Ueda */ 2096978e51baSMike Snitzer if (request_based) 2097eca7ee6dSMike Snitzer dm_stop_queue(q); 2098978e51baSMike Snitzer 2099978e51baSMike Snitzer if (request_based || md->type == DM_TYPE_NVME_BIO_BASED) { 210016f12266SMike Snitzer /* 2101978e51baSMike Snitzer * Leverage the fact that request-based DM targets and 2102978e51baSMike Snitzer * NVMe bio-based targets are immutable singletons 2103978e51baSMike Snitzer * - used to optimize both dm_request_fn and dm_mq_queue_rq; 2104978e51baSMike Snitzer * and __process_bio.
210516f12266SMike Snitzer */ 210616f12266SMike Snitzer md->immutable_target = dm_table_get_immutable_target(t); 210716f12266SMike Snitzer } 2108e6ee8c0bSKiyoshi Ueda 21092a2a4c51SJens Axboe ret = __bind_mempools(md, t); 21102a2a4c51SJens Axboe if (ret) { 21112a2a4c51SJens Axboe old_map = ERR_PTR(ret); 21122a2a4c51SJens Axboe goto out; 21132a2a4c51SJens Axboe } 2114e6ee8c0bSKiyoshi Ueda 2115a12f5d48SEric Dumazet old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 21161d3aa6f6SMike Snitzer rcu_assign_pointer(md->map, (void *)t); 211736a0456fSAlasdair G Kergon md->immutable_target_type = dm_table_get_immutable_target_type(t); 211836a0456fSAlasdair G Kergon 2119754c5fc7SMike Snitzer dm_table_set_restrictions(t, q, limits); 212041abc4e1SHannes Reinecke if (old_map) 212183d5e5b0SMikulas Patocka dm_sync_table(md); 21222ca3310eSAlasdair G Kergon 21232a2a4c51SJens Axboe out: 2124042d2a9bSAlasdair G Kergon return old_map; 21251da177e4SLinus Torvalds } 21261da177e4SLinus Torvalds 2127a7940155SAlasdair G Kergon /* 2128a7940155SAlasdair G Kergon * Returns unbound table for the caller to free. 2129a7940155SAlasdair G Kergon */ 2130a7940155SAlasdair G Kergon static struct dm_table *__unbind(struct mapped_device *md) 21311da177e4SLinus Torvalds { 2132a12f5d48SEric Dumazet struct dm_table *map = rcu_dereference_protected(md->map, 1); 21331da177e4SLinus Torvalds 21341da177e4SLinus Torvalds if (!map) 2135a7940155SAlasdair G Kergon return NULL; 21361da177e4SLinus Torvalds 21371da177e4SLinus Torvalds dm_table_event_callback(map, NULL, NULL); 21389cdb8520SMonam Agarwal RCU_INIT_POINTER(md->map, NULL); 213983d5e5b0SMikulas Patocka dm_sync_table(md); 2140a7940155SAlasdair G Kergon 2141a7940155SAlasdair G Kergon return map; 21421da177e4SLinus Torvalds } 21431da177e4SLinus Torvalds 21441da177e4SLinus Torvalds /* 21451da177e4SLinus Torvalds * Constructor for a new device. 21461da177e4SLinus Torvalds */ 21472b06cfffSAlasdair G Kergon int dm_create(int minor, struct mapped_device **result) 21481da177e4SLinus Torvalds { 2149c12c9a3cSMike Snitzer int r; 21501da177e4SLinus Torvalds struct mapped_device *md; 21511da177e4SLinus Torvalds 21522b06cfffSAlasdair G Kergon md = alloc_dev(minor); 21531da177e4SLinus Torvalds if (!md) 21541da177e4SLinus Torvalds return -ENXIO; 21551da177e4SLinus Torvalds 2156c12c9a3cSMike Snitzer r = dm_sysfs_init(md); 2157c12c9a3cSMike Snitzer if (r) { 2158c12c9a3cSMike Snitzer free_dev(md); 2159c12c9a3cSMike Snitzer return r; 2160c12c9a3cSMike Snitzer } 2161784aae73SMilan Broz 21621da177e4SLinus Torvalds *result = md; 21631da177e4SLinus Torvalds return 0; 21641da177e4SLinus Torvalds } 21651da177e4SLinus Torvalds 2166a5664dadSMike Snitzer /* 2167a5664dadSMike Snitzer * Functions to manage md->type. 2168a5664dadSMike Snitzer * All are required to hold md->type_lock. 
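 *
 * An illustrative caller (hypothetical sketch; the real users live in
 * dm-ioctl.c) would bracket a type change like this:
 *
 *	dm_lock_md_type(md);
 *	if (dm_get_md_type(md) == DM_TYPE_NONE)
 *		dm_set_md_type(md, new_type);
 *	dm_unlock_md_type(md);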
2169a5664dadSMike Snitzer */ 2170a5664dadSMike Snitzer void dm_lock_md_type(struct mapped_device *md) 2171a5664dadSMike Snitzer { 2172a5664dadSMike Snitzer mutex_lock(&md->type_lock); 2173a5664dadSMike Snitzer } 2174a5664dadSMike Snitzer 2175a5664dadSMike Snitzer void dm_unlock_md_type(struct mapped_device *md) 2176a5664dadSMike Snitzer { 2177a5664dadSMike Snitzer mutex_unlock(&md->type_lock); 2178a5664dadSMike Snitzer } 2179a5664dadSMike Snitzer 21807e0d574fSBart Van Assche void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type) 2181a5664dadSMike Snitzer { 218200c4fc3bSMike Snitzer BUG_ON(!mutex_is_locked(&md->type_lock)); 2183a5664dadSMike Snitzer md->type = type; 2184a5664dadSMike Snitzer } 2185a5664dadSMike Snitzer 21867e0d574fSBart Van Assche enum dm_queue_mode dm_get_md_type(struct mapped_device *md) 2187a5664dadSMike Snitzer { 2188a5664dadSMike Snitzer return md->type; 2189a5664dadSMike Snitzer } 2190a5664dadSMike Snitzer 219136a0456fSAlasdair G Kergon struct target_type *dm_get_immutable_target_type(struct mapped_device *md) 219236a0456fSAlasdair G Kergon { 219336a0456fSAlasdair G Kergon return md->immutable_target_type; 219436a0456fSAlasdair G Kergon } 219536a0456fSAlasdair G Kergon 21964a0b4ddfSMike Snitzer /* 2197f84cb8a4SMike Snitzer * The queue_limits are only valid as long as you have a reference 2198f84cb8a4SMike Snitzer * count on 'md'. 2199f84cb8a4SMike Snitzer */ 2200f84cb8a4SMike Snitzer struct queue_limits *dm_get_queue_limits(struct mapped_device *md) 2201f84cb8a4SMike Snitzer { 2202f84cb8a4SMike Snitzer BUG_ON(!atomic_read(&md->holders)); 2203f84cb8a4SMike Snitzer return &md->queue->limits; 2204f84cb8a4SMike Snitzer } 2205f84cb8a4SMike Snitzer EXPORT_SYMBOL_GPL(dm_get_queue_limits); 2206f84cb8a4SMike Snitzer 22074a0b4ddfSMike Snitzer /* 22084a0b4ddfSMike Snitzer * Setup the DM device's queue based on md's type 22094a0b4ddfSMike Snitzer */ 2210591ddcfcSMike Snitzer int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) 22114a0b4ddfSMike Snitzer { 2212bfebd1cdSMike Snitzer int r; 2213c100ec49SMike Snitzer struct queue_limits limits; 22147e0d574fSBart Van Assche enum dm_queue_mode type = dm_get_md_type(md); 2215bfebd1cdSMike Snitzer 2216545ed20eSToshi Kani switch (type) { 2217bfebd1cdSMike Snitzer case DM_TYPE_REQUEST_BASED: 2218e83068a5SMike Snitzer r = dm_mq_init_request_queue(md, t); 2219bfebd1cdSMike Snitzer if (r) { 2220eca7ee6dSMike Snitzer DMERR("Cannot initialize queue for request-based dm-mq mapped device"); 2221bfebd1cdSMike Snitzer return r; 2222bfebd1cdSMike Snitzer } 2223bfebd1cdSMike Snitzer break; 2224bfebd1cdSMike Snitzer case DM_TYPE_BIO_BASED: 2225545ed20eSToshi Kani case DM_TYPE_DAX_BIO_BASED: 2226eca7ee6dSMike Snitzer dm_init_normal_md_queue(md); 2227ff36ab34SMike Snitzer blk_queue_make_request(md->queue, dm_make_request); 2228bfebd1cdSMike Snitzer break; 2229978e51baSMike Snitzer case DM_TYPE_NVME_BIO_BASED: 2230978e51baSMike Snitzer dm_init_normal_md_queue(md); 2231978e51baSMike Snitzer blk_queue_make_request(md->queue, dm_make_request_nvme); 223273d410c0SMilan Broz break; 22337e0d574fSBart Van Assche case DM_TYPE_NONE: 22347e0d574fSBart Van Assche WARN_ON_ONCE(true); 22357e0d574fSBart Van Assche break; 22361da177e4SLinus Torvalds } 22371da177e4SLinus Torvalds 2238c100ec49SMike Snitzer r = dm_calculate_queue_limits(t, &limits); 2239c100ec49SMike Snitzer if (r) { 2240c100ec49SMike Snitzer DMERR("Cannot calculate initial queue limits"); 2241c100ec49SMike Snitzer return r; 2242c100ec49SMike Snitzer } 2243c100ec49SMike 
Snitzer dm_table_set_restrictions(t, md->queue, &limits); 2244c100ec49SMike Snitzer blk_register_queue(md->disk); 2245c100ec49SMike Snitzer 22461da177e4SLinus Torvalds return 0; 22471da177e4SLinus Torvalds } 22481da177e4SLinus Torvalds 22491da177e4SLinus Torvalds struct mapped_device *dm_get_md(dev_t dev) 22501da177e4SLinus Torvalds { 22511da177e4SLinus Torvalds struct mapped_device *md; 22521da177e4SLinus Torvalds unsigned minor = MINOR(dev); 22531da177e4SLinus Torvalds 22541da177e4SLinus Torvalds if (MAJOR(dev) != _major || minor >= (1 << MINORBITS)) 22551da177e4SLinus Torvalds return NULL; 22561da177e4SLinus Torvalds 22571da177e4SLinus Torvalds spin_lock(&_minor_lock); 22581da177e4SLinus Torvalds 22591da177e4SLinus Torvalds md = idr_find(&_minor_idr, minor); 226049de5769SMike Snitzer if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) || 226149de5769SMike Snitzer test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { 22621da177e4SLinus Torvalds md = NULL; 22631da177e4SLinus Torvalds goto out; 22641da177e4SLinus Torvalds } 22651da177e4SLinus Torvalds dm_get(md); 22661da177e4SLinus Torvalds out: 2267f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 22681da177e4SLinus Torvalds 22691da177e4SLinus Torvalds return md; 2270fba9f90eSJeff Mahoney } 2271fba9f90eSJeff Mahoney EXPORT_SYMBOL_GPL(dm_get_md); 2272fba9f90eSJeff Mahoney 2273637842cfSDavid Teigland void *dm_get_mdptr(struct mapped_device *md) 2274fba9f90eSJeff Mahoney { 2275fba9f90eSJeff Mahoney return md->interface_ptr; 22761da177e4SLinus Torvalds } 2277fba9f90eSJeff Mahoney 2278f32c10b0SJeff Mahoney void dm_set_mdptr(struct mapped_device *md, void *ptr) 22791da177e4SLinus Torvalds { 2280637842cfSDavid Teigland md->interface_ptr = ptr; 2281637842cfSDavid Teigland } 2282637842cfSDavid Teigland 2283d229a958SDavid Teigland void dm_get(struct mapped_device *md) 2284d229a958SDavid Teigland { 2285d229a958SDavid Teigland atomic_inc(&md->holders); 2286d229a958SDavid Teigland BUG_ON(test_bit(DMF_FREEING, &md->flags)); 2287d229a958SDavid Teigland } 2288d229a958SDavid Teigland 2289d229a958SDavid Teigland int dm_hold(struct mapped_device *md) 2290d229a958SDavid Teigland { 2291d229a958SDavid Teigland spin_lock(&_minor_lock); 2292d229a958SDavid Teigland if (test_bit(DMF_FREEING, &md->flags)) { 22939ade92a9SAlasdair G Kergon spin_unlock(&_minor_lock); 2294637842cfSDavid Teigland return -EBUSY; 22959ade92a9SAlasdair G Kergon } 22961da177e4SLinus Torvalds dm_get(md); 22971da177e4SLinus Torvalds spin_unlock(&_minor_lock); 22981da177e4SLinus Torvalds return 0; 22991da177e4SLinus Torvalds } 23001da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(dm_hold); 23011da177e4SLinus Torvalds 23021da177e4SLinus Torvalds const char *dm_device_name(struct mapped_device *md) 23031da177e4SLinus Torvalds { 23041da177e4SLinus Torvalds return md->name; 23051da177e4SLinus Torvalds } 23061da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(dm_device_name); 23071da177e4SLinus Torvalds 230872d94861SAlasdair G Kergon static void __dm_destroy(struct mapped_device *md, bool wait) 230972d94861SAlasdair G Kergon { 231072d94861SAlasdair G Kergon struct dm_table *map; 231172d94861SAlasdair G Kergon int srcu_idx; 231272d94861SAlasdair G Kergon 231372d94861SAlasdair G Kergon might_sleep(); 23141da177e4SLinus Torvalds 23151da177e4SLinus Torvalds spin_lock(&_minor_lock); 23161da177e4SLinus Torvalds idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); 23171da177e4SLinus Torvalds set_bit(DMF_FREEING, &md->flags); 23181da177e4SLinus Torvalds 
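/*
 * DMF_FREEING is set while _minor_lock is still held so that dm_get_md()
 * and dm_hold(), which test the flag under the same lock, cannot take a
 * new reference from this point on.
 */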
spin_unlock(&_minor_lock); 23191da177e4SLinus Torvalds 2320c12c9a3cSMike Snitzer blk_set_queue_dying(md->queue); 23213b785fbcSBart Van Assche 23221da177e4SLinus Torvalds /* 23231da177e4SLinus Torvalds * Take suspend_lock so that presuspend and postsuspend methods 23241da177e4SLinus Torvalds * do not race with internal suspend. 23251da177e4SLinus Torvalds */ 23261da177e4SLinus Torvalds mutex_lock(&md->suspend_lock); 23272a708cffSJunichi Nomura map = dm_get_live_table(md, &srcu_idx); 23281da177e4SLinus Torvalds if (!dm_suspended_md(md)) { 23291da177e4SLinus Torvalds dm_table_presuspend_targets(map); 23301da177e4SLinus Torvalds dm_table_postsuspend_targets(map); 23311da177e4SLinus Torvalds } 23321da177e4SLinus Torvalds /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ 23331da177e4SLinus Torvalds dm_put_live_table(md, srcu_idx); 23342a708cffSJunichi Nomura mutex_unlock(&md->suspend_lock); 23351da177e4SLinus Torvalds 23361da177e4SLinus Torvalds /* 23371da177e4SLinus Torvalds * Rare, but there may be I/O requests still going to complete, 23381da177e4SLinus Torvalds * for example. Wait for all references to disappear. 23391da177e4SLinus Torvalds * No one should increment the reference count of the mapped_device, 23401da177e4SLinus Torvalds * after the mapped_device state becomes DMF_FREEING. 23411da177e4SLinus Torvalds */ 23421da177e4SLinus Torvalds if (wait) 23431da177e4SLinus Torvalds while (atomic_read(&md->holders)) 23441da177e4SLinus Torvalds msleep(1); 23451da177e4SLinus Torvalds else if (atomic_read(&md->holders)) 23461da177e4SLinus Torvalds DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)", 23471da177e4SLinus Torvalds dm_device_name(md), atomic_read(&md->holders)); 23481da177e4SLinus Torvalds 23491da177e4SLinus Torvalds dm_sysfs_exit(md); 23501da177e4SLinus Torvalds dm_table_destroy(__unbind(md)); 2351cf222b37SAlasdair G Kergon free_dev(md); 23521da177e4SLinus Torvalds } 23531da177e4SLinus Torvalds 23541da177e4SLinus Torvalds void dm_destroy(struct mapped_device *md) 23551da177e4SLinus Torvalds { 23561da177e4SLinus Torvalds __dm_destroy(md, true); 23571da177e4SLinus Torvalds } 23581da177e4SLinus Torvalds 23591da177e4SLinus Torvalds void dm_destroy_immediate(struct mapped_device *md) 23601da177e4SLinus Torvalds { 23611134e5aeSMike Anderson __dm_destroy(md, false); 23621da177e4SLinus Torvalds } 2363fba9f90eSJeff Mahoney 2364fba9f90eSJeff Mahoney void dm_put(struct mapped_device *md) 2365f32c10b0SJeff Mahoney { 23661134e5aeSMike Anderson atomic_dec(&md->holders); 2367ba61fdd1SJeff Mahoney } 2368fba9f90eSJeff Mahoney EXPORT_SYMBOL_GPL(dm_put); 2369f32c10b0SJeff Mahoney 2370b48633f8SBart Van Assche static int dm_wait_for_completion(struct mapped_device *md, long task_state) 23711da177e4SLinus Torvalds { 23721da177e4SLinus Torvalds int r = 0; 23739f4c3f87SBart Van Assche DEFINE_WAIT(wait); 23741da177e4SLinus Torvalds 23751da177e4SLinus Torvalds while (1) { 23769f4c3f87SBart Van Assche prepare_to_wait(&md->wait, &wait, task_state); 23771da177e4SLinus Torvalds 23781da177e4SLinus Torvalds if (!md_in_flight(md)) 23791da177e4SLinus Torvalds break; 23801da177e4SLinus Torvalds 2381e3fabdfdSBart Van Assche if (signal_pending_state(task_state, current)) { 23821da177e4SLinus Torvalds r = -EINTR; 23831da177e4SLinus Torvalds break; 23841da177e4SLinus Torvalds } 23851da177e4SLinus Torvalds 23861da177e4SLinus Torvalds io_schedule(); 23871da177e4SLinus Torvalds } 23889f4c3f87SBart Van Assche finish_wait(&md->wait, &wait); 23891da177e4SLinus Torvalds 
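/* r is 0 once all in-flight I/O has drained, or -EINTR if a signal arrived */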
23901da177e4SLinus Torvalds return r; 23911da177e4SLinus Torvalds } 23921da177e4SLinus Torvalds 23931da177e4SLinus Torvalds /* 239479eb885cSEdward Goggin * Process the deferred bios 23951da177e4SLinus Torvalds */ 23961da177e4SLinus Torvalds static void dm_wq_work(struct work_struct *work) 23971da177e4SLinus Torvalds { 23981da177e4SLinus Torvalds struct mapped_device *md = container_of(work, struct mapped_device, 23996d6f10dfSMilan Broz work); 24001da177e4SLinus Torvalds struct bio *c; 24016d6f10dfSMilan Broz int srcu_idx; 24021da177e4SLinus Torvalds struct dm_table *map; 24036d6f10dfSMilan Broz 24049e4e5f87SMilan Broz map = dm_get_live_table(md, &srcu_idx); 24059e4e5f87SMilan Broz 24061da177e4SLinus Torvalds while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 24073b00b203SMikulas Patocka spin_lock_irq(&md->deferred_lock); 24083b00b203SMikulas Patocka c = bio_list_pop(&md->deferred); 2409af7e466aSMikulas Patocka spin_unlock_irq(&md->deferred_lock); 2410af7e466aSMikulas Patocka 2411af7e466aSMikulas Patocka if (!c) 2412df12ee99SAlasdair G Kergon break; 24133b00b203SMikulas Patocka 2414e6ee8c0bSKiyoshi Ueda if (dm_request_based(md)) 2415e6ee8c0bSKiyoshi Ueda generic_make_request(c); 2416e39e2e95SAlasdair G Kergon else 241783d5e5b0SMikulas Patocka __split_and_process_bio(md, map, c); 2418e6ee8c0bSKiyoshi Ueda } 24191da177e4SLinus Torvalds 242083d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 24211da177e4SLinus Torvalds } 24222ca3310eSAlasdair G Kergon 24231da177e4SLinus Torvalds static void dm_queue_flush(struct mapped_device *md) 24241da177e4SLinus Torvalds { 2425cf222b37SAlasdair G Kergon clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 24264e857c58SPeter Zijlstra smp_mb__after_atomic(); 24271da177e4SLinus Torvalds queue_work(md->wq, &md->work); 24281da177e4SLinus Torvalds } 24291da177e4SLinus Torvalds 24301da177e4SLinus Torvalds /* 2431042d2a9bSAlasdair G Kergon * Swap in a new table, returning the old one for the caller to destroy. 24321da177e4SLinus Torvalds */ 2433042d2a9bSAlasdair G Kergon struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) 24341da177e4SLinus Torvalds { 243587eb5b21SMike Christie struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL); 2436754c5fc7SMike Snitzer struct queue_limits limits; 2437042d2a9bSAlasdair G Kergon int r; 24381da177e4SLinus Torvalds 24391da177e4SLinus Torvalds mutex_lock(&md->suspend_lock); 24401da177e4SLinus Torvalds 244193c534aeSAlasdair G Kergon /* device must be suspended */ 24424f186f8bSKiyoshi Ueda if (!dm_suspended_md(md)) 24431da177e4SLinus Torvalds goto out; 24441da177e4SLinus Torvalds 24453ae70656SMike Snitzer /* 24463ae70656SMike Snitzer * If the new table has no data devices, retain the existing limits. 24473ae70656SMike Snitzer * This helps multipath with queue_if_no_path if all paths disappear, 24483ae70656SMike Snitzer * then new I/O is queued based on these limits, and then some paths 24493ae70656SMike Snitzer * reappear. 
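 *
 * A worked sequence, as an illustration only ("new_table" is a
 * hypothetical table with no data devices, e.g. multipath with all
 * paths failed):
 *
 *	dm_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
 *	old_map = dm_swap_table(md, new_table);	// existing limits retained
 *	if (!IS_ERR(old_map))
 *		dm_table_destroy(old_map);	// caller destroys the old map
 *	dm_resume(md);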
24503ae70656SMike Snitzer */ 24513ae70656SMike Snitzer if (dm_table_has_no_data_devices(table)) { 245283d5e5b0SMikulas Patocka live_map = dm_get_live_table_fast(md); 24533ae70656SMike Snitzer if (live_map) 24543ae70656SMike Snitzer limits = md->queue->limits; 245583d5e5b0SMikulas Patocka dm_put_live_table_fast(md); 24563ae70656SMike Snitzer } 24573ae70656SMike Snitzer 245887eb5b21SMike Christie if (!live_map) { 2459754c5fc7SMike Snitzer r = dm_calculate_queue_limits(table, &limits); 2460042d2a9bSAlasdair G Kergon if (r) { 2461042d2a9bSAlasdair G Kergon map = ERR_PTR(r); 2462754c5fc7SMike Snitzer goto out; 2463042d2a9bSAlasdair G Kergon } 246487eb5b21SMike Christie } 2465754c5fc7SMike Snitzer 2466042d2a9bSAlasdair G Kergon map = __bind(md, table, &limits); 246762e08243SMikulas Patocka dm_issue_global_event(); 24681da177e4SLinus Torvalds 24691da177e4SLinus Torvalds out: 24701da177e4SLinus Torvalds mutex_unlock(&md->suspend_lock); 2471042d2a9bSAlasdair G Kergon return map; 24722ca3310eSAlasdair G Kergon } 24731da177e4SLinus Torvalds 2474cf222b37SAlasdair G Kergon /* 2475dfbe03f6SAlasdair G Kergon * Functions to lock and unlock any filesystem running on the 2476d1782a3bSAlasdair G Kergon * device. 2477d1782a3bSAlasdair G Kergon */ 24782ca3310eSAlasdair G Kergon static int lock_fs(struct mapped_device *md) 2479dfbe03f6SAlasdair G Kergon { 24801da177e4SLinus Torvalds int r; 24811da177e4SLinus Torvalds 24821da177e4SLinus Torvalds WARN_ON(md->frozen_sb); 2483dfbe03f6SAlasdair G Kergon 2484db8fef4fSMikulas Patocka md->frozen_sb = freeze_bdev(md->bdev); 2485dfbe03f6SAlasdair G Kergon if (IS_ERR(md->frozen_sb)) { 2486cf222b37SAlasdair G Kergon r = PTR_ERR(md->frozen_sb); 2487e39e2e95SAlasdair G Kergon md->frozen_sb = NULL; 2488e39e2e95SAlasdair G Kergon return r; 2489dfbe03f6SAlasdair G Kergon } 2490dfbe03f6SAlasdair G Kergon 2491aa8d7c2fSAlasdair G Kergon set_bit(DMF_FROZEN, &md->flags); 2492aa8d7c2fSAlasdair G Kergon 24931da177e4SLinus Torvalds return 0; 24941da177e4SLinus Torvalds } 24951da177e4SLinus Torvalds 24962ca3310eSAlasdair G Kergon static void unlock_fs(struct mapped_device *md) 24971da177e4SLinus Torvalds { 2498aa8d7c2fSAlasdair G Kergon if (!test_bit(DMF_FROZEN, &md->flags)) 2499aa8d7c2fSAlasdair G Kergon return; 2500aa8d7c2fSAlasdair G Kergon 2501db8fef4fSMikulas Patocka thaw_bdev(md->bdev, md->frozen_sb); 25021da177e4SLinus Torvalds md->frozen_sb = NULL; 2503aa8d7c2fSAlasdair G Kergon clear_bit(DMF_FROZEN, &md->flags); 25041da177e4SLinus Torvalds } 25051da177e4SLinus Torvalds 25061da177e4SLinus Torvalds /* 2507b48633f8SBart Van Assche * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG 2508b48633f8SBart Van Assche * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE 2509b48633f8SBart Van Assche * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY 2510b48633f8SBart Van Assche * 2511ffcc3936SMike Snitzer * If __dm_suspend returns 0, the device is completely quiescent 2512ffcc3936SMike Snitzer * now. There is no request-processing activity. All new requests 2513ffcc3936SMike Snitzer * are being added to md->deferred list. 
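 *
 * Caller-side sketch (illustrative; both callers appear later in this
 * file) of the two flag/state combinations in use:
 *
 *	// userspace-driven flush suspend, abortable by a signal
 *	__dm_suspend(md, map, DM_SUSPEND_LOCKFS_FLAG,
 *		     TASK_INTERRUPTIBLE, DMF_SUSPENDED);
 *
 *	// kernel-internal noflush suspend, never aborted
 *	__dm_suspend(md, map, DM_SUSPEND_NOFLUSH_FLAG,
 *		     TASK_UNINTERRUPTIBLE, DMF_SUSPENDED_INTERNALLY);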
2514cec47e3dSKiyoshi Ueda  */
2515ffcc3936SMike Snitzer static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
2516b48633f8SBart Van Assche 			unsigned suspend_flags, long task_state,
2517eaf9a736SMike Snitzer 			int dmf_suspended_flag)
25181da177e4SLinus Torvalds {
2519ffcc3936SMike Snitzer 	bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
2520ffcc3936SMike Snitzer 	bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
2521ffcc3936SMike Snitzer 	int r;
2522cf222b37SAlasdair G Kergon 
25235a8f1f80SBart Van Assche 	lockdep_assert_held(&md->suspend_lock);
25245a8f1f80SBart Van Assche 
25252e93ccc1SKiyoshi Ueda 	/*
25262e93ccc1SKiyoshi Ueda 	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
25272e93ccc1SKiyoshi Ueda 	 * This flag is cleared before dm_suspend returns.
25282e93ccc1SKiyoshi Ueda 	 */
25292e93ccc1SKiyoshi Ueda 	if (noflush)
25302e93ccc1SKiyoshi Ueda 		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
253186331f39SBart Van Assche 	else
253286331f39SBart Van Assche 		pr_debug("%s: suspending with flush\n", dm_device_name(md));
25332e93ccc1SKiyoshi Ueda 
2534d67ee213SMike Snitzer 	/*
2535d67ee213SMike Snitzer 	 * This gets reverted if there's an error later and the targets
2536d67ee213SMike Snitzer 	 * provide the .presuspend_undo hook.
2537d67ee213SMike Snitzer 	 */
25381da177e4SLinus Torvalds 	dm_table_presuspend_targets(map);
25391da177e4SLinus Torvalds 
25402e93ccc1SKiyoshi Ueda 	/*
25419f518b27SKiyoshi Ueda 	 * Flush I/O to the device.
25429f518b27SKiyoshi Ueda 	 * Any I/O submitted after lock_fs() may not be flushed.
25439f518b27SKiyoshi Ueda 	 * noflush takes precedence over do_lockfs.
25449f518b27SKiyoshi Ueda 	 * (lock_fs() flushes I/Os and waits for them to complete.)
25452e93ccc1SKiyoshi Ueda 	 */
254632a926daSMikulas Patocka 	if (!noflush && do_lockfs) {
25472ca3310eSAlasdair G Kergon 		r = lock_fs(md);
2548d67ee213SMike Snitzer 		if (r) {
2549d67ee213SMike Snitzer 			dm_table_presuspend_undo_targets(map);
2550ffcc3936SMike Snitzer 			return r;
2551aa8d7c2fSAlasdair G Kergon 		}
2552d67ee213SMike Snitzer 	}
25531da177e4SLinus Torvalds 
25541da177e4SLinus Torvalds 	/*
25553b00b203SMikulas Patocka 	 * Here we must make sure that no processes are submitting requests
25563b00b203SMikulas Patocka 	 * to target drivers, i.e. no one may be executing
25573b00b203SMikulas Patocka 	 * __split_and_process_bio. This is called from dm_request and
25583b00b203SMikulas Patocka 	 * dm_wq_work.
25593b00b203SMikulas Patocka 	 *
25603b00b203SMikulas Patocka 	 * To get all processes out of __split_and_process_bio in dm_request,
25613b00b203SMikulas Patocka 	 * we take the write lock. To prevent any process from reentering
25626a8736d1STejun Heo 	 * __split_and_process_bio from dm_request and quiesce the thread
25636a8736d1STejun Heo 	 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
25646a8736d1STejun Heo 	 * flush_workqueue(md->wq).
25651da177e4SLinus Torvalds 	 */
25661eb787ecSAlasdair G Kergon 	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
256741abc4e1SHannes Reinecke 	if (map)
256883d5e5b0SMikulas Patocka 		synchronize_srcu(&md->io_barrier);
25691da177e4SLinus Torvalds 
2570d0bcb878SKiyoshi Ueda 	/*
257129e4013dSTejun Heo 	 * Stop md->queue before flushing md->wq in case request-based
257229e4013dSTejun Heo 	 * dm defers requests to md->wq from md->queue.
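 *
 * Ordering sketch (illustrative): flushing md->wq first would let a
 * request still being dispatched from md->queue queue new work just
 * after the flush, defeating the quiesce; hence:
 *
 *	dm_stop_queue(md->queue);	// no further dispatch into md->wq
 *	flush_workqueue(md->wq);	// the workqueue can no longer refill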
2573d0bcb878SKiyoshi Ueda */ 25746a23e05cSJens Axboe if (dm_request_based(md)) 2575eca7ee6dSMike Snitzer dm_stop_queue(md->queue); 2576cec47e3dSKiyoshi Ueda 2577d0bcb878SKiyoshi Ueda flush_workqueue(md->wq); 2578d0bcb878SKiyoshi Ueda 25791da177e4SLinus Torvalds /* 25803b00b203SMikulas Patocka * At this point no more requests are entering target request routines. 25813b00b203SMikulas Patocka * We call dm_wait_for_completion to wait for all existing requests 25823b00b203SMikulas Patocka * to finish. 25831da177e4SLinus Torvalds */ 2584b48633f8SBart Van Assche r = dm_wait_for_completion(md, task_state); 2585eaf9a736SMike Snitzer if (!r) 2586eaf9a736SMike Snitzer set_bit(dmf_suspended_flag, &md->flags); 25871da177e4SLinus Torvalds 25886d6f10dfSMilan Broz if (noflush) 2589022c2611SMikulas Patocka clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 259041abc4e1SHannes Reinecke if (map) 259183d5e5b0SMikulas Patocka synchronize_srcu(&md->io_barrier); 25922e93ccc1SKiyoshi Ueda 25931da177e4SLinus Torvalds /* were we interrupted ? */ 259446125c1cSMilan Broz if (r < 0) { 25959a1fb464SMikulas Patocka dm_queue_flush(md); 259673d410c0SMilan Broz 2597cec47e3dSKiyoshi Ueda if (dm_request_based(md)) 2598eca7ee6dSMike Snitzer dm_start_queue(md->queue); 2599cec47e3dSKiyoshi Ueda 26002ca3310eSAlasdair G Kergon unlock_fs(md); 2601d67ee213SMike Snitzer dm_table_presuspend_undo_targets(map); 2602ffcc3936SMike Snitzer /* pushback list is already flushed, so skip flush */ 2603ffcc3936SMike Snitzer } 2604ffcc3936SMike Snitzer 2605ffcc3936SMike Snitzer return r; 26062ca3310eSAlasdair G Kergon } 26072ca3310eSAlasdair G Kergon 26083b00b203SMikulas Patocka /* 2609ffcc3936SMike Snitzer * We need to be able to change a mapping table under a mounted 2610ffcc3936SMike Snitzer * filesystem. For example we might want to move some data in 2611ffcc3936SMike Snitzer * the background. Before the table can be swapped with 2612ffcc3936SMike Snitzer * dm_bind_table, dm_suspend must be called to flush any in 2613ffcc3936SMike Snitzer * flight bios and ensure that any further io gets deferred. 26143b00b203SMikulas Patocka */ 2615ffcc3936SMike Snitzer /* 2616ffcc3936SMike Snitzer * Suspend mechanism in request-based dm. 2617ffcc3936SMike Snitzer * 2618ffcc3936SMike Snitzer * 1. Flush all I/Os by lock_fs() if needed. 2619ffcc3936SMike Snitzer * 2. Stop dispatching any I/O by stopping the request_queue. 2620ffcc3936SMike Snitzer * 3. Wait for all in-flight I/Os to be completed or requeued. 2621ffcc3936SMike Snitzer * 2622ffcc3936SMike Snitzer * To abort suspend, start the request_queue. 
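 *
 * Abort sketch (illustrative), mirroring the error path above: if the
 * wait in step 3 is interrupted, __dm_suspend() unwinds in reverse:
 *
 *	dm_queue_flush(md);		// reopen deferred bio processing
 *	dm_start_queue(md->queue);	// restart request dispatch
 *	unlock_fs(md);			// thaw the fs if it was frozen
 *	dm_table_presuspend_undo_targets(map);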
2623ffcc3936SMike Snitzer */ 2624ffcc3936SMike Snitzer int dm_suspend(struct mapped_device *md, unsigned suspend_flags) 2625ffcc3936SMike Snitzer { 2626ffcc3936SMike Snitzer struct dm_table *map = NULL; 2627ffcc3936SMike Snitzer int r = 0; 2628ffcc3936SMike Snitzer 2629ffcc3936SMike Snitzer retry: 2630ffcc3936SMike Snitzer mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); 2631ffcc3936SMike Snitzer 2632ffcc3936SMike Snitzer if (dm_suspended_md(md)) { 2633ffcc3936SMike Snitzer r = -EINVAL; 2634ffcc3936SMike Snitzer goto out_unlock; 2635ffcc3936SMike Snitzer } 2636ffcc3936SMike Snitzer 2637ffcc3936SMike Snitzer if (dm_suspended_internally_md(md)) { 2638ffcc3936SMike Snitzer /* already internally suspended, wait for internal resume */ 2639ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 2640ffcc3936SMike Snitzer r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); 2641ffcc3936SMike Snitzer if (r) 2642ffcc3936SMike Snitzer return r; 2643ffcc3936SMike Snitzer goto retry; 2644ffcc3936SMike Snitzer } 2645ffcc3936SMike Snitzer 2646a12f5d48SEric Dumazet map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2647ffcc3936SMike Snitzer 2648eaf9a736SMike Snitzer r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED); 2649ffcc3936SMike Snitzer if (r) 2650ffcc3936SMike Snitzer goto out_unlock; 26513b00b203SMikulas Patocka 26524d4471cbSKiyoshi Ueda dm_table_postsuspend_targets(map); 26534d4471cbSKiyoshi Ueda 2654d287483dSAlasdair G Kergon out_unlock: 2655e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock); 2656cf222b37SAlasdair G Kergon return r; 26571da177e4SLinus Torvalds } 26581da177e4SLinus Torvalds 2659ffcc3936SMike Snitzer static int __dm_resume(struct mapped_device *md, struct dm_table *map) 26601da177e4SLinus Torvalds { 2661ffcc3936SMike Snitzer if (map) { 2662ffcc3936SMike Snitzer int r = dm_table_resume_targets(map); 26638757b776SMilan Broz if (r) 2664ffcc3936SMike Snitzer return r; 2665ffcc3936SMike Snitzer } 26662ca3310eSAlasdair G Kergon 26679a1fb464SMikulas Patocka dm_queue_flush(md); 26682ca3310eSAlasdair G Kergon 2669cec47e3dSKiyoshi Ueda /* 2670cec47e3dSKiyoshi Ueda * Flushing deferred I/Os must be done after targets are resumed 2671cec47e3dSKiyoshi Ueda * so that mapping of targets can work correctly. 2672cec47e3dSKiyoshi Ueda * Request-based dm is queueing the deferred I/Os in its request_queue. 
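 *
 * Resume-order sketch (illustrative), matching the code around this
 * comment:
 *
 *	dm_table_resume_targets(map);	// targets can map again
 *	dm_queue_flush(md);		// replay bios parked on md->deferred
 *	dm_start_queue(md->queue);	// request-based only
 *	unlock_fs(md);			// finally let new fs I/O flow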
2673cec47e3dSKiyoshi Ueda */ 2674cec47e3dSKiyoshi Ueda if (dm_request_based(md)) 2675eca7ee6dSMike Snitzer dm_start_queue(md->queue); 2676cec47e3dSKiyoshi Ueda 26772ca3310eSAlasdair G Kergon unlock_fs(md); 26782ca3310eSAlasdair G Kergon 2679ffcc3936SMike Snitzer return 0; 2680ffcc3936SMike Snitzer } 2681ffcc3936SMike Snitzer 2682ffcc3936SMike Snitzer int dm_resume(struct mapped_device *md) 2683ffcc3936SMike Snitzer { 26848dc23658SMinfei Huang int r; 2685ffcc3936SMike Snitzer struct dm_table *map = NULL; 2686ffcc3936SMike Snitzer 2687ffcc3936SMike Snitzer retry: 26888dc23658SMinfei Huang r = -EINVAL; 2689ffcc3936SMike Snitzer mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); 2690ffcc3936SMike Snitzer 2691ffcc3936SMike Snitzer if (!dm_suspended_md(md)) 2692ffcc3936SMike Snitzer goto out; 2693ffcc3936SMike Snitzer 2694ffcc3936SMike Snitzer if (dm_suspended_internally_md(md)) { 2695ffcc3936SMike Snitzer /* already internally suspended, wait for internal resume */ 2696ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 2697ffcc3936SMike Snitzer r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); 2698ffcc3936SMike Snitzer if (r) 2699ffcc3936SMike Snitzer return r; 2700ffcc3936SMike Snitzer goto retry; 2701ffcc3936SMike Snitzer } 2702ffcc3936SMike Snitzer 2703a12f5d48SEric Dumazet map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2704ffcc3936SMike Snitzer if (!map || !dm_table_get_size(map)) 2705ffcc3936SMike Snitzer goto out; 2706ffcc3936SMike Snitzer 2707ffcc3936SMike Snitzer r = __dm_resume(md, map); 2708ffcc3936SMike Snitzer if (r) 2709ffcc3936SMike Snitzer goto out; 2710ffcc3936SMike Snitzer 27112ca3310eSAlasdair G Kergon clear_bit(DMF_SUSPENDED, &md->flags); 2712cf222b37SAlasdair G Kergon out: 2713e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock); 27142ca3310eSAlasdair G Kergon 2715cf222b37SAlasdair G Kergon return r; 27161da177e4SLinus Torvalds } 27171da177e4SLinus Torvalds 2718fd2ed4d2SMikulas Patocka /* 2719fd2ed4d2SMikulas Patocka * Internal suspend/resume works like userspace-driven suspend. It waits 2720fd2ed4d2SMikulas Patocka * until all bios finish and prevents issuing new bios to the target drivers. 2721fd2ed4d2SMikulas Patocka * It may be used only from the kernel. 2722fd2ed4d2SMikulas Patocka */ 2723fd2ed4d2SMikulas Patocka 2724ffcc3936SMike Snitzer static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags) 2725ffcc3936SMike Snitzer { 2726ffcc3936SMike Snitzer struct dm_table *map = NULL; 2727ffcc3936SMike Snitzer 27281ea0654eSBart Van Assche lockdep_assert_held(&md->suspend_lock); 27291ea0654eSBart Van Assche 273096b26c8cSMikulas Patocka if (md->internal_suspend_count++) 2731ffcc3936SMike Snitzer return; /* nested internal suspend */ 2732ffcc3936SMike Snitzer 2733ffcc3936SMike Snitzer if (dm_suspended_md(md)) { 2734ffcc3936SMike Snitzer set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 2735ffcc3936SMike Snitzer return; /* nest suspend */ 2736ffcc3936SMike Snitzer } 2737ffcc3936SMike Snitzer 2738a12f5d48SEric Dumazet map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2739ffcc3936SMike Snitzer 2740ffcc3936SMike Snitzer /* 2741ffcc3936SMike Snitzer * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is 2742ffcc3936SMike Snitzer * supported. 
Properly supporting a TASK_INTERRUPTIBLE internal suspend 2743ffcc3936SMike Snitzer * would require changing .presuspend to return an error -- avoid this 2744ffcc3936SMike Snitzer * until there is a need for more elaborate variants of internal suspend. 2745ffcc3936SMike Snitzer */ 2746eaf9a736SMike Snitzer (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE, 2747eaf9a736SMike Snitzer DMF_SUSPENDED_INTERNALLY); 2748ffcc3936SMike Snitzer 2749ffcc3936SMike Snitzer dm_table_postsuspend_targets(map); 2750ffcc3936SMike Snitzer } 2751ffcc3936SMike Snitzer 2752ffcc3936SMike Snitzer static void __dm_internal_resume(struct mapped_device *md) 2753ffcc3936SMike Snitzer { 275496b26c8cSMikulas Patocka BUG_ON(!md->internal_suspend_count); 275596b26c8cSMikulas Patocka 275696b26c8cSMikulas Patocka if (--md->internal_suspend_count) 2757ffcc3936SMike Snitzer return; /* resume from nested internal suspend */ 2758ffcc3936SMike Snitzer 2759ffcc3936SMike Snitzer if (dm_suspended_md(md)) 2760ffcc3936SMike Snitzer goto done; /* resume from nested suspend */ 2761ffcc3936SMike Snitzer 2762ffcc3936SMike Snitzer /* 2763ffcc3936SMike Snitzer * NOTE: existing callers don't need to call dm_table_resume_targets 2764ffcc3936SMike Snitzer * (which may fail -- so best to avoid it for now by passing NULL map) 2765ffcc3936SMike Snitzer */ 2766ffcc3936SMike Snitzer (void) __dm_resume(md, NULL); 2767ffcc3936SMike Snitzer 2768ffcc3936SMike Snitzer done: 2769ffcc3936SMike Snitzer clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 2770ffcc3936SMike Snitzer smp_mb__after_atomic(); 2771ffcc3936SMike Snitzer wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY); 2772ffcc3936SMike Snitzer } 2773ffcc3936SMike Snitzer 2774ffcc3936SMike Snitzer void dm_internal_suspend_noflush(struct mapped_device *md) 2775fd2ed4d2SMikulas Patocka { 2776fd2ed4d2SMikulas Patocka mutex_lock(&md->suspend_lock); 2777ffcc3936SMike Snitzer __dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG); 2778ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 2779ffcc3936SMike Snitzer } 2780ffcc3936SMike Snitzer EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush); 2781ffcc3936SMike Snitzer 2782ffcc3936SMike Snitzer void dm_internal_resume(struct mapped_device *md) 2783ffcc3936SMike Snitzer { 2784ffcc3936SMike Snitzer mutex_lock(&md->suspend_lock); 2785ffcc3936SMike Snitzer __dm_internal_resume(md); 2786ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 2787ffcc3936SMike Snitzer } 2788ffcc3936SMike Snitzer EXPORT_SYMBOL_GPL(dm_internal_resume); 2789ffcc3936SMike Snitzer 2790ffcc3936SMike Snitzer /* 2791ffcc3936SMike Snitzer * Fast variants of internal suspend/resume hold md->suspend_lock, 2792ffcc3936SMike Snitzer * which prevents interaction with userspace-driven suspend. 
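 *
 * Pairing sketch (illustrative): the lock is taken in
 * dm_internal_suspend_fast() and only dropped by
 * dm_internal_resume_fast(), so the pair brackets a critical section
 * that a userspace suspend ioctl cannot enter:
 *
 *	dm_internal_suspend_fast(md);	// suspend_lock held, I/O quiesced
 *	// ...inspect or rewrite device state safely...
 *	dm_internal_resume_fast(md);	// suspend_lock released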
2793ffcc3936SMike Snitzer */ 2794ffcc3936SMike Snitzer 2795ffcc3936SMike Snitzer void dm_internal_suspend_fast(struct mapped_device *md) 2796ffcc3936SMike Snitzer { 2797ffcc3936SMike Snitzer mutex_lock(&md->suspend_lock); 2798ffcc3936SMike Snitzer if (dm_suspended_md(md) || dm_suspended_internally_md(md)) 2799fd2ed4d2SMikulas Patocka return; 2800fd2ed4d2SMikulas Patocka 2801fd2ed4d2SMikulas Patocka set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 2802fd2ed4d2SMikulas Patocka synchronize_srcu(&md->io_barrier); 2803fd2ed4d2SMikulas Patocka flush_workqueue(md->wq); 2804fd2ed4d2SMikulas Patocka dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); 2805fd2ed4d2SMikulas Patocka } 2806b735fedeSMikulas Patocka EXPORT_SYMBOL_GPL(dm_internal_suspend_fast); 2807fd2ed4d2SMikulas Patocka 2808ffcc3936SMike Snitzer void dm_internal_resume_fast(struct mapped_device *md) 2809fd2ed4d2SMikulas Patocka { 2810ffcc3936SMike Snitzer if (dm_suspended_md(md) || dm_suspended_internally_md(md)) 2811fd2ed4d2SMikulas Patocka goto done; 2812fd2ed4d2SMikulas Patocka 2813fd2ed4d2SMikulas Patocka dm_queue_flush(md); 2814fd2ed4d2SMikulas Patocka 2815fd2ed4d2SMikulas Patocka done: 2816fd2ed4d2SMikulas Patocka mutex_unlock(&md->suspend_lock); 2817fd2ed4d2SMikulas Patocka } 2818b735fedeSMikulas Patocka EXPORT_SYMBOL_GPL(dm_internal_resume_fast); 2819fd2ed4d2SMikulas Patocka 28201da177e4SLinus Torvalds /*----------------------------------------------------------------- 28211da177e4SLinus Torvalds * Event notification. 28221da177e4SLinus Torvalds *---------------------------------------------------------------*/ 28233abf85b5SPeter Rajnoha int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, 282460935eb2SMilan Broz unsigned cookie) 282569267a30SAlasdair G Kergon { 282660935eb2SMilan Broz char udev_cookie[DM_COOKIE_LENGTH]; 282760935eb2SMilan Broz char *envp[] = { udev_cookie, NULL }; 282860935eb2SMilan Broz 282960935eb2SMilan Broz if (!cookie) 28303abf85b5SPeter Rajnoha return kobject_uevent(&disk_to_dev(md->disk)->kobj, action); 283160935eb2SMilan Broz else { 283260935eb2SMilan Broz snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u", 283360935eb2SMilan Broz DM_COOKIE_ENV_VAR_NAME, cookie); 28343abf85b5SPeter Rajnoha return kobject_uevent_env(&disk_to_dev(md->disk)->kobj, 28353abf85b5SPeter Rajnoha action, envp); 283660935eb2SMilan Broz } 283769267a30SAlasdair G Kergon } 283869267a30SAlasdair G Kergon 28397a8c3d3bSMike Anderson uint32_t dm_next_uevent_seq(struct mapped_device *md) 28407a8c3d3bSMike Anderson { 28417a8c3d3bSMike Anderson return atomic_add_return(1, &md->uevent_seq); 28427a8c3d3bSMike Anderson } 28437a8c3d3bSMike Anderson 28441da177e4SLinus Torvalds uint32_t dm_get_event_nr(struct mapped_device *md) 28451da177e4SLinus Torvalds { 28461da177e4SLinus Torvalds return atomic_read(&md->event_nr); 28471da177e4SLinus Torvalds } 28481da177e4SLinus Torvalds 28491da177e4SLinus Torvalds int dm_wait_event(struct mapped_device *md, int event_nr) 28501da177e4SLinus Torvalds { 28511da177e4SLinus Torvalds return wait_event_interruptible(md->eventq, 28521da177e4SLinus Torvalds (event_nr != atomic_read(&md->event_nr))); 28531da177e4SLinus Torvalds } 28541da177e4SLinus Torvalds 28557a8c3d3bSMike Anderson void dm_uevent_add(struct mapped_device *md, struct list_head *elist) 28567a8c3d3bSMike Anderson { 28577a8c3d3bSMike Anderson unsigned long flags; 28587a8c3d3bSMike Anderson 28597a8c3d3bSMike Anderson spin_lock_irqsave(&md->uevent_lock, flags); 28607a8c3d3bSMike Anderson list_add(elist, &md->uevent_list); 
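	/*
	 * Cookie sketch for the uevent helpers above (illustrative values):
	 *
	 *	dm_kobject_uevent(md, KOBJ_CHANGE, 0xdeadbeef);
	 *	// udev receives ACTION=change plus DM_COOKIE=3735928559
	 */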
28617a8c3d3bSMike Anderson spin_unlock_irqrestore(&md->uevent_lock, flags); 28627a8c3d3bSMike Anderson } 28637a8c3d3bSMike Anderson 28641da177e4SLinus Torvalds /* 28651da177e4SLinus Torvalds * The gendisk is only valid as long as you have a reference 28661da177e4SLinus Torvalds * count on 'md'. 28671da177e4SLinus Torvalds */ 28681da177e4SLinus Torvalds struct gendisk *dm_disk(struct mapped_device *md) 28691da177e4SLinus Torvalds { 28701da177e4SLinus Torvalds return md->disk; 28711da177e4SLinus Torvalds } 287265ff5b7dSSami Tolvanen EXPORT_SYMBOL_GPL(dm_disk); 28731da177e4SLinus Torvalds 2874784aae73SMilan Broz struct kobject *dm_kobject(struct mapped_device *md) 2875784aae73SMilan Broz { 28762995fa78SMikulas Patocka return &md->kobj_holder.kobj; 2877784aae73SMilan Broz } 2878784aae73SMilan Broz 2879784aae73SMilan Broz struct mapped_device *dm_get_from_kobject(struct kobject *kobj) 2880784aae73SMilan Broz { 2881784aae73SMilan Broz struct mapped_device *md; 2882784aae73SMilan Broz 28832995fa78SMikulas Patocka md = container_of(kobj, struct mapped_device, kobj_holder.kobj); 2884784aae73SMilan Broz 2885b9a41d21SHou Tao spin_lock(&_minor_lock); 2886b9a41d21SHou Tao if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { 2887b9a41d21SHou Tao md = NULL; 2888b9a41d21SHou Tao goto out; 2889b9a41d21SHou Tao } 2890784aae73SMilan Broz dm_get(md); 2891b9a41d21SHou Tao out: 2892b9a41d21SHou Tao spin_unlock(&_minor_lock); 2893b9a41d21SHou Tao 2894784aae73SMilan Broz return md; 2895784aae73SMilan Broz } 2896784aae73SMilan Broz 28974f186f8bSKiyoshi Ueda int dm_suspended_md(struct mapped_device *md) 28981da177e4SLinus Torvalds { 28991da177e4SLinus Torvalds return test_bit(DMF_SUSPENDED, &md->flags); 29001da177e4SLinus Torvalds } 29011da177e4SLinus Torvalds 2902ffcc3936SMike Snitzer int dm_suspended_internally_md(struct mapped_device *md) 2903ffcc3936SMike Snitzer { 2904ffcc3936SMike Snitzer return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 2905ffcc3936SMike Snitzer } 2906ffcc3936SMike Snitzer 29072c140a24SMikulas Patocka int dm_test_deferred_remove_flag(struct mapped_device *md) 29082c140a24SMikulas Patocka { 29092c140a24SMikulas Patocka return test_bit(DMF_DEFERRED_REMOVE, &md->flags); 29102c140a24SMikulas Patocka } 29112c140a24SMikulas Patocka 291264dbce58SKiyoshi Ueda int dm_suspended(struct dm_target *ti) 291364dbce58SKiyoshi Ueda { 2914ecdb2e25SKiyoshi Ueda return dm_suspended_md(dm_table_get_md(ti->table)); 291564dbce58SKiyoshi Ueda } 291664dbce58SKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_suspended); 291764dbce58SKiyoshi Ueda 29182e93ccc1SKiyoshi Ueda int dm_noflush_suspending(struct dm_target *ti) 29192e93ccc1SKiyoshi Ueda { 2920ecdb2e25SKiyoshi Ueda return __noflush_suspending(dm_table_get_md(ti->table)); 29212e93ccc1SKiyoshi Ueda } 29222e93ccc1SKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_noflush_suspending); 29232e93ccc1SKiyoshi Ueda 29247e0d574fSBart Van Assche struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type, 29250776aa0eSMike Snitzer unsigned integrity, unsigned per_io_data_size, 29260776aa0eSMike Snitzer unsigned min_pool_size) 2927e6ee8c0bSKiyoshi Ueda { 2928115485e8SMike Snitzer struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id); 292978d8e58aSMike Snitzer unsigned int pool_size = 0; 293064f52b0eSMike Snitzer unsigned int front_pad, io_front_pad; 29316f1c819cSKent Overstreet int ret; 2932e6ee8c0bSKiyoshi Ueda 2933e6ee8c0bSKiyoshi Ueda if (!pools) 29344e6e36c3SMike Snitzer return NULL; 2935e6ee8c0bSKiyoshi Ueda 
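	/*
	 * front_pad layout sketch (illustrative numbers): a bio-based target
	 * declaring per_io_data_size = 32 gets its per-io data placed in
	 * front of every clone bio allocated from pools->bs:
	 *
	 *	front_pad = roundup(32, __alignof__(struct dm_target_io))
	 *		    + offsetof(struct dm_target_io, clone);
	 *
	 *	[ per-io data | struct dm_target_io ... | struct bio ]
	 *	                                          ^ pointer returned by
	 *	                                            bio_alloc_bioset()
	 */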
293678d8e58aSMike Snitzer switch (type) { 293778d8e58aSMike Snitzer case DM_TYPE_BIO_BASED: 2938545ed20eSToshi Kani case DM_TYPE_DAX_BIO_BASED: 293922c11858SMike Snitzer case DM_TYPE_NVME_BIO_BASED: 29400776aa0eSMike Snitzer pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size); 294130187e1dSMike Snitzer front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone); 294264f52b0eSMike Snitzer io_front_pad = roundup(front_pad, __alignof__(struct dm_io)) + offsetof(struct dm_io, tio); 29436f1c819cSKent Overstreet ret = bioset_init(&pools->io_bs, pool_size, io_front_pad, 0); 29446f1c819cSKent Overstreet if (ret) 294564f52b0eSMike Snitzer goto out; 29466f1c819cSKent Overstreet if (integrity && bioset_integrity_create(&pools->io_bs, pool_size)) 2947eb8db831SChristoph Hellwig goto out; 294878d8e58aSMike Snitzer break; 294978d8e58aSMike Snitzer case DM_TYPE_REQUEST_BASED: 29500776aa0eSMike Snitzer pool_size = max(dm_get_reserved_rq_based_ios(), min_pool_size); 295178d8e58aSMike Snitzer front_pad = offsetof(struct dm_rq_clone_bio_info, clone); 2952591ddcfcSMike Snitzer /* per_io_data_size is used for blk-mq pdu at queue allocation */ 295378d8e58aSMike Snitzer break; 295478d8e58aSMike Snitzer default: 295578d8e58aSMike Snitzer BUG(); 295678d8e58aSMike Snitzer } 295778d8e58aSMike Snitzer 29586f1c819cSKent Overstreet ret = bioset_init(&pools->bs, pool_size, front_pad, 0); 29596f1c819cSKent Overstreet if (ret) 29605f015204SJun'ichi Nomura goto out; 2961e6ee8c0bSKiyoshi Ueda 29626f1c819cSKent Overstreet if (integrity && bioset_integrity_create(&pools->bs, pool_size)) 29635f015204SJun'ichi Nomura goto out; 2964a91a2785SMartin K. Petersen 2965e6ee8c0bSKiyoshi Ueda return pools; 296678d8e58aSMike Snitzer 29675f015204SJun'ichi Nomura out: 29685f015204SJun'ichi Nomura dm_free_md_mempools(pools); 2969e6ee8c0bSKiyoshi Ueda 29704e6e36c3SMike Snitzer return NULL; 2971e6ee8c0bSKiyoshi Ueda } 2972e6ee8c0bSKiyoshi Ueda 2973e6ee8c0bSKiyoshi Ueda void dm_free_md_mempools(struct dm_md_mempools *pools) 2974e6ee8c0bSKiyoshi Ueda { 2975e6ee8c0bSKiyoshi Ueda if (!pools) 2976e6ee8c0bSKiyoshi Ueda return; 2977e6ee8c0bSKiyoshi Ueda 29786f1c819cSKent Overstreet bioset_exit(&pools->bs); 29796f1c819cSKent Overstreet bioset_exit(&pools->io_bs); 2980e6ee8c0bSKiyoshi Ueda 2981e6ee8c0bSKiyoshi Ueda kfree(pools); 2982e6ee8c0bSKiyoshi Ueda } 2983e6ee8c0bSKiyoshi Ueda 29849c72bad1SChristoph Hellwig struct dm_pr { 29859c72bad1SChristoph Hellwig u64 old_key; 29869c72bad1SChristoph Hellwig u64 new_key; 29879c72bad1SChristoph Hellwig u32 flags; 29889c72bad1SChristoph Hellwig bool fail_early; 29899c72bad1SChristoph Hellwig }; 29909c72bad1SChristoph Hellwig 29919c72bad1SChristoph Hellwig static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn, 29929c72bad1SChristoph Hellwig void *data) 29939c72bad1SChristoph Hellwig { 29949c72bad1SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data; 29959c72bad1SChristoph Hellwig struct dm_table *table; 29969c72bad1SChristoph Hellwig struct dm_target *ti; 29979c72bad1SChristoph Hellwig int ret = -ENOTTY, srcu_idx; 29989c72bad1SChristoph Hellwig 29999c72bad1SChristoph Hellwig table = dm_get_live_table(md, &srcu_idx); 30009c72bad1SChristoph Hellwig if (!table || !dm_table_get_size(table)) 30019c72bad1SChristoph Hellwig goto out; 30029c72bad1SChristoph Hellwig 30039c72bad1SChristoph Hellwig /* We only support devices that have a single target */ 30049c72bad1SChristoph Hellwig if 
(dm_table_get_num_targets(table) != 1) 30059c72bad1SChristoph Hellwig goto out; 30069c72bad1SChristoph Hellwig ti = dm_table_get_target(table, 0); 30079c72bad1SChristoph Hellwig 30089c72bad1SChristoph Hellwig ret = -EINVAL; 30099c72bad1SChristoph Hellwig if (!ti->type->iterate_devices) 30109c72bad1SChristoph Hellwig goto out; 30119c72bad1SChristoph Hellwig 30129c72bad1SChristoph Hellwig ret = ti->type->iterate_devices(ti, fn, data); 30139c72bad1SChristoph Hellwig out: 30149c72bad1SChristoph Hellwig dm_put_live_table(md, srcu_idx); 30159c72bad1SChristoph Hellwig return ret; 30169c72bad1SChristoph Hellwig } 30179c72bad1SChristoph Hellwig 30189c72bad1SChristoph Hellwig /* 30199c72bad1SChristoph Hellwig * For register / unregister we need to manually call out to every path. 30209c72bad1SChristoph Hellwig */ 30219c72bad1SChristoph Hellwig static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev, 30229c72bad1SChristoph Hellwig sector_t start, sector_t len, void *data) 30239c72bad1SChristoph Hellwig { 30249c72bad1SChristoph Hellwig struct dm_pr *pr = data; 30259c72bad1SChristoph Hellwig const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; 30269c72bad1SChristoph Hellwig 30279c72bad1SChristoph Hellwig if (!ops || !ops->pr_register) 30289c72bad1SChristoph Hellwig return -EOPNOTSUPP; 30299c72bad1SChristoph Hellwig return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags); 30309c72bad1SChristoph Hellwig } 30319c72bad1SChristoph Hellwig 303271cdb697SChristoph Hellwig static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key, 303371cdb697SChristoph Hellwig u32 flags) 303471cdb697SChristoph Hellwig { 30359c72bad1SChristoph Hellwig struct dm_pr pr = { 30369c72bad1SChristoph Hellwig .old_key = old_key, 30379c72bad1SChristoph Hellwig .new_key = new_key, 30389c72bad1SChristoph Hellwig .flags = flags, 30399c72bad1SChristoph Hellwig .fail_early = true, 30409c72bad1SChristoph Hellwig }; 30419c72bad1SChristoph Hellwig int ret; 304271cdb697SChristoph Hellwig 30439c72bad1SChristoph Hellwig ret = dm_call_pr(bdev, __dm_pr_register, &pr); 30449c72bad1SChristoph Hellwig if (ret && new_key) { 30459c72bad1SChristoph Hellwig /* unregister all paths if we failed to register any path */ 30469c72bad1SChristoph Hellwig pr.old_key = new_key; 30479c72bad1SChristoph Hellwig pr.new_key = 0; 30489c72bad1SChristoph Hellwig pr.flags = 0; 30499c72bad1SChristoph Hellwig pr.fail_early = false; 30509c72bad1SChristoph Hellwig dm_call_pr(bdev, __dm_pr_register, &pr); 30519c72bad1SChristoph Hellwig } 305271cdb697SChristoph Hellwig 30539c72bad1SChristoph Hellwig return ret; 305471cdb697SChristoph Hellwig } 305571cdb697SChristoph Hellwig 305671cdb697SChristoph Hellwig static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type, 305771cdb697SChristoph Hellwig u32 flags) 305871cdb697SChristoph Hellwig { 305971cdb697SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data; 306071cdb697SChristoph Hellwig const struct pr_ops *ops; 3061971888c4SMike Snitzer int r, srcu_idx; 306271cdb697SChristoph Hellwig 30635bd5e8d8SMike Snitzer r = dm_prepare_ioctl(md, &srcu_idx, &bdev); 306471cdb697SChristoph Hellwig if (r < 0) 3065971888c4SMike Snitzer goto out; 306671cdb697SChristoph Hellwig 306771cdb697SChristoph Hellwig ops = bdev->bd_disk->fops->pr_ops; 306871cdb697SChristoph Hellwig if (ops && ops->pr_reserve) 306971cdb697SChristoph Hellwig r = ops->pr_reserve(bdev, key, type, flags); 307071cdb697SChristoph Hellwig else 307171cdb697SChristoph Hellwig r = 
-EOPNOTSUPP; 3072971888c4SMike Snitzer out: 3073971888c4SMike Snitzer dm_unprepare_ioctl(md, srcu_idx); 307471cdb697SChristoph Hellwig return r; 307571cdb697SChristoph Hellwig } 307671cdb697SChristoph Hellwig 307771cdb697SChristoph Hellwig static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type) 307871cdb697SChristoph Hellwig { 307971cdb697SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data; 308071cdb697SChristoph Hellwig const struct pr_ops *ops; 3081971888c4SMike Snitzer int r, srcu_idx; 308271cdb697SChristoph Hellwig 30835bd5e8d8SMike Snitzer r = dm_prepare_ioctl(md, &srcu_idx, &bdev); 308471cdb697SChristoph Hellwig if (r < 0) 3085971888c4SMike Snitzer goto out; 308671cdb697SChristoph Hellwig 308771cdb697SChristoph Hellwig ops = bdev->bd_disk->fops->pr_ops; 308871cdb697SChristoph Hellwig if (ops && ops->pr_release) 308971cdb697SChristoph Hellwig r = ops->pr_release(bdev, key, type); 309071cdb697SChristoph Hellwig else 309171cdb697SChristoph Hellwig r = -EOPNOTSUPP; 3092971888c4SMike Snitzer out: 3093971888c4SMike Snitzer dm_unprepare_ioctl(md, srcu_idx); 309471cdb697SChristoph Hellwig return r; 309571cdb697SChristoph Hellwig } 309671cdb697SChristoph Hellwig 309771cdb697SChristoph Hellwig static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key, 309871cdb697SChristoph Hellwig enum pr_type type, bool abort) 309971cdb697SChristoph Hellwig { 310071cdb697SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data; 310171cdb697SChristoph Hellwig const struct pr_ops *ops; 3102971888c4SMike Snitzer int r, srcu_idx; 310371cdb697SChristoph Hellwig 31045bd5e8d8SMike Snitzer r = dm_prepare_ioctl(md, &srcu_idx, &bdev); 310571cdb697SChristoph Hellwig if (r < 0) 3106971888c4SMike Snitzer goto out; 310771cdb697SChristoph Hellwig 310871cdb697SChristoph Hellwig ops = bdev->bd_disk->fops->pr_ops; 310971cdb697SChristoph Hellwig if (ops && ops->pr_preempt) 311071cdb697SChristoph Hellwig r = ops->pr_preempt(bdev, old_key, new_key, type, abort); 311171cdb697SChristoph Hellwig else 311271cdb697SChristoph Hellwig r = -EOPNOTSUPP; 3113971888c4SMike Snitzer out: 3114971888c4SMike Snitzer dm_unprepare_ioctl(md, srcu_idx); 311571cdb697SChristoph Hellwig return r; 311671cdb697SChristoph Hellwig } 311771cdb697SChristoph Hellwig 311871cdb697SChristoph Hellwig static int dm_pr_clear(struct block_device *bdev, u64 key) 311971cdb697SChristoph Hellwig { 312071cdb697SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data; 312171cdb697SChristoph Hellwig const struct pr_ops *ops; 3122971888c4SMike Snitzer int r, srcu_idx; 312371cdb697SChristoph Hellwig 31245bd5e8d8SMike Snitzer r = dm_prepare_ioctl(md, &srcu_idx, &bdev); 312571cdb697SChristoph Hellwig if (r < 0) 3126971888c4SMike Snitzer goto out; 312771cdb697SChristoph Hellwig 312871cdb697SChristoph Hellwig ops = bdev->bd_disk->fops->pr_ops; 312971cdb697SChristoph Hellwig if (ops && ops->pr_clear) 313071cdb697SChristoph Hellwig r = ops->pr_clear(bdev, key); 313171cdb697SChristoph Hellwig else 313271cdb697SChristoph Hellwig r = -EOPNOTSUPP; 3133971888c4SMike Snitzer out: 3134971888c4SMike Snitzer dm_unprepare_ioctl(md, srcu_idx); 313571cdb697SChristoph Hellwig return r; 313671cdb697SChristoph Hellwig } 313771cdb697SChristoph Hellwig 313871cdb697SChristoph Hellwig static const struct pr_ops dm_pr_ops = { 313971cdb697SChristoph Hellwig .pr_register = dm_pr_register, 314071cdb697SChristoph Hellwig .pr_reserve = dm_pr_reserve, 314171cdb697SChristoph Hellwig .pr_release = 
dm_pr_release, 314271cdb697SChristoph Hellwig .pr_preempt = dm_pr_preempt, 314371cdb697SChristoph Hellwig .pr_clear = dm_pr_clear, 314471cdb697SChristoph Hellwig }; 314571cdb697SChristoph Hellwig 314683d5cde4SAlexey Dobriyan static const struct block_device_operations dm_blk_dops = { 31471da177e4SLinus Torvalds .open = dm_blk_open, 31481da177e4SLinus Torvalds .release = dm_blk_close, 3149aa129a22SMilan Broz .ioctl = dm_blk_ioctl, 31503ac51e74SDarrick J. Wong .getgeo = dm_blk_getgeo, 3151e76239a3SChristoph Hellwig .report_zones = dm_blk_report_zones, 315271cdb697SChristoph Hellwig .pr_ops = &dm_pr_ops, 31531da177e4SLinus Torvalds .owner = THIS_MODULE 31541da177e4SLinus Torvalds }; 31551da177e4SLinus Torvalds 3156f26c5719SDan Williams static const struct dax_operations dm_dax_ops = { 3157f26c5719SDan Williams .direct_access = dm_dax_direct_access, 31587e026c8cSDan Williams .copy_from_iter = dm_dax_copy_from_iter, 3159b3a9a0c3SDan Williams .copy_to_iter = dm_dax_copy_to_iter, 3160f26c5719SDan Williams }; 3161f26c5719SDan Williams 31621da177e4SLinus Torvalds /* 31631da177e4SLinus Torvalds * module hooks 31641da177e4SLinus Torvalds */ 31651da177e4SLinus Torvalds module_init(dm_init); 31661da177e4SLinus Torvalds module_exit(dm_exit); 31671da177e4SLinus Torvalds 31681da177e4SLinus Torvalds module_param(major, uint, 0); 31691da177e4SLinus Torvalds MODULE_PARM_DESC(major, "The major number of the device mapper"); 3170f4790826SMike Snitzer 3171e8603136SMike Snitzer module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR); 3172e8603136SMike Snitzer MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools"); 3173e8603136SMike Snitzer 3174115485e8SMike Snitzer module_param(dm_numa_node, int, S_IRUGO | S_IWUSR); 3175115485e8SMike Snitzer MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations"); 3176115485e8SMike Snitzer 31771da177e4SLinus Torvalds MODULE_DESCRIPTION(DM_NAME " driver"); 31781da177e4SLinus Torvalds MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); 31791da177e4SLinus Torvalds MODULE_LICENSE("GPL"); 3180
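/*
 * A minimal sketch of the dm_call_pr() pattern used by the pr_ops above.
 * Editor's illustration only: "__dm_count_path" and "dm_count_paths" are
 * hypothetical helpers, not part of dm.c. The callout runs once per
 * underlying device of the table's single target.
 */
static int __dm_count_path(struct dm_target *ti, struct dm_dev *dev,
			   sector_t start, sector_t len, void *data)
{
	unsigned *count = data;

	(*count)++;	/* visited one underlying path */
	return 0;	/* a nonzero return would stop the iteration */
}

static int dm_count_paths(struct block_device *bdev)
{
	unsigned count = 0;
	int ret = dm_call_pr(bdev, __dm_count_path, &count);

	return ret ? ret : count;
}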