/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched/signal.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/uio.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/pr.h>
#include <linux/refcount.h>
#include <trace/events/block.h>

#define DM_MSG_PREFIX "core"

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_IDR(_minor_idr);

static DEFINE_SPINLOCK(_minor_lock);

static void do_deferred_remove(struct work_struct *w);

static DECLARE_WORK(deferred_remove_work, do_deferred_remove);

static struct workqueue_struct *deferred_remove_workqueue;

atomic_t dm_global_event_nr = ATOMIC_INIT(0);
DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);

void dm_issue_global_event(void)
{
	atomic_inc(&dm_global_event_nr);
	wake_up(&dm_global_eventq);
}

/*
 * One of these is allocated (on-stack) per original bio.
 */
struct clone_info {
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	unsigned sector_count;
};

/*
 * One of these is allocated per clone bio.
 */
#define DM_TIO_MAGIC 7282014
struct dm_target_io {
	unsigned magic;
	struct dm_io *io;
	struct dm_target *ti;
	unsigned target_bio_nr;
	unsigned *len_ptr;
	bool inside_dm_io;
	struct bio clone;
};

/*
 * One of these is allocated per original bio.
 * It contains the first clone used for that original.
 */
#define DM_IO_MAGIC 5191977
struct dm_io {
	unsigned magic;
	struct mapped_device *md;
	blk_status_t status;
	atomic_t io_count;
	struct bio *orig_bio;
	unsigned long start_time;
	spinlock_t endio_lock;
	struct dm_stats_aux stats_aux;
	/* last member of dm_target_io is 'struct bio' */
	struct dm_target_io tio;
};

void *dm_per_bio_data(struct bio *bio, size_t data_size)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	if (!tio->inside_dm_io)
		return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
	return (char *)bio - offsetof(struct dm_target_io, clone) - offsetof(struct dm_io, tio) - data_size;
}
EXPORT_SYMBOL_GPL(dm_per_bio_data);

struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
{
	struct dm_io *io = (struct dm_io *)((char *)data + data_size);
	if (io->magic == DM_IO_MAGIC)
		return (struct bio *)((char *)io + offsetof(struct dm_io, tio) + offsetof(struct dm_target_io, clone));
	BUG_ON(io->magic != DM_TIO_MAGIC);
	return (struct bio *)((char *)io + offsetof(struct dm_target_io, clone));
}
EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data);

unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
{
	return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
}
EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);
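
/*
 * Usage sketch (hypothetical target, not part of this file): a target whose
 * ctr set ti->per_io_data_size = sizeof(struct my_ctx) can translate between
 * its per-bio data and the clone bio in both directions:
 *
 *	struct my_ctx *ctx = dm_per_bio_data(bio, sizeof(struct my_ctx));
 *	...
 *	struct bio *bio2 = dm_bio_from_per_bio_data(ctx, sizeof(struct my_ctx));
 *
 * The data_size argument must match ti->per_io_data_size, and the helpers
 * work whether the clone carries a front-padded dm_target_io alone or one
 * embedded in a dm_io, as distinguished by the magic values above.
 */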

#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7

#define DM_NUMA_NODE NUMA_NO_NODE
static int dm_numa_node = DM_NUMA_NODE;

/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
	struct bio_set bs;
	struct bio_set io_bs;
};

struct table_device {
	struct list_head list;
	refcount_t count;
	struct dm_dev dm_dev;
};

static struct kmem_cache *_rq_tio_cache;
static struct kmem_cache *_rq_cache;

/*
 * Bio-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_BIO_BASED_IOS		16
static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;

static int __dm_get_module_param_int(int *module_param, int min, int max)
{
	int param = READ_ONCE(*module_param);
	int modified_param = 0;
	bool modified = true;

	if (param < min)
		modified_param = min;
	else if (param > max)
		modified_param = max;
	else
		modified = false;

	if (modified) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned __dm_get_module_param(unsigned *module_param,
			       unsigned def, unsigned max)
{
	unsigned param = READ_ONCE(*module_param);
	unsigned modified_param = 0;

	if (!param)
		modified_param = def;
	else if (param > max)
		modified_param = max;

	if (modified_param) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}
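
/*
 * Illustration (assumes the module parameter wiring near the end of this
 * file, e.g. /sys/module/dm_mod/parameters/reserved_bio_based_ios): the
 * helpers above clamp lazily, so an out-of-range value written via sysfs
 * is only corrected, through the cmpxchg() write-back, once a kernel
 * reader such as dm_get_reserved_bio_based_ios() next consults the
 * parameter. Writing 0 means "use the default" (RESERVED_BIO_BASED_IOS).
 */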

unsigned dm_get_reserved_bio_based_ios(void)
{
	return __dm_get_module_param(&reserved_bio_based_ios,
				     RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);

static unsigned dm_get_numa_node(void)
{
	return __dm_get_module_param_int(&dm_numa_node,
					 DM_NUMA_NODE, num_online_nodes() - 1);
}

static int __init local_init(void)
{
	int r = -ENOMEM;

	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
	if (!_rq_tio_cache)
		return r;

	_rq_cache = kmem_cache_create("dm_old_clone_request", sizeof(struct request),
				      __alignof__(struct request), 0, NULL);
	if (!_rq_cache)
		goto out_free_rq_tio_cache;

	r = dm_uevent_init();
	if (r)
		goto out_free_rq_cache;

	deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
	if (!deferred_remove_workqueue) {
		r = -ENOMEM;
		goto out_uevent_exit;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_free_workqueue;

	if (!_major)
		_major = r;

	return 0;

out_free_workqueue:
	destroy_workqueue(deferred_remove_workqueue);
out_uevent_exit:
	dm_uevent_exit();
out_free_rq_cache:
	kmem_cache_destroy(_rq_cache);
out_free_rq_tio_cache:
	kmem_cache_destroy(_rq_tio_cache);

	return r;
}

static void local_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(deferred_remove_workqueue);

	kmem_cache_destroy(_rq_cache);
	kmem_cache_destroy(_rq_tio_cache);
	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_io_init,
	dm_kcopyd_init,
	dm_interface_init,
	dm_statistics_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_io_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
	dm_statistics_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();

	/*
	 * Should be empty by this point.
	 */
	idr_destroy(&_minor_idr);
}

/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{
	return test_bit(DMF_DELETING, &md->flags);
}

static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);
out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}

static void dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = disk->private_data;
	if (WARN_ON(!md))
		goto out;

	if (atomic_dec_and_test(&md->open_count) &&
	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
		queue_work(deferred_remove_workqueue, &deferred_remove_work);

	dm_put(md);
out:
	spin_unlock(&_minor_lock);
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md)) {
		r = -EBUSY;
		if (mark_deferred)
			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
		r = -EEXIST;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

int dm_cancel_deferred_remove(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (test_bit(DMF_DELETING, &md->flags))
		r = -EBUSY;
	else
		clear_bit(DMF_DEFERRED_REMOVE, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static void do_deferred_remove(struct work_struct *w)
{
	dm_deferred_remove();
}
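
/*
 * Taken together, the helpers above implement deferred removal (e.g.
 * "dmsetup remove --deferred"): while the device is still open,
 * dm_lock_for_deletion() refuses with -EBUSY but can leave
 * DMF_DEFERRED_REMOVE set, and the final dm_blk_close() then queues
 * deferred_remove_work to finish the removal asynchronously.
 */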

sector_t dm_get_size(struct mapped_device *md)
{
	return get_capacity(md->disk);
}

struct request_queue *dm_get_md_queue(struct mapped_device *md)
{
	return md->queue;
}

struct dm_stats *dm_get_stats(struct mapped_device *md)
{
	return &md->stats;
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
			    struct block_device **bdev)
	__acquires(md->io_barrier)
{
	struct dm_target *tgt;
	struct dm_table *map;
	int r;

retry:
	r = -ENOTTY;
	map = dm_get_live_table(md, srcu_idx);
	if (!map || !dm_table_get_size(map))
		return r;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		return r;

	tgt = dm_table_get_target(map, 0);
	if (!tgt->type->prepare_ioctl)
		return r;

	if (dm_suspended_md(md))
		return -EAGAIN;

	r = tgt->type->prepare_ioctl(tgt, bdev);
	if (r == -ENOTCONN && !fatal_signal_pending(current)) {
		dm_put_live_table(md, *srcu_idx);
		msleep(10);
		goto retry;
	}

	return r;
}

static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
	__releases(md->io_barrier)
{
	dm_put_live_table(md, srcu_idx);
}

static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	if (r > 0) {
		/*
		 * Target determined this ioctl is being issued against a
		 * subset of the parent bdev; require extra privileges.
		 */
		if (!capable(CAP_SYS_RAWIO)) {
			DMWARN_LIMIT(
	"%s: sending ioctl %x to DM device without required privilege.",
				current->comm, cmd);
			r = -ENOIOCTLCMD;
			goto out;
		}
	}

	r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}

static void start_io_acct(struct dm_io *io);

static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
{
	struct dm_io *io;
	struct dm_target_io *tio;
	struct bio *clone;

	clone = bio_alloc_bioset(GFP_NOIO, 0, &md->io_bs);
	if (!clone)
		return NULL;

	tio = container_of(clone, struct dm_target_io, clone);
	tio->inside_dm_io = true;
	tio->io = NULL;

	io = container_of(tio, struct dm_io, tio);
	io->magic = DM_IO_MAGIC;
	io->status = 0;
	atomic_set(&io->io_count, 1);
	io->orig_bio = bio;
	io->md = md;
	spin_lock_init(&io->endio_lock);

	start_io_acct(io);

	return io;
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	bio_put(&io->tio.clone);
}

static struct dm_target_io *alloc_tio(struct clone_info *ci, struct dm_target *ti,
				      unsigned target_bio_nr, gfp_t gfp_mask)
{
	struct dm_target_io *tio;

	if (!ci->io->tio.io) {
		/* the dm_target_io embedded in ci->io is available */
		tio = &ci->io->tio;
	} else {
		struct bio *clone = bio_alloc_bioset(gfp_mask, 0, &ci->io->md->bs);
		if (!clone)
			return NULL;

		tio = container_of(clone, struct dm_target_io, clone);
		tio->inside_dm_io = false;
	}

	tio->magic = DM_TIO_MAGIC;
	tio->io = ci->io;
	tio->ti = ti;
	tio->target_bio_nr = target_bio_nr;

	return tio;
}

static void free_tio(struct dm_target_io *tio)
{
	if (tio->inside_dm_io)
		return;
	bio_put(&tio->clone);
}

int md_in_flight(struct mapped_device *md)
{
	return atomic_read(&md->pending[READ]) +
	       atomic_read(&md->pending[WRITE]);
}

static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->orig_bio;
	int rw = bio_data_dir(bio);

	io->start_time = jiffies;

	generic_start_io_acct(md->queue, bio_op(bio), bio_sectors(bio),
			      &dm_disk(md)->part0);

	atomic_set(&dm_disk(md)->part0.in_flight[rw],
		   atomic_inc_return(&md->pending[rw]));

	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio_data_dir(bio),
				    bio->bi_iter.bi_sector, bio_sectors(bio),
				    false, 0, &io->stats_aux);
}

static void end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->orig_bio;
	unsigned long duration = jiffies - io->start_time;
	int pending;
	int rw = bio_data_dir(bio);

	generic_end_io_acct(md->queue, bio_op(bio), &dm_disk(md)->part0,
			    io->start_time);

	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio_data_dir(bio),
				    bio->bi_iter.bi_sector, bio_sectors(bio),
				    true, duration, &io->stats_aux);

	/*
	 * After this is decremented the bio must not be touched if it is
	 * a flush.
	 */
	pending = atomic_dec_return(&md->pending[rw]);
	atomic_set(&dm_disk(md)->part0.in_flight[rw], pending);
	pending += atomic_read(&md->pending[rw^0x1]);

	/* nudge anyone waiting on suspend queue */
	if (!pending)
		wake_up(&md->wait);
}
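
/*
 * Note: md->pending[] and md->wait pair up. end_io_acct() wakes md->wait
 * once both directions drain, which is what the suspend path sleeps on,
 * conceptually "wait_event(md->wait, !md_in_flight(md))".
 */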

/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&md->deferred_lock, flags);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irqrestore(&md->deferred_lock, flags);
	queue_work(md->wq, &md->work);
}

/*
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_put_live_table() when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
{
	*srcu_idx = srcu_read_lock(&md->io_barrier);

	return srcu_dereference(md->map, &md->io_barrier);
}

void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
{
	srcu_read_unlock(&md->io_barrier, srcu_idx);
}

void dm_sync_table(struct mapped_device *md)
{
	synchronize_srcu(&md->io_barrier);
	synchronize_rcu_expedited();
}

/*
 * A fast alternative to dm_get_live_table/dm_put_live_table.
 * The caller must not block between these two functions.
 */
static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
{
	rcu_read_lock();
	return rcu_dereference(md->map);
}

static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
{
	rcu_read_unlock();
}
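
/*
 * Typical calling pattern for the table accessors above, as used by e.g.
 * dm_prepare_ioctl()/dm_unprepare_ioctl():
 *
 *	int srcu_idx;
 *	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 *
 *	if (map)
 *		... use the table ...
 *	dm_put_live_table(md, srcu_idx);
 *
 * The SRCU variant allows sleeping between get and put; the _fast RCU
 * variant does not, per the comment above.
 */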

static char *_dm_claim_ptr = "I belong to device-mapper";

/*
 * Open a table device so we can use it as a map destination.
 */
static int open_table_device(struct table_device *td, dev_t dev,
			     struct mapped_device *md)
{
	struct block_device *bdev;

	int r;

	BUG_ON(td->dm_dev.bdev);

	bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _dm_claim_ptr);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	r = bd_link_disk_holder(bdev, dm_disk(md));
	if (r) {
		blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
		return r;
	}

	td->dm_dev.bdev = bdev;
	td->dm_dev.dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	return 0;
}

/*
 * Close a table device that we've been using.
 */
static void close_table_device(struct table_device *td, struct mapped_device *md)
{
	if (!td->dm_dev.bdev)
		return;

	bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
	blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
	put_dax(td->dm_dev.dax_dev);
	td->dm_dev.bdev = NULL;
	td->dm_dev.dax_dev = NULL;
}

static struct table_device *find_table_device(struct list_head *l, dev_t dev,
					      fmode_t mode) {
	struct table_device *td;

	list_for_each_entry(td, l, list)
		if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
			return td;

	return NULL;
}

int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
			struct dm_dev **result) {
	int r;
	struct table_device *td;

	mutex_lock(&md->table_devices_lock);
	td = find_table_device(&md->table_devices, dev, mode);
	if (!td) {
		td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
		if (!td) {
			mutex_unlock(&md->table_devices_lock);
			return -ENOMEM;
		}

		td->dm_dev.mode = mode;
		td->dm_dev.bdev = NULL;

		if ((r = open_table_device(td, dev, md))) {
			mutex_unlock(&md->table_devices_lock);
			kfree(td);
			return r;
		}

		format_dev_t(td->dm_dev.name, dev);

		refcount_set(&td->count, 1);
		list_add(&td->list, &md->table_devices);
	} else {
		refcount_inc(&td->count);
	}
	mutex_unlock(&md->table_devices_lock);

	*result = &td->dm_dev;
	return 0;
}
EXPORT_SYMBOL_GPL(dm_get_table_device);

void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
{
	struct table_device *td = container_of(d, struct table_device, dm_dev);

	mutex_lock(&md->table_devices_lock);
	if (refcount_dec_and_test(&td->count)) {
		close_table_device(td, md);
		list_del(&td->list);
		kfree(td);
	}
	mutex_unlock(&md->table_devices_lock);
}
EXPORT_SYMBOL(dm_put_table_device);
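
/*
 * Usage sketch (hypothetical caller; the device numbers are illustrative):
 *
 *	struct dm_dev *dev;
 *	int r = dm_get_table_device(md, MKDEV(8, 16), FMODE_READ, &dev);
 *
 *	if (r)
 *		return r;
 *	... use dev->bdev ...
 *	dm_put_table_device(md, dev);
 *
 * Repeated gets for the same dev_t/mode pair share one refcounted entry on
 * md->table_devices; the device is only closed on the final put.
 */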

static void free_table_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct table_device *td = list_entry(tmp, struct table_device, list);

		DMWARN("dm_destroy: %s still exists with %d references",
		       td->dm_dev.name, refcount_read(&td->count));
		kfree(td);
	}
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, blk_status_t error)
{
	unsigned long flags;
	blk_status_t io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;

	/* Push-back supersedes any I/O errors */
	if (unlikely(error)) {
		spin_lock_irqsave(&io->endio_lock, flags);
		if (!(io->status == BLK_STS_DM_REQUEUE && __noflush_suspending(md)))
			io->status = error;
		spin_unlock_irqrestore(&io->endio_lock, flags);
	}

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->status == BLK_STS_DM_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md))
				/* NOTE early return due to BLK_STS_DM_REQUEUE below */
				bio_list_add_head(&md->deferred, io->orig_bio);
			else
				/* noflush suspend was interrupted. */
				io->status = BLK_STS_IOERR;
			spin_unlock_irqrestore(&md->deferred_lock, flags);
		}

		io_error = io->status;
		bio = io->orig_bio;
		end_io_acct(io);
		free_io(md, io);

		if (io_error == BLK_STS_DM_REQUEUE)
			return;

		if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
			/*
			 * Preflush done for flush with data, reissue
			 * without REQ_PREFLUSH.
			 */
			bio->bi_opf &= ~REQ_PREFLUSH;
			queue_io(md, bio);
		} else {
			/* done with normal IO or empty flush */
			if (io_error)
				bio->bi_status = io_error;
			bio_endio(bio);
		}
	}
}

void disable_write_same(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE SAME, disable it */
	limits->max_write_same_sectors = 0;
}

void disable_write_zeroes(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE ZEROES, disable it */
	limits->max_write_zeroes_sectors = 0;
}

static void clone_endio(struct bio *bio)
{
	blk_status_t error = bio->bi_status;
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
		if (bio_op(bio) == REQ_OP_WRITE_SAME &&
		    !bio->bi_disk->queue->limits.max_write_same_sectors)
			disable_write_same(md);
		if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
		    !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
			disable_write_zeroes(md);
	}

	if (endio) {
		int r = endio(tio->ti, bio, &error);
		switch (r) {
		case DM_ENDIO_REQUEUE:
			error = BLK_STS_DM_REQUEUE;
			/*FALLTHRU*/
		case DM_ENDIO_DONE:
			break;
		case DM_ENDIO_INCOMPLETE:
			/* The target will handle the io */
			return;
		default:
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	free_tio(tio);
	dec_pending(io, error);
}
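
/*
 * Sketch of the dm_endio_fn contract handled above (hypothetical target,
 * not part of this file): an end_io hook that requeues transient failures
 * and otherwise passes the status through would be:
 *
 *	static int my_end_io(struct dm_target *ti, struct bio *bio,
 *			     blk_status_t *error)
 *	{
 *		if (*error == BLK_STS_RESOURCE)
 *			return DM_ENDIO_REQUEUE;
 *		return DM_ENDIO_DONE;
 *	}
 */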

/*
 * Return maximum size of I/O possible at the supplied sector up to the current
 * target boundary.
 */
static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
{
	sector_t target_offset = dm_target_offset(ti, sector);

	return ti->len - target_offset;
}

static sector_t max_io_len(sector_t sector, struct dm_target *ti)
{
	sector_t len = max_io_len_target_boundary(sector, ti);
	sector_t offset, max_len;

	/*
	 * Does the target need to split even further?
	 */
	if (ti->max_io_len) {
		offset = dm_target_offset(ti, sector);
		if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
			max_len = sector_div(offset, ti->max_io_len);
		else
			max_len = offset & (ti->max_io_len - 1);
		max_len = ti->max_io_len - max_len;

		if (len > max_len)
			len = max_len;
	}

	return len;
}
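
/*
 * Worked example for max_io_len(): with ti->max_io_len == 128 (a power of
 * two) and a target-relative offset of 200 sectors, the remainder within
 * the current chunk is 200 & 127 == 72, so at most 128 - 72 == 56 sectors
 * may be issued before the next 128-sector boundary (less if the target
 * boundary computed above is nearer).
 */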

int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
{
	if (len > UINT_MAX) {
		DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
		      (unsigned long long)len, UINT_MAX);
		ti->error = "Maximum size of target IO is too large";
		return -EINVAL;
	}

	/*
	 * BIO based queue uses its own splitting. When multipage bvec
	 * support is switched on, the incoming bio may be too big to
	 * be handled by some targets, such as crypt.
	 *
	 * When these targets are ready for the big bio, we can remove
	 * the limit.
	 */
	ti->max_io_len = min_t(uint32_t, len, BIO_MAX_PAGES * PAGE_SIZE);

	return 0;
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);

static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
						sector_t sector, int *srcu_idx)
	__acquires(md->io_barrier)
{
	struct dm_table *map;
	struct dm_target *ti;

	map = dm_get_live_table(md, srcu_idx);
	if (!map)
		return NULL;

	ti = dm_table_find_target(map, sector);
	if (!dm_target_is_valid(ti))
		return NULL;

	return ti;
}

static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
				 long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long len, ret = -EIO;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->direct_access)
		goto out;
	len = max_io_len(sector, ti) / PAGE_SECTORS;
	if (len < 1)
		goto out;
	nr_pages = min(len, nr_pages);
	ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);

 out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
				    void *addr, size_t bytes, struct iov_iter *i)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long ret = 0;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->dax_copy_from_iter) {
		ret = copy_from_iter(addr, bytes, i);
		goto out;
	}
	ret = ti->type->dax_copy_from_iter(ti, pgoff, addr, bytes, i);
 out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

static size_t dm_dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
				  void *addr, size_t bytes, struct iov_iter *i)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long ret = 0;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->dax_copy_to_iter) {
		ret = copy_to_iter(addr, bytes, i);
		goto out;
	}
	ret = ti->type->dax_copy_to_iter(ti, pgoff, addr, bytes, i);
out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

/*
 * A target may call dm_accept_partial_bio only from the map routine. It is
 * allowed for all bio types except REQ_PREFLUSH and REQ_OP_ZONE_RESET.
 *
 * dm_accept_partial_bio informs the dm that the target only wants to process
 * additional n_sectors sectors of the bio and the rest of the data should be
 * sent in a next bio.
 *
 * A diagram that explains the arithmetic:
 * +--------------------+---------------+-------+
 * |         1          |       2       |   3   |
 * +--------------------+---------------+-------+
 *
 * <-------------- *tio->len_ptr --------------->
 *                      <------- bi_size ------->
 *                      <-- n_sectors -->
 *
 * Region 1 was already iterated over with bio_advance or similar function.
 *	(it may be empty if the target doesn't use bio_advance)
 * Region 2 is the remaining bio size that the target wants to process.
 *	(it may be empty if region 1 is non-empty, although there is no reason
 *	 to make it empty)
 * The target requires that region 3 is to be sent in the next bio.
 *
 * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
 * the partially processed part (the sum of regions 1+2) must be the same for all
 * copies of the bio.
 */
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
	BUG_ON(bio->bi_opf & REQ_PREFLUSH);
	BUG_ON(bi_size > *tio->len_ptr);
	BUG_ON(n_sectors > bi_size);
	*tio->len_ptr -= bi_size - n_sectors;
	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
}
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
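
/*
 * Usage sketch (hypothetical map routine; my_limit is illustrative): to
 * process only the leading part of a bio and have dm resend the rest
 * (region 3 above) as a new bio:
 *
 *	static int my_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		unsigned n_sectors = min(bio_sectors(bio), my_limit);
 *
 *		dm_accept_partial_bio(bio, n_sectors);
 *		... remap and submit the first n_sectors ...
 *		return DM_MAPIO_REMAPPED;
 *	}
 */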
119610999307SDamien Le Moal */ 119710999307SDamien Le Moal bio_for_each_segment(bvec, report_bio, iter) { 119810999307SDamien Le Moal addr = kmap_atomic(bvec.bv_page); 119910999307SDamien Le Moal 120010999307SDamien Le Moal /* Remember the report header in the first page */ 120110999307SDamien Le Moal if (!hdr) { 120210999307SDamien Le Moal hdr = addr; 120310999307SDamien Le Moal ofst = sizeof(struct blk_zone_report_hdr); 120410999307SDamien Le Moal } else 120510999307SDamien Le Moal ofst = 0; 120610999307SDamien Le Moal 120710999307SDamien Le Moal /* Set zones start sector */ 120810999307SDamien Le Moal while (hdr->nr_zones && ofst < bvec.bv_len) { 120910999307SDamien Le Moal zone = addr + ofst; 1210*9864cd5dSDamien Le Moal zone->start -= part_offset; 121110999307SDamien Le Moal if (zone->start >= start + ti->len) { 121210999307SDamien Le Moal hdr->nr_zones = 0; 121310999307SDamien Le Moal break; 121410999307SDamien Le Moal } 121510999307SDamien Le Moal zone->start = zone->start + ti->begin - start; 121610999307SDamien Le Moal if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL) { 121710999307SDamien Le Moal if (zone->cond == BLK_ZONE_COND_FULL) 121810999307SDamien Le Moal zone->wp = zone->start + zone->len; 121910999307SDamien Le Moal else if (zone->cond == BLK_ZONE_COND_EMPTY) 122010999307SDamien Le Moal zone->wp = zone->start; 122110999307SDamien Le Moal else 1222*9864cd5dSDamien Le Moal zone->wp = zone->wp + ti->begin - start - part_offset; 122310999307SDamien Le Moal } 122410999307SDamien Le Moal ofst += sizeof(struct blk_zone); 122510999307SDamien Le Moal hdr->nr_zones--; 122610999307SDamien Le Moal nr_rep++; 122710999307SDamien Le Moal } 122810999307SDamien Le Moal 122910999307SDamien Le Moal if (addr != hdr) 123010999307SDamien Le Moal kunmap_atomic(addr); 123110999307SDamien Le Moal 123210999307SDamien Le Moal if (!hdr->nr_zones) 123310999307SDamien Le Moal break; 123410999307SDamien Le Moal } 123510999307SDamien Le Moal 123610999307SDamien Le Moal if (hdr) { 123710999307SDamien Le Moal hdr->nr_zones = nr_rep; 123810999307SDamien Le Moal kunmap_atomic(hdr); 123910999307SDamien Le Moal } 124010999307SDamien Le Moal 124110999307SDamien Le Moal bio_advance(report_bio, report_bio->bi_iter.bi_size); 124210999307SDamien Le Moal 124310999307SDamien Le Moal #else /* !CONFIG_BLK_DEV_ZONED */ 124410999307SDamien Le Moal bio->bi_status = BLK_STS_NOTSUPP; 124510999307SDamien Le Moal #endif 124610999307SDamien Le Moal } 124710999307SDamien Le Moal EXPORT_SYMBOL_GPL(dm_remap_zone_report); 124810999307SDamien Le Moal 1249978e51baSMike Snitzer static blk_qc_t __map_bio(struct dm_target_io *tio) 12501da177e4SLinus Torvalds { 12511da177e4SLinus Torvalds int r; 12522056a782SJens Axboe sector_t sector; 1253dba14160SMikulas Patocka struct bio *clone = &tio->clone; 125464f52b0eSMike Snitzer struct dm_io *io = tio->io; 1255978e51baSMike Snitzer struct mapped_device *md = io->md; 1256bd2a49b8SAlasdair G Kergon struct dm_target *ti = tio->ti; 1257978e51baSMike Snitzer blk_qc_t ret = BLK_QC_T_NONE; 12581da177e4SLinus Torvalds 12591da177e4SLinus Torvalds clone->bi_end_io = clone_endio; 12601da177e4SLinus Torvalds 12611da177e4SLinus Torvalds /* 12621da177e4SLinus Torvalds * Map the clone. If r == 0 we don't need to do 12631da177e4SLinus Torvalds * anything, the target has assumed ownership of 12641da177e4SLinus Torvalds * this io. 
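	 * Otherwise, the switch below dispatches on the return value:
	 * DM_MAPIO_REMAPPED sends the clone on to its new destination,
	 * while DM_MAPIO_KILL and DM_MAPIO_REQUEUE free the clone and
	 * complete the io with BLK_STS_IOERR or BLK_STS_DM_REQUEUE
	 * respectively.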
12651da177e4SLinus Torvalds 	 */
126664f52b0eSMike Snitzer 	atomic_inc(&io->io_count);
12674f024f37SKent Overstreet 	sector = clone->bi_iter.bi_sector;
1268d67a5f4bSMikulas Patocka 
12697de3ee57SMikulas Patocka 	r = ti->type->map(ti, clone);
1270846785e6SChristoph Hellwig 	switch (r) {
1271846785e6SChristoph Hellwig 	case DM_MAPIO_SUBMITTED:
1272846785e6SChristoph Hellwig 		break;
1273846785e6SChristoph Hellwig 	case DM_MAPIO_REMAPPED:
12741da177e4SLinus Torvalds 		/* the bio has been remapped so dispatch it */
127574d46992SChristoph Hellwig 		trace_block_bio_remap(clone->bi_disk->queue, clone,
127664f52b0eSMike Snitzer 				      bio_dev(io->orig_bio), sector);
1277978e51baSMike Snitzer 		if (md->type == DM_TYPE_NVME_BIO_BASED)
1278978e51baSMike Snitzer 			ret = direct_make_request(clone);
1279978e51baSMike Snitzer 		else
1280978e51baSMike Snitzer 			ret = generic_make_request(clone);
1281846785e6SChristoph Hellwig 		break;
1282846785e6SChristoph Hellwig 	case DM_MAPIO_KILL:
12834e4cbee9SChristoph Hellwig 		free_tio(tio);
128464f52b0eSMike Snitzer 		dec_pending(io, BLK_STS_IOERR);
12854e4cbee9SChristoph Hellwig 		break;
1286846785e6SChristoph Hellwig 	case DM_MAPIO_REQUEUE:
1287cfae7529SMike Snitzer 		free_tio(tio);
128864f52b0eSMike Snitzer 		dec_pending(io, BLK_STS_DM_REQUEUE);
1289846785e6SChristoph Hellwig 		break;
1290846785e6SChristoph Hellwig 	default:
129145cbcd79SKiyoshi Ueda 		DMWARN("unimplemented target map return value: %d", r);
129245cbcd79SKiyoshi Ueda 		BUG();
12931da177e4SLinus Torvalds 	}
12941da177e4SLinus Torvalds 
1295978e51baSMike Snitzer 	return ret;
12961da177e4SLinus Torvalds }
12971da177e4SLinus Torvalds 
1298e0d6609aSMikulas Patocka static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
1299bd2a49b8SAlasdair G Kergon {
13004f024f37SKent Overstreet 	bio->bi_iter.bi_sector = sector;
13014f024f37SKent Overstreet 	bio->bi_iter.bi_size = to_bytes(len);
13021da177e4SLinus Torvalds }
13031da177e4SLinus Torvalds 
13041da177e4SLinus Torvalds /*
13051da177e4SLinus Torvalds  * Creates a bio that consists of a range of complete bvecs.
13061da177e4SLinus Torvalds  */
1307c80914e8SMike Snitzer static int clone_bio(struct dm_target_io *tio, struct bio *bio,
13081c3b13e6SKent Overstreet 		     sector_t sector, unsigned len)
13091da177e4SLinus Torvalds {
1310dba14160SMikulas Patocka 	struct bio *clone = &tio->clone;
13111da177e4SLinus Torvalds 
13121c3b13e6SKent Overstreet 	__bio_clone_fast(clone, bio);
13139c47008dSMartin K.
Petersen 1314e2460f2aSMikulas Patocka if (unlikely(bio_integrity(bio) != NULL)) { 1315e2460f2aSMikulas Patocka int r; 1316e2460f2aSMikulas Patocka 1317e2460f2aSMikulas Patocka if (unlikely(!dm_target_has_integrity(tio->ti->type) && 1318e2460f2aSMikulas Patocka !dm_target_passes_integrity(tio->ti->type))) { 1319e2460f2aSMikulas Patocka DMWARN("%s: the target %s doesn't support integrity data.", 1320e2460f2aSMikulas Patocka dm_device_name(tio->io->md), 1321e2460f2aSMikulas Patocka tio->ti->type->name); 1322e2460f2aSMikulas Patocka return -EIO; 1323e2460f2aSMikulas Patocka } 1324e2460f2aSMikulas Patocka 1325e2460f2aSMikulas Patocka r = bio_integrity_clone(clone, bio, GFP_NOIO); 1326c80914e8SMike Snitzer if (r < 0) 1327c80914e8SMike Snitzer return r; 1328c80914e8SMike Snitzer } 13291c3b13e6SKent Overstreet 1330264c869dSDamien Le Moal if (bio_op(bio) != REQ_OP_ZONE_REPORT) 13311c3b13e6SKent Overstreet bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector)); 13321c3b13e6SKent Overstreet clone->bi_iter.bi_size = to_bytes(len); 13331c3b13e6SKent Overstreet 1334e2460f2aSMikulas Patocka if (unlikely(bio_integrity(bio) != NULL)) 1335fbd08e76SDmitry Monakhov bio_integrity_trim(clone); 1336c80914e8SMike Snitzer 1337c80914e8SMike Snitzer return 0; 13381da177e4SLinus Torvalds } 13391da177e4SLinus Torvalds 1340318716ddSMike Snitzer static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci, 1341318716ddSMike Snitzer struct dm_target *ti, unsigned num_bios) 1342f9ab94ceSMikulas Patocka { 1343dba14160SMikulas Patocka struct dm_target_io *tio; 1344318716ddSMike Snitzer int try; 1345dba14160SMikulas Patocka 1346318716ddSMike Snitzer if (!num_bios) 1347318716ddSMike Snitzer return; 1348f9ab94ceSMikulas Patocka 1349318716ddSMike Snitzer if (num_bios == 1) { 1350318716ddSMike Snitzer tio = alloc_tio(ci, ti, 0, GFP_NOIO); 1351318716ddSMike Snitzer bio_list_add(blist, &tio->clone); 1352318716ddSMike Snitzer return; 13539015df24SAlasdair G Kergon } 13549015df24SAlasdair G Kergon 1355318716ddSMike Snitzer for (try = 0; try < 2; try++) { 1356318716ddSMike Snitzer int bio_nr; 1357318716ddSMike Snitzer struct bio *bio; 1358318716ddSMike Snitzer 1359318716ddSMike Snitzer if (try) 1360bc02cdbeSMike Snitzer mutex_lock(&ci->io->md->table_devices_lock); 1361318716ddSMike Snitzer for (bio_nr = 0; bio_nr < num_bios; bio_nr++) { 1362318716ddSMike Snitzer tio = alloc_tio(ci, ti, bio_nr, try ? 
GFP_NOIO : GFP_NOWAIT); 1363318716ddSMike Snitzer if (!tio) 1364318716ddSMike Snitzer break; 1365318716ddSMike Snitzer 1366318716ddSMike Snitzer bio_list_add(blist, &tio->clone); 1367318716ddSMike Snitzer } 1368318716ddSMike Snitzer if (try) 1369bc02cdbeSMike Snitzer mutex_unlock(&ci->io->md->table_devices_lock); 1370318716ddSMike Snitzer if (bio_nr == num_bios) 1371318716ddSMike Snitzer return; 1372318716ddSMike Snitzer 1373318716ddSMike Snitzer while ((bio = bio_list_pop(blist))) { 1374318716ddSMike Snitzer tio = container_of(bio, struct dm_target_io, clone); 1375318716ddSMike Snitzer free_tio(tio); 1376318716ddSMike Snitzer } 1377318716ddSMike Snitzer } 1378318716ddSMike Snitzer } 1379318716ddSMike Snitzer 1380978e51baSMike Snitzer static blk_qc_t __clone_and_map_simple_bio(struct clone_info *ci, 1381318716ddSMike Snitzer struct dm_target_io *tio, unsigned *len) 13829015df24SAlasdair G Kergon { 1383dba14160SMikulas Patocka struct bio *clone = &tio->clone; 13849015df24SAlasdair G Kergon 13851dd40c3eSMikulas Patocka tio->len_ptr = len; 13861dd40c3eSMikulas Patocka 13871c3b13e6SKent Overstreet __bio_clone_fast(clone, ci->bio); 1388bd2a49b8SAlasdair G Kergon if (len) 13891dd40c3eSMikulas Patocka bio_setup_sector(clone, ci->sector, *len); 1390f9ab94ceSMikulas Patocka 1391978e51baSMike Snitzer return __map_bio(tio); 1392f9ab94ceSMikulas Patocka } 1393f9ab94ceSMikulas Patocka 139414fe594dSAlasdair G Kergon static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti, 13951dd40c3eSMikulas Patocka unsigned num_bios, unsigned *len) 139606a426ceSMike Snitzer { 1397318716ddSMike Snitzer struct bio_list blist = BIO_EMPTY_LIST; 1398318716ddSMike Snitzer struct bio *bio; 1399318716ddSMike Snitzer struct dm_target_io *tio; 140006a426ceSMike Snitzer 1401318716ddSMike Snitzer alloc_multiple_bios(&blist, ci, ti, num_bios); 1402318716ddSMike Snitzer 1403318716ddSMike Snitzer while ((bio = bio_list_pop(&blist))) { 1404318716ddSMike Snitzer tio = container_of(bio, struct dm_target_io, clone); 1405978e51baSMike Snitzer (void) __clone_and_map_simple_bio(ci, tio, len); 1406318716ddSMike Snitzer } 140706a426ceSMike Snitzer } 140806a426ceSMike Snitzer 140914fe594dSAlasdair G Kergon static int __send_empty_flush(struct clone_info *ci) 1410f9ab94ceSMikulas Patocka { 141106a426ceSMike Snitzer unsigned target_nr = 0; 1412f9ab94ceSMikulas Patocka struct dm_target *ti; 1413f9ab94ceSMikulas Patocka 1414b372d360SMike Snitzer BUG_ON(bio_has_data(ci->bio)); 1415f9ab94ceSMikulas Patocka while ((ti = dm_table_get_target(ci->map, target_nr++))) 14161dd40c3eSMikulas Patocka __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL); 1417f9ab94ceSMikulas Patocka 1418f9ab94ceSMikulas Patocka return 0; 1419f9ab94ceSMikulas Patocka } 1420f9ab94ceSMikulas Patocka 1421c80914e8SMike Snitzer static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti, 14221dd40c3eSMikulas Patocka sector_t sector, unsigned *len) 14235ae89a87SMike Snitzer { 1424dba14160SMikulas Patocka struct bio *bio = ci->bio; 14255ae89a87SMike Snitzer struct dm_target_io *tio; 1426f31c21e4SNeilBrown int r; 14275ae89a87SMike Snitzer 1428318716ddSMike Snitzer tio = alloc_tio(ci, ti, 0, GFP_NOIO); 14291dd40c3eSMikulas Patocka tio->len_ptr = len; 1430c80914e8SMike Snitzer r = clone_bio(tio, bio, sector, *len); 1431072623deSMikulas Patocka if (r < 0) { 1432cfae7529SMike Snitzer free_tio(tio); 1433c80914e8SMike Snitzer return r; 1434b0d8ed4dSAlasdair G Kergon } 1435978e51baSMike Snitzer (void) __map_bio(tio); 143655a62eefSAlasdair G Kergon 
1437f31c21e4SNeilBrown return 0; 143823508a96SMike Snitzer } 143955a62eefSAlasdair G Kergon 144023508a96SMike Snitzer typedef unsigned (*get_num_bios_fn)(struct dm_target *ti); 144155a62eefSAlasdair G Kergon 144223508a96SMike Snitzer static unsigned get_num_discard_bios(struct dm_target *ti) 144323508a96SMike Snitzer { 144423508a96SMike Snitzer return ti->num_discard_bios; 144523508a96SMike Snitzer } 144623508a96SMike Snitzer 144700716545SDenis Semakin static unsigned get_num_secure_erase_bios(struct dm_target *ti) 144800716545SDenis Semakin { 144900716545SDenis Semakin return ti->num_secure_erase_bios; 145000716545SDenis Semakin } 145100716545SDenis Semakin 145223508a96SMike Snitzer static unsigned get_num_write_same_bios(struct dm_target *ti) 145323508a96SMike Snitzer { 145423508a96SMike Snitzer return ti->num_write_same_bios; 145523508a96SMike Snitzer } 145623508a96SMike Snitzer 1457ac62d620SChristoph Hellwig static unsigned get_num_write_zeroes_bios(struct dm_target *ti) 1458ac62d620SChristoph Hellwig { 1459ac62d620SChristoph Hellwig return ti->num_write_zeroes_bios; 1460ac62d620SChristoph Hellwig } 1461ac62d620SChristoph Hellwig 146223508a96SMike Snitzer typedef bool (*is_split_required_fn)(struct dm_target *ti); 146323508a96SMike Snitzer 146423508a96SMike Snitzer static bool is_split_required_for_discard(struct dm_target *ti) 146523508a96SMike Snitzer { 146655a62eefSAlasdair G Kergon return ti->split_discard_bios; 146723508a96SMike Snitzer } 146823508a96SMike Snitzer 14693d7f4562SMike Snitzer static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti, 147055a62eefSAlasdair G Kergon get_num_bios_fn get_num_bios, 147123508a96SMike Snitzer is_split_required_fn is_split_required) 14725ae89a87SMike Snitzer { 1473e0d6609aSMikulas Patocka unsigned len; 147455a62eefSAlasdair G Kergon unsigned num_bios; 14755ae89a87SMike Snitzer 14765ae89a87SMike Snitzer /* 147723508a96SMike Snitzer * Even though the device advertised support for this type of 147823508a96SMike Snitzer * request, that does not mean every target supports it, and 1479936688d7SMike Snitzer * reconfiguration might also have changed that since the 14805ae89a87SMike Snitzer * check was performed. 14815ae89a87SMike Snitzer */ 148255a62eefSAlasdair G Kergon num_bios = get_num_bios ? 
get_num_bios(ti) : 0; 148355a62eefSAlasdair G Kergon if (!num_bios) 14845ae89a87SMike Snitzer return -EOPNOTSUPP; 14855ae89a87SMike Snitzer 148623508a96SMike Snitzer if (is_split_required && !is_split_required(ti)) 1487e0d6609aSMikulas Patocka len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti)); 14887acf0277SMikulas Patocka else 1489e0d6609aSMikulas Patocka len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti)); 14905ae89a87SMike Snitzer 14911dd40c3eSMikulas Patocka __send_duplicate_bios(ci, ti, num_bios, &len); 14925ae89a87SMike Snitzer 1493a79245b3SMike Snitzer ci->sector += len; 14943d7f4562SMike Snitzer ci->sector_count -= len; 14955ae89a87SMike Snitzer 14965ae89a87SMike Snitzer return 0; 14975ae89a87SMike Snitzer } 14985ae89a87SMike Snitzer 14993d7f4562SMike Snitzer static int __send_discard(struct clone_info *ci, struct dm_target *ti) 150023508a96SMike Snitzer { 15013d7f4562SMike Snitzer return __send_changing_extent_only(ci, ti, get_num_discard_bios, 150223508a96SMike Snitzer is_split_required_for_discard); 150323508a96SMike Snitzer } 150423508a96SMike Snitzer 150500716545SDenis Semakin static int __send_secure_erase(struct clone_info *ci, struct dm_target *ti) 150600716545SDenis Semakin { 150700716545SDenis Semakin return __send_changing_extent_only(ci, ti, get_num_secure_erase_bios, NULL); 150800716545SDenis Semakin } 150900716545SDenis Semakin 15103d7f4562SMike Snitzer static int __send_write_same(struct clone_info *ci, struct dm_target *ti) 151123508a96SMike Snitzer { 15123d7f4562SMike Snitzer return __send_changing_extent_only(ci, ti, get_num_write_same_bios, NULL); 151323508a96SMike Snitzer } 151423508a96SMike Snitzer 15153d7f4562SMike Snitzer static int __send_write_zeroes(struct clone_info *ci, struct dm_target *ti) 1516ac62d620SChristoph Hellwig { 15173d7f4562SMike Snitzer return __send_changing_extent_only(ci, ti, get_num_write_zeroes_bios, NULL); 1518ac62d620SChristoph Hellwig } 1519ac62d620SChristoph Hellwig 15200519c71eSMike Snitzer static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti, 15210519c71eSMike Snitzer int *result) 15220519c71eSMike Snitzer { 15230519c71eSMike Snitzer struct bio *bio = ci->bio; 15240519c71eSMike Snitzer 15250519c71eSMike Snitzer if (bio_op(bio) == REQ_OP_DISCARD) 15260519c71eSMike Snitzer *result = __send_discard(ci, ti); 152700716545SDenis Semakin else if (bio_op(bio) == REQ_OP_SECURE_ERASE) 152800716545SDenis Semakin *result = __send_secure_erase(ci, ti); 15290519c71eSMike Snitzer else if (bio_op(bio) == REQ_OP_WRITE_SAME) 15300519c71eSMike Snitzer *result = __send_write_same(ci, ti); 15310519c71eSMike Snitzer else if (bio_op(bio) == REQ_OP_WRITE_ZEROES) 15320519c71eSMike Snitzer *result = __send_write_zeroes(ci, ti); 15330519c71eSMike Snitzer else 15340519c71eSMike Snitzer return false; 15350519c71eSMike Snitzer 15360519c71eSMike Snitzer return true; 15370519c71eSMike Snitzer } 15380519c71eSMike Snitzer 1539e4c93811SAlasdair G Kergon /* 1540e4c93811SAlasdair G Kergon * Select the correct strategy for processing a non-flush bio. 
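 * "Abnormal" bios (discard, secure erase, write same and write zeroes) are
 * handed off to __process_abnormal_io() above, which duplicates them per
 * target via __send_changing_extent_only(); any other bio is cloned and
 * mapped as a regular data bio.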
1541e4c93811SAlasdair G Kergon */ 1542e4c93811SAlasdair G Kergon static int __split_and_process_non_flush(struct clone_info *ci) 1543e4c93811SAlasdair G Kergon { 1544e4c93811SAlasdair G Kergon struct bio *bio = ci->bio; 1545e4c93811SAlasdair G Kergon struct dm_target *ti; 15461c3b13e6SKent Overstreet unsigned len; 1547c80914e8SMike Snitzer int r; 1548e4c93811SAlasdair G Kergon 1549e4c93811SAlasdair G Kergon ti = dm_table_find_target(ci->map, ci->sector); 1550e4c93811SAlasdair G Kergon if (!dm_target_is_valid(ti)) 1551e4c93811SAlasdair G Kergon return -EIO; 1552e4c93811SAlasdair G Kergon 15530519c71eSMike Snitzer if (unlikely(__process_abnormal_io(ci, ti, &r))) 15540519c71eSMike Snitzer return r; 15553d7f4562SMike Snitzer 1556264c869dSDamien Le Moal if (bio_op(bio) == REQ_OP_ZONE_REPORT) 1557264c869dSDamien Le Moal len = ci->sector_count; 1558264c869dSDamien Le Moal else 1559264c869dSDamien Le Moal len = min_t(sector_t, max_io_len(ci->sector, ti), 1560264c869dSDamien Le Moal ci->sector_count); 1561e4c93811SAlasdair G Kergon 1562c80914e8SMike Snitzer r = __clone_and_map_data_bio(ci, ti, ci->sector, &len); 1563c80914e8SMike Snitzer if (r < 0) 1564c80914e8SMike Snitzer return r; 1565e4c93811SAlasdair G Kergon 1566e4c93811SAlasdair G Kergon ci->sector += len; 1567e4c93811SAlasdair G Kergon ci->sector_count -= len; 1568e4c93811SAlasdair G Kergon 1569e4c93811SAlasdair G Kergon return 0; 1570e4c93811SAlasdair G Kergon } 1571e4c93811SAlasdair G Kergon 1572978e51baSMike Snitzer static void init_clone_info(struct clone_info *ci, struct mapped_device *md, 1573978e51baSMike Snitzer struct dm_table *map, struct bio *bio) 1574978e51baSMike Snitzer { 1575978e51baSMike Snitzer ci->map = map; 1576978e51baSMike Snitzer ci->io = alloc_io(md, bio); 1577978e51baSMike Snitzer ci->sector = bio->bi_iter.bi_sector; 1578978e51baSMike Snitzer } 1579978e51baSMike Snitzer 1580e4c93811SAlasdair G Kergon /* 158114fe594dSAlasdair G Kergon * Entry point to split a bio into clones and submit them to the targets. 
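 * Flushes and zone resets are processed with a zero sector_count; anything
 * else is cloned target by target, and a remainder that cannot be handled
 * in one pass is split off and chained back through generic_make_request()
 * so it completes after the bios already submitted.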
15821da177e4SLinus Torvalds */ 1583978e51baSMike Snitzer static blk_qc_t __split_and_process_bio(struct mapped_device *md, 158483d5e5b0SMikulas Patocka struct dm_table *map, struct bio *bio) 15851da177e4SLinus Torvalds { 15861da177e4SLinus Torvalds struct clone_info ci; 1587978e51baSMike Snitzer blk_qc_t ret = BLK_QC_T_NONE; 1588512875bdSJun'ichi Nomura int error = 0; 15891da177e4SLinus Torvalds 159083d5e5b0SMikulas Patocka if (unlikely(!map)) { 1591f0b9a450SMikulas Patocka bio_io_error(bio); 1592978e51baSMike Snitzer return ret; 1593f0b9a450SMikulas Patocka } 1594692d0eb9SMikulas Patocka 1595978e51baSMike Snitzer init_clone_info(&ci, md, map, bio); 1596bd2a49b8SAlasdair G Kergon 15971eff9d32SJens Axboe if (bio->bi_opf & REQ_PREFLUSH) { 1598bc02cdbeSMike Snitzer ci.bio = &ci.io->md->flush_bio; 1599b372d360SMike Snitzer ci.sector_count = 0; 160014fe594dSAlasdair G Kergon error = __send_empty_flush(&ci); 1601b372d360SMike Snitzer /* dec_pending submits any data associated with flush */ 1602a4aa5e56SDamien Le Moal } else if (bio_op(bio) == REQ_OP_ZONE_RESET) { 1603a4aa5e56SDamien Le Moal ci.bio = bio; 1604a4aa5e56SDamien Le Moal ci.sector_count = 0; 1605a4aa5e56SDamien Le Moal error = __split_and_process_non_flush(&ci); 1606b372d360SMike Snitzer } else { 16076a8736d1STejun Heo ci.bio = bio; 16081da177e4SLinus Torvalds ci.sector_count = bio_sectors(bio); 160918a25da8SNeilBrown while (ci.sector_count && !error) { 161014fe594dSAlasdair G Kergon error = __split_and_process_non_flush(&ci); 161118a25da8SNeilBrown if (current->bio_list && ci.sector_count && !error) { 161218a25da8SNeilBrown /* 161318a25da8SNeilBrown * Remainder must be passed to generic_make_request() 161418a25da8SNeilBrown * so that it gets handled *after* bios already submitted 161518a25da8SNeilBrown * have been completely processed. 161618a25da8SNeilBrown * We take a clone of the original to store in 1617745dc570SMike Snitzer * ci.io->orig_bio to be used by end_io_acct() and 161818a25da8SNeilBrown * for dec_pending to use for completion handling. 161918a25da8SNeilBrown * As this path is not used for REQ_OP_ZONE_REPORT, 1620745dc570SMike Snitzer * the usage of io->orig_bio in dm_remap_zone_report() 162118a25da8SNeilBrown * won't be affected by this reassignment. 162218a25da8SNeilBrown */ 1623f21c601aSMike Snitzer struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count, 1624f21c601aSMike Snitzer GFP_NOIO, &md->queue->bio_split); 1625745dc570SMike Snitzer ci.io->orig_bio = b; 162618a25da8SNeilBrown bio_chain(b, bio); 1627978e51baSMike Snitzer ret = generic_make_request(bio); 162818a25da8SNeilBrown break; 162918a25da8SNeilBrown } 163018a25da8SNeilBrown } 1631d87f4c14STejun Heo } 16321da177e4SLinus Torvalds 16331da177e4SLinus Torvalds /* drop the extra reference count */ 163454385bf7SBart Van Assche dec_pending(ci.io, errno_to_blk_status(error)); 1635978e51baSMike Snitzer return ret; 16361da177e4SLinus Torvalds } 16371da177e4SLinus Torvalds 16381da177e4SLinus Torvalds /* 1639978e51baSMike Snitzer * Optimized variant of __split_and_process_bio that leverages the 1640978e51baSMike Snitzer * fact that targets that use it do _not_ have a need to split bios. 
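 * It assumes a single immutable target spanning the whole device
 * (md->immutable_target, e.g. for DM_TYPE_NVME_BIO_BASED), so each bio can
 * be mapped with one simple clone instead of the full splitting machinery.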
16411da177e4SLinus Torvalds */ 1642978e51baSMike Snitzer static blk_qc_t __process_bio(struct mapped_device *md, 1643978e51baSMike Snitzer struct dm_table *map, struct bio *bio) 16441da177e4SLinus Torvalds { 1645978e51baSMike Snitzer struct clone_info ci; 1646978e51baSMike Snitzer blk_qc_t ret = BLK_QC_T_NONE; 1647978e51baSMike Snitzer int error = 0; 1648978e51baSMike Snitzer 1649978e51baSMike Snitzer if (unlikely(!map)) { 1650978e51baSMike Snitzer bio_io_error(bio); 1651978e51baSMike Snitzer return ret; 1652978e51baSMike Snitzer } 1653978e51baSMike Snitzer 1654978e51baSMike Snitzer init_clone_info(&ci, md, map, bio); 1655978e51baSMike Snitzer 1656978e51baSMike Snitzer if (bio->bi_opf & REQ_PREFLUSH) { 1657978e51baSMike Snitzer ci.bio = &ci.io->md->flush_bio; 1658978e51baSMike Snitzer ci.sector_count = 0; 1659978e51baSMike Snitzer error = __send_empty_flush(&ci); 1660978e51baSMike Snitzer /* dec_pending submits any data associated with flush */ 1661978e51baSMike Snitzer } else { 1662978e51baSMike Snitzer struct dm_target *ti = md->immutable_target; 1663978e51baSMike Snitzer struct dm_target_io *tio; 1664978e51baSMike Snitzer 1665978e51baSMike Snitzer /* 1666978e51baSMike Snitzer * Defend against IO still getting in during teardown 1667978e51baSMike Snitzer * - as was seen for a time with nvme-fcloop 1668978e51baSMike Snitzer */ 1669978e51baSMike Snitzer if (unlikely(WARN_ON_ONCE(!ti || !dm_target_is_valid(ti)))) { 1670978e51baSMike Snitzer error = -EIO; 1671978e51baSMike Snitzer goto out; 1672978e51baSMike Snitzer } 1673978e51baSMike Snitzer 1674978e51baSMike Snitzer ci.bio = bio; 1675978e51baSMike Snitzer ci.sector_count = bio_sectors(bio); 16760519c71eSMike Snitzer if (unlikely(__process_abnormal_io(&ci, ti, &error))) 16770519c71eSMike Snitzer goto out; 16780519c71eSMike Snitzer 16790519c71eSMike Snitzer tio = alloc_tio(&ci, ti, 0, GFP_NOIO); 1680978e51baSMike Snitzer ret = __clone_and_map_simple_bio(&ci, tio, NULL); 1681978e51baSMike Snitzer } 1682978e51baSMike Snitzer out: 1683978e51baSMike Snitzer /* drop the extra reference count */ 1684978e51baSMike Snitzer dec_pending(ci.io, errno_to_blk_status(error)); 1685978e51baSMike Snitzer return ret; 1686978e51baSMike Snitzer } 1687978e51baSMike Snitzer 1688978e51baSMike Snitzer typedef blk_qc_t (process_bio_fn)(struct mapped_device *, struct dm_table *, struct bio *); 1689978e51baSMike Snitzer 1690978e51baSMike Snitzer static blk_qc_t __dm_make_request(struct request_queue *q, struct bio *bio, 1691978e51baSMike Snitzer process_bio_fn process_bio) 16921da177e4SLinus Torvalds { 16931da177e4SLinus Torvalds struct mapped_device *md = q->queuedata; 1694978e51baSMike Snitzer blk_qc_t ret = BLK_QC_T_NONE; 169583d5e5b0SMikulas Patocka int srcu_idx; 169683d5e5b0SMikulas Patocka struct dm_table *map; 16971da177e4SLinus Torvalds 169883d5e5b0SMikulas Patocka map = dm_get_live_table(md, &srcu_idx); 16991da177e4SLinus Torvalds 17006a8736d1STejun Heo /* if we're suspended, we have to queue this io for later */ 17016a8736d1STejun Heo if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { 170283d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 17031da177e4SLinus Torvalds 17041eff9d32SJens Axboe if (!(bio->bi_opf & REQ_RAHEAD)) 170592c63902SMikulas Patocka queue_io(md, bio); 17066a8736d1STejun Heo else 17076a8736d1STejun Heo bio_io_error(bio); 1708978e51baSMike Snitzer return ret; 17091da177e4SLinus Torvalds } 17101da177e4SLinus Torvalds 1711978e51baSMike Snitzer ret = process_bio(md, map, bio); 1712978e51baSMike Snitzer 171383d5e5b0SMikulas 
Patocka dm_put_live_table(md, srcu_idx); 1714978e51baSMike Snitzer return ret; 1715978e51baSMike Snitzer } 1716978e51baSMike Snitzer 1717978e51baSMike Snitzer /* 1718978e51baSMike Snitzer * The request function that remaps the bio to one target and 1719978e51baSMike Snitzer * splits off any remainder. 1720978e51baSMike Snitzer */ 1721978e51baSMike Snitzer static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio) 1722978e51baSMike Snitzer { 1723978e51baSMike Snitzer return __dm_make_request(q, bio, __split_and_process_bio); 1724978e51baSMike Snitzer } 1725978e51baSMike Snitzer 1726978e51baSMike Snitzer static blk_qc_t dm_make_request_nvme(struct request_queue *q, struct bio *bio) 1727978e51baSMike Snitzer { 1728978e51baSMike Snitzer return __dm_make_request(q, bio, __process_bio); 1729cec47e3dSKiyoshi Ueda } 1730cec47e3dSKiyoshi Ueda 17311da177e4SLinus Torvalds static int dm_any_congested(void *congested_data, int bdi_bits) 17321da177e4SLinus Torvalds { 17338a57dfc6SChandra Seetharaman int r = bdi_bits; 17348a57dfc6SChandra Seetharaman struct mapped_device *md = congested_data; 17358a57dfc6SChandra Seetharaman struct dm_table *map; 17361da177e4SLinus Torvalds 17371eb787ecSAlasdair G Kergon if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 1738e522c039SMike Snitzer if (dm_request_based(md)) { 1739cec47e3dSKiyoshi Ueda /* 1740e522c039SMike Snitzer * With request-based DM we only need to check the 1741e522c039SMike Snitzer * top-level queue for congestion. 1742cec47e3dSKiyoshi Ueda */ 1743dc3b17ccSJan Kara r = md->queue->backing_dev_info->wb.state & bdi_bits; 1744e522c039SMike Snitzer } else { 1745e522c039SMike Snitzer map = dm_get_live_table_fast(md); 1746e522c039SMike Snitzer if (map) 17471da177e4SLinus Torvalds r = dm_table_any_congested(map, bdi_bits); 174883d5e5b0SMikulas Patocka dm_put_live_table_fast(md); 17498a57dfc6SChandra Seetharaman } 1750e522c039SMike Snitzer } 17518a57dfc6SChandra Seetharaman 17521da177e4SLinus Torvalds return r; 17531da177e4SLinus Torvalds } 17541da177e4SLinus Torvalds 17551da177e4SLinus Torvalds /*----------------------------------------------------------------- 17561da177e4SLinus Torvalds * An IDR is used to keep track of allocated minor numbers. 17571da177e4SLinus Torvalds *---------------------------------------------------------------*/ 17582b06cfffSAlasdair G Kergon static void free_minor(int minor) 17591da177e4SLinus Torvalds { 1760f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 17611da177e4SLinus Torvalds idr_remove(&_minor_idr, minor); 1762f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 17631da177e4SLinus Torvalds } 17641da177e4SLinus Torvalds 17651da177e4SLinus Torvalds /* 17661da177e4SLinus Torvalds * See if the device with a specific minor # is free. 17671da177e4SLinus Torvalds */ 1768cf13ab8eSFrederik Deweerdt static int specific_minor(int minor) 17691da177e4SLinus Torvalds { 1770c9d76be6STejun Heo int r; 17711da177e4SLinus Torvalds 17721da177e4SLinus Torvalds if (minor >= (1 << MINORBITS)) 17731da177e4SLinus Torvalds return -EINVAL; 17741da177e4SLinus Torvalds 1775c9d76be6STejun Heo idr_preload(GFP_KERNEL); 1776f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 17771da177e4SLinus Torvalds 1778c9d76be6STejun Heo r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT); 17791da177e4SLinus Torvalds 1780f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 1781c9d76be6STejun Heo idr_preload_end(); 1782c9d76be6STejun Heo if (r < 0) 1783c9d76be6STejun Heo return r == -ENOSPC ? 
-EBUSY : r; 1784c9d76be6STejun Heo return 0; 17851da177e4SLinus Torvalds } 17861da177e4SLinus Torvalds 1787cf13ab8eSFrederik Deweerdt static int next_free_minor(int *minor) 17881da177e4SLinus Torvalds { 1789c9d76be6STejun Heo int r; 17901da177e4SLinus Torvalds 1791c9d76be6STejun Heo idr_preload(GFP_KERNEL); 1792f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 17931da177e4SLinus Torvalds 1794c9d76be6STejun Heo r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT); 17951da177e4SLinus Torvalds 1796f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 1797c9d76be6STejun Heo idr_preload_end(); 1798c9d76be6STejun Heo if (r < 0) 17991da177e4SLinus Torvalds return r; 1800c9d76be6STejun Heo *minor = r; 1801c9d76be6STejun Heo return 0; 18021da177e4SLinus Torvalds } 18031da177e4SLinus Torvalds 180483d5cde4SAlexey Dobriyan static const struct block_device_operations dm_blk_dops; 1805f26c5719SDan Williams static const struct dax_operations dm_dax_ops; 18061da177e4SLinus Torvalds 180753d5914fSMikulas Patocka static void dm_wq_work(struct work_struct *work); 180853d5914fSMikulas Patocka 1809c12c9a3cSMike Snitzer static void dm_init_normal_md_queue(struct mapped_device *md) 1810bfebd1cdSMike Snitzer { 181117e149b8SMike Snitzer md->use_blk_mq = false; 1812bfebd1cdSMike Snitzer 1813bfebd1cdSMike Snitzer /* 1814bfebd1cdSMike Snitzer * Initialize aspects of queue that aren't relevant for blk-mq 1815bfebd1cdSMike Snitzer */ 1816dc3b17ccSJan Kara md->queue->backing_dev_info->congested_fn = dm_any_congested; 18174a0b4ddfSMike Snitzer } 18184a0b4ddfSMike Snitzer 18190f20972fSMike Snitzer static void cleanup_mapped_device(struct mapped_device *md) 18200f20972fSMike Snitzer { 18210f20972fSMike Snitzer if (md->wq) 18220f20972fSMike Snitzer destroy_workqueue(md->wq); 18230f20972fSMike Snitzer if (md->kworker_task) 18240f20972fSMike Snitzer kthread_stop(md->kworker_task); 18256f1c819cSKent Overstreet bioset_exit(&md->bs); 18266f1c819cSKent Overstreet bioset_exit(&md->io_bs); 18270f20972fSMike Snitzer 1828f26c5719SDan Williams if (md->dax_dev) { 1829f26c5719SDan Williams kill_dax(md->dax_dev); 1830f26c5719SDan Williams put_dax(md->dax_dev); 1831f26c5719SDan Williams md->dax_dev = NULL; 1832f26c5719SDan Williams } 1833f26c5719SDan Williams 18340f20972fSMike Snitzer if (md->disk) { 18350f20972fSMike Snitzer spin_lock(&_minor_lock); 18360f20972fSMike Snitzer md->disk->private_data = NULL; 18370f20972fSMike Snitzer spin_unlock(&_minor_lock); 18380f20972fSMike Snitzer del_gendisk(md->disk); 18390f20972fSMike Snitzer put_disk(md->disk); 18400f20972fSMike Snitzer } 18410f20972fSMike Snitzer 18420f20972fSMike Snitzer if (md->queue) 18430f20972fSMike Snitzer blk_cleanup_queue(md->queue); 18440f20972fSMike Snitzer 1845d09960b0STahsin Erdogan cleanup_srcu_struct(&md->io_barrier); 1846d09960b0STahsin Erdogan 18470f20972fSMike Snitzer if (md->bdev) { 18480f20972fSMike Snitzer bdput(md->bdev); 18490f20972fSMike Snitzer md->bdev = NULL; 18500f20972fSMike Snitzer } 18514cc96131SMike Snitzer 1852d5ffebddSMike Snitzer mutex_destroy(&md->suspend_lock); 1853d5ffebddSMike Snitzer mutex_destroy(&md->type_lock); 1854d5ffebddSMike Snitzer mutex_destroy(&md->table_devices_lock); 1855d5ffebddSMike Snitzer 18564cc96131SMike Snitzer dm_mq_cleanup_mapped_device(md); 18570f20972fSMike Snitzer } 18580f20972fSMike Snitzer 18591da177e4SLinus Torvalds /* 18601da177e4SLinus Torvalds * Allocate and initialise a blank device with a given minor. 
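 * On failure, the partially constructed device is torn down through the
 * bad* labels at the bottom of the function, in reverse order of setup.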
18611da177e4SLinus Torvalds */ 18622b06cfffSAlasdair G Kergon static struct mapped_device *alloc_dev(int minor) 18631da177e4SLinus Torvalds { 1864115485e8SMike Snitzer int r, numa_node_id = dm_get_numa_node(); 1865976431b0SDan Williams struct dax_device *dax_dev = NULL; 1866115485e8SMike Snitzer struct mapped_device *md; 1867ba61fdd1SJeff Mahoney void *old_md; 18681da177e4SLinus Torvalds 1869856eb091SMikulas Patocka md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id); 18701da177e4SLinus Torvalds if (!md) { 18711da177e4SLinus Torvalds DMWARN("unable to allocate device, out of memory."); 18721da177e4SLinus Torvalds return NULL; 18731da177e4SLinus Torvalds } 18741da177e4SLinus Torvalds 187510da4f79SJeff Mahoney if (!try_module_get(THIS_MODULE)) 18766ed7ade8SMilan Broz goto bad_module_get; 187710da4f79SJeff Mahoney 18781da177e4SLinus Torvalds /* get a minor number for the dev */ 18792b06cfffSAlasdair G Kergon if (minor == DM_ANY_MINOR) 1880cf13ab8eSFrederik Deweerdt r = next_free_minor(&minor); 18812b06cfffSAlasdair G Kergon else 1882cf13ab8eSFrederik Deweerdt r = specific_minor(minor); 18831da177e4SLinus Torvalds if (r < 0) 18846ed7ade8SMilan Broz goto bad_minor; 18851da177e4SLinus Torvalds 188683d5e5b0SMikulas Patocka r = init_srcu_struct(&md->io_barrier); 188783d5e5b0SMikulas Patocka if (r < 0) 188883d5e5b0SMikulas Patocka goto bad_io_barrier; 188983d5e5b0SMikulas Patocka 1890115485e8SMike Snitzer md->numa_node_id = numa_node_id; 18914cc96131SMike Snitzer md->use_blk_mq = dm_use_blk_mq_default(); 1892591ddcfcSMike Snitzer md->init_tio_pdu = false; 1893a5664dadSMike Snitzer md->type = DM_TYPE_NONE; 1894e61290a4SDaniel Walker mutex_init(&md->suspend_lock); 1895a5664dadSMike Snitzer mutex_init(&md->type_lock); 189686f1152bSBenjamin Marzinski mutex_init(&md->table_devices_lock); 1897022c2611SMikulas Patocka spin_lock_init(&md->deferred_lock); 18981da177e4SLinus Torvalds atomic_set(&md->holders, 1); 18995c6bd75dSAlasdair G Kergon atomic_set(&md->open_count, 0); 19001da177e4SLinus Torvalds atomic_set(&md->event_nr, 0); 19017a8c3d3bSMike Anderson atomic_set(&md->uevent_seq, 0); 19027a8c3d3bSMike Anderson INIT_LIST_HEAD(&md->uevent_list); 190386f1152bSBenjamin Marzinski INIT_LIST_HEAD(&md->table_devices); 19047a8c3d3bSMike Anderson spin_lock_init(&md->uevent_lock); 19051da177e4SLinus Torvalds 19065ee0524bSBart Van Assche md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id, NULL); 19071da177e4SLinus Torvalds if (!md->queue) 19080f20972fSMike Snitzer goto bad; 1909c12c9a3cSMike Snitzer md->queue->queuedata = md; 1910c12c9a3cSMike Snitzer md->queue->backing_dev_info->congested_data = md; 19111da177e4SLinus Torvalds 1912c12c9a3cSMike Snitzer md->disk = alloc_disk_node(1, md->numa_node_id); 19131da177e4SLinus Torvalds if (!md->disk) 19140f20972fSMike Snitzer goto bad; 19151da177e4SLinus Torvalds 1916316d315bSNikanth Karthikesan atomic_set(&md->pending[0], 0); 1917316d315bSNikanth Karthikesan atomic_set(&md->pending[1], 0); 1918f0b04115SJeff Mahoney init_waitqueue_head(&md->wait); 191953d5914fSMikulas Patocka INIT_WORK(&md->work, dm_wq_work); 1920f0b04115SJeff Mahoney init_waitqueue_head(&md->eventq); 19212995fa78SMikulas Patocka init_completion(&md->kobj_holder.completion); 19222eb6e1e3SKeith Busch md->kworker_task = NULL; 1923f0b04115SJeff Mahoney 19241da177e4SLinus Torvalds md->disk->major = _major; 19251da177e4SLinus Torvalds md->disk->first_minor = minor; 19261da177e4SLinus Torvalds md->disk->fops = &dm_blk_dops; 19271da177e4SLinus Torvalds md->disk->queue = md->queue; 19281da177e4SLinus 
Torvalds md->disk->private_data = md; 19291da177e4SLinus Torvalds sprintf(md->disk->disk_name, "dm-%d", minor); 1930f26c5719SDan Williams 1931976431b0SDan Williams if (IS_ENABLED(CONFIG_DAX_DRIVER)) { 1932f26c5719SDan Williams dax_dev = alloc_dax(md, md->disk->disk_name, &dm_dax_ops); 1933f26c5719SDan Williams if (!dax_dev) 1934f26c5719SDan Williams goto bad; 1935976431b0SDan Williams } 1936f26c5719SDan Williams md->dax_dev = dax_dev; 1937f26c5719SDan Williams 1938c100ec49SMike Snitzer add_disk_no_queue_reg(md->disk); 19397e51f257SMike Anderson format_dev_t(md->name, MKDEV(_major, minor)); 19401da177e4SLinus Torvalds 1941670368a8STejun Heo md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0); 1942304f3f6aSMilan Broz if (!md->wq) 19430f20972fSMike Snitzer goto bad; 1944304f3f6aSMilan Broz 194532a926daSMikulas Patocka md->bdev = bdget_disk(md->disk, 0); 194632a926daSMikulas Patocka if (!md->bdev) 19470f20972fSMike Snitzer goto bad; 194832a926daSMikulas Patocka 19493a83f467SMing Lei bio_init(&md->flush_bio, NULL, 0); 195074d46992SChristoph Hellwig bio_set_dev(&md->flush_bio, md->bdev); 1951ff0361b3SJan Kara md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC; 19526a8736d1STejun Heo 1953fd2ed4d2SMikulas Patocka dm_stats_init(&md->stats); 1954fd2ed4d2SMikulas Patocka 1955ba61fdd1SJeff Mahoney /* Populate the mapping, nobody knows we exist yet */ 1956f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 1957ba61fdd1SJeff Mahoney old_md = idr_replace(&_minor_idr, md, minor); 1958f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 1959ba61fdd1SJeff Mahoney 1960ba61fdd1SJeff Mahoney BUG_ON(old_md != MINOR_ALLOCED); 1961ba61fdd1SJeff Mahoney 19621da177e4SLinus Torvalds return md; 19631da177e4SLinus Torvalds 19640f20972fSMike Snitzer bad: 19650f20972fSMike Snitzer cleanup_mapped_device(md); 196683d5e5b0SMikulas Patocka bad_io_barrier: 19671da177e4SLinus Torvalds free_minor(minor); 19686ed7ade8SMilan Broz bad_minor: 196910da4f79SJeff Mahoney module_put(THIS_MODULE); 19706ed7ade8SMilan Broz bad_module_get: 1971856eb091SMikulas Patocka kvfree(md); 19721da177e4SLinus Torvalds return NULL; 19731da177e4SLinus Torvalds } 19741da177e4SLinus Torvalds 1975ae9da83fSJun'ichi Nomura static void unlock_fs(struct mapped_device *md); 1976ae9da83fSJun'ichi Nomura 19771da177e4SLinus Torvalds static void free_dev(struct mapped_device *md) 19781da177e4SLinus Torvalds { 1979f331c029STejun Heo int minor = MINOR(disk_devt(md->disk)); 198063d94e48SJun'ichi Nomura 1981ae9da83fSJun'ichi Nomura unlock_fs(md); 19822eb6e1e3SKeith Busch 19830f20972fSMike Snitzer cleanup_mapped_device(md); 19840f20972fSMike Snitzer 19850f20972fSMike Snitzer free_table_devices(&md->table_devices); 19860f20972fSMike Snitzer dm_stats_cleanup(&md->stats); 198763a4f065SMike Snitzer free_minor(minor); 198863a4f065SMike Snitzer 198910da4f79SJeff Mahoney module_put(THIS_MODULE); 1990856eb091SMikulas Patocka kvfree(md); 19911da177e4SLinus Torvalds } 19921da177e4SLinus Torvalds 19932a2a4c51SJens Axboe static int __bind_mempools(struct mapped_device *md, struct dm_table *t) 1994e6ee8c0bSKiyoshi Ueda { 1995c0820cf5SMikulas Patocka struct dm_md_mempools *p = dm_table_get_md_mempools(t); 19962a2a4c51SJens Axboe int ret = 0; 1997e6ee8c0bSKiyoshi Ueda 1998545ed20eSToshi Kani if (dm_table_bio_based(t)) { 1999c0820cf5SMikulas Patocka /* 200064f52b0eSMike Snitzer * The md may already have mempools that need changing. 
200164f52b0eSMike Snitzer * If so, reload bioset because front_pad may have changed 200216245bdcSJun'ichi Nomura * because a different table was loaded. 2003c0820cf5SMikulas Patocka */ 20046f1c819cSKent Overstreet bioset_exit(&md->bs); 20056f1c819cSKent Overstreet bioset_exit(&md->io_bs); 20060776aa0eSMike Snitzer 20076f1c819cSKent Overstreet } else if (bioset_initialized(&md->bs)) { 2008cbc4e3c1SMike Snitzer /* 20094e6e36c3SMike Snitzer * There's no need to reload with request-based dm 20104e6e36c3SMike Snitzer * because the size of front_pad doesn't change. 20114e6e36c3SMike Snitzer * Note for future: If you are to reload bioset, 20124e6e36c3SMike Snitzer * prep-ed requests in the queue may refer 20134e6e36c3SMike Snitzer * to bio from the old bioset, so you must walk 20144e6e36c3SMike Snitzer * through the queue to unprep. 2015cbc4e3c1SMike Snitzer */ 2016cbc4e3c1SMike Snitzer goto out; 2017cbc4e3c1SMike Snitzer } 2018cbc4e3c1SMike Snitzer 20196f1c819cSKent Overstreet BUG_ON(!p || 20206f1c819cSKent Overstreet bioset_initialized(&md->bs) || 20216f1c819cSKent Overstreet bioset_initialized(&md->io_bs)); 2022e6ee8c0bSKiyoshi Ueda 20232a2a4c51SJens Axboe ret = bioset_init_from_src(&md->bs, &p->bs); 20242a2a4c51SJens Axboe if (ret) 20252a2a4c51SJens Axboe goto out; 20262a2a4c51SJens Axboe ret = bioset_init_from_src(&md->io_bs, &p->io_bs); 20272a2a4c51SJens Axboe if (ret) 20282a2a4c51SJens Axboe bioset_exit(&md->bs); 2029e6ee8c0bSKiyoshi Ueda out: 203002233342SMike Snitzer /* mempool bind completed, no longer need any mempools in the table */ 2031e6ee8c0bSKiyoshi Ueda dm_table_free_md_mempools(t); 20322a2a4c51SJens Axboe return ret; 2033e6ee8c0bSKiyoshi Ueda } 2034e6ee8c0bSKiyoshi Ueda 20351da177e4SLinus Torvalds /* 20361da177e4SLinus Torvalds * Bind a table to the device. 20371da177e4SLinus Torvalds */ 20381da177e4SLinus Torvalds static void event_callback(void *context) 20391da177e4SLinus Torvalds { 20407a8c3d3bSMike Anderson unsigned long flags; 20417a8c3d3bSMike Anderson LIST_HEAD(uevents); 20421da177e4SLinus Torvalds struct mapped_device *md = (struct mapped_device *) context; 20431da177e4SLinus Torvalds 20447a8c3d3bSMike Anderson spin_lock_irqsave(&md->uevent_lock, flags); 20457a8c3d3bSMike Anderson list_splice_init(&md->uevent_list, &uevents); 20467a8c3d3bSMike Anderson spin_unlock_irqrestore(&md->uevent_lock, flags); 20477a8c3d3bSMike Anderson 2048ed9e1982STejun Heo dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); 20497a8c3d3bSMike Anderson 20501da177e4SLinus Torvalds atomic_inc(&md->event_nr); 20511da177e4SLinus Torvalds wake_up(&md->eventq); 205262e08243SMikulas Patocka dm_issue_global_event(); 20531da177e4SLinus Torvalds } 20541da177e4SLinus Torvalds 2055c217649bSMike Snitzer /* 2056c217649bSMike Snitzer * Protected by md->suspend_lock obtained by dm_swap_table(). 2057c217649bSMike Snitzer */ 20584e90188bSAlasdair G Kergon static void __set_size(struct mapped_device *md, sector_t size) 20591da177e4SLinus Torvalds { 20601ea0654eSBart Van Assche lockdep_assert_held(&md->suspend_lock); 20611ea0654eSBart Van Assche 20624e90188bSAlasdair G Kergon set_capacity(md->disk, size); 20631da177e4SLinus Torvalds 2064db8fef4fSMikulas Patocka i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT); 20651da177e4SLinus Torvalds } 20661da177e4SLinus Torvalds 2067042d2a9bSAlasdair G Kergon /* 2068042d2a9bSAlasdair G Kergon * Returns old map, which caller must destroy. 
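 * The swap happens under md->suspend_lock: for request-based tables the
 * queue is stopped first, mempools are (re)bound to match the new table,
 * the new map pointer is published with rcu_assign_pointer(), and the
 * queue limits are applied.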
2069042d2a9bSAlasdair G Kergon */ 2070042d2a9bSAlasdair G Kergon static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, 2071754c5fc7SMike Snitzer struct queue_limits *limits) 20721da177e4SLinus Torvalds { 2073042d2a9bSAlasdair G Kergon struct dm_table *old_map; 2074165125e1SJens Axboe struct request_queue *q = md->queue; 2075978e51baSMike Snitzer bool request_based = dm_table_request_based(t); 20761da177e4SLinus Torvalds sector_t size; 20772a2a4c51SJens Axboe int ret; 20781da177e4SLinus Torvalds 20795a8f1f80SBart Van Assche lockdep_assert_held(&md->suspend_lock); 20805a8f1f80SBart Van Assche 20811da177e4SLinus Torvalds size = dm_table_get_size(t); 20823ac51e74SDarrick J. Wong 20833ac51e74SDarrick J. Wong /* 20843ac51e74SDarrick J. Wong * Wipe any geometry if the size of the table changed. 20853ac51e74SDarrick J. Wong */ 2086fd2ed4d2SMikulas Patocka if (size != dm_get_size(md)) 20873ac51e74SDarrick J. Wong memset(&md->geometry, 0, sizeof(md->geometry)); 20883ac51e74SDarrick J. Wong 20894e90188bSAlasdair G Kergon __set_size(md, size); 20901da177e4SLinus Torvalds 2091cf222b37SAlasdair G Kergon dm_table_event_callback(t, event_callback, md); 20922ca3310eSAlasdair G Kergon 2093e6ee8c0bSKiyoshi Ueda /* 2094e6ee8c0bSKiyoshi Ueda * The queue hasn't been stopped yet, if the old table type wasn't 2095e6ee8c0bSKiyoshi Ueda * for request-based during suspension. So stop it to prevent 2096e6ee8c0bSKiyoshi Ueda * I/O mapping before resume. 2097e6ee8c0bSKiyoshi Ueda * This must be done before setting the queue restrictions, 2098e6ee8c0bSKiyoshi Ueda * because request-based dm may be run just after the setting. 2099e6ee8c0bSKiyoshi Ueda */ 2100978e51baSMike Snitzer if (request_based) 2101eca7ee6dSMike Snitzer dm_stop_queue(q); 2102978e51baSMike Snitzer 2103978e51baSMike Snitzer if (request_based || md->type == DM_TYPE_NVME_BIO_BASED) { 210416f12266SMike Snitzer /* 2105978e51baSMike Snitzer * Leverage the fact that request-based DM targets and 2106978e51baSMike Snitzer * NVMe bio based targets are immutable singletons 2107978e51baSMike Snitzer * - used to optimize both dm_request_fn and dm_mq_queue_rq; 2108978e51baSMike Snitzer * and __process_bio. 210916f12266SMike Snitzer */ 211016f12266SMike Snitzer md->immutable_target = dm_table_get_immutable_target(t); 211116f12266SMike Snitzer } 2112e6ee8c0bSKiyoshi Ueda 21132a2a4c51SJens Axboe ret = __bind_mempools(md, t); 21142a2a4c51SJens Axboe if (ret) { 21152a2a4c51SJens Axboe old_map = ERR_PTR(ret); 21162a2a4c51SJens Axboe goto out; 21172a2a4c51SJens Axboe } 2118e6ee8c0bSKiyoshi Ueda 2119a12f5d48SEric Dumazet old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 21201d3aa6f6SMike Snitzer rcu_assign_pointer(md->map, (void *)t); 212136a0456fSAlasdair G Kergon md->immutable_target_type = dm_table_get_immutable_target_type(t); 212236a0456fSAlasdair G Kergon 2123754c5fc7SMike Snitzer dm_table_set_restrictions(t, q, limits); 212441abc4e1SHannes Reinecke if (old_map) 212583d5e5b0SMikulas Patocka dm_sync_table(md); 21262ca3310eSAlasdair G Kergon 21272a2a4c51SJens Axboe out: 2128042d2a9bSAlasdair G Kergon return old_map; 21291da177e4SLinus Torvalds } 21301da177e4SLinus Torvalds 2131a7940155SAlasdair G Kergon /* 2132a7940155SAlasdair G Kergon * Returns unbound table for the caller to free. 
2133a7940155SAlasdair G Kergon */ 2134a7940155SAlasdair G Kergon static struct dm_table *__unbind(struct mapped_device *md) 21351da177e4SLinus Torvalds { 2136a12f5d48SEric Dumazet struct dm_table *map = rcu_dereference_protected(md->map, 1); 21371da177e4SLinus Torvalds 21381da177e4SLinus Torvalds if (!map) 2139a7940155SAlasdair G Kergon return NULL; 21401da177e4SLinus Torvalds 21411da177e4SLinus Torvalds dm_table_event_callback(map, NULL, NULL); 21429cdb8520SMonam Agarwal RCU_INIT_POINTER(md->map, NULL); 214383d5e5b0SMikulas Patocka dm_sync_table(md); 2144a7940155SAlasdair G Kergon 2145a7940155SAlasdair G Kergon return map; 21461da177e4SLinus Torvalds } 21471da177e4SLinus Torvalds 21481da177e4SLinus Torvalds /* 21491da177e4SLinus Torvalds * Constructor for a new device. 21501da177e4SLinus Torvalds */ 21512b06cfffSAlasdair G Kergon int dm_create(int minor, struct mapped_device **result) 21521da177e4SLinus Torvalds { 2153c12c9a3cSMike Snitzer int r; 21541da177e4SLinus Torvalds struct mapped_device *md; 21551da177e4SLinus Torvalds 21562b06cfffSAlasdair G Kergon md = alloc_dev(minor); 21571da177e4SLinus Torvalds if (!md) 21581da177e4SLinus Torvalds return -ENXIO; 21591da177e4SLinus Torvalds 2160c12c9a3cSMike Snitzer r = dm_sysfs_init(md); 2161c12c9a3cSMike Snitzer if (r) { 2162c12c9a3cSMike Snitzer free_dev(md); 2163c12c9a3cSMike Snitzer return r; 2164c12c9a3cSMike Snitzer } 2165784aae73SMilan Broz 21661da177e4SLinus Torvalds *result = md; 21671da177e4SLinus Torvalds return 0; 21681da177e4SLinus Torvalds } 21691da177e4SLinus Torvalds 2170a5664dadSMike Snitzer /* 2171a5664dadSMike Snitzer * Functions to manage md->type. 2172a5664dadSMike Snitzer * All are required to hold md->type_lock. 2173a5664dadSMike Snitzer */ 2174a5664dadSMike Snitzer void dm_lock_md_type(struct mapped_device *md) 2175a5664dadSMike Snitzer { 2176a5664dadSMike Snitzer mutex_lock(&md->type_lock); 2177a5664dadSMike Snitzer } 2178a5664dadSMike Snitzer 2179a5664dadSMike Snitzer void dm_unlock_md_type(struct mapped_device *md) 2180a5664dadSMike Snitzer { 2181a5664dadSMike Snitzer mutex_unlock(&md->type_lock); 2182a5664dadSMike Snitzer } 2183a5664dadSMike Snitzer 21847e0d574fSBart Van Assche void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type) 2185a5664dadSMike Snitzer { 218600c4fc3bSMike Snitzer BUG_ON(!mutex_is_locked(&md->type_lock)); 2187a5664dadSMike Snitzer md->type = type; 2188a5664dadSMike Snitzer } 2189a5664dadSMike Snitzer 21907e0d574fSBart Van Assche enum dm_queue_mode dm_get_md_type(struct mapped_device *md) 2191a5664dadSMike Snitzer { 2192a5664dadSMike Snitzer return md->type; 2193a5664dadSMike Snitzer } 2194a5664dadSMike Snitzer 219536a0456fSAlasdair G Kergon struct target_type *dm_get_immutable_target_type(struct mapped_device *md) 219636a0456fSAlasdair G Kergon { 219736a0456fSAlasdair G Kergon return md->immutable_target_type; 219836a0456fSAlasdair G Kergon } 219936a0456fSAlasdair G Kergon 22004a0b4ddfSMike Snitzer /* 2201f84cb8a4SMike Snitzer * The queue_limits are only valid as long as you have a reference 2202f84cb8a4SMike Snitzer * count on 'md'. 
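 * In other words, the caller must hold a reference (e.g. via dm_get()) for
 * as long as the returned pointer is used; the BUG_ON() below only catches
 * the case where no holders remain.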
2203f84cb8a4SMike Snitzer */ 2204f84cb8a4SMike Snitzer struct queue_limits *dm_get_queue_limits(struct mapped_device *md) 2205f84cb8a4SMike Snitzer { 2206f84cb8a4SMike Snitzer BUG_ON(!atomic_read(&md->holders)); 2207f84cb8a4SMike Snitzer return &md->queue->limits; 2208f84cb8a4SMike Snitzer } 2209f84cb8a4SMike Snitzer EXPORT_SYMBOL_GPL(dm_get_queue_limits); 2210f84cb8a4SMike Snitzer 22114a0b4ddfSMike Snitzer /* 22124a0b4ddfSMike Snitzer * Setup the DM device's queue based on md's type 22134a0b4ddfSMike Snitzer */ 2214591ddcfcSMike Snitzer int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) 22154a0b4ddfSMike Snitzer { 2216bfebd1cdSMike Snitzer int r; 2217c100ec49SMike Snitzer struct queue_limits limits; 22187e0d574fSBart Van Assche enum dm_queue_mode type = dm_get_md_type(md); 2219bfebd1cdSMike Snitzer 2220545ed20eSToshi Kani switch (type) { 2221bfebd1cdSMike Snitzer case DM_TYPE_REQUEST_BASED: 2222c12c9a3cSMike Snitzer dm_init_normal_md_queue(md); 2223eb8db831SChristoph Hellwig r = dm_old_init_request_queue(md, t); 2224bfebd1cdSMike Snitzer if (r) { 2225eca7ee6dSMike Snitzer DMERR("Cannot initialize queue for request-based mapped device"); 2226bfebd1cdSMike Snitzer return r; 22274a0b4ddfSMike Snitzer } 2228bfebd1cdSMike Snitzer break; 2229bfebd1cdSMike Snitzer case DM_TYPE_MQ_REQUEST_BASED: 2230e83068a5SMike Snitzer r = dm_mq_init_request_queue(md, t); 2231bfebd1cdSMike Snitzer if (r) { 2232eca7ee6dSMike Snitzer DMERR("Cannot initialize queue for request-based dm-mq mapped device"); 2233bfebd1cdSMike Snitzer return r; 2234bfebd1cdSMike Snitzer } 2235bfebd1cdSMike Snitzer break; 2236bfebd1cdSMike Snitzer case DM_TYPE_BIO_BASED: 2237545ed20eSToshi Kani case DM_TYPE_DAX_BIO_BASED: 2238eca7ee6dSMike Snitzer dm_init_normal_md_queue(md); 2239ff36ab34SMike Snitzer blk_queue_make_request(md->queue, dm_make_request); 2240bfebd1cdSMike Snitzer break; 2241978e51baSMike Snitzer case DM_TYPE_NVME_BIO_BASED: 2242978e51baSMike Snitzer dm_init_normal_md_queue(md); 2243978e51baSMike Snitzer blk_queue_make_request(md->queue, dm_make_request_nvme); 224473d410c0SMilan Broz break; 22457e0d574fSBart Van Assche case DM_TYPE_NONE: 22467e0d574fSBart Van Assche WARN_ON_ONCE(true); 22477e0d574fSBart Van Assche break; 22481da177e4SLinus Torvalds } 22491da177e4SLinus Torvalds 2250c100ec49SMike Snitzer r = dm_calculate_queue_limits(t, &limits); 2251c100ec49SMike Snitzer if (r) { 2252c100ec49SMike Snitzer DMERR("Cannot calculate initial queue limits"); 2253c100ec49SMike Snitzer return r; 2254c100ec49SMike Snitzer } 2255c100ec49SMike Snitzer dm_table_set_restrictions(t, md->queue, &limits); 2256c100ec49SMike Snitzer blk_register_queue(md->disk); 2257c100ec49SMike Snitzer 22581da177e4SLinus Torvalds return 0; 22591da177e4SLinus Torvalds } 22601da177e4SLinus Torvalds 22611da177e4SLinus Torvalds struct mapped_device *dm_get_md(dev_t dev) 22621da177e4SLinus Torvalds { 22631da177e4SLinus Torvalds struct mapped_device *md; 22641da177e4SLinus Torvalds unsigned minor = MINOR(dev); 22651da177e4SLinus Torvalds 22661da177e4SLinus Torvalds if (MAJOR(dev) != _major || minor >= (1 << MINORBITS)) 22671da177e4SLinus Torvalds return NULL; 22681da177e4SLinus Torvalds 22691da177e4SLinus Torvalds spin_lock(&_minor_lock); 22701da177e4SLinus Torvalds 22711da177e4SLinus Torvalds md = idr_find(&_minor_idr, minor); 227249de5769SMike Snitzer if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) || 227349de5769SMike Snitzer test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { 22741da177e4SLinus Torvalds md 
= NULL; 22751da177e4SLinus Torvalds goto out; 22761da177e4SLinus Torvalds } 22771da177e4SLinus Torvalds dm_get(md); 22781da177e4SLinus Torvalds out: 2279f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 22801da177e4SLinus Torvalds 22811da177e4SLinus Torvalds return md; 2282fba9f90eSJeff Mahoney } 2283fba9f90eSJeff Mahoney EXPORT_SYMBOL_GPL(dm_get_md); 2284fba9f90eSJeff Mahoney 2285637842cfSDavid Teigland void *dm_get_mdptr(struct mapped_device *md) 2286fba9f90eSJeff Mahoney { 2287fba9f90eSJeff Mahoney return md->interface_ptr; 22881da177e4SLinus Torvalds } 2289fba9f90eSJeff Mahoney 2290f32c10b0SJeff Mahoney void dm_set_mdptr(struct mapped_device *md, void *ptr) 22911da177e4SLinus Torvalds { 2292637842cfSDavid Teigland md->interface_ptr = ptr; 2293637842cfSDavid Teigland } 2294637842cfSDavid Teigland 2295d229a958SDavid Teigland void dm_get(struct mapped_device *md) 2296d229a958SDavid Teigland { 2297d229a958SDavid Teigland atomic_inc(&md->holders); 2298d229a958SDavid Teigland BUG_ON(test_bit(DMF_FREEING, &md->flags)); 2299d229a958SDavid Teigland } 2300d229a958SDavid Teigland 2301d229a958SDavid Teigland int dm_hold(struct mapped_device *md) 2302d229a958SDavid Teigland { 2303d229a958SDavid Teigland spin_lock(&_minor_lock); 2304d229a958SDavid Teigland if (test_bit(DMF_FREEING, &md->flags)) { 23059ade92a9SAlasdair G Kergon spin_unlock(&_minor_lock); 2306637842cfSDavid Teigland return -EBUSY; 23079ade92a9SAlasdair G Kergon } 23081da177e4SLinus Torvalds dm_get(md); 23091da177e4SLinus Torvalds spin_unlock(&_minor_lock); 23101da177e4SLinus Torvalds return 0; 23111da177e4SLinus Torvalds } 23121da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(dm_hold); 23131da177e4SLinus Torvalds 23141da177e4SLinus Torvalds const char *dm_device_name(struct mapped_device *md) 23151da177e4SLinus Torvalds { 23161da177e4SLinus Torvalds return md->name; 23171da177e4SLinus Torvalds } 23181da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(dm_device_name); 23191da177e4SLinus Torvalds 232072d94861SAlasdair G Kergon static void __dm_destroy(struct mapped_device *md, bool wait) 232172d94861SAlasdair G Kergon { 232272d94861SAlasdair G Kergon struct dm_table *map; 232372d94861SAlasdair G Kergon int srcu_idx; 232472d94861SAlasdair G Kergon 232572d94861SAlasdair G Kergon might_sleep(); 23261da177e4SLinus Torvalds 23271da177e4SLinus Torvalds spin_lock(&_minor_lock); 23281da177e4SLinus Torvalds idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); 23291da177e4SLinus Torvalds set_bit(DMF_FREEING, &md->flags); 23301da177e4SLinus Torvalds spin_unlock(&_minor_lock); 23311da177e4SLinus Torvalds 2332c12c9a3cSMike Snitzer blk_set_queue_dying(md->queue); 23333b785fbcSBart Van Assche 233402233342SMike Snitzer if (dm_request_based(md) && md->kworker_task) 23353989144fSPetr Mladek kthread_flush_worker(&md->kworker); 23361da177e4SLinus Torvalds 23371da177e4SLinus Torvalds /* 23381da177e4SLinus Torvalds * Take suspend_lock so that presuspend and postsuspend methods 23391da177e4SLinus Torvalds * do not race with internal suspend. 
23401da177e4SLinus Torvalds */ 23411da177e4SLinus Torvalds mutex_lock(&md->suspend_lock); 23422a708cffSJunichi Nomura map = dm_get_live_table(md, &srcu_idx); 23431da177e4SLinus Torvalds if (!dm_suspended_md(md)) { 23441da177e4SLinus Torvalds dm_table_presuspend_targets(map); 23451da177e4SLinus Torvalds dm_table_postsuspend_targets(map); 23461da177e4SLinus Torvalds } 23471da177e4SLinus Torvalds /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ 23481da177e4SLinus Torvalds dm_put_live_table(md, srcu_idx); 23492a708cffSJunichi Nomura mutex_unlock(&md->suspend_lock); 23501da177e4SLinus Torvalds 23511da177e4SLinus Torvalds /* 23521da177e4SLinus Torvalds * Rare, but there may be I/O requests still going to complete, 23531da177e4SLinus Torvalds * for example. Wait for all references to disappear. 23541da177e4SLinus Torvalds * No one should increment the reference count of the mapped_device, 23551da177e4SLinus Torvalds * after the mapped_device state becomes DMF_FREEING. 23561da177e4SLinus Torvalds */ 23571da177e4SLinus Torvalds if (wait) 23581da177e4SLinus Torvalds while (atomic_read(&md->holders)) 23591da177e4SLinus Torvalds msleep(1); 23601da177e4SLinus Torvalds else if (atomic_read(&md->holders)) 23611da177e4SLinus Torvalds DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)", 23621da177e4SLinus Torvalds dm_device_name(md), atomic_read(&md->holders)); 23631da177e4SLinus Torvalds 23641da177e4SLinus Torvalds dm_sysfs_exit(md); 23651da177e4SLinus Torvalds dm_table_destroy(__unbind(md)); 2366cf222b37SAlasdair G Kergon free_dev(md); 23671da177e4SLinus Torvalds } 23681da177e4SLinus Torvalds 23691da177e4SLinus Torvalds void dm_destroy(struct mapped_device *md) 23701da177e4SLinus Torvalds { 23711da177e4SLinus Torvalds __dm_destroy(md, true); 23721da177e4SLinus Torvalds } 23731da177e4SLinus Torvalds 23741da177e4SLinus Torvalds void dm_destroy_immediate(struct mapped_device *md) 23751da177e4SLinus Torvalds { 23761134e5aeSMike Anderson __dm_destroy(md, false); 23771da177e4SLinus Torvalds } 2378fba9f90eSJeff Mahoney 2379fba9f90eSJeff Mahoney void dm_put(struct mapped_device *md) 2380f32c10b0SJeff Mahoney { 23811134e5aeSMike Anderson atomic_dec(&md->holders); 2382ba61fdd1SJeff Mahoney } 2383fba9f90eSJeff Mahoney EXPORT_SYMBOL_GPL(dm_put); 2384f32c10b0SJeff Mahoney 2385b48633f8SBart Van Assche static int dm_wait_for_completion(struct mapped_device *md, long task_state) 23861da177e4SLinus Torvalds { 23871da177e4SLinus Torvalds int r = 0; 23889f4c3f87SBart Van Assche DEFINE_WAIT(wait); 23891da177e4SLinus Torvalds 23901da177e4SLinus Torvalds while (1) { 23919f4c3f87SBart Van Assche prepare_to_wait(&md->wait, &wait, task_state); 23921da177e4SLinus Torvalds 23931da177e4SLinus Torvalds if (!md_in_flight(md)) 23941da177e4SLinus Torvalds break; 23951da177e4SLinus Torvalds 2396e3fabdfdSBart Van Assche if (signal_pending_state(task_state, current)) { 23971da177e4SLinus Torvalds r = -EINTR; 23981da177e4SLinus Torvalds break; 23991da177e4SLinus Torvalds } 24001da177e4SLinus Torvalds 24011da177e4SLinus Torvalds io_schedule(); 24021da177e4SLinus Torvalds } 24039f4c3f87SBart Van Assche finish_wait(&md->wait, &wait); 24041da177e4SLinus Torvalds 24051da177e4SLinus Torvalds return r; 24061da177e4SLinus Torvalds } 24071da177e4SLinus Torvalds 24081da177e4SLinus Torvalds /* 240979eb885cSEdward Goggin * Process the deferred bios 24101da177e4SLinus Torvalds */ 24111da177e4SLinus Torvalds static void dm_wq_work(struct work_struct *work) 24121da177e4SLinus Torvalds { 24131da177e4SLinus 
24451da177e4SLinus Torvalds /*
2446042d2a9bSAlasdair G Kergon  * Swap in a new table, returning the old one for the caller to destroy.
24471da177e4SLinus Torvalds  */
2448042d2a9bSAlasdair G Kergon struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
24491da177e4SLinus Torvalds {
245087eb5b21SMike Christie 	struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
2451754c5fc7SMike Snitzer 	struct queue_limits limits;
2452042d2a9bSAlasdair G Kergon 	int r;
24531da177e4SLinus Torvalds 
24541da177e4SLinus Torvalds 	mutex_lock(&md->suspend_lock);
24551da177e4SLinus Torvalds 
245693c534aeSAlasdair G Kergon 	/* device must be suspended */
24574f186f8bSKiyoshi Ueda 	if (!dm_suspended_md(md))
24581da177e4SLinus Torvalds 		goto out;
24591da177e4SLinus Torvalds 
24603ae70656SMike Snitzer 	/*
24613ae70656SMike Snitzer 	 * If the new table has no data devices, retain the existing limits.
24623ae70656SMike Snitzer 	 * This helps multipath with queue_if_no_path: if all paths disappear,
24633ae70656SMike Snitzer 	 * new I/O is queued based on these limits until some paths
24643ae70656SMike Snitzer 	 * reappear.
24653ae70656SMike Snitzer 	 */
24663ae70656SMike Snitzer 	if (dm_table_has_no_data_devices(table)) {
246783d5e5b0SMikulas Patocka 		live_map = dm_get_live_table_fast(md);
24683ae70656SMike Snitzer 		if (live_map)
24693ae70656SMike Snitzer 			limits = md->queue->limits;
247083d5e5b0SMikulas Patocka 		dm_put_live_table_fast(md);
24713ae70656SMike Snitzer 	}
24723ae70656SMike Snitzer 
247387eb5b21SMike Christie 	if (!live_map) {
2474754c5fc7SMike Snitzer 		r = dm_calculate_queue_limits(table, &limits);
2475042d2a9bSAlasdair G Kergon 		if (r) {
2476042d2a9bSAlasdair G Kergon 			map = ERR_PTR(r);
2477754c5fc7SMike Snitzer 			goto out;
2478042d2a9bSAlasdair G Kergon 		}
247987eb5b21SMike Christie 	}
2480754c5fc7SMike Snitzer 
2481042d2a9bSAlasdair G Kergon 	map = __bind(md, table, &limits);
248262e08243SMikulas Patocka 	dm_issue_global_event();
24831da177e4SLinus Torvalds 
24841da177e4SLinus Torvalds out:
24851da177e4SLinus Torvalds 	mutex_unlock(&md->suspend_lock);
2486042d2a9bSAlasdair G Kergon 	return map;
24872ca3310eSAlasdair G Kergon }
24881da177e4SLinus Torvalds 
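/*
 * Typical caller sequence -- a sketch of roughly what the table-swap ioctl
 * path does, not a verbatim copy of it.  The device must already be
 * suspended, and the returned old table (when not an ERR_PTR and not NULL)
 * is the caller's to destroy:
 *
 *	old_map = dm_swap_table(md, new_table);
 *	if (IS_ERR(old_map))
 *		return PTR_ERR(old_map);
 *	dm_resume(md);
 *	if (old_map)
 *		dm_table_destroy(old_map);
 */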
2489cf222b37SAlasdair G Kergon /*
2490dfbe03f6SAlasdair G Kergon  * Functions to lock and unlock any filesystem running on the
2491d1782a3bSAlasdair G Kergon  * device.
2492d1782a3bSAlasdair G Kergon  */
24932ca3310eSAlasdair G Kergon static int lock_fs(struct mapped_device *md)
2494dfbe03f6SAlasdair G Kergon {
24951da177e4SLinus Torvalds 	int r;
24961da177e4SLinus Torvalds 
24971da177e4SLinus Torvalds 	WARN_ON(md->frozen_sb);
2498dfbe03f6SAlasdair G Kergon 
2499db8fef4fSMikulas Patocka 	md->frozen_sb = freeze_bdev(md->bdev);
2500dfbe03f6SAlasdair G Kergon 	if (IS_ERR(md->frozen_sb)) {
2501cf222b37SAlasdair G Kergon 		r = PTR_ERR(md->frozen_sb);
2502e39e2e95SAlasdair G Kergon 		md->frozen_sb = NULL;
2503e39e2e95SAlasdair G Kergon 		return r;
2504dfbe03f6SAlasdair G Kergon 	}
2505dfbe03f6SAlasdair G Kergon 
2506aa8d7c2fSAlasdair G Kergon 	set_bit(DMF_FROZEN, &md->flags);
2507aa8d7c2fSAlasdair G Kergon 
25081da177e4SLinus Torvalds 	return 0;
25091da177e4SLinus Torvalds }
25101da177e4SLinus Torvalds 
25112ca3310eSAlasdair G Kergon static void unlock_fs(struct mapped_device *md)
25121da177e4SLinus Torvalds {
2513aa8d7c2fSAlasdair G Kergon 	if (!test_bit(DMF_FROZEN, &md->flags))
2514aa8d7c2fSAlasdair G Kergon 		return;
2515aa8d7c2fSAlasdair G Kergon 
2516db8fef4fSMikulas Patocka 	thaw_bdev(md->bdev, md->frozen_sb);
25171da177e4SLinus Torvalds 	md->frozen_sb = NULL;
2518aa8d7c2fSAlasdair G Kergon 	clear_bit(DMF_FROZEN, &md->flags);
25191da177e4SLinus Torvalds }
25201da177e4SLinus Torvalds 
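/*
 * Note the pairing contract: lock_fs() sets DMF_FROZEN only on success,
 * and unlock_fs() is a no-op unless DMF_FROZEN is set.  Error paths may
 * therefore call unlock_fs() unconditionally, whether or not the freeze
 * ever happened.  Illustrative sketch:
 *
 *	if (do_lockfs && lock_fs(md))
 *		goto err;
 *	...
 * err:
 *	unlock_fs(md);	(safe even if lock_fs() failed or was skipped)
 */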
25211da177e4SLinus Torvalds /*
2522b48633f8SBart Van Assche  * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG
2523b48633f8SBart Van Assche  * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE
2524b48633f8SBart Van Assche  * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY
2525b48633f8SBart Van Assche  *
2526ffcc3936SMike Snitzer  * If __dm_suspend returns 0, the device is completely quiescent
2527ffcc3936SMike Snitzer  * now. There is no request-processing activity. All new requests
2528ffcc3936SMike Snitzer  * are being added to md->deferred list.
2529cec47e3dSKiyoshi Ueda  */
2530ffcc3936SMike Snitzer static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
2531b48633f8SBart Van Assche 			unsigned suspend_flags, long task_state,
2532eaf9a736SMike Snitzer 			int dmf_suspended_flag)
25331da177e4SLinus Torvalds {
2534ffcc3936SMike Snitzer 	bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
2535ffcc3936SMike Snitzer 	bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
2536ffcc3936SMike Snitzer 	int r;
2537cf222b37SAlasdair G Kergon 
25385a8f1f80SBart Van Assche 	lockdep_assert_held(&md->suspend_lock);
25395a8f1f80SBart Van Assche 
25402e93ccc1SKiyoshi Ueda 	/*
25412e93ccc1SKiyoshi Ueda 	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
25422e93ccc1SKiyoshi Ueda 	 * This flag is cleared before dm_suspend returns.
25432e93ccc1SKiyoshi Ueda 	 */
25442e93ccc1SKiyoshi Ueda 	if (noflush)
25452e93ccc1SKiyoshi Ueda 		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
254686331f39SBart Van Assche 	else
254786331f39SBart Van Assche 		pr_debug("%s: suspending with flush\n", dm_device_name(md));
25482e93ccc1SKiyoshi Ueda 
2549d67ee213SMike Snitzer 	/*
2550d67ee213SMike Snitzer 	 * This gets reverted if there's an error later and the targets
2551d67ee213SMike Snitzer 	 * provide the .presuspend_undo hook.
2552d67ee213SMike Snitzer 	 */
25531da177e4SLinus Torvalds 	dm_table_presuspend_targets(map);
25541da177e4SLinus Torvalds 
25552e93ccc1SKiyoshi Ueda 	/*
25569f518b27SKiyoshi Ueda 	 * Flush I/O to the device.
25579f518b27SKiyoshi Ueda 	 * Any I/O submitted after lock_fs() may not be flushed.
25589f518b27SKiyoshi Ueda 	 * noflush takes precedence over do_lockfs.
25599f518b27SKiyoshi Ueda 	 * (lock_fs() flushes I/Os and waits for them to complete.)
25602e93ccc1SKiyoshi Ueda 	 */
256132a926daSMikulas Patocka 	if (!noflush && do_lockfs) {
25622ca3310eSAlasdair G Kergon 		r = lock_fs(md);
2563d67ee213SMike Snitzer 		if (r) {
2564d67ee213SMike Snitzer 			dm_table_presuspend_undo_targets(map);
2565ffcc3936SMike Snitzer 			return r;
2566aa8d7c2fSAlasdair G Kergon 		}
2567d67ee213SMike Snitzer 	}
25681da177e4SLinus Torvalds 
25691da177e4SLinus Torvalds 	/*
25703b00b203SMikulas Patocka 	 * Here we must make sure that no processes are submitting requests
25713b00b203SMikulas Patocka 	 * to target drivers i.e. no one may be executing
25723b00b203SMikulas Patocka 	 * __split_and_process_bio. This is called from dm_request and
25733b00b203SMikulas Patocka 	 * dm_wq_work.
25743b00b203SMikulas Patocka 	 *
25753b00b203SMikulas Patocka 	 * To get all processes out of __split_and_process_bio in dm_request,
25763b00b203SMikulas Patocka 	 * we take the write lock. To prevent any process from reentering
25776a8736d1STejun Heo 	 * __split_and_process_bio from dm_request and quiesce the thread
25786a8736d1STejun Heo 	 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
25796a8736d1STejun Heo 	 * flush_workqueue(md->wq).
25801da177e4SLinus Torvalds 	 */
25811eb787ecSAlasdair G Kergon 	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
258241abc4e1SHannes Reinecke 	if (map)
258383d5e5b0SMikulas Patocka 		synchronize_srcu(&md->io_barrier);
25841da177e4SLinus Torvalds 
2585d0bcb878SKiyoshi Ueda 	/*
258629e4013dSTejun Heo 	 * Stop md->queue before flushing md->wq in case request-based
258729e4013dSTejun Heo 	 * dm defers requests to md->wq from md->queue.
2588d0bcb878SKiyoshi Ueda 	 */
25892eb6e1e3SKeith Busch 	if (dm_request_based(md)) {
2590eca7ee6dSMike Snitzer 		dm_stop_queue(md->queue);
259102233342SMike Snitzer 		if (md->kworker_task)
25923989144fSPetr Mladek 			kthread_flush_worker(&md->kworker);
25932eb6e1e3SKeith Busch 	}
2594cec47e3dSKiyoshi Ueda 
2595d0bcb878SKiyoshi Ueda 	flush_workqueue(md->wq);
2596d0bcb878SKiyoshi Ueda 
25971da177e4SLinus Torvalds 	/*
25983b00b203SMikulas Patocka 	 * At this point no more requests are entering target request routines.
25993b00b203SMikulas Patocka 	 * We call dm_wait_for_completion to wait for all existing requests
26003b00b203SMikulas Patocka 	 * to finish.
26011da177e4SLinus Torvalds 	 */
2602b48633f8SBart Van Assche 	r = dm_wait_for_completion(md, task_state);
2603eaf9a736SMike Snitzer 	if (!r)
2604eaf9a736SMike Snitzer 		set_bit(dmf_suspended_flag, &md->flags);
26051da177e4SLinus Torvalds 
26066d6f10dfSMilan Broz 	if (noflush)
2607022c2611SMikulas Patocka 		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
260841abc4e1SHannes Reinecke 	if (map)
260983d5e5b0SMikulas Patocka 		synchronize_srcu(&md->io_barrier);
26102e93ccc1SKiyoshi Ueda 
26111da177e4SLinus Torvalds 	/* were we interrupted? */
261246125c1cSMilan Broz 	if (r < 0) {
26139a1fb464SMikulas Patocka 		dm_queue_flush(md);
261473d410c0SMilan Broz 
2615cec47e3dSKiyoshi Ueda 		if (dm_request_based(md))
2616eca7ee6dSMike Snitzer 			dm_start_queue(md->queue);
2617cec47e3dSKiyoshi Ueda 
26182ca3310eSAlasdair G Kergon 		unlock_fs(md);
2619d67ee213SMike Snitzer 		dm_table_presuspend_undo_targets(map);
2620ffcc3936SMike Snitzer 		/* pushback list is already flushed, so skip flush */
2621ffcc3936SMike Snitzer 	}
2622ffcc3936SMike Snitzer 
2623ffcc3936SMike Snitzer 	return r;
26242ca3310eSAlasdair G Kergon }
26252ca3310eSAlasdair G Kergon 
26263b00b203SMikulas Patocka /*
2627ffcc3936SMike Snitzer  * We need to be able to change a mapping table under a mounted
2628ffcc3936SMike Snitzer  * filesystem. For example we might want to move some data in
2629ffcc3936SMike Snitzer  * the background. Before the table can be swapped with
2630ffcc3936SMike Snitzer  * dm_bind_table, dm_suspend must be called to flush any in-flight
2631ffcc3936SMike Snitzer  * bios and ensure that any further I/O gets deferred.
26323b00b203SMikulas Patocka  */
2633ffcc3936SMike Snitzer /*
2634ffcc3936SMike Snitzer  * Suspend mechanism in request-based dm.
2635ffcc3936SMike Snitzer  *
2636ffcc3936SMike Snitzer  * 1. Flush all I/Os by lock_fs() if needed.
2637ffcc3936SMike Snitzer  * 2. Stop dispatching any I/O by stopping the request_queue.
2638ffcc3936SMike Snitzer  * 3. Wait for all in-flight I/Os to be completed or requeued.
2639ffcc3936SMike Snitzer  *
2640ffcc3936SMike Snitzer  * To abort suspend, start the request_queue.
2641ffcc3936SMike Snitzer  */
2642ffcc3936SMike Snitzer int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
2643ffcc3936SMike Snitzer {
2644ffcc3936SMike Snitzer 	struct dm_table *map = NULL;
2645ffcc3936SMike Snitzer 	int r = 0;
2646ffcc3936SMike Snitzer 
2647ffcc3936SMike Snitzer retry:
2648ffcc3936SMike Snitzer 	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
2649ffcc3936SMike Snitzer 
2650ffcc3936SMike Snitzer 	if (dm_suspended_md(md)) {
2651ffcc3936SMike Snitzer 		r = -EINVAL;
2652ffcc3936SMike Snitzer 		goto out_unlock;
2653ffcc3936SMike Snitzer 	}
2654ffcc3936SMike Snitzer 
2655ffcc3936SMike Snitzer 	if (dm_suspended_internally_md(md)) {
2656ffcc3936SMike Snitzer 		/* already internally suspended, wait for internal resume */
2657ffcc3936SMike Snitzer 		mutex_unlock(&md->suspend_lock);
2658ffcc3936SMike Snitzer 		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
2659ffcc3936SMike Snitzer 		if (r)
2660ffcc3936SMike Snitzer 			return r;
2661ffcc3936SMike Snitzer 		goto retry;
2662ffcc3936SMike Snitzer 	}
2663ffcc3936SMike Snitzer 
2664a12f5d48SEric Dumazet 	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2665ffcc3936SMike Snitzer 
2666eaf9a736SMike Snitzer 	r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);
2667ffcc3936SMike Snitzer 	if (r)
2668ffcc3936SMike Snitzer 		goto out_unlock;
26693b00b203SMikulas Patocka 
26704d4471cbSKiyoshi Ueda 	dm_table_postsuspend_targets(map);
26714d4471cbSKiyoshi Ueda 
2672d287483dSAlasdair G Kergon out_unlock:
2673e61290a4SDaniel Walker 	mutex_unlock(&md->suspend_lock);
2674cf222b37SAlasdair G Kergon 	return r;
26751da177e4SLinus Torvalds }
26761da177e4SLinus Torvalds 
2677ffcc3936SMike Snitzer static int __dm_resume(struct mapped_device *md, struct dm_table *map)
26781da177e4SLinus Torvalds {
2679ffcc3936SMike Snitzer 	if (map) {
2680ffcc3936SMike Snitzer 		int r = dm_table_resume_targets(map);
26818757b776SMilan Broz 		if (r)
2682ffcc3936SMike Snitzer 			return r;
2683ffcc3936SMike Snitzer 	}
26842ca3310eSAlasdair G Kergon 
26859a1fb464SMikulas Patocka 	dm_queue_flush(md);
26862ca3310eSAlasdair G Kergon 
2687cec47e3dSKiyoshi Ueda 	/*
2688cec47e3dSKiyoshi Ueda 	 * Flushing deferred I/Os must be done after targets are resumed
2689cec47e3dSKiyoshi Ueda 	 * so that mapping of targets can work correctly.
2690cec47e3dSKiyoshi Ueda 	 * Request-based dm is queueing the deferred I/Os in its request_queue.
2691cec47e3dSKiyoshi Ueda 	 */
2692cec47e3dSKiyoshi Ueda 	if (dm_request_based(md))
2693eca7ee6dSMike Snitzer 		dm_start_queue(md->queue);
2694cec47e3dSKiyoshi Ueda 
26952ca3310eSAlasdair G Kergon 	unlock_fs(md);
26962ca3310eSAlasdair G Kergon 
2697ffcc3936SMike Snitzer 	return 0;
2698ffcc3936SMike Snitzer }
2699ffcc3936SMike Snitzer 
2700ffcc3936SMike Snitzer int dm_resume(struct mapped_device *md)
2701ffcc3936SMike Snitzer {
27028dc23658SMinfei Huang 	int r;
2703ffcc3936SMike Snitzer 	struct dm_table *map = NULL;
2704ffcc3936SMike Snitzer 
2705ffcc3936SMike Snitzer retry:
27068dc23658SMinfei Huang 	r = -EINVAL;
2707ffcc3936SMike Snitzer 	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
2708ffcc3936SMike Snitzer 
2709ffcc3936SMike Snitzer 	if (!dm_suspended_md(md))
2710ffcc3936SMike Snitzer 		goto out;
2711ffcc3936SMike Snitzer 
2712ffcc3936SMike Snitzer 	if (dm_suspended_internally_md(md)) {
2713ffcc3936SMike Snitzer 		/* already internally suspended, wait for internal resume */
2714ffcc3936SMike Snitzer 		mutex_unlock(&md->suspend_lock);
2715ffcc3936SMike Snitzer 		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
2716ffcc3936SMike Snitzer 		if (r)
2717ffcc3936SMike Snitzer 			return r;
2718ffcc3936SMike Snitzer 		goto retry;
2719ffcc3936SMike Snitzer 	}
2720ffcc3936SMike Snitzer 
2721a12f5d48SEric Dumazet 	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2722ffcc3936SMike Snitzer 	if (!map || !dm_table_get_size(map))
2723ffcc3936SMike Snitzer 		goto out;
2724ffcc3936SMike Snitzer 
2725ffcc3936SMike Snitzer 	r = __dm_resume(md, map);
2726ffcc3936SMike Snitzer 	if (r)
2727ffcc3936SMike Snitzer 		goto out;
2728ffcc3936SMike Snitzer 
27292ca3310eSAlasdair G Kergon 	clear_bit(DMF_SUSPENDED, &md->flags);
2730cf222b37SAlasdair G Kergon out:
2731e61290a4SDaniel Walker 	mutex_unlock(&md->suspend_lock);
27322ca3310eSAlasdair G Kergon 
2733cf222b37SAlasdair G Kergon 	return r;
27341da177e4SLinus Torvalds }
27351da177e4SLinus Torvalds 
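/*
 * Example of the exported suspend/resume pair from a hypothetical in-kernel
 * caller (userspace normally drives this via the device-mapper ioctls).
 * Illustrative sketch; my_reconfigure() is made up:
 *
 *	int r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
 *	if (r)
 *		return r;
 *	my_reconfigure(md);	(device is quiescent here)
 *	r = dm_resume(md);
 */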
2736fd2ed4d2SMikulas Patocka /*
2737fd2ed4d2SMikulas Patocka  * Internal suspend/resume works like userspace-driven suspend. It waits
2738fd2ed4d2SMikulas Patocka  * until all bios finish and prevents issuing new bios to the target drivers.
2739fd2ed4d2SMikulas Patocka  * It may be used only from the kernel.
2740fd2ed4d2SMikulas Patocka  */
2741fd2ed4d2SMikulas Patocka 
2742ffcc3936SMike Snitzer static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags)
2743ffcc3936SMike Snitzer {
2744ffcc3936SMike Snitzer 	struct dm_table *map = NULL;
2745ffcc3936SMike Snitzer 
27461ea0654eSBart Van Assche 	lockdep_assert_held(&md->suspend_lock);
27471ea0654eSBart Van Assche 
274896b26c8cSMikulas Patocka 	if (md->internal_suspend_count++)
2749ffcc3936SMike Snitzer 		return; /* nested internal suspend */
2750ffcc3936SMike Snitzer 
2751ffcc3936SMike Snitzer 	if (dm_suspended_md(md)) {
2752ffcc3936SMike Snitzer 		set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2753ffcc3936SMike Snitzer 		return; /* nest suspend */
2754ffcc3936SMike Snitzer 	}
2755ffcc3936SMike Snitzer 
2756a12f5d48SEric Dumazet 	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2757ffcc3936SMike Snitzer 
2758ffcc3936SMike Snitzer 	/*
2759ffcc3936SMike Snitzer 	 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is
2760ffcc3936SMike Snitzer 	 * supported.  Properly supporting a TASK_INTERRUPTIBLE internal suspend
2761ffcc3936SMike Snitzer 	 * would require changing .presuspend to return an error -- avoid this
2762ffcc3936SMike Snitzer 	 * until there is a need for more elaborate variants of internal suspend.
2763ffcc3936SMike Snitzer 	 */
2764eaf9a736SMike Snitzer 	(void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
2765eaf9a736SMike Snitzer 			    DMF_SUSPENDED_INTERNALLY);
2766ffcc3936SMike Snitzer 
2767ffcc3936SMike Snitzer 	dm_table_postsuspend_targets(map);
2768ffcc3936SMike Snitzer }
2769ffcc3936SMike Snitzer 
2770ffcc3936SMike Snitzer static void __dm_internal_resume(struct mapped_device *md)
2771ffcc3936SMike Snitzer {
277296b26c8cSMikulas Patocka 	BUG_ON(!md->internal_suspend_count);
277396b26c8cSMikulas Patocka 
277496b26c8cSMikulas Patocka 	if (--md->internal_suspend_count)
2775ffcc3936SMike Snitzer 		return; /* resume from nested internal suspend */
2776ffcc3936SMike Snitzer 
2777ffcc3936SMike Snitzer 	if (dm_suspended_md(md))
2778ffcc3936SMike Snitzer 		goto done; /* resume from nested suspend */
2779ffcc3936SMike Snitzer 
2780ffcc3936SMike Snitzer 	/*
2781ffcc3936SMike Snitzer 	 * NOTE: existing callers don't need to call dm_table_resume_targets
2782ffcc3936SMike Snitzer 	 * (which may fail -- so best to avoid it for now by passing NULL map)
2783ffcc3936SMike Snitzer 	 */
2784ffcc3936SMike Snitzer 	(void) __dm_resume(md, NULL);
2785ffcc3936SMike Snitzer 
2786ffcc3936SMike Snitzer done:
2787ffcc3936SMike Snitzer 	clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2788ffcc3936SMike Snitzer 	smp_mb__after_atomic();
2789ffcc3936SMike Snitzer 	wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY);
2790ffcc3936SMike Snitzer }
2791ffcc3936SMike Snitzer 
2792ffcc3936SMike Snitzer void dm_internal_suspend_noflush(struct mapped_device *md)
2793fd2ed4d2SMikulas Patocka {
2794fd2ed4d2SMikulas Patocka 	mutex_lock(&md->suspend_lock);
2795ffcc3936SMike Snitzer 	__dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
2796ffcc3936SMike Snitzer 	mutex_unlock(&md->suspend_lock);
2797ffcc3936SMike Snitzer }
2798ffcc3936SMike Snitzer EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush);
2799ffcc3936SMike Snitzer 
2800ffcc3936SMike Snitzer void dm_internal_resume(struct mapped_device *md)
2801ffcc3936SMike Snitzer {
2802ffcc3936SMike Snitzer 	mutex_lock(&md->suspend_lock);
2803ffcc3936SMike Snitzer 	__dm_internal_resume(md);
2804ffcc3936SMike Snitzer 	mutex_unlock(&md->suspend_lock);
2805ffcc3936SMike Snitzer }
2806ffcc3936SMike Snitzer EXPORT_SYMBOL_GPL(dm_internal_resume);
2807ffcc3936SMike Snitzer 
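/*
 * In-kernel users pair these as quiescing brackets around work that must
 * not race with I/O.  Illustrative sketch; my_update_metadata() is made up:
 *
 *	dm_internal_suspend_noflush(md);
 *	my_update_metadata(md);
 *	dm_internal_resume(md);
 *
 * Nesting is allowed: md->internal_suspend_count ensures only the outermost
 * pair actually suspends and resumes the device.
 */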
2808ffcc3936SMike Snitzer /*
2809ffcc3936SMike Snitzer  * Fast variants of internal suspend/resume hold md->suspend_lock,
2810ffcc3936SMike Snitzer  * which prevents interaction with userspace-driven suspend.
2811ffcc3936SMike Snitzer  */
2812ffcc3936SMike Snitzer 
2813ffcc3936SMike Snitzer void dm_internal_suspend_fast(struct mapped_device *md)
2814ffcc3936SMike Snitzer {
2815ffcc3936SMike Snitzer 	mutex_lock(&md->suspend_lock);
2816ffcc3936SMike Snitzer 	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
2817fd2ed4d2SMikulas Patocka 		return;
2818fd2ed4d2SMikulas Patocka 
2819fd2ed4d2SMikulas Patocka 	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2820fd2ed4d2SMikulas Patocka 	synchronize_srcu(&md->io_barrier);
2821fd2ed4d2SMikulas Patocka 	flush_workqueue(md->wq);
2822fd2ed4d2SMikulas Patocka 	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
2823fd2ed4d2SMikulas Patocka }
2824b735fedeSMikulas Patocka EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);
2825fd2ed4d2SMikulas Patocka 
2826ffcc3936SMike Snitzer void dm_internal_resume_fast(struct mapped_device *md)
2827fd2ed4d2SMikulas Patocka {
2828ffcc3936SMike Snitzer 	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
2829fd2ed4d2SMikulas Patocka 		goto done;
2830fd2ed4d2SMikulas Patocka 
2831fd2ed4d2SMikulas Patocka 	dm_queue_flush(md);
2832fd2ed4d2SMikulas Patocka 
2833fd2ed4d2SMikulas Patocka done:
2834fd2ed4d2SMikulas Patocka 	mutex_unlock(&md->suspend_lock);
2835fd2ed4d2SMikulas Patocka }
2836b735fedeSMikulas Patocka EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
2837fd2ed4d2SMikulas Patocka 
28381da177e4SLinus Torvalds /*-----------------------------------------------------------------
28391da177e4SLinus Torvalds  * Event notification.
28401da177e4SLinus Torvalds  *---------------------------------------------------------------*/
28413abf85b5SPeter Rajnoha int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
284260935eb2SMilan Broz 		      unsigned cookie)
284369267a30SAlasdair G Kergon {
284460935eb2SMilan Broz 	char udev_cookie[DM_COOKIE_LENGTH];
284560935eb2SMilan Broz 	char *envp[] = { udev_cookie, NULL };
284660935eb2SMilan Broz 
284760935eb2SMilan Broz 	if (!cookie)
28483abf85b5SPeter Rajnoha 		return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
284960935eb2SMilan Broz 	else {
285060935eb2SMilan Broz 		snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
285160935eb2SMilan Broz 			 DM_COOKIE_ENV_VAR_NAME, cookie);
28523abf85b5SPeter Rajnoha 		return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
28533abf85b5SPeter Rajnoha 					  action, envp);
285460935eb2SMilan Broz 	}
285569267a30SAlasdair G Kergon }
285669267a30SAlasdair G Kergon 
28577a8c3d3bSMike Anderson uint32_t dm_next_uevent_seq(struct mapped_device *md)
28587a8c3d3bSMike Anderson {
28597a8c3d3bSMike Anderson 	return atomic_add_return(1, &md->uevent_seq);
28607a8c3d3bSMike Anderson }
28617a8c3d3bSMike Anderson 
28621da177e4SLinus Torvalds uint32_t dm_get_event_nr(struct mapped_device *md)
28631da177e4SLinus Torvalds {
28641da177e4SLinus Torvalds 	return atomic_read(&md->event_nr);
28651da177e4SLinus Torvalds }
28661da177e4SLinus Torvalds 
28671da177e4SLinus Torvalds int dm_wait_event(struct mapped_device *md, int event_nr)
28681da177e4SLinus Torvalds {
28691da177e4SLinus Torvalds 	return wait_event_interruptible(md->eventq,
28701da177e4SLinus Torvalds 			(event_nr != atomic_read(&md->event_nr)));
28711da177e4SLinus Torvalds }
28721da177e4SLinus Torvalds 
28737a8c3d3bSMike Anderson void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
28747a8c3d3bSMike Anderson {
28757a8c3d3bSMike Anderson 	unsigned long flags;
28767a8c3d3bSMike Anderson 
28777a8c3d3bSMike Anderson 	spin_lock_irqsave(&md->uevent_lock, flags);
28787a8c3d3bSMike Anderson 	list_add(elist, &md->uevent_list);
28797a8c3d3bSMike Anderson 	spin_unlock_irqrestore(&md->uevent_lock, flags);
28807a8c3d3bSMike Anderson }
28817a8c3d3bSMike Anderson 
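/*
 * Event consumers sample the counter with dm_get_event_nr() and block in
 * dm_wait_event() until it moves.  Illustrative sketch of the polling
 * pattern (hypothetical caller):
 *
 *	uint32_t nr = dm_get_event_nr(md);
 *	...
 *	if (dm_wait_event(md, nr))
 *		return -ERESTARTSYS;	(interrupted by a signal)
 *	handle the new event(s)
 */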
28821da177e4SLinus Torvalds /*
28831da177e4SLinus Torvalds  * The gendisk is only valid as long as you have a reference
28841da177e4SLinus Torvalds  * count on 'md'.
28851da177e4SLinus Torvalds  */
28861da177e4SLinus Torvalds struct gendisk *dm_disk(struct mapped_device *md)
28871da177e4SLinus Torvalds {
28881da177e4SLinus Torvalds 	return md->disk;
28891da177e4SLinus Torvalds }
289065ff5b7dSSami Tolvanen EXPORT_SYMBOL_GPL(dm_disk);
28911da177e4SLinus Torvalds 
2892784aae73SMilan Broz struct kobject *dm_kobject(struct mapped_device *md)
2893784aae73SMilan Broz {
28942995fa78SMikulas Patocka 	return &md->kobj_holder.kobj;
2895784aae73SMilan Broz }
2896784aae73SMilan Broz 
2897784aae73SMilan Broz struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
2898784aae73SMilan Broz {
2899784aae73SMilan Broz 	struct mapped_device *md;
2900784aae73SMilan Broz 
29012995fa78SMikulas Patocka 	md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
2902784aae73SMilan Broz 
2903b9a41d21SHou Tao 	spin_lock(&_minor_lock);
2904b9a41d21SHou Tao 	if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
2905b9a41d21SHou Tao 		md = NULL;
2906b9a41d21SHou Tao 		goto out;
2907b9a41d21SHou Tao 	}
2908784aae73SMilan Broz 	dm_get(md);
2909b9a41d21SHou Tao out:
2910b9a41d21SHou Tao 	spin_unlock(&_minor_lock);
2911b9a41d21SHou Tao 
2912784aae73SMilan Broz 	return md;
2913784aae73SMilan Broz }
2914784aae73SMilan Broz 
29154f186f8bSKiyoshi Ueda int dm_suspended_md(struct mapped_device *md)
29161da177e4SLinus Torvalds {
29171da177e4SLinus Torvalds 	return test_bit(DMF_SUSPENDED, &md->flags);
29181da177e4SLinus Torvalds }
29191da177e4SLinus Torvalds 
2920ffcc3936SMike Snitzer int dm_suspended_internally_md(struct mapped_device *md)
2921ffcc3936SMike Snitzer {
2922ffcc3936SMike Snitzer 	return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2923ffcc3936SMike Snitzer }
2924ffcc3936SMike Snitzer 
29252c140a24SMikulas Patocka int dm_test_deferred_remove_flag(struct mapped_device *md)
29262c140a24SMikulas Patocka {
29272c140a24SMikulas Patocka 	return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
29282c140a24SMikulas Patocka }
29292c140a24SMikulas Patocka 
293064dbce58SKiyoshi Ueda int dm_suspended(struct dm_target *ti)
293164dbce58SKiyoshi Ueda {
2932ecdb2e25SKiyoshi Ueda 	return dm_suspended_md(dm_table_get_md(ti->table));
293364dbce58SKiyoshi Ueda }
293464dbce58SKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_suspended);
293564dbce58SKiyoshi Ueda 
29362e93ccc1SKiyoshi Ueda int dm_noflush_suspending(struct dm_target *ti)
29372e93ccc1SKiyoshi Ueda {
2938ecdb2e25SKiyoshi Ueda 	return __noflush_suspending(dm_table_get_md(ti->table));
29392e93ccc1SKiyoshi Ueda }
29402e93ccc1SKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_noflush_suspending);
29412e93ccc1SKiyoshi Ueda 
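/*
 * Targets typically consult dm_noflush_suspending() on their error path:
 * during a noflush suspend, failed I/O should be requeued rather than
 * completed with an error.  Illustrative sketch of a target's end_io hook
 * (the multipath target does something similar):
 *
 *	static int my_end_io(struct dm_target *ti, struct bio *bio,
 *			     blk_status_t *error)
 *	{
 *		if (*error && dm_noflush_suspending(ti))
 *			return DM_ENDIO_REQUEUE;
 *		return DM_ENDIO_DONE;
 *	}
 */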
29427e0d574fSBart Van Assche struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
29430776aa0eSMike Snitzer 					    unsigned integrity, unsigned per_io_data_size,
29440776aa0eSMike Snitzer 					    unsigned min_pool_size)
2945e6ee8c0bSKiyoshi Ueda {
2946115485e8SMike Snitzer 	struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
294778d8e58aSMike Snitzer 	unsigned int pool_size = 0;
294864f52b0eSMike Snitzer 	unsigned int front_pad, io_front_pad;
29496f1c819cSKent Overstreet 	int ret;
2950e6ee8c0bSKiyoshi Ueda 
2951e6ee8c0bSKiyoshi Ueda 	if (!pools)
29524e6e36c3SMike Snitzer 		return NULL;
2953e6ee8c0bSKiyoshi Ueda 
295478d8e58aSMike Snitzer 	switch (type) {
295578d8e58aSMike Snitzer 	case DM_TYPE_BIO_BASED:
2956545ed20eSToshi Kani 	case DM_TYPE_DAX_BIO_BASED:
295722c11858SMike Snitzer 	case DM_TYPE_NVME_BIO_BASED:
29580776aa0eSMike Snitzer 		pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
295930187e1dSMike Snitzer 		front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
296064f52b0eSMike Snitzer 		io_front_pad = roundup(front_pad, __alignof__(struct dm_io)) + offsetof(struct dm_io, tio);
29616f1c819cSKent Overstreet 		ret = bioset_init(&pools->io_bs, pool_size, io_front_pad, 0);
29626f1c819cSKent Overstreet 		if (ret)
296364f52b0eSMike Snitzer 			goto out;
29646f1c819cSKent Overstreet 		if (integrity && bioset_integrity_create(&pools->io_bs, pool_size))
2965eb8db831SChristoph Hellwig 			goto out;
296678d8e58aSMike Snitzer 		break;
296778d8e58aSMike Snitzer 	case DM_TYPE_REQUEST_BASED:
296878d8e58aSMike Snitzer 	case DM_TYPE_MQ_REQUEST_BASED:
29690776aa0eSMike Snitzer 		pool_size = max(dm_get_reserved_rq_based_ios(), min_pool_size);
297078d8e58aSMike Snitzer 		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
2971591ddcfcSMike Snitzer 		/* per_io_data_size is used for blk-mq pdu at queue allocation */
297278d8e58aSMike Snitzer 		break;
297378d8e58aSMike Snitzer 	default:
297478d8e58aSMike Snitzer 		BUG();
297578d8e58aSMike Snitzer 	}
297678d8e58aSMike Snitzer 
29776f1c819cSKent Overstreet 	ret = bioset_init(&pools->bs, pool_size, front_pad, 0);
29786f1c819cSKent Overstreet 	if (ret)
29795f015204SJun'ichi Nomura 		goto out;
2980e6ee8c0bSKiyoshi Ueda 
29816f1c819cSKent Overstreet 	if (integrity && bioset_integrity_create(&pools->bs, pool_size))
29825f015204SJun'ichi Nomura 		goto out;
2983a91a2785SMartin K. Petersen 
2984e6ee8c0bSKiyoshi Ueda 	return pools;
298578d8e58aSMike Snitzer 
29865f015204SJun'ichi Nomura out:
29875f015204SJun'ichi Nomura 	dm_free_md_mempools(pools);
2988e6ee8c0bSKiyoshi Ueda 
29894e6e36c3SMike Snitzer 	return NULL;
2990e6ee8c0bSKiyoshi Ueda }
2991e6ee8c0bSKiyoshi Ueda 
2992e6ee8c0bSKiyoshi Ueda void dm_free_md_mempools(struct dm_md_mempools *pools)
2993e6ee8c0bSKiyoshi Ueda {
2994e6ee8c0bSKiyoshi Ueda 	if (!pools)
2995e6ee8c0bSKiyoshi Ueda 		return;
2996e6ee8c0bSKiyoshi Ueda 
29976f1c819cSKent Overstreet 	bioset_exit(&pools->bs);
29986f1c819cSKent Overstreet 	bioset_exit(&pools->io_bs);
2999e6ee8c0bSKiyoshi Ueda 
3000e6ee8c0bSKiyoshi Ueda 	kfree(pools);
3001e6ee8c0bSKiyoshi Ueda }
3002e6ee8c0bSKiyoshi Ueda 
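/*
 * Layout implied by the bio-based front_pad arithmetic above: memory
 * handed out by these biosets places the bio after the target's per-I/O
 * data and the dm clone bookkeeping, which is what dm_per_bio_data()
 * relies on when it walks backwards from the bio.  Schematic only (the
 * roundup() terms merely add alignment padding):
 *
 *	bs:    [per_io_data][dm_target_io ... clone (struct bio)]
 *	io_bs: [per_io_data][dm_io ... tio ... clone (struct bio)]
 */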
30039c72bad1SChristoph Hellwig struct dm_pr {
30049c72bad1SChristoph Hellwig 	u64	old_key;
30059c72bad1SChristoph Hellwig 	u64	new_key;
30069c72bad1SChristoph Hellwig 	u32	flags;
30079c72bad1SChristoph Hellwig 	bool	fail_early;
30089c72bad1SChristoph Hellwig };
30099c72bad1SChristoph Hellwig 
30109c72bad1SChristoph Hellwig static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn,
30119c72bad1SChristoph Hellwig 		      void *data)
30129c72bad1SChristoph Hellwig {
30139c72bad1SChristoph Hellwig 	struct mapped_device *md = bdev->bd_disk->private_data;
30149c72bad1SChristoph Hellwig 	struct dm_table *table;
30159c72bad1SChristoph Hellwig 	struct dm_target *ti;
30169c72bad1SChristoph Hellwig 	int ret = -ENOTTY, srcu_idx;
30179c72bad1SChristoph Hellwig 
30189c72bad1SChristoph Hellwig 	table = dm_get_live_table(md, &srcu_idx);
30199c72bad1SChristoph Hellwig 	if (!table || !dm_table_get_size(table))
30209c72bad1SChristoph Hellwig 		goto out;
30219c72bad1SChristoph Hellwig 
30229c72bad1SChristoph Hellwig 	/* We only support devices that have a single target */
30239c72bad1SChristoph Hellwig 	if (dm_table_get_num_targets(table) != 1)
30249c72bad1SChristoph Hellwig 		goto out;
30259c72bad1SChristoph Hellwig 	ti = dm_table_get_target(table, 0);
30269c72bad1SChristoph Hellwig 
30279c72bad1SChristoph Hellwig 	ret = -EINVAL;
30289c72bad1SChristoph Hellwig 	if (!ti->type->iterate_devices)
30299c72bad1SChristoph Hellwig 		goto out;
30309c72bad1SChristoph Hellwig 
30319c72bad1SChristoph Hellwig 	ret = ti->type->iterate_devices(ti, fn, data);
30329c72bad1SChristoph Hellwig out:
30339c72bad1SChristoph Hellwig 	dm_put_live_table(md, srcu_idx);
30349c72bad1SChristoph Hellwig 	return ret;
30359c72bad1SChristoph Hellwig }
30369c72bad1SChristoph Hellwig 
30379c72bad1SChristoph Hellwig /*
30389c72bad1SChristoph Hellwig  * For register / unregister we need to manually call out to every path.
30399c72bad1SChristoph Hellwig  */
30409c72bad1SChristoph Hellwig static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev,
30419c72bad1SChristoph Hellwig 			    sector_t start, sector_t len, void *data)
30429c72bad1SChristoph Hellwig {
30439c72bad1SChristoph Hellwig 	struct dm_pr *pr = data;
30449c72bad1SChristoph Hellwig 	const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
30459c72bad1SChristoph Hellwig 
30469c72bad1SChristoph Hellwig 	if (!ops || !ops->pr_register)
30479c72bad1SChristoph Hellwig 		return -EOPNOTSUPP;
30489c72bad1SChristoph Hellwig 	return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags);
30499c72bad1SChristoph Hellwig }
30509c72bad1SChristoph Hellwig 
305171cdb697SChristoph Hellwig static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
305271cdb697SChristoph Hellwig 			  u32 flags)
305371cdb697SChristoph Hellwig {
30549c72bad1SChristoph Hellwig 	struct dm_pr pr = {
30559c72bad1SChristoph Hellwig 		.old_key	= old_key,
30569c72bad1SChristoph Hellwig 		.new_key	= new_key,
30579c72bad1SChristoph Hellwig 		.flags		= flags,
30589c72bad1SChristoph Hellwig 		.fail_early	= true,
30599c72bad1SChristoph Hellwig 	};
30609c72bad1SChristoph Hellwig 	int ret;
306171cdb697SChristoph Hellwig 
30629c72bad1SChristoph Hellwig 	ret = dm_call_pr(bdev, __dm_pr_register, &pr);
30639c72bad1SChristoph Hellwig 	if (ret && new_key) {
30649c72bad1SChristoph Hellwig 		/* unregister all paths if we failed to register any path */
30659c72bad1SChristoph Hellwig 		pr.old_key = new_key;
30669c72bad1SChristoph Hellwig 		pr.new_key = 0;
30679c72bad1SChristoph Hellwig 		pr.flags = 0;
30689c72bad1SChristoph Hellwig 		pr.fail_early = false;
30699c72bad1SChristoph Hellwig 		dm_call_pr(bdev, __dm_pr_register, &pr);
30709c72bad1SChristoph Hellwig 	}
307171cdb697SChristoph Hellwig 
30729c72bad1SChristoph Hellwig 	return ret;
307371cdb697SChristoph Hellwig }
307471cdb697SChristoph Hellwig 
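/*
 * dm_call_pr() confines an operation to single-target tables and fans it
 * out over every underlying device via the target's .iterate_devices.
 * A new per-path operation would follow the same shape as
 * __dm_pr_register() above.  Illustrative sketch; __dm_pr_frob() and
 * frob_args are made up:
 *
 *	static int __dm_pr_frob(struct dm_target *ti, struct dm_dev *dev,
 *				sector_t start, sector_t len, void *data)
 *	{
 *		const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
 *
 *		if (!ops)
 *			return -EOPNOTSUPP;
 *		... operate on dev->bdev using data ...
 *	}
 *	...
 *	ret = dm_call_pr(bdev, __dm_pr_frob, &frob_args);
 */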
307571cdb697SChristoph Hellwig static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
307671cdb697SChristoph Hellwig 			 u32 flags)
307771cdb697SChristoph Hellwig {
307871cdb697SChristoph Hellwig 	struct mapped_device *md = bdev->bd_disk->private_data;
307971cdb697SChristoph Hellwig 	const struct pr_ops *ops;
3080971888c4SMike Snitzer 	int r, srcu_idx;
308171cdb697SChristoph Hellwig 
30825bd5e8d8SMike Snitzer 	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
308371cdb697SChristoph Hellwig 	if (r < 0)
3084971888c4SMike Snitzer 		goto out;
308571cdb697SChristoph Hellwig 
308671cdb697SChristoph Hellwig 	ops = bdev->bd_disk->fops->pr_ops;
308771cdb697SChristoph Hellwig 	if (ops && ops->pr_reserve)
308871cdb697SChristoph Hellwig 		r = ops->pr_reserve(bdev, key, type, flags);
308971cdb697SChristoph Hellwig 	else
309071cdb697SChristoph Hellwig 		r = -EOPNOTSUPP;
3091971888c4SMike Snitzer out:
3092971888c4SMike Snitzer 	dm_unprepare_ioctl(md, srcu_idx);
309371cdb697SChristoph Hellwig 	return r;
309471cdb697SChristoph Hellwig }
309571cdb697SChristoph Hellwig 
309671cdb697SChristoph Hellwig static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
309771cdb697SChristoph Hellwig {
309871cdb697SChristoph Hellwig 	struct mapped_device *md = bdev->bd_disk->private_data;
309971cdb697SChristoph Hellwig 	const struct pr_ops *ops;
3100971888c4SMike Snitzer 	int r, srcu_idx;
310171cdb697SChristoph Hellwig 
31025bd5e8d8SMike Snitzer 	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
310371cdb697SChristoph Hellwig 	if (r < 0)
3104971888c4SMike Snitzer 		goto out;
310571cdb697SChristoph Hellwig 
310671cdb697SChristoph Hellwig 	ops = bdev->bd_disk->fops->pr_ops;
310771cdb697SChristoph Hellwig 	if (ops && ops->pr_release)
310871cdb697SChristoph Hellwig 		r = ops->pr_release(bdev, key, type);
310971cdb697SChristoph Hellwig 	else
311071cdb697SChristoph Hellwig 		r = -EOPNOTSUPP;
3111971888c4SMike Snitzer out:
3112971888c4SMike Snitzer 	dm_unprepare_ioctl(md, srcu_idx);
311371cdb697SChristoph Hellwig 	return r;
311471cdb697SChristoph Hellwig }
311571cdb697SChristoph Hellwig 
311671cdb697SChristoph Hellwig static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
311771cdb697SChristoph Hellwig 			 enum pr_type type, bool abort)
311871cdb697SChristoph Hellwig {
311971cdb697SChristoph Hellwig 	struct mapped_device *md = bdev->bd_disk->private_data;
312071cdb697SChristoph Hellwig 	const struct pr_ops *ops;
3121971888c4SMike Snitzer 	int r, srcu_idx;
312271cdb697SChristoph Hellwig 
31235bd5e8d8SMike Snitzer 	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
312471cdb697SChristoph Hellwig 	if (r < 0)
3125971888c4SMike Snitzer 		goto out;
312671cdb697SChristoph Hellwig 
312771cdb697SChristoph Hellwig 	ops = bdev->bd_disk->fops->pr_ops;
312871cdb697SChristoph Hellwig 	if (ops && ops->pr_preempt)
312971cdb697SChristoph Hellwig 		r = ops->pr_preempt(bdev, old_key, new_key, type, abort);
313071cdb697SChristoph Hellwig 	else
313171cdb697SChristoph Hellwig 		r = -EOPNOTSUPP;
3132971888c4SMike Snitzer out:
3133971888c4SMike Snitzer 	dm_unprepare_ioctl(md, srcu_idx);
313471cdb697SChristoph Hellwig 	return r;
313571cdb697SChristoph Hellwig }
313671cdb697SChristoph Hellwig 
313771cdb697SChristoph Hellwig static int dm_pr_clear(struct block_device *bdev, u64 key)
313871cdb697SChristoph Hellwig {
313971cdb697SChristoph Hellwig 	struct mapped_device *md = bdev->bd_disk->private_data;
314071cdb697SChristoph Hellwig 	const struct pr_ops *ops;
3141971888c4SMike Snitzer 	int r, srcu_idx;
314271cdb697SChristoph Hellwig 
31435bd5e8d8SMike Snitzer 	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
314471cdb697SChristoph Hellwig 	if (r < 0)
3145971888c4SMike Snitzer 		goto out;
314671cdb697SChristoph Hellwig 
314771cdb697SChristoph Hellwig 	ops = bdev->bd_disk->fops->pr_ops;
314871cdb697SChristoph Hellwig 	if (ops && ops->pr_clear)
314971cdb697SChristoph Hellwig 		r = ops->pr_clear(bdev, key);
315071cdb697SChristoph Hellwig 	else
315171cdb697SChristoph Hellwig 		r = -EOPNOTSUPP;
3152971888c4SMike Snitzer out:
3153971888c4SMike Snitzer 	dm_unprepare_ioctl(md, srcu_idx);
315471cdb697SChristoph Hellwig 	return r;
315571cdb697SChristoph Hellwig }
315671cdb697SChristoph Hellwig 
315771cdb697SChristoph Hellwig static const struct pr_ops dm_pr_ops = {
315871cdb697SChristoph Hellwig 	.pr_register	= dm_pr_register,
315971cdb697SChristoph Hellwig 	.pr_reserve	= dm_pr_reserve,
316071cdb697SChristoph Hellwig 	.pr_release	= dm_pr_release,
316171cdb697SChristoph Hellwig 	.pr_preempt	= dm_pr_preempt,
316271cdb697SChristoph Hellwig 	.pr_clear	= dm_pr_clear,
316371cdb697SChristoph Hellwig };
316471cdb697SChristoph Hellwig 
316583d5cde4SAlexey Dobriyan static const struct block_device_operations dm_blk_dops = {
31661da177e4SLinus Torvalds 	.open = dm_blk_open,
31671da177e4SLinus Torvalds 	.release = dm_blk_close,
3168aa129a22SMilan Broz 	.ioctl = dm_blk_ioctl,
31693ac51e74SDarrick J. Wong 	.getgeo = dm_blk_getgeo,
317071cdb697SChristoph Hellwig 	.pr_ops = &dm_pr_ops,
31711da177e4SLinus Torvalds 	.owner = THIS_MODULE
31721da177e4SLinus Torvalds };
31731da177e4SLinus Torvalds 
3174f26c5719SDan Williams static const struct dax_operations dm_dax_ops = {
3175f26c5719SDan Williams 	.direct_access = dm_dax_direct_access,
31767e026c8cSDan Williams 	.copy_from_iter = dm_dax_copy_from_iter,
3177b3a9a0c3SDan Williams 	.copy_to_iter = dm_dax_copy_to_iter,
3178f26c5719SDan Williams };
3179f26c5719SDan Williams 
31801da177e4SLinus Torvalds /*
31811da177e4SLinus Torvalds  * module hooks
31821da177e4SLinus Torvalds  */
31831da177e4SLinus Torvalds module_init(dm_init);
31841da177e4SLinus Torvalds module_exit(dm_exit);
31851da177e4SLinus Torvalds 
31861da177e4SLinus Torvalds module_param(major, uint, 0);
31871da177e4SLinus Torvalds MODULE_PARM_DESC(major, "The major number of the device mapper");
3188f4790826SMike Snitzer 
3189e8603136SMike Snitzer module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
3190e8603136SMike Snitzer MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
3191e8603136SMike Snitzer 
3192115485e8SMike Snitzer module_param(dm_numa_node, int, S_IRUGO | S_IWUSR);
3193115485e8SMike Snitzer MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");
3194115485e8SMike Snitzer 
31951da177e4SLinus Torvalds MODULE_DESCRIPTION(DM_NAME " driver");
31961da177e4SLinus Torvalds MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
31971da177e4SLinus Torvalds MODULE_LICENSE("GPL");
3198