/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched/signal.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/uio.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/pr.h>
#include <linux/refcount.h>

#define DM_MSG_PREFIX "core"

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_IDR(_minor_idr);

static DEFINE_SPINLOCK(_minor_lock);

static void do_deferred_remove(struct work_struct *w);

static DECLARE_WORK(deferred_remove_work, do_deferred_remove);

static struct workqueue_struct *deferred_remove_workqueue;

atomic_t dm_global_event_nr = ATOMIC_INIT(0);
DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);

void dm_issue_global_event(void)
{
	atomic_inc(&dm_global_event_nr);
	wake_up(&dm_global_eventq);
}

/*
 * One of these is allocated (on-stack) per original bio.
 */
struct clone_info {
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	unsigned sector_count;
};
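/*
 * Overview of the per-bio structures below (explanatory note, not from the
 * original source): each original bio gets a struct dm_io, which embeds the
 * struct dm_target_io of the first clone, which in turn embeds the clone
 * bio itself as its last member.  Per-bio data requested by a target is
 * allocated by the biosets in front of these structs; the pointer
 * arithmetic in dm_per_bio_data() below depends on this layout.
 */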
/*
 * One of these is allocated per clone bio.
 */
#define DM_TIO_MAGIC 7282014
struct dm_target_io {
	unsigned magic;
	struct dm_io *io;
	struct dm_target *ti;
	unsigned target_bio_nr;
	unsigned *len_ptr;
	bool inside_dm_io;
	struct bio clone;
};

/*
 * One of these is allocated per original bio.
 * It contains the first clone used for that original.
 */
#define DM_IO_MAGIC 5191977
struct dm_io {
	unsigned magic;
	struct mapped_device *md;
	blk_status_t status;
	atomic_t io_count;
	struct bio *orig_bio;
	unsigned long start_time;
	spinlock_t endio_lock;
	struct dm_stats_aux stats_aux;
	/* last member of dm_target_io is 'struct bio' */
	struct dm_target_io tio;
};

void *dm_per_bio_data(struct bio *bio, size_t data_size)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	if (!tio->inside_dm_io)
		return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
	return (char *)bio - offsetof(struct dm_target_io, clone) - offsetof(struct dm_io, tio) - data_size;
}
EXPORT_SYMBOL_GPL(dm_per_bio_data);

struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
{
	struct dm_io *io = (struct dm_io *)((char *)data + data_size);
	if (io->magic == DM_IO_MAGIC)
		return (struct bio *)((char *)io + offsetof(struct dm_io, tio) + offsetof(struct dm_target_io, clone));
	BUG_ON(io->magic != DM_TIO_MAGIC);
	return (struct bio *)((char *)io + offsetof(struct dm_target_io, clone));
}
EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data);

unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
{
	return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
}
EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);
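/*
 * Usage sketch for the accessors above (hypothetical target code, for
 * illustration only; 'struct my_ctx' is an invented name): a target that
 * set ti->per_io_data_size = sizeof(struct my_ctx) in its ctr can recover
 * its per-bio context in ->map() with
 *
 *	struct my_ctx *ctx = dm_per_bio_data(bio, sizeof(struct my_ctx));
 *
 * and can later get back from that pointer to the bio with
 * dm_bio_from_per_bio_data(ctx, sizeof(struct my_ctx)).
 */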
#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7

#define DM_NUMA_NODE NUMA_NO_NODE
static int dm_numa_node = DM_NUMA_NODE;

/*
 * For mempool pre-allocation at table load time.
 */
struct dm_md_mempools {
	struct bio_set *bs;
	struct bio_set *io_bs;
};

struct table_device {
	struct list_head list;
	refcount_t count;
	struct dm_dev dm_dev;
};

static struct kmem_cache *_rq_tio_cache;
static struct kmem_cache *_rq_cache;

/*
 * Bio-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_BIO_BASED_IOS		16
static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;

static int __dm_get_module_param_int(int *module_param, int min, int max)
{
	int param = READ_ONCE(*module_param);
	int modified_param = 0;
	bool modified = true;

	if (param < min)
		modified_param = min;
	else if (param > max)
		modified_param = max;
	else
		modified = false;

	if (modified) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned __dm_get_module_param(unsigned *module_param,
			       unsigned def, unsigned max)
{
	unsigned param = READ_ONCE(*module_param);
	unsigned modified_param = 0;

	if (!param)
		modified_param = def;
	else if (param > max)
		modified_param = max;

	if (modified_param) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}
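/*
 * Worked example (illustrative): if the user set reserved_bio_based_ios=0,
 * dm_get_reserved_bio_based_ios() below returns the default of 16, and a
 * value above DM_RESERVED_MAX_IOS is clamped to that maximum.  The clamped
 * value is written back with cmpxchg() so concurrent readers converge on
 * the sanitized setting without taking a lock.
 */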
unsigned dm_get_reserved_bio_based_ios(void)
{
	return __dm_get_module_param(&reserved_bio_based_ios,
				     RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);

static unsigned dm_get_numa_node(void)
{
	return __dm_get_module_param_int(&dm_numa_node,
					 DM_NUMA_NODE, num_online_nodes() - 1);
}

static int __init local_init(void)
{
	int r = -ENOMEM;

	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
	if (!_rq_tio_cache)
		return r;

	_rq_cache = kmem_cache_create("dm_old_clone_request", sizeof(struct request),
				      __alignof__(struct request), 0, NULL);
	if (!_rq_cache)
		goto out_free_rq_tio_cache;

	r = dm_uevent_init();
	if (r)
		goto out_free_rq_cache;

	deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
	if (!deferred_remove_workqueue) {
		r = -ENOMEM;
		goto out_uevent_exit;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_free_workqueue;

	if (!_major)
		_major = r;

	return 0;

out_free_workqueue:
	destroy_workqueue(deferred_remove_workqueue);
out_uevent_exit:
	dm_uevent_exit();
out_free_rq_cache:
	kmem_cache_destroy(_rq_cache);
out_free_rq_tio_cache:
	kmem_cache_destroy(_rq_tio_cache);

	return r;
}

static void local_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(deferred_remove_workqueue);

	kmem_cache_destroy(_rq_cache);
	kmem_cache_destroy(_rq_tio_cache);
	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}
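/*
 * Note (explanatory): _inits[] and _exits[] below pair constructors with
 * destructors index-for-index; dm_init() runs the former in order and, on
 * failure, unwinds with the matching _exits[] entries in reverse.
 */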
static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_io_init,
	dm_kcopyd_init,
	dm_interface_init,
	dm_statistics_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_io_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
	dm_statistics_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();

	/*
	 * Should be empty by this point.
	 */
	idr_destroy(&_minor_idr);
}

/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{
	return test_bit(DMF_DELETING, &md->flags);
}

static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);
out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}
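/*
 * Note on dm_blk_open() above (explanatory): the DMF_FREEING/DMF_DELETING
 * checks and both reference bumps (dm_get() and md->open_count) happen
 * under _minor_lock, so an open cannot race with the device being freed;
 * a device in either state refuses new opens with -ENXIO.
 */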
static void dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = disk->private_data;
	if (WARN_ON(!md))
		goto out;

	if (atomic_dec_and_test(&md->open_count) &&
	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
		queue_work(deferred_remove_workqueue, &deferred_remove_work);

	dm_put(md);
out:
	spin_unlock(&_minor_lock);
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md)) {
		r = -EBUSY;
		if (mark_deferred)
			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
		r = -EEXIST;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

int dm_cancel_deferred_remove(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (test_bit(DMF_DELETING, &md->flags))
		r = -EBUSY;
	else
		clear_bit(DMF_DEFERRED_REMOVE, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static void do_deferred_remove(struct work_struct *w)
{
	dm_deferred_remove();
}

sector_t dm_get_size(struct mapped_device *md)
{
	return get_capacity(md->disk);
}

struct request_queue *dm_get_md_queue(struct mapped_device *md)
{
	return md->queue;
}
struct dm_stats *dm_get_stats(struct mapped_device *md)
{
	return &md->stats;
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

static int dm_grab_bdev_for_ioctl(struct mapped_device *md,
				  struct block_device **bdev,
				  fmode_t *mode)
{
	struct dm_target *tgt;
	struct dm_table *map;
	int srcu_idx, r;

retry:
	r = -ENOTTY;
	map = dm_get_live_table(md, &srcu_idx);
	if (!map || !dm_table_get_size(map))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		goto out;

	tgt = dm_table_get_target(map, 0);
	if (!tgt->type->prepare_ioctl)
		goto out;

	if (dm_suspended_md(md)) {
		r = -EAGAIN;
		goto out;
	}

	r = tgt->type->prepare_ioctl(tgt, bdev, mode);
	if (r < 0)
		goto out;

	bdgrab(*bdev);
	dm_put_live_table(md, srcu_idx);
	return r;

out:
	dm_put_live_table(md, srcu_idx);
	if (r == -ENOTCONN && !fatal_signal_pending(current)) {
		msleep(10);
		goto retry;
	}
	return r;
}
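/*
 * Note (explanatory): a prepare_ioctl() return of -ENOTCONN, e.g. from a
 * multipath target that currently has no usable path, is retried above
 * after a short sleep unless a fatal signal is pending.
 */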
static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	int r;

	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
	if (r < 0)
		return r;

	if (r > 0) {
		/*
		 * Target determined this ioctl is being issued against a
		 * subset of the parent bdev; require extra privileges.
		 */
		if (!capable(CAP_SYS_RAWIO)) {
			DMWARN_LIMIT(
				"%s: sending ioctl %x to DM device without required privilege.",
				current->comm, cmd);
			r = -ENOIOCTLCMD;
			goto out;
		}
	}

	r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
out:
	bdput(bdev);
	return r;
}

static void start_io_acct(struct dm_io *io);

static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
{
	struct dm_io *io;
	struct dm_target_io *tio;
	struct bio *clone;

	clone = bio_alloc_bioset(GFP_NOIO, 0, md->io_bs);
	if (!clone)
		return NULL;

	tio = container_of(clone, struct dm_target_io, clone);
	tio->inside_dm_io = true;
	tio->io = NULL;

	io = container_of(tio, struct dm_io, tio);
	io->magic = DM_IO_MAGIC;
	io->status = 0;
	atomic_set(&io->io_count, 1);
	io->orig_bio = bio;
	io->md = md;
	spin_lock_init(&io->endio_lock);

	start_io_acct(io);

	return io;
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	bio_put(&io->tio.clone);
}

static struct dm_target_io *alloc_tio(struct clone_info *ci, struct dm_target *ti,
				      unsigned target_bio_nr, gfp_t gfp_mask)
{
	struct dm_target_io *tio;

	if (!ci->io->tio.io) {
		/* the dm_target_io embedded in ci->io is available */
		tio = &ci->io->tio;
	} else {
		struct bio *clone = bio_alloc_bioset(gfp_mask, 0, ci->io->md->bs);
		if (!clone)
			return NULL;

		tio = container_of(clone, struct dm_target_io, clone);
		tio->inside_dm_io = false;
	}

	tio->magic = DM_TIO_MAGIC;
	tio->io = ci->io;
	tio->ti = ti;
	tio->target_bio_nr = target_bio_nr;

	return tio;
}

static void free_tio(struct dm_target_io *tio)
{
	if (tio->inside_dm_io)
		return;
	bio_put(&tio->clone);
}
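/*
 * Allocation note (explanatory, based on how the biosets are sized): the
 * first clone for an original bio comes from md->io_bs, whose front pad
 * is assumed to cover struct dm_io plus any per-bio data, so a single
 * bioset allocation yields the dm_io, its embedded dm_target_io and the
 * clone bio.  Additional clones come from md->bs, whose front pad covers
 * only struct dm_target_io plus per-bio data (inside_dm_io == false).
 */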
int md_in_flight(struct mapped_device *md)
{
	return atomic_read(&md->pending[READ]) +
	       atomic_read(&md->pending[WRITE]);
}

static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->orig_bio;
	int rw = bio_data_dir(bio);

	io->start_time = jiffies;

	generic_start_io_acct(md->queue, rw, bio_sectors(bio), &dm_disk(md)->part0);

	atomic_set(&dm_disk(md)->part0.in_flight[rw],
		   atomic_inc_return(&md->pending[rw]));

	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio_data_dir(bio),
				    bio->bi_iter.bi_sector, bio_sectors(bio),
				    false, 0, &io->stats_aux);
}

static void end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->orig_bio;
	unsigned long duration = jiffies - io->start_time;
	int pending;
	int rw = bio_data_dir(bio);

	generic_end_io_acct(md->queue, rw, &dm_disk(md)->part0, io->start_time);

	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio_data_dir(bio),
				    bio->bi_iter.bi_sector, bio_sectors(bio),
				    true, duration, &io->stats_aux);

	/*
	 * After this is decremented the bio must not be touched if it is
	 * a flush.
	 */
	pending = atomic_dec_return(&md->pending[rw]);
	atomic_set(&dm_disk(md)->part0.in_flight[rw], pending);
	pending += atomic_read(&md->pending[rw^0x1]);

	/* nudge anyone waiting on suspend queue */
	if (!pending)
		wake_up(&md->wait);
}
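/*
 * Note (explanatory): start_io_acct()/end_io_acct() keep md->pending[] in
 * step with the generic part0 in_flight counters; the wake_up(&md->wait)
 * above is what lets a suspend that is waiting for in-flight I/O to drain
 * make progress.
 */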
/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&md->deferred_lock, flags);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irqrestore(&md->deferred_lock, flags);
	queue_work(md->wq, &md->work);
}

/*
 * Everyone (including functions in this file) should use this function to
 * access the md->map field, and make sure they call dm_put_live_table()
 * when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md,
				   int *srcu_idx) __acquires(md->io_barrier)
{
	*srcu_idx = srcu_read_lock(&md->io_barrier);

	return srcu_dereference(md->map, &md->io_barrier);
}

void dm_put_live_table(struct mapped_device *md,
		       int srcu_idx) __releases(md->io_barrier)
{
	srcu_read_unlock(&md->io_barrier, srcu_idx);
}

void dm_sync_table(struct mapped_device *md)
{
	synchronize_srcu(&md->io_barrier);
	synchronize_rcu_expedited();
}

/*
 * A fast alternative to dm_get_live_table/dm_put_live_table.
 * The caller must not block between these two functions.
 */
static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
{
	rcu_read_lock();
	return rcu_dereference(md->map);
}

static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
{
	rcu_read_unlock();
}
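/*
 * Typical live-table usage, a sketch of the pattern used throughout this
 * file:
 *
 *	int srcu_idx;
 *	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 *
 *	if (map) {
 *		... look up targets and map I/O ...
 *	}
 *	dm_put_live_table(md, srcu_idx);
 */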
/*
 * Open a table device so we can use it as a map destination.
 */
static int open_table_device(struct table_device *td, dev_t dev,
			     struct mapped_device *md)
{
	static char *_claim_ptr = "I belong to device-mapper";
	struct block_device *bdev;

	int r;

	BUG_ON(td->dm_dev.bdev);

	bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _claim_ptr);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	r = bd_link_disk_holder(bdev, dm_disk(md));
	if (r) {
		blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
		return r;
	}

	td->dm_dev.bdev = bdev;
	td->dm_dev.dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	return 0;
}

/*
 * Close a table device that we've been using.
 */
static void close_table_device(struct table_device *td, struct mapped_device *md)
{
	if (!td->dm_dev.bdev)
		return;

	bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
	blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
	put_dax(td->dm_dev.dax_dev);
	td->dm_dev.bdev = NULL;
	td->dm_dev.dax_dev = NULL;
}

static struct table_device *find_table_device(struct list_head *l, dev_t dev,
					      fmode_t mode) {
	struct table_device *td;

	list_for_each_entry(td, l, list)
		if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
			return td;

	return NULL;
}
int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
			struct dm_dev **result) {
	int r;
	struct table_device *td;

	mutex_lock(&md->table_devices_lock);
	td = find_table_device(&md->table_devices, dev, mode);
	if (!td) {
		td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
		if (!td) {
			mutex_unlock(&md->table_devices_lock);
			return -ENOMEM;
		}

		td->dm_dev.mode = mode;
		td->dm_dev.bdev = NULL;

		if ((r = open_table_device(td, dev, md))) {
			mutex_unlock(&md->table_devices_lock);
			kfree(td);
			return r;
		}

		format_dev_t(td->dm_dev.name, dev);

		refcount_set(&td->count, 1);
		list_add(&td->list, &md->table_devices);
	} else {
		refcount_inc(&td->count);
	}
	mutex_unlock(&md->table_devices_lock);

	*result = &td->dm_dev;
	return 0;
}
EXPORT_SYMBOL_GPL(dm_get_table_device);

void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
{
	struct table_device *td = container_of(d, struct table_device, dm_dev);

	mutex_lock(&md->table_devices_lock);
	if (refcount_dec_and_test(&td->count)) {
		close_table_device(td, md);
		list_del(&td->list);
		kfree(td);
	}
	mutex_unlock(&md->table_devices_lock);
}
EXPORT_SYMBOL(dm_put_table_device);

static void free_table_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct table_device *td = list_entry(tmp, struct table_device, list);

		DMWARN("dm_destroy: %s still exists with %d references",
		       td->dm_dev.name, refcount_read(&td->count));
		kfree(td);
	}
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}
/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}
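/*
 * Completion note (explanatory): dec_pending() below treats
 * BLK_STS_DM_REQUEUE specially.  During a noflush suspend the original bio
 * is pushed back onto md->deferred to be resubmitted after resume;
 * otherwise the requeue is degraded to BLK_STS_IOERR.  A flush bio with a
 * data payload completes in two steps: the preflush finishes first, then
 * the bio is requeued without REQ_PREFLUSH for the data portion.
 */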
/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, blk_status_t error)
{
	unsigned long flags;
	blk_status_t io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;

	/* Push-back supersedes any I/O errors */
	if (unlikely(error)) {
		spin_lock_irqsave(&io->endio_lock, flags);
		if (!(io->status == BLK_STS_DM_REQUEUE && __noflush_suspending(md)))
			io->status = error;
		spin_unlock_irqrestore(&io->endio_lock, flags);
	}

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->status == BLK_STS_DM_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md))
				/* NOTE early return due to BLK_STS_DM_REQUEUE below */
				bio_list_add_head(&md->deferred, io->orig_bio);
			else
				/* noflush suspend was interrupted. */
				io->status = BLK_STS_IOERR;
			spin_unlock_irqrestore(&md->deferred_lock, flags);
		}

		io_error = io->status;
		bio = io->orig_bio;
		end_io_acct(io);
		free_io(md, io);

		if (io_error == BLK_STS_DM_REQUEUE)
			return;

		if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
			/*
			 * Preflush done for flush with data, reissue
			 * without REQ_PREFLUSH.
			 */
			bio->bi_opf &= ~REQ_PREFLUSH;
			queue_io(md, bio);
		} else {
			/* done with normal IO or empty flush */
			bio->bi_status = io_error;
			bio_endio(bio);
		}
	}
}

void disable_write_same(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE SAME, disable it */
	limits->max_write_same_sectors = 0;
}

void disable_write_zeroes(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE ZEROES, disable it */
	limits->max_write_zeroes_sectors = 0;
}

static void clone_endio(struct bio *bio)
{
	blk_status_t error = bio->bi_status;
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
		if (bio_op(bio) == REQ_OP_WRITE_SAME &&
		    !bio->bi_disk->queue->limits.max_write_same_sectors)
			disable_write_same(md);
		if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
		    !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
			disable_write_zeroes(md);
	}

	if (endio) {
		int r = endio(tio->ti, bio, &error);
		switch (r) {
		case DM_ENDIO_REQUEUE:
			error = BLK_STS_DM_REQUEUE;
			/*FALLTHRU*/
		case DM_ENDIO_DONE:
			break;
		case DM_ENDIO_INCOMPLETE:
			/* The target will handle the io */
			return;
		default:
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	free_tio(tio);
	dec_pending(io, error);
}
/*
 * Return maximum size of I/O possible at the supplied sector up to the
 * current target boundary.
 */
static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
{
	sector_t target_offset = dm_target_offset(ti, sector);

	return ti->len - target_offset;
}

static sector_t max_io_len(sector_t sector, struct dm_target *ti)
{
	sector_t len = max_io_len_target_boundary(sector, ti);
	sector_t offset, max_len;

	/*
	 * Does the target need to split even further?
	 */
	if (ti->max_io_len) {
		offset = dm_target_offset(ti, sector);
		if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
			max_len = sector_div(offset, ti->max_io_len);
		else
			max_len = offset & (ti->max_io_len - 1);
		max_len = ti->max_io_len - max_len;

		if (len > max_len)
			len = max_len;
	}

	return len;
}

int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
{
	if (len > UINT_MAX) {
		DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
		      (unsigned long long)len, UINT_MAX);
		ti->error = "Maximum size of target IO is too large";
		return -EINVAL;
	}

	ti->max_io_len = (uint32_t) len;

	return 0;
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
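/*
 * Worked example for max_io_len() above (illustrative): with
 * ti->max_io_len == 128 (a power of two) and an offset of 200 sectors into
 * the target, 200 & 127 == 72, so at most 128 - 72 == 56 sectors can be
 * issued before the next 128-sector boundary.  Non-power-of-two lengths
 * compute the same remainder via sector_div().
 */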
static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
						sector_t sector, int *srcu_idx)
{
	struct dm_table *map;
	struct dm_target *ti;

	map = dm_get_live_table(md, srcu_idx);
	if (!map)
		return NULL;

	ti = dm_table_find_target(map, sector);
	if (!dm_target_is_valid(ti))
		return NULL;

	return ti;
}

static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
				 long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long len, ret = -EIO;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->direct_access)
		goto out;
	len = max_io_len(sector, ti) / PAGE_SECTORS;
	if (len < 1)
		goto out;
	nr_pages = min(len, nr_pages);
	if (ti->type->direct_access)
		ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);

out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
				    void *addr, size_t bytes, struct iov_iter *i)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long ret = 0;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->dax_copy_from_iter) {
		ret = copy_from_iter(addr, bytes, i);
		goto out;
	}
	ret = ti->type->dax_copy_from_iter(ti, pgoff, addr, bytes, i);
out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}
/*
 * A target may call dm_accept_partial_bio only from the map routine.  It is
 * allowed for all bio types except REQ_PREFLUSH and REQ_OP_ZONE_RESET.
 *
 * dm_accept_partial_bio informs the dm that the target only wants to process
 * additional n_sectors sectors of the bio and the rest of the data should be
 * sent in a subsequent bio.
 *
 * A diagram that explains the arithmetic:
 * +--------------------+---------------+-------+
 * |         1          |       2       |   3   |
 * +--------------------+---------------+-------+
 *
 * <-------------- *tio->len_ptr --------------->
 *                      <------- bi_size ------->
 *                      <-- n_sectors -->
 *
 * Region 1 was already iterated over with bio_advance or similar function.
 *	(it may be empty if the target doesn't use bio_advance)
 * Region 2 is the remaining bio size that the target wants to process.
 *	(it may be empty if region 1 is non-empty, although there is no reason
 *	 to make it empty)
 * The target requires that region 3 is to be sent in the next bio.
 *
 * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
 * the partially processed part (the sum of regions 1+2) must be the same for all
 * copies of the bio.
 */
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
	BUG_ON(bio->bi_opf & REQ_PREFLUSH);
	BUG_ON(bi_size > *tio->len_ptr);
	BUG_ON(n_sectors > bi_size);
	*tio->len_ptr -= bi_size - n_sectors;
	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
}
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
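/*
 * Usage sketch (hypothetical target code, for illustration only;
 * MY_MAX_SECTORS is an invented limit):
 *
 *	static int my_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		if (bio_sectors(bio) > MY_MAX_SECTORS)
 *			dm_accept_partial_bio(bio, MY_MAX_SECTORS);
 *		... remap the (possibly shrunken) bio ...
 *		return DM_MAPIO_REMAPPED;
 *	}
 *
 * dm core then sends the remainder of the original bio in a follow-up bio.
 */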
/*
 * The zone descriptors obtained with a zone report indicate
 * zone positions within the target device. The zone descriptors
 * must be remapped to match their position within the dm device.
 * A target may call dm_remap_zone_report after completion of a
 * REQ_OP_ZONE_REPORT bio to remap the zone descriptors obtained
 * from the target device mapping to the dm device.
 */
void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
{
#ifdef CONFIG_BLK_DEV_ZONED
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	struct bio *report_bio = tio->io->orig_bio;
	struct blk_zone_report_hdr *hdr = NULL;
	struct blk_zone *zone;
	unsigned int nr_rep = 0;
	unsigned int ofst;
	struct bio_vec bvec;
	struct bvec_iter iter;
	void *addr;

	if (bio->bi_status)
		return;

	/*
	 * Remap the start sector of the reported zones. For sequential zones,
	 * also remap the write pointer position.
	 */
	bio_for_each_segment(bvec, report_bio, iter) {
		addr = kmap_atomic(bvec.bv_page);

		/* Remember the report header in the first page */
		if (!hdr) {
			hdr = addr;
			ofst = sizeof(struct blk_zone_report_hdr);
		} else
			ofst = 0;

		/* Set zones start sector */
		while (hdr->nr_zones && ofst < bvec.bv_len) {
			zone = addr + ofst;
			if (zone->start >= start + ti->len) {
				hdr->nr_zones = 0;
				break;
			}
			zone->start = zone->start + ti->begin - start;
			if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL) {
				if (zone->cond == BLK_ZONE_COND_FULL)
					zone->wp = zone->start + zone->len;
				else if (zone->cond == BLK_ZONE_COND_EMPTY)
					zone->wp = zone->start;
				else
					zone->wp = zone->wp + ti->begin - start;
			}
			ofst += sizeof(struct blk_zone);
			hdr->nr_zones--;
			nr_rep++;
		}

		if (addr != hdr)
			kunmap_atomic(addr);

		if (!hdr->nr_zones)
			break;
	}

	if (hdr) {
		hdr->nr_zones = nr_rep;
		kunmap_atomic(hdr);
	}

	bio_advance(report_bio, report_bio->bi_iter.bi_size);

#else /* !CONFIG_BLK_DEV_ZONED */
	bio->bi_status = BLK_STS_NOTSUPP;
#endif
}
EXPORT_SYMBOL_GPL(dm_remap_zone_report);
12191da177e4SLinus Torvalds */ 122064f52b0eSMike Snitzer atomic_inc(&io->io_count); 12214f024f37SKent Overstreet sector = clone->bi_iter.bi_sector; 1222d67a5f4bSMikulas Patocka 12237de3ee57SMikulas Patocka r = ti->type->map(ti, clone); 1224846785e6SChristoph Hellwig switch (r) { 1225846785e6SChristoph Hellwig case DM_MAPIO_SUBMITTED: 1226846785e6SChristoph Hellwig break; 1227846785e6SChristoph Hellwig case DM_MAPIO_REMAPPED: 12281da177e4SLinus Torvalds /* the bio has been remapped so dispatch it */ 122974d46992SChristoph Hellwig trace_block_bio_remap(clone->bi_disk->queue, clone, 123064f52b0eSMike Snitzer bio_dev(io->orig_bio), sector); 1231978e51baSMike Snitzer if (md->type == DM_TYPE_NVME_BIO_BASED) 1232978e51baSMike Snitzer ret = direct_make_request(clone); 1233978e51baSMike Snitzer else 1234978e51baSMike Snitzer ret = generic_make_request(clone); 1235846785e6SChristoph Hellwig break; 1236846785e6SChristoph Hellwig case DM_MAPIO_KILL: 12374e4cbee9SChristoph Hellwig free_tio(tio); 123864f52b0eSMike Snitzer dec_pending(io, BLK_STS_IOERR); 12394e4cbee9SChristoph Hellwig break; 1240846785e6SChristoph Hellwig case DM_MAPIO_REQUEUE: 1241cfae7529SMike Snitzer free_tio(tio); 124264f52b0eSMike Snitzer dec_pending(io, BLK_STS_DM_REQUEUE); 1243846785e6SChristoph Hellwig break; 1244846785e6SChristoph Hellwig default: 124545cbcd79SKiyoshi Ueda DMWARN("unimplemented target map return value: %d", r); 124645cbcd79SKiyoshi Ueda BUG(); 12471da177e4SLinus Torvalds } 1248978e51baSMike Snitzer 1249978e51baSMike Snitzer return ret; 12501da177e4SLinus Torvalds } 12511da177e4SLinus Torvalds 1252e0d6609aSMikulas Patocka static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len) 1253bd2a49b8SAlasdair G Kergon { 12544f024f37SKent Overstreet bio->bi_iter.bi_sector = sector; 12554f024f37SKent Overstreet bio->bi_iter.bi_size = to_bytes(len); 12561da177e4SLinus Torvalds } 12571da177e4SLinus Torvalds 12581da177e4SLinus Torvalds /* 12591da177e4SLinus Torvalds * Creates a bio that consists of range of complete bvecs. 12601da177e4SLinus Torvalds */ 1261c80914e8SMike Snitzer static int clone_bio(struct dm_target_io *tio, struct bio *bio, 12621c3b13e6SKent Overstreet sector_t sector, unsigned len) 12631da177e4SLinus Torvalds { 1264dba14160SMikulas Patocka struct bio *clone = &tio->clone; 12651da177e4SLinus Torvalds 12661c3b13e6SKent Overstreet __bio_clone_fast(clone, bio); 12679c47008dSMartin K. 
Petersen 1268e2460f2aSMikulas Patocka if (unlikely(bio_integrity(bio) != NULL)) { 1269e2460f2aSMikulas Patocka int r; 1270e2460f2aSMikulas Patocka 1271e2460f2aSMikulas Patocka if (unlikely(!dm_target_has_integrity(tio->ti->type) && 1272e2460f2aSMikulas Patocka !dm_target_passes_integrity(tio->ti->type))) { 1273e2460f2aSMikulas Patocka DMWARN("%s: the target %s doesn't support integrity data.", 1274e2460f2aSMikulas Patocka dm_device_name(tio->io->md), 1275e2460f2aSMikulas Patocka tio->ti->type->name); 1276e2460f2aSMikulas Patocka return -EIO; 1277e2460f2aSMikulas Patocka } 1278e2460f2aSMikulas Patocka 1279e2460f2aSMikulas Patocka r = bio_integrity_clone(clone, bio, GFP_NOIO); 1280c80914e8SMike Snitzer if (r < 0) 1281c80914e8SMike Snitzer return r; 1282c80914e8SMike Snitzer } 12831c3b13e6SKent Overstreet 1284264c869dSDamien Le Moal if (bio_op(bio) != REQ_OP_ZONE_REPORT) 12851c3b13e6SKent Overstreet bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector)); 12861c3b13e6SKent Overstreet clone->bi_iter.bi_size = to_bytes(len); 12871c3b13e6SKent Overstreet 1288e2460f2aSMikulas Patocka if (unlikely(bio_integrity(bio) != NULL)) 1289fbd08e76SDmitry Monakhov bio_integrity_trim(clone); 1290c80914e8SMike Snitzer 1291c80914e8SMike Snitzer return 0; 12921da177e4SLinus Torvalds } 12931da177e4SLinus Torvalds 1294318716ddSMike Snitzer static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci, 1295318716ddSMike Snitzer struct dm_target *ti, unsigned num_bios) 12969015df24SAlasdair G Kergon { 1297318716ddSMike Snitzer struct dm_target_io *tio; 1298318716ddSMike Snitzer int try; 1299318716ddSMike Snitzer 1300318716ddSMike Snitzer if (!num_bios) 1301318716ddSMike Snitzer return; 1302318716ddSMike Snitzer 1303318716ddSMike Snitzer if (num_bios == 1) { 1304318716ddSMike Snitzer tio = alloc_tio(ci, ti, 0, GFP_NOIO); 1305318716ddSMike Snitzer bio_list_add(blist, &tio->clone); 1306318716ddSMike Snitzer return; 1307318716ddSMike Snitzer } 1308318716ddSMike Snitzer 1309318716ddSMike Snitzer for (try = 0; try < 2; try++) { 1310318716ddSMike Snitzer int bio_nr; 1311318716ddSMike Snitzer struct bio *bio; 1312318716ddSMike Snitzer 1313318716ddSMike Snitzer if (try) 1314bc02cdbeSMike Snitzer mutex_lock(&ci->io->md->table_devices_lock); 1315318716ddSMike Snitzer for (bio_nr = 0; bio_nr < num_bios; bio_nr++) { 1316318716ddSMike Snitzer tio = alloc_tio(ci, ti, bio_nr, try ? 
GFP_NOIO : GFP_NOWAIT); 1317318716ddSMike Snitzer if (!tio) 1318318716ddSMike Snitzer break; 1319318716ddSMike Snitzer 1320318716ddSMike Snitzer bio_list_add(blist, &tio->clone); 1321318716ddSMike Snitzer } 1322318716ddSMike Snitzer if (try) 1323bc02cdbeSMike Snitzer mutex_unlock(&ci->io->md->table_devices_lock); 1324318716ddSMike Snitzer if (bio_nr == num_bios) 1325318716ddSMike Snitzer return; 1326318716ddSMike Snitzer 1327318716ddSMike Snitzer while ((bio = bio_list_pop(blist))) { 1328318716ddSMike Snitzer tio = container_of(bio, struct dm_target_io, clone); 1329318716ddSMike Snitzer free_tio(tio); 1330318716ddSMike Snitzer } 1331318716ddSMike Snitzer } 1332318716ddSMike Snitzer } 1333318716ddSMike Snitzer 1334978e51baSMike Snitzer static blk_qc_t __clone_and_map_simple_bio(struct clone_info *ci, 1335318716ddSMike Snitzer struct dm_target_io *tio, unsigned *len) 1336318716ddSMike Snitzer { 1337dba14160SMikulas Patocka struct bio *clone = &tio->clone; 13389015df24SAlasdair G Kergon 13391dd40c3eSMikulas Patocka tio->len_ptr = len; 13401dd40c3eSMikulas Patocka 13411c3b13e6SKent Overstreet __bio_clone_fast(clone, ci->bio); 1342bd2a49b8SAlasdair G Kergon if (len) 13431dd40c3eSMikulas Patocka bio_setup_sector(clone, ci->sector, *len); 1344f9ab94ceSMikulas Patocka 1345978e51baSMike Snitzer return __map_bio(tio); 1346f9ab94ceSMikulas Patocka } 1347f9ab94ceSMikulas Patocka 134814fe594dSAlasdair G Kergon static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti, 13491dd40c3eSMikulas Patocka unsigned num_bios, unsigned *len) 135006a426ceSMike Snitzer { 1351318716ddSMike Snitzer struct bio_list blist = BIO_EMPTY_LIST; 1352318716ddSMike Snitzer struct bio *bio; 1353318716ddSMike Snitzer struct dm_target_io *tio; 135406a426ceSMike Snitzer 1355318716ddSMike Snitzer alloc_multiple_bios(&blist, ci, ti, num_bios); 1356318716ddSMike Snitzer 1357318716ddSMike Snitzer while ((bio = bio_list_pop(&blist))) { 1358318716ddSMike Snitzer tio = container_of(bio, struct dm_target_io, clone); 1359978e51baSMike Snitzer (void) __clone_and_map_simple_bio(ci, tio, len); 1360318716ddSMike Snitzer } 136106a426ceSMike Snitzer } 136206a426ceSMike Snitzer 136314fe594dSAlasdair G Kergon static int __send_empty_flush(struct clone_info *ci) 1364f9ab94ceSMikulas Patocka { 136506a426ceSMike Snitzer unsigned target_nr = 0; 1366f9ab94ceSMikulas Patocka struct dm_target *ti; 1367f9ab94ceSMikulas Patocka 1368b372d360SMike Snitzer BUG_ON(bio_has_data(ci->bio)); 1369f9ab94ceSMikulas Patocka while ((ti = dm_table_get_target(ci->map, target_nr++))) 13701dd40c3eSMikulas Patocka __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL); 1371f9ab94ceSMikulas Patocka 1372f9ab94ceSMikulas Patocka return 0; 1373f9ab94ceSMikulas Patocka } 1374f9ab94ceSMikulas Patocka 1375c80914e8SMike Snitzer static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti, 13761dd40c3eSMikulas Patocka sector_t sector, unsigned *len) 13775ae89a87SMike Snitzer { 1378dba14160SMikulas Patocka struct bio *bio = ci->bio; 13795ae89a87SMike Snitzer struct dm_target_io *tio; 1380f31c21e4SNeilBrown int r; 13815ae89a87SMike Snitzer 1382318716ddSMike Snitzer tio = alloc_tio(ci, ti, 0, GFP_NOIO); 13831dd40c3eSMikulas Patocka tio->len_ptr = len; 1384c80914e8SMike Snitzer r = clone_bio(tio, bio, sector, *len); 1385072623deSMikulas Patocka if (r < 0) { 1386cfae7529SMike Snitzer free_tio(tio); 1387f31c21e4SNeilBrown return r; 1388072623deSMikulas Patocka } 1389978e51baSMike Snitzer (void) __map_bio(tio); 1390c80914e8SMike Snitzer 
1391f31c21e4SNeilBrown return 0; 1392b0d8ed4dSAlasdair G Kergon } 13935ae89a87SMike Snitzer 139455a62eefSAlasdair G Kergon typedef unsigned (*get_num_bios_fn)(struct dm_target *ti); 139523508a96SMike Snitzer 139655a62eefSAlasdair G Kergon static unsigned get_num_discard_bios(struct dm_target *ti) 139723508a96SMike Snitzer { 139855a62eefSAlasdair G Kergon return ti->num_discard_bios; 139923508a96SMike Snitzer } 140023508a96SMike Snitzer 140155a62eefSAlasdair G Kergon static unsigned get_num_write_same_bios(struct dm_target *ti) 140223508a96SMike Snitzer { 140355a62eefSAlasdair G Kergon return ti->num_write_same_bios; 140423508a96SMike Snitzer } 140523508a96SMike Snitzer 1406ac62d620SChristoph Hellwig static unsigned get_num_write_zeroes_bios(struct dm_target *ti) 1407ac62d620SChristoph Hellwig { 1408ac62d620SChristoph Hellwig return ti->num_write_zeroes_bios; 1409ac62d620SChristoph Hellwig } 1410ac62d620SChristoph Hellwig 141123508a96SMike Snitzer typedef bool (*is_split_required_fn)(struct dm_target *ti); 141223508a96SMike Snitzer 141323508a96SMike Snitzer static bool is_split_required_for_discard(struct dm_target *ti) 141423508a96SMike Snitzer { 141555a62eefSAlasdair G Kergon return ti->split_discard_bios; 141623508a96SMike Snitzer } 141723508a96SMike Snitzer 14183d7f4562SMike Snitzer static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti, 141955a62eefSAlasdair G Kergon get_num_bios_fn get_num_bios, 142023508a96SMike Snitzer is_split_required_fn is_split_required) 14215ae89a87SMike Snitzer { 1422e0d6609aSMikulas Patocka unsigned len; 142355a62eefSAlasdair G Kergon unsigned num_bios; 14245ae89a87SMike Snitzer 14255ae89a87SMike Snitzer /* 142623508a96SMike Snitzer * Even though the device advertised support for this type of 142723508a96SMike Snitzer * request, that does not mean every target supports it, and 1428936688d7SMike Snitzer * reconfiguration might also have changed that since the 14295ae89a87SMike Snitzer * check was performed. 14305ae89a87SMike Snitzer */ 143155a62eefSAlasdair G Kergon num_bios = get_num_bios ? 
get_num_bios(ti) : 0; 143255a62eefSAlasdair G Kergon if (!num_bios) 14335ae89a87SMike Snitzer return -EOPNOTSUPP; 14345ae89a87SMike Snitzer 143523508a96SMike Snitzer if (is_split_required && !is_split_required(ti)) 1436e0d6609aSMikulas Patocka len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti)); 14377acf0277SMikulas Patocka else 1438e0d6609aSMikulas Patocka len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti)); 14395ae89a87SMike Snitzer 14401dd40c3eSMikulas Patocka __send_duplicate_bios(ci, ti, num_bios, &len); 14415ae89a87SMike Snitzer 1442a79245b3SMike Snitzer ci->sector += len; 14433d7f4562SMike Snitzer ci->sector_count -= len; 14445ae89a87SMike Snitzer 14455ae89a87SMike Snitzer return 0; 14465ae89a87SMike Snitzer } 14475ae89a87SMike Snitzer 14483d7f4562SMike Snitzer static int __send_discard(struct clone_info *ci, struct dm_target *ti) 144923508a96SMike Snitzer { 14503d7f4562SMike Snitzer return __send_changing_extent_only(ci, ti, get_num_discard_bios, 145123508a96SMike Snitzer is_split_required_for_discard); 145223508a96SMike Snitzer } 145323508a96SMike Snitzer 14543d7f4562SMike Snitzer static int __send_write_same(struct clone_info *ci, struct dm_target *ti) 145523508a96SMike Snitzer { 14563d7f4562SMike Snitzer return __send_changing_extent_only(ci, ti, get_num_write_same_bios, NULL); 145723508a96SMike Snitzer } 145823508a96SMike Snitzer 14593d7f4562SMike Snitzer static int __send_write_zeroes(struct clone_info *ci, struct dm_target *ti) 1460ac62d620SChristoph Hellwig { 14613d7f4562SMike Snitzer return __send_changing_extent_only(ci, ti, get_num_write_zeroes_bios, NULL); 1462ac62d620SChristoph Hellwig } 1463ac62d620SChristoph Hellwig 1464e4c93811SAlasdair G Kergon /* 1465e4c93811SAlasdair G Kergon * Select the correct strategy for processing a non-flush bio. 
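 *
 * Discard, write-same and write-zeroes bios are fanned out through
 * __send_changing_extent_only(); everything else is cloned and mapped
 * with __clone_and_map_data_bio(), capped at max_io_len() except for
 * REQ_OP_ZONE_REPORT bios, which keep their full length.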
1466e4c93811SAlasdair G Kergon */ 1467e4c93811SAlasdair G Kergon static int __split_and_process_non_flush(struct clone_info *ci) 1468e4c93811SAlasdair G Kergon { 1469e4c93811SAlasdair G Kergon struct bio *bio = ci->bio; 1470e4c93811SAlasdair G Kergon struct dm_target *ti; 14711c3b13e6SKent Overstreet unsigned len; 1472c80914e8SMike Snitzer int r; 1473e4c93811SAlasdair G Kergon 1474e4c93811SAlasdair G Kergon ti = dm_table_find_target(ci->map, ci->sector); 1475e4c93811SAlasdair G Kergon if (!dm_target_is_valid(ti)) 1476e4c93811SAlasdair G Kergon return -EIO; 1477e4c93811SAlasdair G Kergon 14783d7f4562SMike Snitzer if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) 14793d7f4562SMike Snitzer return __send_discard(ci, ti); 14803d7f4562SMike Snitzer else if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME)) 14813d7f4562SMike Snitzer return __send_write_same(ci, ti); 14823d7f4562SMike Snitzer else if (unlikely(bio_op(bio) == REQ_OP_WRITE_ZEROES)) 14833d7f4562SMike Snitzer return __send_write_zeroes(ci, ti); 14843d7f4562SMike Snitzer 1485264c869dSDamien Le Moal if (bio_op(bio) == REQ_OP_ZONE_REPORT) 1486264c869dSDamien Le Moal len = ci->sector_count; 1487264c869dSDamien Le Moal else 1488264c869dSDamien Le Moal len = min_t(sector_t, max_io_len(ci->sector, ti), 1489264c869dSDamien Le Moal ci->sector_count); 1490e4c93811SAlasdair G Kergon 1491c80914e8SMike Snitzer r = __clone_and_map_data_bio(ci, ti, ci->sector, &len); 1492c80914e8SMike Snitzer if (r < 0) 1493c80914e8SMike Snitzer return r; 1494e4c93811SAlasdair G Kergon 1495e4c93811SAlasdair G Kergon ci->sector += len; 1496e4c93811SAlasdair G Kergon ci->sector_count -= len; 1497e4c93811SAlasdair G Kergon 1498e4c93811SAlasdair G Kergon return 0; 1499e4c93811SAlasdair G Kergon } 1500e4c93811SAlasdair G Kergon 1501978e51baSMike Snitzer static void init_clone_info(struct clone_info *ci, struct mapped_device *md, 1502978e51baSMike Snitzer struct dm_table *map, struct bio *bio) 1503978e51baSMike Snitzer { 1504978e51baSMike Snitzer ci->map = map; 1505978e51baSMike Snitzer ci->io = alloc_io(md, bio); 1506978e51baSMike Snitzer ci->sector = bio->bi_iter.bi_sector; 1507978e51baSMike Snitzer } 1508978e51baSMike Snitzer 1509e4c93811SAlasdair G Kergon /* 151014fe594dSAlasdair G Kergon * Entry point to split a bio into clones and submit them to the targets. 
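 *
 * Flushes become per-target empty flush bios, REQ_OP_ZONE_RESET is
 * handled with a zero sector_count, and other bios are processed in a
 * loop; if a remainder cannot be handled in the current pass it is
 * chained to a clone of the original and resubmitted with
 * generic_make_request() so it runs after the bios already issued.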
15111da177e4SLinus Torvalds */ 1512978e51baSMike Snitzer static blk_qc_t __split_and_process_bio(struct mapped_device *md, 151383d5e5b0SMikulas Patocka struct dm_table *map, struct bio *bio) 15141da177e4SLinus Torvalds { 15151da177e4SLinus Torvalds struct clone_info ci; 1516978e51baSMike Snitzer blk_qc_t ret = BLK_QC_T_NONE; 1517512875bdSJun'ichi Nomura int error = 0; 15181da177e4SLinus Torvalds 151983d5e5b0SMikulas Patocka if (unlikely(!map)) { 1520f0b9a450SMikulas Patocka bio_io_error(bio); 1521978e51baSMike Snitzer return ret; 1522f0b9a450SMikulas Patocka } 1523692d0eb9SMikulas Patocka 1524978e51baSMike Snitzer init_clone_info(&ci, md, map, bio); 1525bd2a49b8SAlasdair G Kergon 15261eff9d32SJens Axboe if (bio->bi_opf & REQ_PREFLUSH) { 1527bc02cdbeSMike Snitzer ci.bio = &ci.io->md->flush_bio; 1528b372d360SMike Snitzer ci.sector_count = 0; 152914fe594dSAlasdair G Kergon error = __send_empty_flush(&ci); 1530b372d360SMike Snitzer /* dec_pending submits any data associated with flush */ 1531a4aa5e56SDamien Le Moal } else if (bio_op(bio) == REQ_OP_ZONE_RESET) { 1532a4aa5e56SDamien Le Moal ci.bio = bio; 1533a4aa5e56SDamien Le Moal ci.sector_count = 0; 1534a4aa5e56SDamien Le Moal error = __split_and_process_non_flush(&ci); 1535b372d360SMike Snitzer } else { 15366a8736d1STejun Heo ci.bio = bio; 15371da177e4SLinus Torvalds ci.sector_count = bio_sectors(bio); 153818a25da8SNeilBrown while (ci.sector_count && !error) { 153914fe594dSAlasdair G Kergon error = __split_and_process_non_flush(&ci); 154018a25da8SNeilBrown if (current->bio_list && ci.sector_count && !error) { 154118a25da8SNeilBrown /* 154218a25da8SNeilBrown * Remainder must be passed to generic_make_request() 154318a25da8SNeilBrown * so that it gets handled *after* bios already submitted 154418a25da8SNeilBrown * have been completely processed. 154518a25da8SNeilBrown * We take a clone of the original to store in 1546745dc570SMike Snitzer * ci.io->orig_bio to be used by end_io_acct() and 154718a25da8SNeilBrown * for dec_pending to use for completion handling. 154818a25da8SNeilBrown * As this path is not used for REQ_OP_ZONE_REPORT, 1549745dc570SMike Snitzer * the usage of io->orig_bio in dm_remap_zone_report() 155018a25da8SNeilBrown * won't be affected by this reassignment. 155118a25da8SNeilBrown */ 155218a25da8SNeilBrown struct bio *b = bio_clone_bioset(bio, GFP_NOIO, 155318a25da8SNeilBrown md->queue->bio_split); 1554745dc570SMike Snitzer ci.io->orig_bio = b; 155518a25da8SNeilBrown bio_advance(bio, (bio_sectors(bio) - ci.sector_count) << 9); 155618a25da8SNeilBrown bio_chain(b, bio); 1557978e51baSMike Snitzer ret = generic_make_request(bio); 155818a25da8SNeilBrown break; 155918a25da8SNeilBrown } 156018a25da8SNeilBrown } 1561d87f4c14STejun Heo } 15621da177e4SLinus Torvalds 15631da177e4SLinus Torvalds /* drop the extra reference count */ 156454385bf7SBart Van Assche dec_pending(ci.io, errno_to_blk_status(error)); 1565978e51baSMike Snitzer return ret; 15661da177e4SLinus Torvalds } 15671da177e4SLinus Torvalds 15681da177e4SLinus Torvalds /* 1569978e51baSMike Snitzer * Optimized variant of __split_and_process_bio that leverages the 1570978e51baSMike Snitzer * fact that targets that use it do _not_ have a need to split bios. 
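 *
 * The bio is mapped in one go to md->immutable_target with a single
 * dm_target_io, so there is no splitting loop and no remainder
 * handling.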
15711da177e4SLinus Torvalds */ 1572978e51baSMike Snitzer static blk_qc_t __process_bio(struct mapped_device *md, 1573978e51baSMike Snitzer struct dm_table *map, struct bio *bio) 1574978e51baSMike Snitzer { 1575978e51baSMike Snitzer struct clone_info ci; 1576978e51baSMike Snitzer blk_qc_t ret = BLK_QC_T_NONE; 1577978e51baSMike Snitzer int error = 0; 1578978e51baSMike Snitzer 1579978e51baSMike Snitzer if (unlikely(!map)) { 1580978e51baSMike Snitzer bio_io_error(bio); 1581978e51baSMike Snitzer return ret; 1582978e51baSMike Snitzer } 1583978e51baSMike Snitzer 1584978e51baSMike Snitzer init_clone_info(&ci, md, map, bio); 1585978e51baSMike Snitzer 1586978e51baSMike Snitzer if (bio->bi_opf & REQ_PREFLUSH) { 1587978e51baSMike Snitzer ci.bio = &ci.io->md->flush_bio; 1588978e51baSMike Snitzer ci.sector_count = 0; 1589978e51baSMike Snitzer error = __send_empty_flush(&ci); 1590978e51baSMike Snitzer /* dec_pending submits any data associated with flush */ 1591978e51baSMike Snitzer } else { 1592978e51baSMike Snitzer struct dm_target *ti = md->immutable_target; 1593978e51baSMike Snitzer struct dm_target_io *tio; 1594978e51baSMike Snitzer 1595978e51baSMike Snitzer /* 1596978e51baSMike Snitzer * Defend against IO still getting in during teardown 1597978e51baSMike Snitzer * - as was seen for a time with nvme-fcloop 1598978e51baSMike Snitzer */ 1599978e51baSMike Snitzer if (unlikely(WARN_ON_ONCE(!ti || !dm_target_is_valid(ti)))) { 1600978e51baSMike Snitzer error = -EIO; 1601978e51baSMike Snitzer goto out; 1602978e51baSMike Snitzer } 1603978e51baSMike Snitzer 1604978e51baSMike Snitzer tio = alloc_tio(&ci, ti, 0, GFP_NOIO); 1605978e51baSMike Snitzer ci.bio = bio; 1606978e51baSMike Snitzer ci.sector_count = bio_sectors(bio); 1607978e51baSMike Snitzer ret = __clone_and_map_simple_bio(&ci, tio, NULL); 1608978e51baSMike Snitzer } 1609978e51baSMike Snitzer out: 1610978e51baSMike Snitzer /* drop the extra reference count */ 1611978e51baSMike Snitzer dec_pending(ci.io, errno_to_blk_status(error)); 1612978e51baSMike Snitzer return ret; 1613978e51baSMike Snitzer } 1614978e51baSMike Snitzer 1615978e51baSMike Snitzer typedef blk_qc_t (process_bio_fn)(struct mapped_device *, struct dm_table *, struct bio *); 1616978e51baSMike Snitzer 1617978e51baSMike Snitzer static blk_qc_t __dm_make_request(struct request_queue *q, struct bio *bio, 1618978e51baSMike Snitzer process_bio_fn process_bio) 16191da177e4SLinus Torvalds { 16201da177e4SLinus Torvalds struct mapped_device *md = q->queuedata; 1621978e51baSMike Snitzer blk_qc_t ret = BLK_QC_T_NONE; 162283d5e5b0SMikulas Patocka int srcu_idx; 162383d5e5b0SMikulas Patocka struct dm_table *map; 16241da177e4SLinus Torvalds 162583d5e5b0SMikulas Patocka map = dm_get_live_table(md, &srcu_idx); 16261da177e4SLinus Torvalds 16276a8736d1STejun Heo /* if we're suspended, we have to queue this io for later */ 16286a8736d1STejun Heo if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { 162983d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 16301da177e4SLinus Torvalds 16311eff9d32SJens Axboe if (!(bio->bi_opf & REQ_RAHEAD)) 163292c63902SMikulas Patocka queue_io(md, bio); 16336a8736d1STejun Heo else 16346a8736d1STejun Heo bio_io_error(bio); 1635978e51baSMike Snitzer return ret; 16361da177e4SLinus Torvalds } 16371da177e4SLinus Torvalds 1638978e51baSMike Snitzer ret = process_bio(md, map, bio); 1639978e51baSMike Snitzer 164083d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 1641978e51baSMike Snitzer return ret; 1642978e51baSMike Snitzer } 1643978e51baSMike Snitzer 
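/*
 * Both make_request entry points below are thin wrappers around
 * __dm_make_request() and differ only in the process_bio_fn they pass:
 * __split_and_process_bio for ordinary bio-based devices, __process_bio
 * for DM_TYPE_NVME_BIO_BASED (see dm_setup_md_queue()).
 */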
1644978e51baSMike Snitzer /* 1645978e51baSMike Snitzer * The request function that remaps the bio to one target and 1646978e51baSMike Snitzer * splits off any remainder. 1647978e51baSMike Snitzer */ 1648978e51baSMike Snitzer static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio) 1649978e51baSMike Snitzer { 1650978e51baSMike Snitzer return __dm_make_request(q, bio, __split_and_process_bio); 1651978e51baSMike Snitzer } 1652978e51baSMike Snitzer 1653978e51baSMike Snitzer static blk_qc_t dm_make_request_nvme(struct request_queue *q, struct bio *bio) 1654978e51baSMike Snitzer { 1655978e51baSMike Snitzer return __dm_make_request(q, bio, __process_bio); 1656cec47e3dSKiyoshi Ueda } 1657cec47e3dSKiyoshi Ueda 16581da177e4SLinus Torvalds static int dm_any_congested(void *congested_data, int bdi_bits) 16591da177e4SLinus Torvalds { 16608a57dfc6SChandra Seetharaman int r = bdi_bits; 16618a57dfc6SChandra Seetharaman struct mapped_device *md = congested_data; 16628a57dfc6SChandra Seetharaman struct dm_table *map; 16631da177e4SLinus Torvalds 16641eb787ecSAlasdair G Kergon if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 1665e522c039SMike Snitzer if (dm_request_based(md)) { 1666cec47e3dSKiyoshi Ueda /* 1667e522c039SMike Snitzer * With request-based DM we only need to check the 1668e522c039SMike Snitzer * top-level queue for congestion. 1669cec47e3dSKiyoshi Ueda */ 1670dc3b17ccSJan Kara r = md->queue->backing_dev_info->wb.state & bdi_bits; 1671e522c039SMike Snitzer } else { 1672e522c039SMike Snitzer map = dm_get_live_table_fast(md); 1673e522c039SMike Snitzer if (map) 16741da177e4SLinus Torvalds r = dm_table_any_congested(map, bdi_bits); 167583d5e5b0SMikulas Patocka dm_put_live_table_fast(md); 16768a57dfc6SChandra Seetharaman } 1677e522c039SMike Snitzer } 16788a57dfc6SChandra Seetharaman 16791da177e4SLinus Torvalds return r; 16801da177e4SLinus Torvalds } 16811da177e4SLinus Torvalds 16821da177e4SLinus Torvalds /*----------------------------------------------------------------- 16831da177e4SLinus Torvalds * An IDR is used to keep track of allocated minor numbers. 16841da177e4SLinus Torvalds *---------------------------------------------------------------*/ 16852b06cfffSAlasdair G Kergon static void free_minor(int minor) 16861da177e4SLinus Torvalds { 1687f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 16881da177e4SLinus Torvalds idr_remove(&_minor_idr, minor); 1689f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 16901da177e4SLinus Torvalds } 16911da177e4SLinus Torvalds 16921da177e4SLinus Torvalds /* 16931da177e4SLinus Torvalds * See if the device with a specific minor # is free. 16941da177e4SLinus Torvalds */ 1695cf13ab8eSFrederik Deweerdt static int specific_minor(int minor) 16961da177e4SLinus Torvalds { 1697c9d76be6STejun Heo int r; 16981da177e4SLinus Torvalds 16991da177e4SLinus Torvalds if (minor >= (1 << MINORBITS)) 17001da177e4SLinus Torvalds return -EINVAL; 17011da177e4SLinus Torvalds 1702c9d76be6STejun Heo idr_preload(GFP_KERNEL); 1703f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 17041da177e4SLinus Torvalds 1705c9d76be6STejun Heo r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT); 17061da177e4SLinus Torvalds 1707f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 1708c9d76be6STejun Heo idr_preload_end(); 1709c9d76be6STejun Heo if (r < 0) 1710c9d76be6STejun Heo return r == -ENOSPC ? 
-EBUSY : r; 1711c9d76be6STejun Heo return 0; 17121da177e4SLinus Torvalds } 17131da177e4SLinus Torvalds 1714cf13ab8eSFrederik Deweerdt static int next_free_minor(int *minor) 17151da177e4SLinus Torvalds { 1716c9d76be6STejun Heo int r; 17171da177e4SLinus Torvalds 1718c9d76be6STejun Heo idr_preload(GFP_KERNEL); 1719f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 17201da177e4SLinus Torvalds 1721c9d76be6STejun Heo r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT); 17221da177e4SLinus Torvalds 1723f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 1724c9d76be6STejun Heo idr_preload_end(); 1725c9d76be6STejun Heo if (r < 0) 17261da177e4SLinus Torvalds return r; 1727c9d76be6STejun Heo *minor = r; 1728c9d76be6STejun Heo return 0; 17291da177e4SLinus Torvalds } 17301da177e4SLinus Torvalds 173183d5cde4SAlexey Dobriyan static const struct block_device_operations dm_blk_dops; 1732f26c5719SDan Williams static const struct dax_operations dm_dax_ops; 17331da177e4SLinus Torvalds 173453d5914fSMikulas Patocka static void dm_wq_work(struct work_struct *work); 173553d5914fSMikulas Patocka 1736*c12c9a3cSMike Snitzer static void dm_init_normal_md_queue(struct mapped_device *md) 1737bfebd1cdSMike Snitzer { 173817e149b8SMike Snitzer md->use_blk_mq = false; 1739bfebd1cdSMike Snitzer 1740bfebd1cdSMike Snitzer /* 1741bfebd1cdSMike Snitzer * Initialize aspects of queue that aren't relevant for blk-mq 1742bfebd1cdSMike Snitzer */ 1743dc3b17ccSJan Kara md->queue->backing_dev_info->congested_fn = dm_any_congested; 17444a0b4ddfSMike Snitzer } 17454a0b4ddfSMike Snitzer 17460f20972fSMike Snitzer static void cleanup_mapped_device(struct mapped_device *md) 17470f20972fSMike Snitzer { 17480f20972fSMike Snitzer if (md->wq) 17490f20972fSMike Snitzer destroy_workqueue(md->wq); 17500f20972fSMike Snitzer if (md->kworker_task) 17510f20972fSMike Snitzer kthread_stop(md->kworker_task); 17520f20972fSMike Snitzer if (md->bs) 17530f20972fSMike Snitzer bioset_free(md->bs); 175464f52b0eSMike Snitzer if (md->io_bs) 175564f52b0eSMike Snitzer bioset_free(md->io_bs); 17560f20972fSMike Snitzer 1757f26c5719SDan Williams if (md->dax_dev) { 1758f26c5719SDan Williams kill_dax(md->dax_dev); 1759f26c5719SDan Williams put_dax(md->dax_dev); 1760f26c5719SDan Williams md->dax_dev = NULL; 1761f26c5719SDan Williams } 1762f26c5719SDan Williams 17630f20972fSMike Snitzer if (md->disk) { 17640f20972fSMike Snitzer spin_lock(&_minor_lock); 17650f20972fSMike Snitzer md->disk->private_data = NULL; 17660f20972fSMike Snitzer spin_unlock(&_minor_lock); 17670f20972fSMike Snitzer del_gendisk(md->disk); 17680f20972fSMike Snitzer put_disk(md->disk); 17690f20972fSMike Snitzer } 17700f20972fSMike Snitzer 17710f20972fSMike Snitzer if (md->queue) 17720f20972fSMike Snitzer blk_cleanup_queue(md->queue); 17730f20972fSMike Snitzer 1774d09960b0STahsin Erdogan cleanup_srcu_struct(&md->io_barrier); 1775d09960b0STahsin Erdogan 17760f20972fSMike Snitzer if (md->bdev) { 17770f20972fSMike Snitzer bdput(md->bdev); 17780f20972fSMike Snitzer md->bdev = NULL; 17790f20972fSMike Snitzer } 17804cc96131SMike Snitzer 1781d5ffebddSMike Snitzer mutex_destroy(&md->suspend_lock); 1782d5ffebddSMike Snitzer mutex_destroy(&md->type_lock); 1783d5ffebddSMike Snitzer mutex_destroy(&md->table_devices_lock); 1784d5ffebddSMike Snitzer 17854cc96131SMike Snitzer dm_mq_cleanup_mapped_device(md); 17860f20972fSMike Snitzer } 17870f20972fSMike Snitzer 17881da177e4SLinus Torvalds /* 17891da177e4SLinus Torvalds * Allocate and initialise a blank device with a given minor. 
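 *
 * In outline: reserve the minor, set up the srcu barrier and locks,
 * allocate the request queue and gendisk on the configured NUMA node,
 * attach a dax_device, create the "kdmflush" workqueue, grab a bdev
 * reference, pre-initialize the flush bio and finally publish the md
 * in the minor IDR; any failure unwinds via cleanup_mapped_device().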
17901da177e4SLinus Torvalds */ 17912b06cfffSAlasdair G Kergon static struct mapped_device *alloc_dev(int minor) 17921da177e4SLinus Torvalds { 1793115485e8SMike Snitzer int r, numa_node_id = dm_get_numa_node(); 1794f26c5719SDan Williams struct dax_device *dax_dev; 1795115485e8SMike Snitzer struct mapped_device *md; 1796ba61fdd1SJeff Mahoney void *old_md; 17971da177e4SLinus Torvalds 1798856eb091SMikulas Patocka md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id); 17991da177e4SLinus Torvalds if (!md) { 18001da177e4SLinus Torvalds DMWARN("unable to allocate device, out of memory."); 18011da177e4SLinus Torvalds return NULL; 18021da177e4SLinus Torvalds } 18031da177e4SLinus Torvalds 180410da4f79SJeff Mahoney if (!try_module_get(THIS_MODULE)) 18056ed7ade8SMilan Broz goto bad_module_get; 180610da4f79SJeff Mahoney 18071da177e4SLinus Torvalds /* get a minor number for the dev */ 18082b06cfffSAlasdair G Kergon if (minor == DM_ANY_MINOR) 1809cf13ab8eSFrederik Deweerdt r = next_free_minor(&minor); 18102b06cfffSAlasdair G Kergon else 1811cf13ab8eSFrederik Deweerdt r = specific_minor(minor); 18121da177e4SLinus Torvalds if (r < 0) 18136ed7ade8SMilan Broz goto bad_minor; 18141da177e4SLinus Torvalds 181583d5e5b0SMikulas Patocka r = init_srcu_struct(&md->io_barrier); 181683d5e5b0SMikulas Patocka if (r < 0) 181783d5e5b0SMikulas Patocka goto bad_io_barrier; 181883d5e5b0SMikulas Patocka 1819115485e8SMike Snitzer md->numa_node_id = numa_node_id; 18204cc96131SMike Snitzer md->use_blk_mq = dm_use_blk_mq_default(); 1821591ddcfcSMike Snitzer md->init_tio_pdu = false; 1822a5664dadSMike Snitzer md->type = DM_TYPE_NONE; 1823e61290a4SDaniel Walker mutex_init(&md->suspend_lock); 1824a5664dadSMike Snitzer mutex_init(&md->type_lock); 182586f1152bSBenjamin Marzinski mutex_init(&md->table_devices_lock); 1826022c2611SMikulas Patocka spin_lock_init(&md->deferred_lock); 18271da177e4SLinus Torvalds atomic_set(&md->holders, 1); 18285c6bd75dSAlasdair G Kergon atomic_set(&md->open_count, 0); 18291da177e4SLinus Torvalds atomic_set(&md->event_nr, 0); 18307a8c3d3bSMike Anderson atomic_set(&md->uevent_seq, 0); 18317a8c3d3bSMike Anderson INIT_LIST_HEAD(&md->uevent_list); 183286f1152bSBenjamin Marzinski INIT_LIST_HEAD(&md->table_devices); 18337a8c3d3bSMike Anderson spin_lock_init(&md->uevent_lock); 18341da177e4SLinus Torvalds 1835115485e8SMike Snitzer md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id); 18361da177e4SLinus Torvalds if (!md->queue) 18370f20972fSMike Snitzer goto bad; 1838*c12c9a3cSMike Snitzer md->queue->queuedata = md; 1839*c12c9a3cSMike Snitzer md->queue->backing_dev_info->congested_data = md; 18401da177e4SLinus Torvalds 1841*c12c9a3cSMike Snitzer md->disk = alloc_disk_node(1, md->numa_node_id); 18421da177e4SLinus Torvalds if (!md->disk) 18430f20972fSMike Snitzer goto bad; 18441da177e4SLinus Torvalds 1845316d315bSNikanth Karthikesan atomic_set(&md->pending[0], 0); 1846316d315bSNikanth Karthikesan atomic_set(&md->pending[1], 0); 1847f0b04115SJeff Mahoney init_waitqueue_head(&md->wait); 184853d5914fSMikulas Patocka INIT_WORK(&md->work, dm_wq_work); 1849f0b04115SJeff Mahoney init_waitqueue_head(&md->eventq); 18502995fa78SMikulas Patocka init_completion(&md->kobj_holder.completion); 18512eb6e1e3SKeith Busch md->kworker_task = NULL; 1852f0b04115SJeff Mahoney 18531da177e4SLinus Torvalds md->disk->major = _major; 18541da177e4SLinus Torvalds md->disk->first_minor = minor; 18551da177e4SLinus Torvalds md->disk->fops = &dm_blk_dops; 18561da177e4SLinus Torvalds md->disk->queue = md->queue; 18571da177e4SLinus Torvalds 
md->disk->private_data = md; 18581da177e4SLinus Torvalds sprintf(md->disk->disk_name, "dm-%d", minor); 1859f26c5719SDan Williams 1860f26c5719SDan Williams dax_dev = alloc_dax(md, md->disk->disk_name, &dm_dax_ops); 1861f26c5719SDan Williams if (!dax_dev) 1862f26c5719SDan Williams goto bad; 1863f26c5719SDan Williams md->dax_dev = dax_dev; 1864f26c5719SDan Williams 18651da177e4SLinus Torvalds add_disk(md->disk); 18667e51f257SMike Anderson format_dev_t(md->name, MKDEV(_major, minor)); 18671da177e4SLinus Torvalds 1868670368a8STejun Heo md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0); 1869304f3f6aSMilan Broz if (!md->wq) 18700f20972fSMike Snitzer goto bad; 1871304f3f6aSMilan Broz 187232a926daSMikulas Patocka md->bdev = bdget_disk(md->disk, 0); 187332a926daSMikulas Patocka if (!md->bdev) 18740f20972fSMike Snitzer goto bad; 187532a926daSMikulas Patocka 18763a83f467SMing Lei bio_init(&md->flush_bio, NULL, 0); 187774d46992SChristoph Hellwig bio_set_dev(&md->flush_bio, md->bdev); 1878ff0361b3SJan Kara md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC; 18796a8736d1STejun Heo 1880fd2ed4d2SMikulas Patocka dm_stats_init(&md->stats); 1881fd2ed4d2SMikulas Patocka 1882ba61fdd1SJeff Mahoney /* Populate the mapping, nobody knows we exist yet */ 1883f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 1884ba61fdd1SJeff Mahoney old_md = idr_replace(&_minor_idr, md, minor); 1885f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 1886ba61fdd1SJeff Mahoney 1887ba61fdd1SJeff Mahoney BUG_ON(old_md != MINOR_ALLOCED); 1888ba61fdd1SJeff Mahoney 18891da177e4SLinus Torvalds return md; 18901da177e4SLinus Torvalds 18910f20972fSMike Snitzer bad: 18920f20972fSMike Snitzer cleanup_mapped_device(md); 189383d5e5b0SMikulas Patocka bad_io_barrier: 18941da177e4SLinus Torvalds free_minor(minor); 18956ed7ade8SMilan Broz bad_minor: 189610da4f79SJeff Mahoney module_put(THIS_MODULE); 18976ed7ade8SMilan Broz bad_module_get: 1898856eb091SMikulas Patocka kvfree(md); 18991da177e4SLinus Torvalds return NULL; 19001da177e4SLinus Torvalds } 19011da177e4SLinus Torvalds 1902ae9da83fSJun'ichi Nomura static void unlock_fs(struct mapped_device *md); 1903ae9da83fSJun'ichi Nomura 19041da177e4SLinus Torvalds static void free_dev(struct mapped_device *md) 19051da177e4SLinus Torvalds { 1906f331c029STejun Heo int minor = MINOR(disk_devt(md->disk)); 190763d94e48SJun'ichi Nomura 1908ae9da83fSJun'ichi Nomura unlock_fs(md); 19092eb6e1e3SKeith Busch 19100f20972fSMike Snitzer cleanup_mapped_device(md); 19110f20972fSMike Snitzer 19120f20972fSMike Snitzer free_table_devices(&md->table_devices); 19130f20972fSMike Snitzer dm_stats_cleanup(&md->stats); 191463a4f065SMike Snitzer free_minor(minor); 191563a4f065SMike Snitzer 191610da4f79SJeff Mahoney module_put(THIS_MODULE); 1917856eb091SMikulas Patocka kvfree(md); 19181da177e4SLinus Torvalds } 19191da177e4SLinus Torvalds 1920e6ee8c0bSKiyoshi Ueda static void __bind_mempools(struct mapped_device *md, struct dm_table *t) 1921e6ee8c0bSKiyoshi Ueda { 1922c0820cf5SMikulas Patocka struct dm_md_mempools *p = dm_table_get_md_mempools(t); 1923e6ee8c0bSKiyoshi Ueda 1924545ed20eSToshi Kani if (dm_table_bio_based(t)) { 1925c0820cf5SMikulas Patocka /* 192664f52b0eSMike Snitzer * The md may already have mempools that need changing. 192764f52b0eSMike Snitzer * If so, reload bioset because front_pad may have changed 192816245bdcSJun'ichi Nomura * because a different table was loaded. 
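 * (The biosets' front_pad embeds dm's per-bio bookkeeping plus the
 * targets' per-bio data, so its size is a property of the bound
 * table.)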
1929c0820cf5SMikulas Patocka */ 193064f52b0eSMike Snitzer if (md->bs) { 1931c0820cf5SMikulas Patocka bioset_free(md->bs); 19320776aa0eSMike Snitzer md->bs = NULL; 1933c0820cf5SMikulas Patocka } 193464f52b0eSMike Snitzer if (md->io_bs) { 193564f52b0eSMike Snitzer bioset_free(md->io_bs); 193664f52b0eSMike Snitzer md->io_bs = NULL; 193764f52b0eSMike Snitzer } 19380776aa0eSMike Snitzer 19390776aa0eSMike Snitzer } else if (md->bs) { 1940cbc4e3c1SMike Snitzer /* 19414e6e36c3SMike Snitzer * There's no need to reload with request-based dm 19424e6e36c3SMike Snitzer * because the size of front_pad doesn't change. 19434e6e36c3SMike Snitzer * Note for future: If you are to reload bioset, 19444e6e36c3SMike Snitzer * prep-ed requests in the queue may refer 19454e6e36c3SMike Snitzer * to bio from the old bioset, so you must walk 19464e6e36c3SMike Snitzer * through the queue to unprep. 1947cbc4e3c1SMike Snitzer */ 1948cbc4e3c1SMike Snitzer goto out; 1949cbc4e3c1SMike Snitzer } 1950cbc4e3c1SMike Snitzer 1951dde1e1ecSMike Snitzer BUG_ON(!p || md->bs || md->io_bs); 1952e6ee8c0bSKiyoshi Ueda 1953e6ee8c0bSKiyoshi Ueda md->bs = p->bs; 1954e6ee8c0bSKiyoshi Ueda p->bs = NULL; 195564f52b0eSMike Snitzer md->io_bs = p->io_bs; 195664f52b0eSMike Snitzer p->io_bs = NULL; 1957e6ee8c0bSKiyoshi Ueda out: 195802233342SMike Snitzer /* mempool bind completed, no longer need any mempools in the table */ 1959e6ee8c0bSKiyoshi Ueda dm_table_free_md_mempools(t); 1960e6ee8c0bSKiyoshi Ueda } 1961e6ee8c0bSKiyoshi Ueda 19621da177e4SLinus Torvalds /* 19631da177e4SLinus Torvalds * Bind a table to the device. 19641da177e4SLinus Torvalds */ 19651da177e4SLinus Torvalds static void event_callback(void *context) 19661da177e4SLinus Torvalds { 19677a8c3d3bSMike Anderson unsigned long flags; 19687a8c3d3bSMike Anderson LIST_HEAD(uevents); 19691da177e4SLinus Torvalds struct mapped_device *md = (struct mapped_device *) context; 19701da177e4SLinus Torvalds 19717a8c3d3bSMike Anderson spin_lock_irqsave(&md->uevent_lock, flags); 19727a8c3d3bSMike Anderson list_splice_init(&md->uevent_list, &uevents); 19737a8c3d3bSMike Anderson spin_unlock_irqrestore(&md->uevent_lock, flags); 19747a8c3d3bSMike Anderson 1975ed9e1982STejun Heo dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); 19767a8c3d3bSMike Anderson 19771da177e4SLinus Torvalds atomic_inc(&md->event_nr); 19781da177e4SLinus Torvalds wake_up(&md->eventq); 197962e08243SMikulas Patocka dm_issue_global_event(); 19801da177e4SLinus Torvalds } 19811da177e4SLinus Torvalds 1982c217649bSMike Snitzer /* 1983c217649bSMike Snitzer * Protected by md->suspend_lock obtained by dm_swap_table(). 1984c217649bSMike Snitzer */ 19854e90188bSAlasdair G Kergon static void __set_size(struct mapped_device *md, sector_t size) 19861da177e4SLinus Torvalds { 19871ea0654eSBart Van Assche lockdep_assert_held(&md->suspend_lock); 19881ea0654eSBart Van Assche 19894e90188bSAlasdair G Kergon set_capacity(md->disk, size); 19901da177e4SLinus Torvalds 1991db8fef4fSMikulas Patocka i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT); 19921da177e4SLinus Torvalds } 19931da177e4SLinus Torvalds 1994042d2a9bSAlasdair G Kergon /* 1995042d2a9bSAlasdair G Kergon * Returns old map, which caller must destroy. 
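 *
 * Caller must hold md->suspend_lock. __bind resizes the device, stops
 * the queue for request-based tables to prevent I/O mapping before
 * resume, rebinds mempools, publishes the table with
 * rcu_assign_pointer(), applies the queue_limits and then syncs
 * readers with dm_sync_table().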
1996042d2a9bSAlasdair G Kergon */ 1997042d2a9bSAlasdair G Kergon static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, 1998754c5fc7SMike Snitzer struct queue_limits *limits) 19991da177e4SLinus Torvalds { 2000042d2a9bSAlasdair G Kergon struct dm_table *old_map; 2001165125e1SJens Axboe struct request_queue *q = md->queue; 2002978e51baSMike Snitzer bool request_based = dm_table_request_based(t); 20031da177e4SLinus Torvalds sector_t size; 20041da177e4SLinus Torvalds 20055a8f1f80SBart Van Assche lockdep_assert_held(&md->suspend_lock); 20065a8f1f80SBart Van Assche 20071da177e4SLinus Torvalds size = dm_table_get_size(t); 20083ac51e74SDarrick J. Wong 20093ac51e74SDarrick J. Wong /* 20103ac51e74SDarrick J. Wong * Wipe any geometry if the size of the table changed. 20113ac51e74SDarrick J. Wong */ 2012fd2ed4d2SMikulas Patocka if (size != dm_get_size(md)) 20133ac51e74SDarrick J. Wong memset(&md->geometry, 0, sizeof(md->geometry)); 20143ac51e74SDarrick J. Wong 20154e90188bSAlasdair G Kergon __set_size(md, size); 20161da177e4SLinus Torvalds 2017cf222b37SAlasdair G Kergon dm_table_event_callback(t, event_callback, md); 20182ca3310eSAlasdair G Kergon 2019e6ee8c0bSKiyoshi Ueda /* 2020e6ee8c0bSKiyoshi Ueda * The queue hasn't been stopped yet, if the old table type wasn't 2021e6ee8c0bSKiyoshi Ueda * for request-based during suspension. So stop it to prevent 2022e6ee8c0bSKiyoshi Ueda * I/O mapping before resume. 2023e6ee8c0bSKiyoshi Ueda * This must be done before setting the queue restrictions, 2024e6ee8c0bSKiyoshi Ueda * because request-based dm may be run just after the setting. 2025e6ee8c0bSKiyoshi Ueda */ 2026978e51baSMike Snitzer if (request_based) 2027eca7ee6dSMike Snitzer dm_stop_queue(q); 2028978e51baSMike Snitzer 2029978e51baSMike Snitzer if (request_based || md->type == DM_TYPE_NVME_BIO_BASED) { 203016f12266SMike Snitzer /* 2031978e51baSMike Snitzer * Leverage the fact that request-based DM targets and 2032978e51baSMike Snitzer * NVMe bio based targets are immutable singletons 2033978e51baSMike Snitzer * - used to optimize both dm_request_fn and dm_mq_queue_rq; 2034978e51baSMike Snitzer * and __process_bio. 203516f12266SMike Snitzer */ 203616f12266SMike Snitzer md->immutable_target = dm_table_get_immutable_target(t); 203716f12266SMike Snitzer } 2038e6ee8c0bSKiyoshi Ueda 2039e6ee8c0bSKiyoshi Ueda __bind_mempools(md, t); 2040e6ee8c0bSKiyoshi Ueda 2041a12f5d48SEric Dumazet old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 20421d3aa6f6SMike Snitzer rcu_assign_pointer(md->map, (void *)t); 204336a0456fSAlasdair G Kergon md->immutable_target_type = dm_table_get_immutable_target_type(t); 204436a0456fSAlasdair G Kergon 2045754c5fc7SMike Snitzer dm_table_set_restrictions(t, q, limits); 204641abc4e1SHannes Reinecke if (old_map) 204783d5e5b0SMikulas Patocka dm_sync_table(md); 20482ca3310eSAlasdair G Kergon 2049042d2a9bSAlasdair G Kergon return old_map; 20501da177e4SLinus Torvalds } 20511da177e4SLinus Torvalds 2052a7940155SAlasdair G Kergon /* 2053a7940155SAlasdair G Kergon * Returns unbound table for the caller to free. 
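 * May return NULL if nothing was bound. Clears the event callback,
 * NULLs md->map and waits out in-flight readers with dm_sync_table().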
2054a7940155SAlasdair G Kergon */ 2055a7940155SAlasdair G Kergon static struct dm_table *__unbind(struct mapped_device *md) 20561da177e4SLinus Torvalds { 2057a12f5d48SEric Dumazet struct dm_table *map = rcu_dereference_protected(md->map, 1); 20581da177e4SLinus Torvalds 20591da177e4SLinus Torvalds if (!map) 2060a7940155SAlasdair G Kergon return NULL; 20611da177e4SLinus Torvalds 20621da177e4SLinus Torvalds dm_table_event_callback(map, NULL, NULL); 20639cdb8520SMonam Agarwal RCU_INIT_POINTER(md->map, NULL); 206483d5e5b0SMikulas Patocka dm_sync_table(md); 2065a7940155SAlasdair G Kergon 2066a7940155SAlasdair G Kergon return map; 20671da177e4SLinus Torvalds } 20681da177e4SLinus Torvalds 20691da177e4SLinus Torvalds /* 20701da177e4SLinus Torvalds * Constructor for a new device. 20711da177e4SLinus Torvalds */ 20722b06cfffSAlasdair G Kergon int dm_create(int minor, struct mapped_device **result) 20731da177e4SLinus Torvalds { 2074*c12c9a3cSMike Snitzer int r; 20751da177e4SLinus Torvalds struct mapped_device *md; 20761da177e4SLinus Torvalds 20772b06cfffSAlasdair G Kergon md = alloc_dev(minor); 20781da177e4SLinus Torvalds if (!md) 20791da177e4SLinus Torvalds return -ENXIO; 20801da177e4SLinus Torvalds 2081*c12c9a3cSMike Snitzer r = dm_sysfs_init(md); 2082*c12c9a3cSMike Snitzer if (r) { 2083*c12c9a3cSMike Snitzer free_dev(md); 2084*c12c9a3cSMike Snitzer return r; 2085*c12c9a3cSMike Snitzer } 2086784aae73SMilan Broz 20871da177e4SLinus Torvalds *result = md; 20881da177e4SLinus Torvalds return 0; 20891da177e4SLinus Torvalds } 20901da177e4SLinus Torvalds 2091a5664dadSMike Snitzer /* 2092a5664dadSMike Snitzer * Functions to manage md->type. 2093a5664dadSMike Snitzer * All are required to hold md->type_lock. 2094a5664dadSMike Snitzer */ 2095a5664dadSMike Snitzer void dm_lock_md_type(struct mapped_device *md) 2096a5664dadSMike Snitzer { 2097a5664dadSMike Snitzer mutex_lock(&md->type_lock); 2098a5664dadSMike Snitzer } 2099a5664dadSMike Snitzer 2100a5664dadSMike Snitzer void dm_unlock_md_type(struct mapped_device *md) 2101a5664dadSMike Snitzer { 2102a5664dadSMike Snitzer mutex_unlock(&md->type_lock); 2103a5664dadSMike Snitzer } 2104a5664dadSMike Snitzer 21057e0d574fSBart Van Assche void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type) 2106a5664dadSMike Snitzer { 210700c4fc3bSMike Snitzer BUG_ON(!mutex_is_locked(&md->type_lock)); 2108a5664dadSMike Snitzer md->type = type; 2109a5664dadSMike Snitzer } 2110a5664dadSMike Snitzer 21117e0d574fSBart Van Assche enum dm_queue_mode dm_get_md_type(struct mapped_device *md) 2112a5664dadSMike Snitzer { 2113a5664dadSMike Snitzer return md->type; 2114a5664dadSMike Snitzer } 2115a5664dadSMike Snitzer 211636a0456fSAlasdair G Kergon struct target_type *dm_get_immutable_target_type(struct mapped_device *md) 211736a0456fSAlasdair G Kergon { 211836a0456fSAlasdair G Kergon return md->immutable_target_type; 211936a0456fSAlasdair G Kergon } 212036a0456fSAlasdair G Kergon 21214a0b4ddfSMike Snitzer /* 2122f84cb8a4SMike Snitzer * The queue_limits are only valid as long as you have a reference 2123f84cb8a4SMike Snitzer * count on 'md'. 
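 *
 * Illustrative caller pattern (any pinned reference works; dm_get()
 * shown here):
 *
 *	struct queue_limits *lim;
 *
 *	dm_get(md);
 *	lim = dm_get_queue_limits(md);
 *	... use lim->max_sectors, lim->logical_block_size, ...
 *	dm_put(md);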
2124f84cb8a4SMike Snitzer */ 2125f84cb8a4SMike Snitzer struct queue_limits *dm_get_queue_limits(struct mapped_device *md) 2126f84cb8a4SMike Snitzer { 2127f84cb8a4SMike Snitzer BUG_ON(!atomic_read(&md->holders)); 2128f84cb8a4SMike Snitzer return &md->queue->limits; 2129f84cb8a4SMike Snitzer } 2130f84cb8a4SMike Snitzer EXPORT_SYMBOL_GPL(dm_get_queue_limits); 2131f84cb8a4SMike Snitzer 21324a0b4ddfSMike Snitzer /* 21334a0b4ddfSMike Snitzer * Setup the DM device's queue based on md's type 21344a0b4ddfSMike Snitzer */ 2135591ddcfcSMike Snitzer int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) 21364a0b4ddfSMike Snitzer { 2137bfebd1cdSMike Snitzer int r; 21387e0d574fSBart Van Assche enum dm_queue_mode type = dm_get_md_type(md); 2139bfebd1cdSMike Snitzer 2140545ed20eSToshi Kani switch (type) { 2141bfebd1cdSMike Snitzer case DM_TYPE_REQUEST_BASED: 2142*c12c9a3cSMike Snitzer dm_init_normal_md_queue(md); 2143eb8db831SChristoph Hellwig r = dm_old_init_request_queue(md, t); 2144bfebd1cdSMike Snitzer if (r) { 2145eca7ee6dSMike Snitzer DMERR("Cannot initialize queue for request-based mapped device"); 2146bfebd1cdSMike Snitzer return r; 21474a0b4ddfSMike Snitzer } 2148bfebd1cdSMike Snitzer break; 2149bfebd1cdSMike Snitzer case DM_TYPE_MQ_REQUEST_BASED: 2150e83068a5SMike Snitzer r = dm_mq_init_request_queue(md, t); 2151bfebd1cdSMike Snitzer if (r) { 2152eca7ee6dSMike Snitzer DMERR("Cannot initialize queue for request-based dm-mq mapped device"); 2153bfebd1cdSMike Snitzer return r; 2154bfebd1cdSMike Snitzer } 2155bfebd1cdSMike Snitzer break; 2156bfebd1cdSMike Snitzer case DM_TYPE_BIO_BASED: 2157545ed20eSToshi Kani case DM_TYPE_DAX_BIO_BASED: 2158eca7ee6dSMike Snitzer dm_init_normal_md_queue(md); 2159ff36ab34SMike Snitzer blk_queue_make_request(md->queue, dm_make_request); 2160bfebd1cdSMike Snitzer break; 2161978e51baSMike Snitzer case DM_TYPE_NVME_BIO_BASED: 2162978e51baSMike Snitzer dm_init_normal_md_queue(md); 2163978e51baSMike Snitzer blk_queue_make_request(md->queue, dm_make_request_nvme); 2164978e51baSMike Snitzer break; 21657e0d574fSBart Van Assche case DM_TYPE_NONE: 21667e0d574fSBart Van Assche WARN_ON_ONCE(true); 21677e0d574fSBart Van Assche break; 2168ff36ab34SMike Snitzer } 21694a0b4ddfSMike Snitzer 21704a0b4ddfSMike Snitzer return 0; 21714a0b4ddfSMike Snitzer } 21724a0b4ddfSMike Snitzer 21732bec1f4aSMikulas Patocka struct mapped_device *dm_get_md(dev_t dev) 21741da177e4SLinus Torvalds { 21751da177e4SLinus Torvalds struct mapped_device *md; 21761da177e4SLinus Torvalds unsigned minor = MINOR(dev); 21771da177e4SLinus Torvalds 21781da177e4SLinus Torvalds if (MAJOR(dev) != _major || minor >= (1 << MINORBITS)) 21791da177e4SLinus Torvalds return NULL; 21801da177e4SLinus Torvalds 2181f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 21821da177e4SLinus Torvalds 21831da177e4SLinus Torvalds md = idr_find(&_minor_idr, minor); 218449de5769SMike Snitzer if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) || 218549de5769SMike Snitzer test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { 2186637842cfSDavid Teigland md = NULL; 2187fba9f90eSJeff Mahoney goto out; 2188fba9f90eSJeff Mahoney } 21892bec1f4aSMikulas Patocka dm_get(md); 2190fba9f90eSJeff Mahoney out: 2191f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 21921da177e4SLinus Torvalds 2193637842cfSDavid Teigland return md; 2194637842cfSDavid Teigland } 21953cf2e4baSAlasdair G Kergon EXPORT_SYMBOL_GPL(dm_get_md); 2196d229a958SDavid Teigland 21979ade92a9SAlasdair G Kergon void *dm_get_mdptr(struct mapped_device *md) 
2198637842cfSDavid Teigland { 21999ade92a9SAlasdair G Kergon return md->interface_ptr; 22001da177e4SLinus Torvalds } 22011da177e4SLinus Torvalds 22021da177e4SLinus Torvalds void dm_set_mdptr(struct mapped_device *md, void *ptr) 22031da177e4SLinus Torvalds { 22041da177e4SLinus Torvalds md->interface_ptr = ptr; 22051da177e4SLinus Torvalds } 22061da177e4SLinus Torvalds 22071da177e4SLinus Torvalds void dm_get(struct mapped_device *md) 22081da177e4SLinus Torvalds { 22091da177e4SLinus Torvalds atomic_inc(&md->holders); 22103f77316dSKiyoshi Ueda BUG_ON(test_bit(DMF_FREEING, &md->flags)); 22111da177e4SLinus Torvalds } 22121da177e4SLinus Torvalds 221309ee96b2SMikulas Patocka int dm_hold(struct mapped_device *md) 221409ee96b2SMikulas Patocka { 221509ee96b2SMikulas Patocka spin_lock(&_minor_lock); 221609ee96b2SMikulas Patocka if (test_bit(DMF_FREEING, &md->flags)) { 221709ee96b2SMikulas Patocka spin_unlock(&_minor_lock); 221809ee96b2SMikulas Patocka return -EBUSY; 221909ee96b2SMikulas Patocka } 222009ee96b2SMikulas Patocka dm_get(md); 222109ee96b2SMikulas Patocka spin_unlock(&_minor_lock); 222209ee96b2SMikulas Patocka return 0; 222309ee96b2SMikulas Patocka } 222409ee96b2SMikulas Patocka EXPORT_SYMBOL_GPL(dm_hold); 222509ee96b2SMikulas Patocka 222672d94861SAlasdair G Kergon const char *dm_device_name(struct mapped_device *md) 222772d94861SAlasdair G Kergon { 222872d94861SAlasdair G Kergon return md->name; 222972d94861SAlasdair G Kergon } 223072d94861SAlasdair G Kergon EXPORT_SYMBOL_GPL(dm_device_name); 223172d94861SAlasdair G Kergon 22323f77316dSKiyoshi Ueda static void __dm_destroy(struct mapped_device *md, bool wait) 22331da177e4SLinus Torvalds { 22341134e5aeSMike Anderson struct dm_table *map; 223583d5e5b0SMikulas Patocka int srcu_idx; 22361da177e4SLinus Torvalds 22373f77316dSKiyoshi Ueda might_sleep(); 2238fba9f90eSJeff Mahoney 223963a4f065SMike Snitzer spin_lock(&_minor_lock); 22403f77316dSKiyoshi Ueda idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); 2241fba9f90eSJeff Mahoney set_bit(DMF_FREEING, &md->flags); 2242f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 22433f77316dSKiyoshi Ueda 2244*c12c9a3cSMike Snitzer blk_set_queue_dying(md->queue); 22453b785fbcSBart Van Assche 224602233342SMike Snitzer if (dm_request_based(md) && md->kworker_task) 22473989144fSPetr Mladek kthread_flush_worker(&md->kworker); 22482eb6e1e3SKeith Busch 2249ab7c7bb6SMikulas Patocka /* 2250ab7c7bb6SMikulas Patocka * Take suspend_lock so that presuspend and postsuspend methods 2251ab7c7bb6SMikulas Patocka * do not race with internal suspend. 2252ab7c7bb6SMikulas Patocka */ 2253ab7c7bb6SMikulas Patocka mutex_lock(&md->suspend_lock); 22542a708cffSJunichi Nomura map = dm_get_live_table(md, &srcu_idx); 22554f186f8bSKiyoshi Ueda if (!dm_suspended_md(md)) { 22561da177e4SLinus Torvalds dm_table_presuspend_targets(map); 22571da177e4SLinus Torvalds dm_table_postsuspend_targets(map); 22581da177e4SLinus Torvalds } 225983d5e5b0SMikulas Patocka /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ 226083d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 22612a708cffSJunichi Nomura mutex_unlock(&md->suspend_lock); 226283d5e5b0SMikulas Patocka 22633f77316dSKiyoshi Ueda /* 22643f77316dSKiyoshi Ueda * Rare, but there may be I/O requests still going to complete, 22653f77316dSKiyoshi Ueda * for example. Wait for all references to disappear. 
22663f77316dSKiyoshi Ueda * No one should increment the reference count of the mapped_device, 22673f77316dSKiyoshi Ueda * after the mapped_device state becomes DMF_FREEING. 22683f77316dSKiyoshi Ueda */ 22693f77316dSKiyoshi Ueda if (wait) 22703f77316dSKiyoshi Ueda while (atomic_read(&md->holders)) 22713f77316dSKiyoshi Ueda msleep(1); 22723f77316dSKiyoshi Ueda else if (atomic_read(&md->holders)) 22733f77316dSKiyoshi Ueda DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)", 22743f77316dSKiyoshi Ueda dm_device_name(md), atomic_read(&md->holders)); 22753f77316dSKiyoshi Ueda 2276784aae73SMilan Broz dm_sysfs_exit(md); 2277a7940155SAlasdair G Kergon dm_table_destroy(__unbind(md)); 22781da177e4SLinus Torvalds free_dev(md); 22791da177e4SLinus Torvalds } 22803f77316dSKiyoshi Ueda 22813f77316dSKiyoshi Ueda void dm_destroy(struct mapped_device *md) 22823f77316dSKiyoshi Ueda { 22833f77316dSKiyoshi Ueda __dm_destroy(md, true); 22843f77316dSKiyoshi Ueda } 22853f77316dSKiyoshi Ueda 22863f77316dSKiyoshi Ueda void dm_destroy_immediate(struct mapped_device *md) 22873f77316dSKiyoshi Ueda { 22883f77316dSKiyoshi Ueda __dm_destroy(md, false); 22893f77316dSKiyoshi Ueda } 22903f77316dSKiyoshi Ueda 22913f77316dSKiyoshi Ueda void dm_put(struct mapped_device *md) 22923f77316dSKiyoshi Ueda { 22933f77316dSKiyoshi Ueda atomic_dec(&md->holders); 22941da177e4SLinus Torvalds } 229579eb885cSEdward Goggin EXPORT_SYMBOL_GPL(dm_put); 22961da177e4SLinus Torvalds 2297b48633f8SBart Van Assche static int dm_wait_for_completion(struct mapped_device *md, long task_state) 229846125c1cSMilan Broz { 229946125c1cSMilan Broz int r = 0; 23009f4c3f87SBart Van Assche DEFINE_WAIT(wait); 230146125c1cSMilan Broz 230246125c1cSMilan Broz while (1) { 23039f4c3f87SBart Van Assche prepare_to_wait(&md->wait, &wait, task_state); 230446125c1cSMilan Broz 2305b4324feeSKiyoshi Ueda if (!md_in_flight(md)) 230646125c1cSMilan Broz break; 230746125c1cSMilan Broz 2308e3fabdfdSBart Van Assche if (signal_pending_state(task_state, current)) { 230946125c1cSMilan Broz r = -EINTR; 231046125c1cSMilan Broz break; 231146125c1cSMilan Broz } 231246125c1cSMilan Broz 231346125c1cSMilan Broz io_schedule(); 231446125c1cSMilan Broz } 23159f4c3f87SBart Van Assche finish_wait(&md->wait, &wait); 2316b44ebeb0SMikulas Patocka 231746125c1cSMilan Broz return r; 231846125c1cSMilan Broz } 231946125c1cSMilan Broz 23201da177e4SLinus Torvalds /* 23211da177e4SLinus Torvalds * Process the deferred bios 23221da177e4SLinus Torvalds */ 2323ef208587SMikulas Patocka static void dm_wq_work(struct work_struct *work) 23241da177e4SLinus Torvalds { 2325ef208587SMikulas Patocka struct mapped_device *md = container_of(work, struct mapped_device, 2326ef208587SMikulas Patocka work); 23276d6f10dfSMilan Broz struct bio *c; 232883d5e5b0SMikulas Patocka int srcu_idx; 232983d5e5b0SMikulas Patocka struct dm_table *map; 23301da177e4SLinus Torvalds 233183d5e5b0SMikulas Patocka map = dm_get_live_table(md, &srcu_idx); 2332ef208587SMikulas Patocka 23333b00b203SMikulas Patocka while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 2334022c2611SMikulas Patocka spin_lock_irq(&md->deferred_lock); 2335022c2611SMikulas Patocka c = bio_list_pop(&md->deferred); 2336022c2611SMikulas Patocka spin_unlock_irq(&md->deferred_lock); 2337022c2611SMikulas Patocka 23386a8736d1STejun Heo if (!c) 2339df12ee99SAlasdair G Kergon break; 234073d410c0SMilan Broz 2341e6ee8c0bSKiyoshi Ueda if (dm_request_based(md)) 2342e6ee8c0bSKiyoshi Ueda generic_make_request(c); 2343af7e466aSMikulas Patocka else 234483d5e5b0SMikulas 
Patocka __split_and_process_bio(md, map, c); 2345e6ee8c0bSKiyoshi Ueda } 23463b00b203SMikulas Patocka 234783d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 23481da177e4SLinus Torvalds } 23491da177e4SLinus Torvalds 23509a1fb464SMikulas Patocka static void dm_queue_flush(struct mapped_device *md) 2351304f3f6aSMilan Broz { 23523b00b203SMikulas Patocka clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 23534e857c58SPeter Zijlstra smp_mb__after_atomic(); 235453d5914fSMikulas Patocka queue_work(md->wq, &md->work); 2355304f3f6aSMilan Broz } 2356304f3f6aSMilan Broz 23571da177e4SLinus Torvalds /* 2358042d2a9bSAlasdair G Kergon * Swap in a new table, returning the old one for the caller to destroy. 23591da177e4SLinus Torvalds */ 2360042d2a9bSAlasdair G Kergon struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) 23611da177e4SLinus Torvalds { 236287eb5b21SMike Christie struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL); 2363754c5fc7SMike Snitzer struct queue_limits limits; 2364042d2a9bSAlasdair G Kergon int r; 23651da177e4SLinus Torvalds 2366e61290a4SDaniel Walker mutex_lock(&md->suspend_lock); 23671da177e4SLinus Torvalds 23681da177e4SLinus Torvalds /* device must be suspended */ 23694f186f8bSKiyoshi Ueda if (!dm_suspended_md(md)) 237093c534aeSAlasdair G Kergon goto out; 23711da177e4SLinus Torvalds 23723ae70656SMike Snitzer /* 23733ae70656SMike Snitzer * If the new table has no data devices, retain the existing limits. 23743ae70656SMike Snitzer * This helps multipath with queue_if_no_path if all paths disappear, 23753ae70656SMike Snitzer * then new I/O is queued based on these limits, and then some paths 23763ae70656SMike Snitzer * reappear. 23773ae70656SMike Snitzer */ 23783ae70656SMike Snitzer if (dm_table_has_no_data_devices(table)) { 237983d5e5b0SMikulas Patocka live_map = dm_get_live_table_fast(md); 23803ae70656SMike Snitzer if (live_map) 23813ae70656SMike Snitzer limits = md->queue->limits; 238283d5e5b0SMikulas Patocka dm_put_live_table_fast(md); 23833ae70656SMike Snitzer } 23843ae70656SMike Snitzer 238587eb5b21SMike Christie if (!live_map) { 2386754c5fc7SMike Snitzer r = dm_calculate_queue_limits(table, &limits); 2387042d2a9bSAlasdair G Kergon if (r) { 2388042d2a9bSAlasdair G Kergon map = ERR_PTR(r); 2389754c5fc7SMike Snitzer goto out; 2390042d2a9bSAlasdair G Kergon } 239187eb5b21SMike Christie } 2392754c5fc7SMike Snitzer 2393042d2a9bSAlasdair G Kergon map = __bind(md, table, &limits); 239462e08243SMikulas Patocka dm_issue_global_event(); 23951da177e4SLinus Torvalds 239693c534aeSAlasdair G Kergon out: 2397e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock); 2398042d2a9bSAlasdair G Kergon return map; 23991da177e4SLinus Torvalds } 24001da177e4SLinus Torvalds 24011da177e4SLinus Torvalds /* 24021da177e4SLinus Torvalds * Functions to lock and unlock any filesystem running on the 24031da177e4SLinus Torvalds * device. 
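 *
 * lock_fs() freezes the underlying bdev via freeze_bdev() and records
 * DMF_FROZEN; unlock_fs() thaws it and is a no-op if DMF_FROZEN was
 * never set.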
24041da177e4SLinus Torvalds */ 24052ca3310eSAlasdair G Kergon static int lock_fs(struct mapped_device *md) 24061da177e4SLinus Torvalds { 2407e39e2e95SAlasdair G Kergon int r; 24081da177e4SLinus Torvalds 24091da177e4SLinus Torvalds WARN_ON(md->frozen_sb); 2410dfbe03f6SAlasdair G Kergon 2411db8fef4fSMikulas Patocka md->frozen_sb = freeze_bdev(md->bdev); 2412dfbe03f6SAlasdair G Kergon if (IS_ERR(md->frozen_sb)) { 2413cf222b37SAlasdair G Kergon r = PTR_ERR(md->frozen_sb); 2414e39e2e95SAlasdair G Kergon md->frozen_sb = NULL; 2415e39e2e95SAlasdair G Kergon return r; 2416dfbe03f6SAlasdair G Kergon } 2417dfbe03f6SAlasdair G Kergon 2418aa8d7c2fSAlasdair G Kergon set_bit(DMF_FROZEN, &md->flags); 2419aa8d7c2fSAlasdair G Kergon 24201da177e4SLinus Torvalds return 0; 24211da177e4SLinus Torvalds } 24221da177e4SLinus Torvalds 24232ca3310eSAlasdair G Kergon static void unlock_fs(struct mapped_device *md) 24241da177e4SLinus Torvalds { 2425aa8d7c2fSAlasdair G Kergon if (!test_bit(DMF_FROZEN, &md->flags)) 2426aa8d7c2fSAlasdair G Kergon return; 2427aa8d7c2fSAlasdair G Kergon 2428db8fef4fSMikulas Patocka thaw_bdev(md->bdev, md->frozen_sb); 24291da177e4SLinus Torvalds md->frozen_sb = NULL; 2430aa8d7c2fSAlasdair G Kergon clear_bit(DMF_FROZEN, &md->flags); 24311da177e4SLinus Torvalds } 24321da177e4SLinus Torvalds 24331da177e4SLinus Torvalds /* 2434b48633f8SBart Van Assche * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG 2435b48633f8SBart Van Assche * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE 2436b48633f8SBart Van Assche * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY 2437b48633f8SBart Van Assche * 2438ffcc3936SMike Snitzer * If __dm_suspend returns 0, the device is completely quiescent 2439ffcc3936SMike Snitzer * now. There is no request-processing activity. All new requests 2440ffcc3936SMike Snitzer * are being added to md->deferred list. 2441cec47e3dSKiyoshi Ueda */ 2442ffcc3936SMike Snitzer static int __dm_suspend(struct mapped_device *md, struct dm_table *map, 2443b48633f8SBart Van Assche unsigned suspend_flags, long task_state, 2444eaf9a736SMike Snitzer int dmf_suspended_flag) 24451da177e4SLinus Torvalds { 2446ffcc3936SMike Snitzer bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG; 2447ffcc3936SMike Snitzer bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG; 2448ffcc3936SMike Snitzer int r; 2449cf222b37SAlasdair G Kergon 24505a8f1f80SBart Van Assche lockdep_assert_held(&md->suspend_lock); 24515a8f1f80SBart Van Assche 24522e93ccc1SKiyoshi Ueda /* 24532e93ccc1SKiyoshi Ueda * DMF_NOFLUSH_SUSPENDING must be set before presuspend. 24542e93ccc1SKiyoshi Ueda * This flag is cleared before dm_suspend returns. 24552e93ccc1SKiyoshi Ueda */ 24562e93ccc1SKiyoshi Ueda if (noflush) 24572e93ccc1SKiyoshi Ueda set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 245886331f39SBart Van Assche else 245986331f39SBart Van Assche pr_debug("%s: suspending with flush\n", dm_device_name(md)); 24602e93ccc1SKiyoshi Ueda 2461d67ee213SMike Snitzer /* 2462d67ee213SMike Snitzer * This gets reverted if there's an error later and the targets 2463d67ee213SMike Snitzer * provide the .presuspend_undo hook. 2464d67ee213SMike Snitzer */ 24651da177e4SLinus Torvalds dm_table_presuspend_targets(map); 24661da177e4SLinus Torvalds 24672e93ccc1SKiyoshi Ueda /* 24689f518b27SKiyoshi Ueda * Flush I/O to the device. 24699f518b27SKiyoshi Ueda * Any I/O submitted after lock_fs() may not be flushed. 24709f518b27SKiyoshi Ueda * noflush takes precedence over do_lockfs. 
24719f518b27SKiyoshi Ueda * (lock_fs() flushes I/Os and waits for them to complete.) 24722e93ccc1SKiyoshi Ueda */ 247332a926daSMikulas Patocka if (!noflush && do_lockfs) { 24742ca3310eSAlasdair G Kergon r = lock_fs(md); 2475d67ee213SMike Snitzer if (r) { 2476d67ee213SMike Snitzer dm_table_presuspend_undo_targets(map); 2477ffcc3936SMike Snitzer return r; 2478aa8d7c2fSAlasdair G Kergon } 2479d67ee213SMike Snitzer } 24801da177e4SLinus Torvalds 24811da177e4SLinus Torvalds /* 24823b00b203SMikulas Patocka * Here we must make sure that no processes are submitting requests 24833b00b203SMikulas Patocka * to target drivers i.e. no one may be executing 24843b00b203SMikulas Patocka * __split_and_process_bio. This is called from dm_request and 24853b00b203SMikulas Patocka * dm_wq_work. 24863b00b203SMikulas Patocka * 24873b00b203SMikulas Patocka * To get all processes out of __split_and_process_bio in dm_request, 24883b00b203SMikulas Patocka * we take the write lock. To prevent any process from reentering 24896a8736d1STejun Heo * __split_and_process_bio from dm_request and quiesce the thread 24906a8736d1STejun Heo * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call 24916a8736d1STejun Heo * flush_workqueue(md->wq). 24921da177e4SLinus Torvalds */ 24931eb787ecSAlasdair G Kergon set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 249441abc4e1SHannes Reinecke if (map) 249583d5e5b0SMikulas Patocka synchronize_srcu(&md->io_barrier); 24961da177e4SLinus Torvalds 2497d0bcb878SKiyoshi Ueda /* 249829e4013dSTejun Heo * Stop md->queue before flushing md->wq in case request-based 249929e4013dSTejun Heo * dm defers requests to md->wq from md->queue. 2500d0bcb878SKiyoshi Ueda */ 25012eb6e1e3SKeith Busch if (dm_request_based(md)) { 2502eca7ee6dSMike Snitzer dm_stop_queue(md->queue); 250302233342SMike Snitzer if (md->kworker_task) 25043989144fSPetr Mladek kthread_flush_worker(&md->kworker); 25052eb6e1e3SKeith Busch } 2506cec47e3dSKiyoshi Ueda 2507d0bcb878SKiyoshi Ueda flush_workqueue(md->wq); 2508d0bcb878SKiyoshi Ueda 25091da177e4SLinus Torvalds /* 25103b00b203SMikulas Patocka * At this point no more requests are entering target request routines. 25113b00b203SMikulas Patocka * We call dm_wait_for_completion to wait for all existing requests 25123b00b203SMikulas Patocka * to finish. 25131da177e4SLinus Torvalds */ 2514b48633f8SBart Van Assche r = dm_wait_for_completion(md, task_state); 2515eaf9a736SMike Snitzer if (!r) 2516eaf9a736SMike Snitzer set_bit(dmf_suspended_flag, &md->flags); 25171da177e4SLinus Torvalds 25186d6f10dfSMilan Broz if (noflush) 2519022c2611SMikulas Patocka clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 252041abc4e1SHannes Reinecke if (map) 252183d5e5b0SMikulas Patocka synchronize_srcu(&md->io_barrier); 25222e93ccc1SKiyoshi Ueda 25231da177e4SLinus Torvalds /* were we interrupted ? 
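* (i.e. did dm_wait_for_completion() above return -EINTR?)  If so, the
* error path below undoes the partial suspend: dm_queue_flush() lets the
* deferred bios be reissued, the request queue is restarted for
* request-based dm, the filesystem is thawed and presuspend is undone.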
*/ 252446125c1cSMilan Broz if (r < 0) { 25259a1fb464SMikulas Patocka dm_queue_flush(md); 252673d410c0SMilan Broz 2527cec47e3dSKiyoshi Ueda if (dm_request_based(md)) 2528eca7ee6dSMike Snitzer dm_start_queue(md->queue); 2529cec47e3dSKiyoshi Ueda 25302ca3310eSAlasdair G Kergon unlock_fs(md); 2531d67ee213SMike Snitzer dm_table_presuspend_undo_targets(map); 2532ffcc3936SMike Snitzer /* pushback list is already flushed, so skip flush */ 2533ffcc3936SMike Snitzer } 2534ffcc3936SMike Snitzer 2535ffcc3936SMike Snitzer return r; 25362ca3310eSAlasdair G Kergon } 25372ca3310eSAlasdair G Kergon 25383b00b203SMikulas Patocka /* 2539ffcc3936SMike Snitzer * We need to be able to change a mapping table under a mounted 2540ffcc3936SMike Snitzer * filesystem. For example we might want to move some data in 2541ffcc3936SMike Snitzer * the background. Before the table can be swapped with 2542ffcc3936SMike Snitzer * dm_bind_table, dm_suspend must be called to flush any in 2543ffcc3936SMike Snitzer * flight bios and ensure that any further io gets deferred. 25443b00b203SMikulas Patocka */ 2545ffcc3936SMike Snitzer /* 2546ffcc3936SMike Snitzer * Suspend mechanism in request-based dm. 2547ffcc3936SMike Snitzer * 2548ffcc3936SMike Snitzer * 1. Flush all I/Os by lock_fs() if needed. 2549ffcc3936SMike Snitzer * 2. Stop dispatching any I/O by stopping the request_queue. 2550ffcc3936SMike Snitzer * 3. Wait for all in-flight I/Os to be completed or requeued. 2551ffcc3936SMike Snitzer * 2552ffcc3936SMike Snitzer * To abort suspend, start the request_queue. 2553ffcc3936SMike Snitzer */ 2554ffcc3936SMike Snitzer int dm_suspend(struct mapped_device *md, unsigned suspend_flags) 2555ffcc3936SMike Snitzer { 2556ffcc3936SMike Snitzer struct dm_table *map = NULL; 2557ffcc3936SMike Snitzer int r = 0; 2558ffcc3936SMike Snitzer 2559ffcc3936SMike Snitzer retry: 2560ffcc3936SMike Snitzer mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); 2561ffcc3936SMike Snitzer 2562ffcc3936SMike Snitzer if (dm_suspended_md(md)) { 2563ffcc3936SMike Snitzer r = -EINVAL; 2564ffcc3936SMike Snitzer goto out_unlock; 2565ffcc3936SMike Snitzer } 2566ffcc3936SMike Snitzer 2567ffcc3936SMike Snitzer if (dm_suspended_internally_md(md)) { 2568ffcc3936SMike Snitzer /* already internally suspended, wait for internal resume */ 2569ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 2570ffcc3936SMike Snitzer r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); 2571ffcc3936SMike Snitzer if (r) 2572ffcc3936SMike Snitzer return r; 2573ffcc3936SMike Snitzer goto retry; 2574ffcc3936SMike Snitzer } 2575ffcc3936SMike Snitzer 2576a12f5d48SEric Dumazet map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2577ffcc3936SMike Snitzer 2578eaf9a736SMike Snitzer r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED); 2579ffcc3936SMike Snitzer if (r) 2580ffcc3936SMike Snitzer goto out_unlock; 25813b00b203SMikulas Patocka 25824d4471cbSKiyoshi Ueda dm_table_postsuspend_targets(map); 25834d4471cbSKiyoshi Ueda 2584d287483dSAlasdair G Kergon out_unlock: 2585e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock); 2586cf222b37SAlasdair G Kergon return r; 25871da177e4SLinus Torvalds } 25881da177e4SLinus Torvalds 2589ffcc3936SMike Snitzer static int __dm_resume(struct mapped_device *md, struct dm_table *map) 25901da177e4SLinus Torvalds { 2591ffcc3936SMike Snitzer if (map) { 2592ffcc3936SMike Snitzer int r = dm_table_resume_targets(map); 25938757b776SMilan Broz if (r) 2594ffcc3936SMike Snitzer return r; 
2595ffcc3936SMike Snitzer } 25962ca3310eSAlasdair G Kergon 25979a1fb464SMikulas Patocka dm_queue_flush(md); 25982ca3310eSAlasdair G Kergon 2599cec47e3dSKiyoshi Ueda /* 2600cec47e3dSKiyoshi Ueda * Flushing deferred I/Os must be done after targets are resumed 2601cec47e3dSKiyoshi Ueda * so that mapping of targets can work correctly. 2602cec47e3dSKiyoshi Ueda * Request-based dm is queueing the deferred I/Os in its request_queue. 2603cec47e3dSKiyoshi Ueda */ 2604cec47e3dSKiyoshi Ueda if (dm_request_based(md)) 2605eca7ee6dSMike Snitzer dm_start_queue(md->queue); 2606cec47e3dSKiyoshi Ueda 26072ca3310eSAlasdair G Kergon unlock_fs(md); 26082ca3310eSAlasdair G Kergon 2609ffcc3936SMike Snitzer return 0; 2610ffcc3936SMike Snitzer } 2611ffcc3936SMike Snitzer 2612ffcc3936SMike Snitzer int dm_resume(struct mapped_device *md) 2613ffcc3936SMike Snitzer { 26148dc23658SMinfei Huang int r; 2615ffcc3936SMike Snitzer struct dm_table *map = NULL; 2616ffcc3936SMike Snitzer 2617ffcc3936SMike Snitzer retry: 26188dc23658SMinfei Huang r = -EINVAL; 2619ffcc3936SMike Snitzer mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); 2620ffcc3936SMike Snitzer 2621ffcc3936SMike Snitzer if (!dm_suspended_md(md)) 2622ffcc3936SMike Snitzer goto out; 2623ffcc3936SMike Snitzer 2624ffcc3936SMike Snitzer if (dm_suspended_internally_md(md)) { 2625ffcc3936SMike Snitzer /* already internally suspended, wait for internal resume */ 2626ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 2627ffcc3936SMike Snitzer r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); 2628ffcc3936SMike Snitzer if (r) 2629ffcc3936SMike Snitzer return r; 2630ffcc3936SMike Snitzer goto retry; 2631ffcc3936SMike Snitzer } 2632ffcc3936SMike Snitzer 2633a12f5d48SEric Dumazet map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2634ffcc3936SMike Snitzer if (!map || !dm_table_get_size(map)) 2635ffcc3936SMike Snitzer goto out; 2636ffcc3936SMike Snitzer 2637ffcc3936SMike Snitzer r = __dm_resume(md, map); 2638ffcc3936SMike Snitzer if (r) 2639ffcc3936SMike Snitzer goto out; 2640ffcc3936SMike Snitzer 26412ca3310eSAlasdair G Kergon clear_bit(DMF_SUSPENDED, &md->flags); 2642cf222b37SAlasdair G Kergon out: 2643e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock); 26442ca3310eSAlasdair G Kergon 2645cf222b37SAlasdair G Kergon return r; 26461da177e4SLinus Torvalds } 26471da177e4SLinus Torvalds 2648fd2ed4d2SMikulas Patocka /* 2649fd2ed4d2SMikulas Patocka * Internal suspend/resume works like userspace-driven suspend. It waits 2650fd2ed4d2SMikulas Patocka * until all bios finish and prevents issuing new bios to the target drivers. 2651fd2ed4d2SMikulas Patocka * It may be used only from the kernel. 
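*
* A minimal usage sketch (illustrative only, error handling omitted) for
* kernel code that already holds an md reference and must quiesce it:
*
*	dm_internal_suspend_noflush(md);
*	... no bios reach the targets here ...
*	dm_internal_resume(md);
*
* Internal suspends nest via md->internal_suspend_count, so every
* suspend must be paired with a matching resume.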
2652fd2ed4d2SMikulas Patocka */ 2653fd2ed4d2SMikulas Patocka 2654ffcc3936SMike Snitzer static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags) 2655ffcc3936SMike Snitzer { 2656ffcc3936SMike Snitzer struct dm_table *map = NULL; 2657ffcc3936SMike Snitzer 26581ea0654eSBart Van Assche lockdep_assert_held(&md->suspend_lock); 26591ea0654eSBart Van Assche 266096b26c8cSMikulas Patocka if (md->internal_suspend_count++) 2661ffcc3936SMike Snitzer return; /* nested internal suspend */ 2662ffcc3936SMike Snitzer 2663ffcc3936SMike Snitzer if (dm_suspended_md(md)) { 2664ffcc3936SMike Snitzer set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 2665ffcc3936SMike Snitzer return; /* nest suspend */ 2666ffcc3936SMike Snitzer } 2667ffcc3936SMike Snitzer 2668a12f5d48SEric Dumazet map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2669ffcc3936SMike Snitzer 2670ffcc3936SMike Snitzer /* 2671ffcc3936SMike Snitzer * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is 2672ffcc3936SMike Snitzer * supported. Properly supporting a TASK_INTERRUPTIBLE internal suspend 2673ffcc3936SMike Snitzer * would require changing .presuspend to return an error -- avoid this 2674ffcc3936SMike Snitzer * until there is a need for more elaborate variants of internal suspend. 2675ffcc3936SMike Snitzer */ 2676eaf9a736SMike Snitzer (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE, 2677eaf9a736SMike Snitzer DMF_SUSPENDED_INTERNALLY); 2678ffcc3936SMike Snitzer 2679ffcc3936SMike Snitzer dm_table_postsuspend_targets(map); 2680ffcc3936SMike Snitzer } 2681ffcc3936SMike Snitzer 2682ffcc3936SMike Snitzer static void __dm_internal_resume(struct mapped_device *md) 2683ffcc3936SMike Snitzer { 268496b26c8cSMikulas Patocka BUG_ON(!md->internal_suspend_count); 268596b26c8cSMikulas Patocka 268696b26c8cSMikulas Patocka if (--md->internal_suspend_count) 2687ffcc3936SMike Snitzer return; /* resume from nested internal suspend */ 2688ffcc3936SMike Snitzer 2689ffcc3936SMike Snitzer if (dm_suspended_md(md)) 2690ffcc3936SMike Snitzer goto done; /* resume from nested suspend */ 2691ffcc3936SMike Snitzer 2692ffcc3936SMike Snitzer /* 2693ffcc3936SMike Snitzer * NOTE: existing callers don't need to call dm_table_resume_targets 2694ffcc3936SMike Snitzer * (which may fail -- so best to avoid it for now by passing NULL map) 2695ffcc3936SMike Snitzer */ 2696ffcc3936SMike Snitzer (void) __dm_resume(md, NULL); 2697ffcc3936SMike Snitzer 2698ffcc3936SMike Snitzer done: 2699ffcc3936SMike Snitzer clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 2700ffcc3936SMike Snitzer smp_mb__after_atomic(); 2701ffcc3936SMike Snitzer wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY); 2702ffcc3936SMike Snitzer } 2703ffcc3936SMike Snitzer 2704ffcc3936SMike Snitzer void dm_internal_suspend_noflush(struct mapped_device *md) 2705fd2ed4d2SMikulas Patocka { 2706fd2ed4d2SMikulas Patocka mutex_lock(&md->suspend_lock); 2707ffcc3936SMike Snitzer __dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG); 2708ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 2709ffcc3936SMike Snitzer } 2710ffcc3936SMike Snitzer EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush); 2711ffcc3936SMike Snitzer 2712ffcc3936SMike Snitzer void dm_internal_resume(struct mapped_device *md) 2713ffcc3936SMike Snitzer { 2714ffcc3936SMike Snitzer mutex_lock(&md->suspend_lock); 2715ffcc3936SMike Snitzer __dm_internal_resume(md); 2716ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 2717ffcc3936SMike Snitzer } 2718ffcc3936SMike Snitzer 
EXPORT_SYMBOL_GPL(dm_internal_resume); 2719ffcc3936SMike Snitzer 2720ffcc3936SMike Snitzer /* 2721ffcc3936SMike Snitzer * Fast variants of internal suspend/resume hold md->suspend_lock, 2722ffcc3936SMike Snitzer * which prevents interaction with userspace-driven suspend. 2723ffcc3936SMike Snitzer */ 2724ffcc3936SMike Snitzer 2725ffcc3936SMike Snitzer void dm_internal_suspend_fast(struct mapped_device *md) 2726ffcc3936SMike Snitzer { 2727ffcc3936SMike Snitzer mutex_lock(&md->suspend_lock); 2728ffcc3936SMike Snitzer if (dm_suspended_md(md) || dm_suspended_internally_md(md)) 2729fd2ed4d2SMikulas Patocka return; 2730fd2ed4d2SMikulas Patocka 2731fd2ed4d2SMikulas Patocka set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 2732fd2ed4d2SMikulas Patocka synchronize_srcu(&md->io_barrier); 2733fd2ed4d2SMikulas Patocka flush_workqueue(md->wq); 2734fd2ed4d2SMikulas Patocka dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); 2735fd2ed4d2SMikulas Patocka } 2736b735fedeSMikulas Patocka EXPORT_SYMBOL_GPL(dm_internal_suspend_fast); 2737fd2ed4d2SMikulas Patocka 2738ffcc3936SMike Snitzer void dm_internal_resume_fast(struct mapped_device *md) 2739fd2ed4d2SMikulas Patocka { 2740ffcc3936SMike Snitzer if (dm_suspended_md(md) || dm_suspended_internally_md(md)) 2741fd2ed4d2SMikulas Patocka goto done; 2742fd2ed4d2SMikulas Patocka 2743fd2ed4d2SMikulas Patocka dm_queue_flush(md); 2744fd2ed4d2SMikulas Patocka 2745fd2ed4d2SMikulas Patocka done: 2746fd2ed4d2SMikulas Patocka mutex_unlock(&md->suspend_lock); 2747fd2ed4d2SMikulas Patocka } 2748b735fedeSMikulas Patocka EXPORT_SYMBOL_GPL(dm_internal_resume_fast); 2749fd2ed4d2SMikulas Patocka 27501da177e4SLinus Torvalds /*----------------------------------------------------------------- 27511da177e4SLinus Torvalds * Event notification. 27521da177e4SLinus Torvalds *---------------------------------------------------------------*/ 27533abf85b5SPeter Rajnoha int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, 275460935eb2SMilan Broz unsigned cookie) 275569267a30SAlasdair G Kergon { 275660935eb2SMilan Broz char udev_cookie[DM_COOKIE_LENGTH]; 275760935eb2SMilan Broz char *envp[] = { udev_cookie, NULL }; 275860935eb2SMilan Broz 275960935eb2SMilan Broz if (!cookie) 27603abf85b5SPeter Rajnoha return kobject_uevent(&disk_to_dev(md->disk)->kobj, action); 276160935eb2SMilan Broz else { 276260935eb2SMilan Broz snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u", 276360935eb2SMilan Broz DM_COOKIE_ENV_VAR_NAME, cookie); 27643abf85b5SPeter Rajnoha return kobject_uevent_env(&disk_to_dev(md->disk)->kobj, 27653abf85b5SPeter Rajnoha action, envp); 276660935eb2SMilan Broz } 276769267a30SAlasdair G Kergon } 276869267a30SAlasdair G Kergon 27697a8c3d3bSMike Anderson uint32_t dm_next_uevent_seq(struct mapped_device *md) 27707a8c3d3bSMike Anderson { 27717a8c3d3bSMike Anderson return atomic_add_return(1, &md->uevent_seq); 27727a8c3d3bSMike Anderson } 27737a8c3d3bSMike Anderson 27741da177e4SLinus Torvalds uint32_t dm_get_event_nr(struct mapped_device *md) 27751da177e4SLinus Torvalds { 27761da177e4SLinus Torvalds return atomic_read(&md->event_nr); 27771da177e4SLinus Torvalds } 27781da177e4SLinus Torvalds 27791da177e4SLinus Torvalds int dm_wait_event(struct mapped_device *md, int event_nr) 27801da177e4SLinus Torvalds { 27811da177e4SLinus Torvalds return wait_event_interruptible(md->eventq, 27821da177e4SLinus Torvalds (event_nr != atomic_read(&md->event_nr))); 27831da177e4SLinus Torvalds } 27841da177e4SLinus Torvalds 27857a8c3d3bSMike Anderson void dm_uevent_add(struct mapped_device *md, 
struct list_head *elist) 27867a8c3d3bSMike Anderson { 27877a8c3d3bSMike Anderson unsigned long flags; 27887a8c3d3bSMike Anderson 27897a8c3d3bSMike Anderson spin_lock_irqsave(&md->uevent_lock, flags); 27907a8c3d3bSMike Anderson list_add(elist, &md->uevent_list); 27917a8c3d3bSMike Anderson spin_unlock_irqrestore(&md->uevent_lock, flags); 27927a8c3d3bSMike Anderson } 27937a8c3d3bSMike Anderson 27941da177e4SLinus Torvalds /* 27951da177e4SLinus Torvalds * The gendisk is only valid as long as you have a reference 27961da177e4SLinus Torvalds * count on 'md'. 27971da177e4SLinus Torvalds */ 27981da177e4SLinus Torvalds struct gendisk *dm_disk(struct mapped_device *md) 27991da177e4SLinus Torvalds { 28001da177e4SLinus Torvalds return md->disk; 28011da177e4SLinus Torvalds } 280265ff5b7dSSami Tolvanen EXPORT_SYMBOL_GPL(dm_disk); 28031da177e4SLinus Torvalds 2804784aae73SMilan Broz struct kobject *dm_kobject(struct mapped_device *md) 2805784aae73SMilan Broz { 28062995fa78SMikulas Patocka return &md->kobj_holder.kobj; 2807784aae73SMilan Broz } 2808784aae73SMilan Broz 2809784aae73SMilan Broz struct mapped_device *dm_get_from_kobject(struct kobject *kobj) 2810784aae73SMilan Broz { 2811784aae73SMilan Broz struct mapped_device *md; 2812784aae73SMilan Broz 28132995fa78SMikulas Patocka md = container_of(kobj, struct mapped_device, kobj_holder.kobj); 2814784aae73SMilan Broz 2815b9a41d21SHou Tao spin_lock(&_minor_lock); 2816b9a41d21SHou Tao if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { 2817b9a41d21SHou Tao md = NULL; 2818b9a41d21SHou Tao goto out; 2819b9a41d21SHou Tao } 2820784aae73SMilan Broz dm_get(md); 2821b9a41d21SHou Tao out: 2822b9a41d21SHou Tao spin_unlock(&_minor_lock); 2823b9a41d21SHou Tao 2824784aae73SMilan Broz return md; 2825784aae73SMilan Broz } 2826784aae73SMilan Broz 28274f186f8bSKiyoshi Ueda int dm_suspended_md(struct mapped_device *md) 28281da177e4SLinus Torvalds { 28291da177e4SLinus Torvalds return test_bit(DMF_SUSPENDED, &md->flags); 28301da177e4SLinus Torvalds } 28311da177e4SLinus Torvalds 2832ffcc3936SMike Snitzer int dm_suspended_internally_md(struct mapped_device *md) 2833ffcc3936SMike Snitzer { 2834ffcc3936SMike Snitzer return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 2835ffcc3936SMike Snitzer } 2836ffcc3936SMike Snitzer 28372c140a24SMikulas Patocka int dm_test_deferred_remove_flag(struct mapped_device *md) 28382c140a24SMikulas Patocka { 28392c140a24SMikulas Patocka return test_bit(DMF_DEFERRED_REMOVE, &md->flags); 28402c140a24SMikulas Patocka } 28412c140a24SMikulas Patocka 284264dbce58SKiyoshi Ueda int dm_suspended(struct dm_target *ti) 284364dbce58SKiyoshi Ueda { 2844ecdb2e25SKiyoshi Ueda return dm_suspended_md(dm_table_get_md(ti->table)); 284564dbce58SKiyoshi Ueda } 284664dbce58SKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_suspended); 284764dbce58SKiyoshi Ueda 28482e93ccc1SKiyoshi Ueda int dm_noflush_suspending(struct dm_target *ti) 28492e93ccc1SKiyoshi Ueda { 2850ecdb2e25SKiyoshi Ueda return __noflush_suspending(dm_table_get_md(ti->table)); 28512e93ccc1SKiyoshi Ueda } 28522e93ccc1SKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_noflush_suspending); 28532e93ccc1SKiyoshi Ueda 28547e0d574fSBart Van Assche struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type, 28550776aa0eSMike Snitzer unsigned integrity, unsigned per_io_data_size, 28560776aa0eSMike Snitzer unsigned min_pool_size) 2857e6ee8c0bSKiyoshi Ueda { 2858115485e8SMike Snitzer struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id); 285978d8e58aSMike Snitzer 
unsigned int pool_size = 0; 286064f52b0eSMike Snitzer unsigned int front_pad, io_front_pad; 2861e6ee8c0bSKiyoshi Ueda 2862e6ee8c0bSKiyoshi Ueda if (!pools) 28634e6e36c3SMike Snitzer return NULL; 2864e6ee8c0bSKiyoshi Ueda 286578d8e58aSMike Snitzer switch (type) { 286678d8e58aSMike Snitzer case DM_TYPE_BIO_BASED: 2867545ed20eSToshi Kani case DM_TYPE_DAX_BIO_BASED: 286822c11858SMike Snitzer case DM_TYPE_NVME_BIO_BASED: 28690776aa0eSMike Snitzer pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size); 287030187e1dSMike Snitzer front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone); 287164f52b0eSMike Snitzer io_front_pad = roundup(front_pad, __alignof__(struct dm_io)) + offsetof(struct dm_io, tio); 287264f52b0eSMike Snitzer pools->io_bs = bioset_create(pool_size, io_front_pad, 0); 287364f52b0eSMike Snitzer if (!pools->io_bs) 287464f52b0eSMike Snitzer goto out; 287564f52b0eSMike Snitzer if (integrity && bioset_integrity_create(pools->io_bs, pool_size)) 287664f52b0eSMike Snitzer goto out; 287778d8e58aSMike Snitzer break; 287878d8e58aSMike Snitzer case DM_TYPE_REQUEST_BASED: 287978d8e58aSMike Snitzer case DM_TYPE_MQ_REQUEST_BASED: 28800776aa0eSMike Snitzer pool_size = max(dm_get_reserved_rq_based_ios(), min_pool_size); 288178d8e58aSMike Snitzer front_pad = offsetof(struct dm_rq_clone_bio_info, clone); 2882591ddcfcSMike Snitzer /* per_io_data_size is used for blk-mq pdu at queue allocation */ 288378d8e58aSMike Snitzer break; 288478d8e58aSMike Snitzer default: 288578d8e58aSMike Snitzer BUG(); 288678d8e58aSMike Snitzer } 288778d8e58aSMike Snitzer 28884a3f54d9SMike Snitzer pools->bs = bioset_create(pool_size, front_pad, 0); 2889e6ee8c0bSKiyoshi Ueda if (!pools->bs) 28905f015204SJun'ichi Nomura goto out; 2891e6ee8c0bSKiyoshi Ueda 2892a91a2785SMartin K. Petersen if (integrity && bioset_integrity_create(pools->bs, pool_size)) 28935f015204SJun'ichi Nomura goto out; 2894a91a2785SMartin K. 
Petersen 2895e6ee8c0bSKiyoshi Ueda return pools; 289678d8e58aSMike Snitzer 28975f015204SJun'ichi Nomura out: 28985f015204SJun'ichi Nomura dm_free_md_mempools(pools); 2899e6ee8c0bSKiyoshi Ueda 29004e6e36c3SMike Snitzer return NULL; 2901e6ee8c0bSKiyoshi Ueda } 2902e6ee8c0bSKiyoshi Ueda 2903e6ee8c0bSKiyoshi Ueda void dm_free_md_mempools(struct dm_md_mempools *pools) 2904e6ee8c0bSKiyoshi Ueda { 2905e6ee8c0bSKiyoshi Ueda if (!pools) 2906e6ee8c0bSKiyoshi Ueda return; 2907e6ee8c0bSKiyoshi Ueda 2908e6ee8c0bSKiyoshi Ueda if (pools->bs) 2909e6ee8c0bSKiyoshi Ueda bioset_free(pools->bs); 291064f52b0eSMike Snitzer if (pools->io_bs) 291164f52b0eSMike Snitzer bioset_free(pools->io_bs); 2912e6ee8c0bSKiyoshi Ueda 2913e6ee8c0bSKiyoshi Ueda kfree(pools); 2914e6ee8c0bSKiyoshi Ueda } 2915e6ee8c0bSKiyoshi Ueda 29169c72bad1SChristoph Hellwig struct dm_pr { 29179c72bad1SChristoph Hellwig u64 old_key; 29189c72bad1SChristoph Hellwig u64 new_key; 29199c72bad1SChristoph Hellwig u32 flags; 29209c72bad1SChristoph Hellwig bool fail_early; 29219c72bad1SChristoph Hellwig }; 29229c72bad1SChristoph Hellwig 29239c72bad1SChristoph Hellwig static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn, 29249c72bad1SChristoph Hellwig void *data) 29259c72bad1SChristoph Hellwig { 29269c72bad1SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data; 29279c72bad1SChristoph Hellwig struct dm_table *table; 29289c72bad1SChristoph Hellwig struct dm_target *ti; 29299c72bad1SChristoph Hellwig int ret = -ENOTTY, srcu_idx; 29309c72bad1SChristoph Hellwig 29319c72bad1SChristoph Hellwig table = dm_get_live_table(md, &srcu_idx); 29329c72bad1SChristoph Hellwig if (!table || !dm_table_get_size(table)) 29339c72bad1SChristoph Hellwig goto out; 29349c72bad1SChristoph Hellwig 29359c72bad1SChristoph Hellwig /* We only support devices that have a single target */ 29369c72bad1SChristoph Hellwig if (dm_table_get_num_targets(table) != 1) 29379c72bad1SChristoph Hellwig goto out; 29389c72bad1SChristoph Hellwig ti = dm_table_get_target(table, 0); 29399c72bad1SChristoph Hellwig 29409c72bad1SChristoph Hellwig ret = -EINVAL; 29419c72bad1SChristoph Hellwig if (!ti->type->iterate_devices) 29429c72bad1SChristoph Hellwig goto out; 29439c72bad1SChristoph Hellwig 29449c72bad1SChristoph Hellwig ret = ti->type->iterate_devices(ti, fn, data); 29459c72bad1SChristoph Hellwig out: 29469c72bad1SChristoph Hellwig dm_put_live_table(md, srcu_idx); 29479c72bad1SChristoph Hellwig return ret; 29489c72bad1SChristoph Hellwig } 29499c72bad1SChristoph Hellwig 29509c72bad1SChristoph Hellwig /* 29519c72bad1SChristoph Hellwig * For register / unregister we need to manually call out to every path. 
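*
* dm_call_pr() above supplies that fan-out: __dm_pr_register() below is
* invoked once per underlying device.  Rough flow for a hypothetical
* two-path device (path0/path1 are illustrative):
*
*	dm_pr_register()
*	  -> dm_call_pr() -> ti->type->iterate_devices()
*	       -> __dm_pr_register(ti, path0, ...)
*	       -> __dm_pr_register(ti, path1, ...)
*
* If registration of a new key fails on any path, dm_pr_register()
* unregisters that key from all paths so no stale registration remains.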
29529c72bad1SChristoph Hellwig */ 29539c72bad1SChristoph Hellwig static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev, 29549c72bad1SChristoph Hellwig sector_t start, sector_t len, void *data) 29559c72bad1SChristoph Hellwig { 29569c72bad1SChristoph Hellwig struct dm_pr *pr = data; 29579c72bad1SChristoph Hellwig const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; 29589c72bad1SChristoph Hellwig 29599c72bad1SChristoph Hellwig if (!ops || !ops->pr_register) 29609c72bad1SChristoph Hellwig return -EOPNOTSUPP; 29619c72bad1SChristoph Hellwig return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags); 29629c72bad1SChristoph Hellwig } 29639c72bad1SChristoph Hellwig 296471cdb697SChristoph Hellwig static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key, 296571cdb697SChristoph Hellwig u32 flags) 296671cdb697SChristoph Hellwig { 29679c72bad1SChristoph Hellwig struct dm_pr pr = { 29689c72bad1SChristoph Hellwig .old_key = old_key, 29699c72bad1SChristoph Hellwig .new_key = new_key, 29709c72bad1SChristoph Hellwig .flags = flags, 29719c72bad1SChristoph Hellwig .fail_early = true, 29729c72bad1SChristoph Hellwig }; 29739c72bad1SChristoph Hellwig int ret; 297471cdb697SChristoph Hellwig 29759c72bad1SChristoph Hellwig ret = dm_call_pr(bdev, __dm_pr_register, &pr); 29769c72bad1SChristoph Hellwig if (ret && new_key) { 29779c72bad1SChristoph Hellwig /* unregister all paths if we failed to register any path */ 29789c72bad1SChristoph Hellwig pr.old_key = new_key; 29799c72bad1SChristoph Hellwig pr.new_key = 0; 29809c72bad1SChristoph Hellwig pr.flags = 0; 29819c72bad1SChristoph Hellwig pr.fail_early = false; 29829c72bad1SChristoph Hellwig dm_call_pr(bdev, __dm_pr_register, &pr); 29839c72bad1SChristoph Hellwig } 298471cdb697SChristoph Hellwig 29859c72bad1SChristoph Hellwig return ret; 298671cdb697SChristoph Hellwig } 298771cdb697SChristoph Hellwig 298871cdb697SChristoph Hellwig static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type, 298971cdb697SChristoph Hellwig u32 flags) 299071cdb697SChristoph Hellwig { 299171cdb697SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data; 299271cdb697SChristoph Hellwig const struct pr_ops *ops; 299371cdb697SChristoph Hellwig fmode_t mode; 2994956a4025SMike Snitzer int r; 299571cdb697SChristoph Hellwig 2996956a4025SMike Snitzer r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); 299771cdb697SChristoph Hellwig if (r < 0) 299871cdb697SChristoph Hellwig return r; 299971cdb697SChristoph Hellwig 300071cdb697SChristoph Hellwig ops = bdev->bd_disk->fops->pr_ops; 300171cdb697SChristoph Hellwig if (ops && ops->pr_reserve) 300271cdb697SChristoph Hellwig r = ops->pr_reserve(bdev, key, type, flags); 300371cdb697SChristoph Hellwig else 300471cdb697SChristoph Hellwig r = -EOPNOTSUPP; 300571cdb697SChristoph Hellwig 3006956a4025SMike Snitzer bdput(bdev); 300771cdb697SChristoph Hellwig return r; 300871cdb697SChristoph Hellwig } 300971cdb697SChristoph Hellwig 301071cdb697SChristoph Hellwig static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type) 301171cdb697SChristoph Hellwig { 301271cdb697SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data; 301371cdb697SChristoph Hellwig const struct pr_ops *ops; 301471cdb697SChristoph Hellwig fmode_t mode; 3015956a4025SMike Snitzer int r; 301671cdb697SChristoph Hellwig 3017956a4025SMike Snitzer r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); 301871cdb697SChristoph Hellwig if (r < 0) 301971cdb697SChristoph Hellwig return r; 
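/*
 * dm_grab_bdev_for_ioctl() took a reference on the underlying bdev;
 * every exit path below must drop it with bdput().  The same pattern
 * applies to dm_pr_reserve(), dm_pr_preempt() and dm_pr_clear().
 */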
302071cdb697SChristoph Hellwig 302171cdb697SChristoph Hellwig ops = bdev->bd_disk->fops->pr_ops; 302271cdb697SChristoph Hellwig if (ops && ops->pr_release) 302371cdb697SChristoph Hellwig r = ops->pr_release(bdev, key, type); 302471cdb697SChristoph Hellwig else 302571cdb697SChristoph Hellwig r = -EOPNOTSUPP; 302671cdb697SChristoph Hellwig 3027956a4025SMike Snitzer bdput(bdev); 302871cdb697SChristoph Hellwig return r; 302971cdb697SChristoph Hellwig } 303071cdb697SChristoph Hellwig 303171cdb697SChristoph Hellwig static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key, 303271cdb697SChristoph Hellwig enum pr_type type, bool abort) 303371cdb697SChristoph Hellwig { 303471cdb697SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data; 303571cdb697SChristoph Hellwig const struct pr_ops *ops; 303671cdb697SChristoph Hellwig fmode_t mode; 3037956a4025SMike Snitzer int r; 303871cdb697SChristoph Hellwig 3039956a4025SMike Snitzer r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); 304071cdb697SChristoph Hellwig if (r < 0) 304171cdb697SChristoph Hellwig return r; 304271cdb697SChristoph Hellwig 304371cdb697SChristoph Hellwig ops = bdev->bd_disk->fops->pr_ops; 304471cdb697SChristoph Hellwig if (ops && ops->pr_preempt) 304571cdb697SChristoph Hellwig r = ops->pr_preempt(bdev, old_key, new_key, type, abort); 304671cdb697SChristoph Hellwig else 304771cdb697SChristoph Hellwig r = -EOPNOTSUPP; 304871cdb697SChristoph Hellwig 3049956a4025SMike Snitzer bdput(bdev); 305071cdb697SChristoph Hellwig return r; 305171cdb697SChristoph Hellwig } 305271cdb697SChristoph Hellwig 305371cdb697SChristoph Hellwig static int dm_pr_clear(struct block_device *bdev, u64 key) 305471cdb697SChristoph Hellwig { 305571cdb697SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data; 305671cdb697SChristoph Hellwig const struct pr_ops *ops; 305771cdb697SChristoph Hellwig fmode_t mode; 3058956a4025SMike Snitzer int r; 305971cdb697SChristoph Hellwig 3060956a4025SMike Snitzer r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); 306171cdb697SChristoph Hellwig if (r < 0) 306271cdb697SChristoph Hellwig return r; 306371cdb697SChristoph Hellwig 306471cdb697SChristoph Hellwig ops = bdev->bd_disk->fops->pr_ops; 306571cdb697SChristoph Hellwig if (ops && ops->pr_clear) 306671cdb697SChristoph Hellwig r = ops->pr_clear(bdev, key); 306771cdb697SChristoph Hellwig else 306871cdb697SChristoph Hellwig r = -EOPNOTSUPP; 306971cdb697SChristoph Hellwig 3070956a4025SMike Snitzer bdput(bdev); 307171cdb697SChristoph Hellwig return r; 307271cdb697SChristoph Hellwig } 307371cdb697SChristoph Hellwig 307471cdb697SChristoph Hellwig static const struct pr_ops dm_pr_ops = { 307571cdb697SChristoph Hellwig .pr_register = dm_pr_register, 307671cdb697SChristoph Hellwig .pr_reserve = dm_pr_reserve, 307771cdb697SChristoph Hellwig .pr_release = dm_pr_release, 307871cdb697SChristoph Hellwig .pr_preempt = dm_pr_preempt, 307971cdb697SChristoph Hellwig .pr_clear = dm_pr_clear, 308071cdb697SChristoph Hellwig }; 308171cdb697SChristoph Hellwig 308283d5cde4SAlexey Dobriyan static const struct block_device_operations dm_blk_dops = { 30831da177e4SLinus Torvalds .open = dm_blk_open, 30841da177e4SLinus Torvalds .release = dm_blk_close, 3085aa129a22SMilan Broz .ioctl = dm_blk_ioctl, 30863ac51e74SDarrick J. 
Wong .getgeo = dm_blk_getgeo, 308771cdb697SChristoph Hellwig .pr_ops = &dm_pr_ops, 30881da177e4SLinus Torvalds .owner = THIS_MODULE 30891da177e4SLinus Torvalds }; 30901da177e4SLinus Torvalds 3091f26c5719SDan Williams static const struct dax_operations dm_dax_ops = { 3092f26c5719SDan Williams .direct_access = dm_dax_direct_access, 30937e026c8cSDan Williams .copy_from_iter = dm_dax_copy_from_iter, 3094f26c5719SDan Williams }; 3095f26c5719SDan Williams 30961da177e4SLinus Torvalds /* 30971da177e4SLinus Torvalds * module hooks 30981da177e4SLinus Torvalds */ 30991da177e4SLinus Torvalds module_init(dm_init); 31001da177e4SLinus Torvalds module_exit(dm_exit); 31011da177e4SLinus Torvalds 31021da177e4SLinus Torvalds module_param(major, uint, 0); 31031da177e4SLinus Torvalds MODULE_PARM_DESC(major, "The major number of the device mapper"); 3104f4790826SMike Snitzer 3105e8603136SMike Snitzer module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR); 3106e8603136SMike Snitzer MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools"); 3107e8603136SMike Snitzer 3108115485e8SMike Snitzer module_param(dm_numa_node, int, S_IRUGO | S_IWUSR); 3109115485e8SMike Snitzer MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations"); 3110115485e8SMike Snitzer 31111da177e4SLinus Torvalds MODULE_DESCRIPTION(DM_NAME " driver"); 31121da177e4SLinus Torvalds MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); 31131da177e4SLinus Torvalds MODULE_LICENSE("GPL"); 3114
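/*
 * Load-time usage sketch (the values shown are illustrative, not
 * defaults):
 *
 *	modprobe dm_mod major=240 reserved_bio_based_ios=32 dm_numa_node=0
 *
 * The parameters declared with S_IRUGO | S_IWUSR above can also be
 * changed at runtime via /sys/module/dm_mod/parameters/.
 */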