11da177e4SLinus Torvalds /* 21da177e4SLinus Torvalds * Copyright (C) 2001, 2002 Sistina Software (UK) Limited. 3784aae73SMilan Broz * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. 41da177e4SLinus Torvalds * 51da177e4SLinus Torvalds * This file is released under the GPL. 61da177e4SLinus Torvalds */ 71da177e4SLinus Torvalds 84cc96131SMike Snitzer #include "dm-core.h" 94cc96131SMike Snitzer #include "dm-rq.h" 1051e5b2bdSMike Anderson #include "dm-uevent.h" 111da177e4SLinus Torvalds 121da177e4SLinus Torvalds #include <linux/init.h> 131da177e4SLinus Torvalds #include <linux/module.h> 1448c9c27bSArjan van de Ven #include <linux/mutex.h> 15174cd4b1SIngo Molnar #include <linux/sched/signal.h> 161da177e4SLinus Torvalds #include <linux/blkpg.h> 171da177e4SLinus Torvalds #include <linux/bio.h> 181da177e4SLinus Torvalds #include <linux/mempool.h> 19f26c5719SDan Williams #include <linux/dax.h> 201da177e4SLinus Torvalds #include <linux/slab.h> 211da177e4SLinus Torvalds #include <linux/idr.h> 223ac51e74SDarrick J. Wong #include <linux/hdreg.h> 233f77316dSKiyoshi Ueda #include <linux/delay.h> 24ffcc3936SMike Snitzer #include <linux/wait.h> 2571cdb697SChristoph Hellwig #include <linux/pr.h> 2655782138SLi Zefan 2772d94861SAlasdair G Kergon #define DM_MSG_PREFIX "core" 2872d94861SAlasdair G Kergon 2971a16736SNamhyung Kim #ifdef CONFIG_PRINTK 3071a16736SNamhyung Kim /* 3171a16736SNamhyung Kim * ratelimit state to be used in DMXXX_LIMIT(). 3271a16736SNamhyung Kim */ 3371a16736SNamhyung Kim DEFINE_RATELIMIT_STATE(dm_ratelimit_state, 3471a16736SNamhyung Kim DEFAULT_RATELIMIT_INTERVAL, 3571a16736SNamhyung Kim DEFAULT_RATELIMIT_BURST); 3671a16736SNamhyung Kim EXPORT_SYMBOL(dm_ratelimit_state); 3771a16736SNamhyung Kim #endif 3871a16736SNamhyung Kim 3960935eb2SMilan Broz /* 4060935eb2SMilan Broz * Cookies are numeric values sent with CHANGE and REMOVE 4160935eb2SMilan Broz * uevents while resuming, removing or renaming the device. 4260935eb2SMilan Broz */ 4360935eb2SMilan Broz #define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE" 4460935eb2SMilan Broz #define DM_COOKIE_LENGTH 24 4560935eb2SMilan Broz 461da177e4SLinus Torvalds static const char *_name = DM_NAME; 471da177e4SLinus Torvalds 481da177e4SLinus Torvalds static unsigned int major = 0; 491da177e4SLinus Torvalds static unsigned int _major = 0; 501da177e4SLinus Torvalds 51d15b774cSAlasdair G Kergon static DEFINE_IDR(_minor_idr); 52d15b774cSAlasdair G Kergon 53f32c10b0SJeff Mahoney static DEFINE_SPINLOCK(_minor_lock); 542c140a24SMikulas Patocka 552c140a24SMikulas Patocka static void do_deferred_remove(struct work_struct *w); 562c140a24SMikulas Patocka 572c140a24SMikulas Patocka static DECLARE_WORK(deferred_remove_work, do_deferred_remove); 582c140a24SMikulas Patocka 59acfe0ad7SMikulas Patocka static struct workqueue_struct *deferred_remove_workqueue; 60acfe0ad7SMikulas Patocka 611da177e4SLinus Torvalds /* 621da177e4SLinus Torvalds * One of these is allocated per bio. 631da177e4SLinus Torvalds */ 641da177e4SLinus Torvalds struct dm_io { 651da177e4SLinus Torvalds struct mapped_device *md; 661da177e4SLinus Torvalds int error; 671da177e4SLinus Torvalds atomic_t io_count; 686ae2fa67SRichard Kennedy struct bio *bio; 693eaf840eSJun'ichi "Nick" Nomura unsigned long start_time; 70f88fb981SKiyoshi Ueda spinlock_t endio_lock; 71fd2ed4d2SMikulas Patocka struct dm_stats_aux stats_aux; 721da177e4SLinus Torvalds }; 731da177e4SLinus Torvalds 74ba61fdd1SJeff Mahoney #define MINOR_ALLOCED ((void *)-1) 75ba61fdd1SJeff Mahoney 761da177e4SLinus Torvalds /* 771da177e4SLinus Torvalds * Bits for the md->flags field. 781da177e4SLinus Torvalds */ 791eb787ecSAlasdair G Kergon #define DMF_BLOCK_IO_FOR_SUSPEND 0 801da177e4SLinus Torvalds #define DMF_SUSPENDED 1 81aa8d7c2fSAlasdair G Kergon #define DMF_FROZEN 2 82fba9f90eSJeff Mahoney #define DMF_FREEING 3 835c6bd75dSAlasdair G Kergon #define DMF_DELETING 4 842e93ccc1SKiyoshi Ueda #define DMF_NOFLUSH_SUSPENDING 5 858ae12666SKent Overstreet #define DMF_DEFERRED_REMOVE 6 868ae12666SKent Overstreet #define DMF_SUSPENDED_INTERNALLY 7 871da177e4SLinus Torvalds 88115485e8SMike Snitzer #define DM_NUMA_NODE NUMA_NO_NODE 89115485e8SMike Snitzer static int dm_numa_node = DM_NUMA_NODE; 90faad87dfSMike Snitzer 91e6ee8c0bSKiyoshi Ueda /* 92e6ee8c0bSKiyoshi Ueda * For mempools pre-allocation at the table loading time. 93e6ee8c0bSKiyoshi Ueda */ 94e6ee8c0bSKiyoshi Ueda struct dm_md_mempools { 95e6ee8c0bSKiyoshi Ueda mempool_t *io_pool; 96e6ee8c0bSKiyoshi Ueda struct bio_set *bs; 97e6ee8c0bSKiyoshi Ueda }; 98e6ee8c0bSKiyoshi Ueda 9986f1152bSBenjamin Marzinski struct table_device { 10086f1152bSBenjamin Marzinski struct list_head list; 10186f1152bSBenjamin Marzinski atomic_t count; 10286f1152bSBenjamin Marzinski struct dm_dev dm_dev; 10386f1152bSBenjamin Marzinski }; 10486f1152bSBenjamin Marzinski 105e18b890bSChristoph Lameter static struct kmem_cache *_io_cache; 1068fbf26adSKiyoshi Ueda static struct kmem_cache *_rq_tio_cache; 1071ae49ea2SMike Snitzer static struct kmem_cache *_rq_cache; 10894818742SKent Overstreet 109f4790826SMike Snitzer /* 110e8603136SMike Snitzer * Bio-based DM's mempools' reserved IOs set by the user. 111e8603136SMike Snitzer */ 1124cc96131SMike Snitzer #define RESERVED_BIO_BASED_IOS 16 113e8603136SMike Snitzer static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS; 114e8603136SMike Snitzer 115115485e8SMike Snitzer static int __dm_get_module_param_int(int *module_param, int min, int max) 116115485e8SMike Snitzer { 117115485e8SMike Snitzer int param = ACCESS_ONCE(*module_param); 118115485e8SMike Snitzer int modified_param = 0; 119115485e8SMike Snitzer bool modified = true; 120115485e8SMike Snitzer 121115485e8SMike Snitzer if (param < min) 122115485e8SMike Snitzer modified_param = min; 123115485e8SMike Snitzer else if (param > max) 124115485e8SMike Snitzer modified_param = max; 125115485e8SMike Snitzer else 126115485e8SMike Snitzer modified = false; 127115485e8SMike Snitzer 128115485e8SMike Snitzer if (modified) { 129115485e8SMike Snitzer (void)cmpxchg(module_param, param, modified_param); 130115485e8SMike Snitzer param = modified_param; 131115485e8SMike Snitzer } 132115485e8SMike Snitzer 133115485e8SMike Snitzer return param; 134115485e8SMike Snitzer } 135115485e8SMike Snitzer 1364cc96131SMike Snitzer unsigned __dm_get_module_param(unsigned *module_param, 137f4790826SMike Snitzer unsigned def, unsigned max) 138f4790826SMike Snitzer { 13909c2d531SMike Snitzer unsigned param = ACCESS_ONCE(*module_param); 14009c2d531SMike Snitzer unsigned modified_param = 0; 141f4790826SMike Snitzer 14209c2d531SMike Snitzer if (!param) 14309c2d531SMike Snitzer modified_param = def; 14409c2d531SMike Snitzer else if (param > max) 14509c2d531SMike Snitzer modified_param = max; 146f4790826SMike Snitzer 14709c2d531SMike Snitzer if (modified_param) { 14809c2d531SMike Snitzer (void)cmpxchg(module_param, param, modified_param); 14909c2d531SMike Snitzer param = modified_param; 150f4790826SMike Snitzer } 151f4790826SMike Snitzer 15209c2d531SMike Snitzer return param; 153f4790826SMike Snitzer } 154f4790826SMike Snitzer 155e8603136SMike Snitzer unsigned dm_get_reserved_bio_based_ios(void) 156e8603136SMike Snitzer { 15709c2d531SMike Snitzer return __dm_get_module_param(&reserved_bio_based_ios, 1584cc96131SMike Snitzer RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS); 159e8603136SMike Snitzer } 160e8603136SMike Snitzer EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios); 161e8603136SMike Snitzer 162115485e8SMike Snitzer static unsigned dm_get_numa_node(void) 163115485e8SMike Snitzer { 164115485e8SMike Snitzer return __dm_get_module_param_int(&dm_numa_node, 165115485e8SMike Snitzer DM_NUMA_NODE, num_online_nodes() - 1); 166115485e8SMike Snitzer } 167115485e8SMike Snitzer 1681da177e4SLinus Torvalds static int __init local_init(void) 1691da177e4SLinus Torvalds { 17051157b4aSKiyoshi Ueda int r = -ENOMEM; 1711da177e4SLinus Torvalds 1721da177e4SLinus Torvalds /* allocate a slab for the dm_ios */ 173028867acSAlasdair G Kergon _io_cache = KMEM_CACHE(dm_io, 0); 1741da177e4SLinus Torvalds if (!_io_cache) 17551157b4aSKiyoshi Ueda return r; 1761da177e4SLinus Torvalds 1778fbf26adSKiyoshi Ueda _rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0); 1788fbf26adSKiyoshi Ueda if (!_rq_tio_cache) 179dba14160SMikulas Patocka goto out_free_io_cache; 1808fbf26adSKiyoshi Ueda 181eca7ee6dSMike Snitzer _rq_cache = kmem_cache_create("dm_old_clone_request", sizeof(struct request), 1821ae49ea2SMike Snitzer __alignof__(struct request), 0, NULL); 1831ae49ea2SMike Snitzer if (!_rq_cache) 1841ae49ea2SMike Snitzer goto out_free_rq_tio_cache; 1851ae49ea2SMike Snitzer 18651e5b2bdSMike Anderson r = dm_uevent_init(); 18751157b4aSKiyoshi Ueda if (r) 1881ae49ea2SMike Snitzer goto out_free_rq_cache; 18951e5b2bdSMike Anderson 190acfe0ad7SMikulas Patocka deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1); 191acfe0ad7SMikulas Patocka if (!deferred_remove_workqueue) { 192acfe0ad7SMikulas Patocka r = -ENOMEM; 193acfe0ad7SMikulas Patocka goto out_uevent_exit; 194acfe0ad7SMikulas Patocka } 195acfe0ad7SMikulas Patocka 1961da177e4SLinus Torvalds _major = major; 1971da177e4SLinus Torvalds r = register_blkdev(_major, _name); 19851157b4aSKiyoshi Ueda if (r < 0) 199acfe0ad7SMikulas Patocka goto out_free_workqueue; 2001da177e4SLinus Torvalds 2011da177e4SLinus Torvalds if (!_major) 2021da177e4SLinus Torvalds _major = r; 2031da177e4SLinus Torvalds 2041da177e4SLinus Torvalds return 0; 20551157b4aSKiyoshi Ueda 206acfe0ad7SMikulas Patocka out_free_workqueue: 207acfe0ad7SMikulas Patocka destroy_workqueue(deferred_remove_workqueue); 20851157b4aSKiyoshi Ueda out_uevent_exit: 20951157b4aSKiyoshi Ueda dm_uevent_exit(); 2101ae49ea2SMike Snitzer out_free_rq_cache: 2111ae49ea2SMike Snitzer kmem_cache_destroy(_rq_cache); 2128fbf26adSKiyoshi Ueda out_free_rq_tio_cache: 2138fbf26adSKiyoshi Ueda kmem_cache_destroy(_rq_tio_cache); 21451157b4aSKiyoshi Ueda out_free_io_cache: 21551157b4aSKiyoshi Ueda kmem_cache_destroy(_io_cache); 21651157b4aSKiyoshi Ueda 21751157b4aSKiyoshi Ueda return r; 2181da177e4SLinus Torvalds } 2191da177e4SLinus Torvalds 2201da177e4SLinus Torvalds static void local_exit(void) 2211da177e4SLinus Torvalds { 2222c140a24SMikulas Patocka flush_scheduled_work(); 223acfe0ad7SMikulas Patocka destroy_workqueue(deferred_remove_workqueue); 2242c140a24SMikulas Patocka 2251ae49ea2SMike Snitzer kmem_cache_destroy(_rq_cache); 2268fbf26adSKiyoshi Ueda kmem_cache_destroy(_rq_tio_cache); 2271da177e4SLinus Torvalds kmem_cache_destroy(_io_cache); 22800d59405SAkinobu Mita unregister_blkdev(_major, _name); 22951e5b2bdSMike Anderson dm_uevent_exit(); 2301da177e4SLinus Torvalds 2311da177e4SLinus Torvalds _major = 0; 2321da177e4SLinus Torvalds 2331da177e4SLinus Torvalds DMINFO("cleaned up"); 2341da177e4SLinus Torvalds } 2351da177e4SLinus Torvalds 236b9249e55SAlasdair G Kergon static int (*_inits[])(void) __initdata = { 2371da177e4SLinus Torvalds local_init, 2381da177e4SLinus Torvalds dm_target_init, 2391da177e4SLinus Torvalds dm_linear_init, 2401da177e4SLinus Torvalds dm_stripe_init, 241952b3557SMikulas Patocka dm_io_init, 242945fa4d2SMikulas Patocka dm_kcopyd_init, 2431da177e4SLinus Torvalds dm_interface_init, 244fd2ed4d2SMikulas Patocka dm_statistics_init, 2451da177e4SLinus Torvalds }; 2461da177e4SLinus Torvalds 247b9249e55SAlasdair G Kergon static void (*_exits[])(void) = { 2481da177e4SLinus Torvalds local_exit, 2491da177e4SLinus Torvalds dm_target_exit, 2501da177e4SLinus Torvalds dm_linear_exit, 2511da177e4SLinus Torvalds dm_stripe_exit, 252952b3557SMikulas Patocka dm_io_exit, 253945fa4d2SMikulas Patocka dm_kcopyd_exit, 2541da177e4SLinus Torvalds dm_interface_exit, 255fd2ed4d2SMikulas Patocka dm_statistics_exit, 2561da177e4SLinus Torvalds }; 2571da177e4SLinus Torvalds 2581da177e4SLinus Torvalds static int __init dm_init(void) 2591da177e4SLinus Torvalds { 2601da177e4SLinus Torvalds const int count = ARRAY_SIZE(_inits); 2611da177e4SLinus Torvalds 2621da177e4SLinus Torvalds int r, i; 2631da177e4SLinus Torvalds 2641da177e4SLinus Torvalds for (i = 0; i < count; i++) { 2651da177e4SLinus Torvalds r = _inits[i](); 2661da177e4SLinus Torvalds if (r) 2671da177e4SLinus Torvalds goto bad; 2681da177e4SLinus Torvalds } 2691da177e4SLinus Torvalds 2701da177e4SLinus Torvalds return 0; 2711da177e4SLinus Torvalds 2721da177e4SLinus Torvalds bad: 2731da177e4SLinus Torvalds while (i--) 2741da177e4SLinus Torvalds _exits[i](); 2751da177e4SLinus Torvalds 2761da177e4SLinus Torvalds return r; 2771da177e4SLinus Torvalds } 2781da177e4SLinus Torvalds 2791da177e4SLinus Torvalds static void __exit dm_exit(void) 2801da177e4SLinus Torvalds { 2811da177e4SLinus Torvalds int i = ARRAY_SIZE(_exits); 2821da177e4SLinus Torvalds 2831da177e4SLinus Torvalds while (i--) 2841da177e4SLinus Torvalds _exits[i](); 285d15b774cSAlasdair G Kergon 286d15b774cSAlasdair G Kergon /* 287d15b774cSAlasdair G Kergon * Should be empty by this point. 288d15b774cSAlasdair G Kergon */ 289d15b774cSAlasdair G Kergon idr_destroy(&_minor_idr); 2901da177e4SLinus Torvalds } 2911da177e4SLinus Torvalds 2921da177e4SLinus Torvalds /* 2931da177e4SLinus Torvalds * Block device functions 2941da177e4SLinus Torvalds */ 295432a212cSMike Anderson int dm_deleting_md(struct mapped_device *md) 296432a212cSMike Anderson { 297432a212cSMike Anderson return test_bit(DMF_DELETING, &md->flags); 298432a212cSMike Anderson } 299432a212cSMike Anderson 300fe5f9f2cSAl Viro static int dm_blk_open(struct block_device *bdev, fmode_t mode) 3011da177e4SLinus Torvalds { 3021da177e4SLinus Torvalds struct mapped_device *md; 3031da177e4SLinus Torvalds 304fba9f90eSJeff Mahoney spin_lock(&_minor_lock); 305fba9f90eSJeff Mahoney 306fe5f9f2cSAl Viro md = bdev->bd_disk->private_data; 307fba9f90eSJeff Mahoney if (!md) 308fba9f90eSJeff Mahoney goto out; 309fba9f90eSJeff Mahoney 3105c6bd75dSAlasdair G Kergon if (test_bit(DMF_FREEING, &md->flags) || 311432a212cSMike Anderson dm_deleting_md(md)) { 312fba9f90eSJeff Mahoney md = NULL; 313fba9f90eSJeff Mahoney goto out; 314fba9f90eSJeff Mahoney } 315fba9f90eSJeff Mahoney 3161da177e4SLinus Torvalds dm_get(md); 3175c6bd75dSAlasdair G Kergon atomic_inc(&md->open_count); 318fba9f90eSJeff Mahoney out: 319fba9f90eSJeff Mahoney spin_unlock(&_minor_lock); 320fba9f90eSJeff Mahoney 321fba9f90eSJeff Mahoney return md ? 0 : -ENXIO; 3221da177e4SLinus Torvalds } 3231da177e4SLinus Torvalds 324db2a144bSAl Viro static void dm_blk_close(struct gendisk *disk, fmode_t mode) 3251da177e4SLinus Torvalds { 32663a4f065SMike Snitzer struct mapped_device *md; 3276e9624b8SArnd Bergmann 3284a1aeb98SMilan Broz spin_lock(&_minor_lock); 3294a1aeb98SMilan Broz 33063a4f065SMike Snitzer md = disk->private_data; 33163a4f065SMike Snitzer if (WARN_ON(!md)) 33263a4f065SMike Snitzer goto out; 33363a4f065SMike Snitzer 3342c140a24SMikulas Patocka if (atomic_dec_and_test(&md->open_count) && 3352c140a24SMikulas Patocka (test_bit(DMF_DEFERRED_REMOVE, &md->flags))) 336acfe0ad7SMikulas Patocka queue_work(deferred_remove_workqueue, &deferred_remove_work); 3372c140a24SMikulas Patocka 3381da177e4SLinus Torvalds dm_put(md); 33963a4f065SMike Snitzer out: 3404a1aeb98SMilan Broz spin_unlock(&_minor_lock); 3411da177e4SLinus Torvalds } 3421da177e4SLinus Torvalds 3435c6bd75dSAlasdair G Kergon int dm_open_count(struct mapped_device *md) 3445c6bd75dSAlasdair G Kergon { 3455c6bd75dSAlasdair G Kergon return atomic_read(&md->open_count); 3465c6bd75dSAlasdair G Kergon } 3475c6bd75dSAlasdair G Kergon 3485c6bd75dSAlasdair G Kergon /* 3495c6bd75dSAlasdair G Kergon * Guarantees nothing is using the device before it's deleted. 3505c6bd75dSAlasdair G Kergon */ 3512c140a24SMikulas Patocka int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred) 3525c6bd75dSAlasdair G Kergon { 3535c6bd75dSAlasdair G Kergon int r = 0; 3545c6bd75dSAlasdair G Kergon 3555c6bd75dSAlasdair G Kergon spin_lock(&_minor_lock); 3565c6bd75dSAlasdair G Kergon 3572c140a24SMikulas Patocka if (dm_open_count(md)) { 3585c6bd75dSAlasdair G Kergon r = -EBUSY; 3592c140a24SMikulas Patocka if (mark_deferred) 3602c140a24SMikulas Patocka set_bit(DMF_DEFERRED_REMOVE, &md->flags); 3612c140a24SMikulas Patocka } else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags)) 3622c140a24SMikulas Patocka r = -EEXIST; 3635c6bd75dSAlasdair G Kergon else 3645c6bd75dSAlasdair G Kergon set_bit(DMF_DELETING, &md->flags); 3655c6bd75dSAlasdair G Kergon 3665c6bd75dSAlasdair G Kergon spin_unlock(&_minor_lock); 3675c6bd75dSAlasdair G Kergon 3685c6bd75dSAlasdair G Kergon return r; 3695c6bd75dSAlasdair G Kergon } 3705c6bd75dSAlasdair G Kergon 3712c140a24SMikulas Patocka int dm_cancel_deferred_remove(struct mapped_device *md) 3722c140a24SMikulas Patocka { 3732c140a24SMikulas Patocka int r = 0; 3742c140a24SMikulas Patocka 3752c140a24SMikulas Patocka spin_lock(&_minor_lock); 3762c140a24SMikulas Patocka 3772c140a24SMikulas Patocka if (test_bit(DMF_DELETING, &md->flags)) 3782c140a24SMikulas Patocka r = -EBUSY; 3792c140a24SMikulas Patocka else 3802c140a24SMikulas Patocka clear_bit(DMF_DEFERRED_REMOVE, &md->flags); 3812c140a24SMikulas Patocka 3822c140a24SMikulas Patocka spin_unlock(&_minor_lock); 3832c140a24SMikulas Patocka 3842c140a24SMikulas Patocka return r; 3852c140a24SMikulas Patocka } 3862c140a24SMikulas Patocka 3872c140a24SMikulas Patocka static void do_deferred_remove(struct work_struct *w) 3882c140a24SMikulas Patocka { 3892c140a24SMikulas Patocka dm_deferred_remove(); 3902c140a24SMikulas Patocka } 3912c140a24SMikulas Patocka 392fd2ed4d2SMikulas Patocka sector_t dm_get_size(struct mapped_device *md) 393fd2ed4d2SMikulas Patocka { 394fd2ed4d2SMikulas Patocka return get_capacity(md->disk); 395fd2ed4d2SMikulas Patocka } 396fd2ed4d2SMikulas Patocka 3979974fa2cSMike Snitzer struct request_queue *dm_get_md_queue(struct mapped_device *md) 3989974fa2cSMike Snitzer { 3999974fa2cSMike Snitzer return md->queue; 4009974fa2cSMike Snitzer } 4019974fa2cSMike Snitzer 402fd2ed4d2SMikulas Patocka struct dm_stats *dm_get_stats(struct mapped_device *md) 403fd2ed4d2SMikulas Patocka { 404fd2ed4d2SMikulas Patocka return &md->stats; 405fd2ed4d2SMikulas Patocka } 406fd2ed4d2SMikulas Patocka 4073ac51e74SDarrick J. Wong static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo) 4083ac51e74SDarrick J. Wong { 4093ac51e74SDarrick J. Wong struct mapped_device *md = bdev->bd_disk->private_data; 4103ac51e74SDarrick J. Wong 4113ac51e74SDarrick J. Wong return dm_get_geometry(md, geo); 4123ac51e74SDarrick J. Wong } 4133ac51e74SDarrick J. Wong 414956a4025SMike Snitzer static int dm_grab_bdev_for_ioctl(struct mapped_device *md, 41566482026SMike Snitzer struct block_device **bdev, 416956a4025SMike Snitzer fmode_t *mode) 417aa129a22SMilan Broz { 41866482026SMike Snitzer struct dm_target *tgt; 4196c182cd8SHannes Reinecke struct dm_table *map; 420956a4025SMike Snitzer int srcu_idx, r; 421aa129a22SMilan Broz 4226c182cd8SHannes Reinecke retry: 423e56f81e0SChristoph Hellwig r = -ENOTTY; 424956a4025SMike Snitzer map = dm_get_live_table(md, &srcu_idx); 425aa129a22SMilan Broz if (!map || !dm_table_get_size(map)) 426aa129a22SMilan Broz goto out; 427aa129a22SMilan Broz 428aa129a22SMilan Broz /* We only support devices that have a single target */ 429aa129a22SMilan Broz if (dm_table_get_num_targets(map) != 1) 430aa129a22SMilan Broz goto out; 431aa129a22SMilan Broz 43266482026SMike Snitzer tgt = dm_table_get_target(map, 0); 43366482026SMike Snitzer if (!tgt->type->prepare_ioctl) 4344d341d82SMike Snitzer goto out; 435aa129a22SMilan Broz 4364f186f8bSKiyoshi Ueda if (dm_suspended_md(md)) { 437aa129a22SMilan Broz r = -EAGAIN; 438aa129a22SMilan Broz goto out; 439aa129a22SMilan Broz } 440aa129a22SMilan Broz 44166482026SMike Snitzer r = tgt->type->prepare_ioctl(tgt, bdev, mode); 442e56f81e0SChristoph Hellwig if (r < 0) 443e56f81e0SChristoph Hellwig goto out; 444e56f81e0SChristoph Hellwig 445956a4025SMike Snitzer bdgrab(*bdev); 446956a4025SMike Snitzer dm_put_live_table(md, srcu_idx); 447e56f81e0SChristoph Hellwig return r; 448aa129a22SMilan Broz 449aa129a22SMilan Broz out: 450956a4025SMike Snitzer dm_put_live_table(md, srcu_idx); 4515bbbfdf6SJunichi Nomura if (r == -ENOTCONN && !fatal_signal_pending(current)) { 4526c182cd8SHannes Reinecke msleep(10); 4536c182cd8SHannes Reinecke goto retry; 4546c182cd8SHannes Reinecke } 455e56f81e0SChristoph Hellwig return r; 456e56f81e0SChristoph Hellwig } 4576c182cd8SHannes Reinecke 458e56f81e0SChristoph Hellwig static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode, 459e56f81e0SChristoph Hellwig unsigned int cmd, unsigned long arg) 460e56f81e0SChristoph Hellwig { 461e56f81e0SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data; 462956a4025SMike Snitzer int r; 463e56f81e0SChristoph Hellwig 464956a4025SMike Snitzer r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); 465e56f81e0SChristoph Hellwig if (r < 0) 466e56f81e0SChristoph Hellwig return r; 467e56f81e0SChristoph Hellwig 468e56f81e0SChristoph Hellwig if (r > 0) { 469e56f81e0SChristoph Hellwig /* 470e980f623SChristoph Hellwig * Target determined this ioctl is being issued against a 471e980f623SChristoph Hellwig * subset of the parent bdev; require extra privileges. 472e56f81e0SChristoph Hellwig */ 473e980f623SChristoph Hellwig if (!capable(CAP_SYS_RAWIO)) { 474e980f623SChristoph Hellwig DMWARN_LIMIT( 475e980f623SChristoph Hellwig "%s: sending ioctl %x to DM device without required privilege.", 476e980f623SChristoph Hellwig current->comm, cmd); 477e980f623SChristoph Hellwig r = -ENOIOCTLCMD; 478e56f81e0SChristoph Hellwig goto out; 479e56f81e0SChristoph Hellwig } 480e980f623SChristoph Hellwig } 481e56f81e0SChristoph Hellwig 48266482026SMike Snitzer r = __blkdev_driver_ioctl(bdev, mode, cmd, arg); 483e56f81e0SChristoph Hellwig out: 484956a4025SMike Snitzer bdput(bdev); 485aa129a22SMilan Broz return r; 486aa129a22SMilan Broz } 487aa129a22SMilan Broz 488028867acSAlasdair G Kergon static struct dm_io *alloc_io(struct mapped_device *md) 4891da177e4SLinus Torvalds { 4901da177e4SLinus Torvalds return mempool_alloc(md->io_pool, GFP_NOIO); 4911da177e4SLinus Torvalds } 4921da177e4SLinus Torvalds 493028867acSAlasdair G Kergon static void free_io(struct mapped_device *md, struct dm_io *io) 4941da177e4SLinus Torvalds { 4951da177e4SLinus Torvalds mempool_free(io, md->io_pool); 4961da177e4SLinus Torvalds } 4971da177e4SLinus Torvalds 498cfae7529SMike Snitzer static void free_tio(struct dm_target_io *tio) 4991da177e4SLinus Torvalds { 500dba14160SMikulas Patocka bio_put(&tio->clone); 5011da177e4SLinus Torvalds } 5021da177e4SLinus Torvalds 5034cc96131SMike Snitzer int md_in_flight(struct mapped_device *md) 50490abb8c4SKiyoshi Ueda { 50590abb8c4SKiyoshi Ueda return atomic_read(&md->pending[READ]) + 50690abb8c4SKiyoshi Ueda atomic_read(&md->pending[WRITE]); 50790abb8c4SKiyoshi Ueda } 50890abb8c4SKiyoshi Ueda 5093eaf840eSJun'ichi "Nick" Nomura static void start_io_acct(struct dm_io *io) 5103eaf840eSJun'ichi "Nick" Nomura { 5113eaf840eSJun'ichi "Nick" Nomura struct mapped_device *md = io->md; 512fd2ed4d2SMikulas Patocka struct bio *bio = io->bio; 513c9959059STejun Heo int cpu; 514fd2ed4d2SMikulas Patocka int rw = bio_data_dir(bio); 5153eaf840eSJun'ichi "Nick" Nomura 5163eaf840eSJun'ichi "Nick" Nomura io->start_time = jiffies; 5173eaf840eSJun'ichi "Nick" Nomura 518074a7acaSTejun Heo cpu = part_stat_lock(); 519074a7acaSTejun Heo part_round_stats(cpu, &dm_disk(md)->part0); 520074a7acaSTejun Heo part_stat_unlock(); 5211e9bb880SShaohua Li atomic_set(&dm_disk(md)->part0.in_flight[rw], 5221e9bb880SShaohua Li atomic_inc_return(&md->pending[rw])); 523fd2ed4d2SMikulas Patocka 524fd2ed4d2SMikulas Patocka if (unlikely(dm_stats_used(&md->stats))) 525528ec5abSMike Christie dm_stats_account_io(&md->stats, bio_data_dir(bio), 526528ec5abSMike Christie bio->bi_iter.bi_sector, bio_sectors(bio), 527528ec5abSMike Christie false, 0, &io->stats_aux); 5283eaf840eSJun'ichi "Nick" Nomura } 5293eaf840eSJun'ichi "Nick" Nomura 530d221d2e7SMikulas Patocka static void end_io_acct(struct dm_io *io) 5313eaf840eSJun'ichi "Nick" Nomura { 5323eaf840eSJun'ichi "Nick" Nomura struct mapped_device *md = io->md; 5333eaf840eSJun'ichi "Nick" Nomura struct bio *bio = io->bio; 5343eaf840eSJun'ichi "Nick" Nomura unsigned long duration = jiffies - io->start_time; 53518c0b223SGu Zheng int pending; 5363eaf840eSJun'ichi "Nick" Nomura int rw = bio_data_dir(bio); 5373eaf840eSJun'ichi "Nick" Nomura 53818c0b223SGu Zheng generic_end_io_acct(rw, &dm_disk(md)->part0, io->start_time); 5393eaf840eSJun'ichi "Nick" Nomura 540fd2ed4d2SMikulas Patocka if (unlikely(dm_stats_used(&md->stats))) 541528ec5abSMike Christie dm_stats_account_io(&md->stats, bio_data_dir(bio), 542528ec5abSMike Christie bio->bi_iter.bi_sector, bio_sectors(bio), 543528ec5abSMike Christie true, duration, &io->stats_aux); 544fd2ed4d2SMikulas Patocka 545af7e466aSMikulas Patocka /* 546af7e466aSMikulas Patocka * After this is decremented the bio must not be touched if it is 547d87f4c14STejun Heo * a flush. 548af7e466aSMikulas Patocka */ 5491e9bb880SShaohua Li pending = atomic_dec_return(&md->pending[rw]); 5501e9bb880SShaohua Li atomic_set(&dm_disk(md)->part0.in_flight[rw], pending); 551316d315bSNikanth Karthikesan pending += atomic_read(&md->pending[rw^0x1]); 5523eaf840eSJun'ichi "Nick" Nomura 553d221d2e7SMikulas Patocka /* nudge anyone waiting on suspend queue */ 554d221d2e7SMikulas Patocka if (!pending) 555d221d2e7SMikulas Patocka wake_up(&md->wait); 5563eaf840eSJun'ichi "Nick" Nomura } 5573eaf840eSJun'ichi "Nick" Nomura 5581da177e4SLinus Torvalds /* 5591da177e4SLinus Torvalds * Add the bio to the list of deferred io. 5601da177e4SLinus Torvalds */ 56192c63902SMikulas Patocka static void queue_io(struct mapped_device *md, struct bio *bio) 5621da177e4SLinus Torvalds { 56305447420SKiyoshi Ueda unsigned long flags; 5641da177e4SLinus Torvalds 56505447420SKiyoshi Ueda spin_lock_irqsave(&md->deferred_lock, flags); 5661da177e4SLinus Torvalds bio_list_add(&md->deferred, bio); 56705447420SKiyoshi Ueda spin_unlock_irqrestore(&md->deferred_lock, flags); 56892c63902SMikulas Patocka queue_work(md->wq, &md->work); 5691da177e4SLinus Torvalds } 5701da177e4SLinus Torvalds 5711da177e4SLinus Torvalds /* 5721da177e4SLinus Torvalds * Everyone (including functions in this file), should use this 5731da177e4SLinus Torvalds * function to access the md->map field, and make sure they call 57483d5e5b0SMikulas Patocka * dm_put_live_table() when finished. 5751da177e4SLinus Torvalds */ 57683d5e5b0SMikulas Patocka struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier) 5771da177e4SLinus Torvalds { 57883d5e5b0SMikulas Patocka *srcu_idx = srcu_read_lock(&md->io_barrier); 5791da177e4SLinus Torvalds 58083d5e5b0SMikulas Patocka return srcu_dereference(md->map, &md->io_barrier); 58183d5e5b0SMikulas Patocka } 5821da177e4SLinus Torvalds 58383d5e5b0SMikulas Patocka void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier) 58483d5e5b0SMikulas Patocka { 58583d5e5b0SMikulas Patocka srcu_read_unlock(&md->io_barrier, srcu_idx); 58683d5e5b0SMikulas Patocka } 58783d5e5b0SMikulas Patocka 58883d5e5b0SMikulas Patocka void dm_sync_table(struct mapped_device *md) 58983d5e5b0SMikulas Patocka { 59083d5e5b0SMikulas Patocka synchronize_srcu(&md->io_barrier); 59183d5e5b0SMikulas Patocka synchronize_rcu_expedited(); 59283d5e5b0SMikulas Patocka } 59383d5e5b0SMikulas Patocka 59483d5e5b0SMikulas Patocka /* 59583d5e5b0SMikulas Patocka * A fast alternative to dm_get_live_table/dm_put_live_table. 59683d5e5b0SMikulas Patocka * The caller must not block between these two functions. 59783d5e5b0SMikulas Patocka */ 59883d5e5b0SMikulas Patocka static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU) 59983d5e5b0SMikulas Patocka { 60083d5e5b0SMikulas Patocka rcu_read_lock(); 60183d5e5b0SMikulas Patocka return rcu_dereference(md->map); 60283d5e5b0SMikulas Patocka } 60383d5e5b0SMikulas Patocka 60483d5e5b0SMikulas Patocka static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU) 60583d5e5b0SMikulas Patocka { 60683d5e5b0SMikulas Patocka rcu_read_unlock(); 6071da177e4SLinus Torvalds } 6081da177e4SLinus Torvalds 6093ac51e74SDarrick J. Wong /* 61086f1152bSBenjamin Marzinski * Open a table device so we can use it as a map destination. 61186f1152bSBenjamin Marzinski */ 61286f1152bSBenjamin Marzinski static int open_table_device(struct table_device *td, dev_t dev, 61386f1152bSBenjamin Marzinski struct mapped_device *md) 61486f1152bSBenjamin Marzinski { 61586f1152bSBenjamin Marzinski static char *_claim_ptr = "I belong to device-mapper"; 61686f1152bSBenjamin Marzinski struct block_device *bdev; 61786f1152bSBenjamin Marzinski 61886f1152bSBenjamin Marzinski int r; 61986f1152bSBenjamin Marzinski 62086f1152bSBenjamin Marzinski BUG_ON(td->dm_dev.bdev); 62186f1152bSBenjamin Marzinski 62286f1152bSBenjamin Marzinski bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _claim_ptr); 62386f1152bSBenjamin Marzinski if (IS_ERR(bdev)) 62486f1152bSBenjamin Marzinski return PTR_ERR(bdev); 62586f1152bSBenjamin Marzinski 62686f1152bSBenjamin Marzinski r = bd_link_disk_holder(bdev, dm_disk(md)); 62786f1152bSBenjamin Marzinski if (r) { 62886f1152bSBenjamin Marzinski blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL); 62986f1152bSBenjamin Marzinski return r; 63086f1152bSBenjamin Marzinski } 63186f1152bSBenjamin Marzinski 63286f1152bSBenjamin Marzinski td->dm_dev.bdev = bdev; 633817bf402SDan Williams td->dm_dev.dax_dev = dax_get_by_host(bdev->bd_disk->disk_name); 63486f1152bSBenjamin Marzinski return 0; 63586f1152bSBenjamin Marzinski } 63686f1152bSBenjamin Marzinski 63786f1152bSBenjamin Marzinski /* 63886f1152bSBenjamin Marzinski * Close a table device that we've been using. 63986f1152bSBenjamin Marzinski */ 64086f1152bSBenjamin Marzinski static void close_table_device(struct table_device *td, struct mapped_device *md) 64186f1152bSBenjamin Marzinski { 64286f1152bSBenjamin Marzinski if (!td->dm_dev.bdev) 64386f1152bSBenjamin Marzinski return; 64486f1152bSBenjamin Marzinski 64586f1152bSBenjamin Marzinski bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md)); 64686f1152bSBenjamin Marzinski blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL); 647817bf402SDan Williams put_dax(td->dm_dev.dax_dev); 64886f1152bSBenjamin Marzinski td->dm_dev.bdev = NULL; 649817bf402SDan Williams td->dm_dev.dax_dev = NULL; 65086f1152bSBenjamin Marzinski } 65186f1152bSBenjamin Marzinski 65286f1152bSBenjamin Marzinski static struct table_device *find_table_device(struct list_head *l, dev_t dev, 65386f1152bSBenjamin Marzinski fmode_t mode) { 65486f1152bSBenjamin Marzinski struct table_device *td; 65586f1152bSBenjamin Marzinski 65686f1152bSBenjamin Marzinski list_for_each_entry(td, l, list) 65786f1152bSBenjamin Marzinski if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode) 65886f1152bSBenjamin Marzinski return td; 65986f1152bSBenjamin Marzinski 66086f1152bSBenjamin Marzinski return NULL; 66186f1152bSBenjamin Marzinski } 66286f1152bSBenjamin Marzinski 66386f1152bSBenjamin Marzinski int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode, 66486f1152bSBenjamin Marzinski struct dm_dev **result) { 66586f1152bSBenjamin Marzinski int r; 66686f1152bSBenjamin Marzinski struct table_device *td; 66786f1152bSBenjamin Marzinski 66886f1152bSBenjamin Marzinski mutex_lock(&md->table_devices_lock); 66986f1152bSBenjamin Marzinski td = find_table_device(&md->table_devices, dev, mode); 67086f1152bSBenjamin Marzinski if (!td) { 671115485e8SMike Snitzer td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id); 67286f1152bSBenjamin Marzinski if (!td) { 67386f1152bSBenjamin Marzinski mutex_unlock(&md->table_devices_lock); 67486f1152bSBenjamin Marzinski return -ENOMEM; 67586f1152bSBenjamin Marzinski } 67686f1152bSBenjamin Marzinski 67786f1152bSBenjamin Marzinski td->dm_dev.mode = mode; 67886f1152bSBenjamin Marzinski td->dm_dev.bdev = NULL; 67986f1152bSBenjamin Marzinski 68086f1152bSBenjamin Marzinski if ((r = open_table_device(td, dev, md))) { 68186f1152bSBenjamin Marzinski mutex_unlock(&md->table_devices_lock); 68286f1152bSBenjamin Marzinski kfree(td); 68386f1152bSBenjamin Marzinski return r; 68486f1152bSBenjamin Marzinski } 68586f1152bSBenjamin Marzinski 68686f1152bSBenjamin Marzinski format_dev_t(td->dm_dev.name, dev); 68786f1152bSBenjamin Marzinski 68886f1152bSBenjamin Marzinski atomic_set(&td->count, 0); 68986f1152bSBenjamin Marzinski list_add(&td->list, &md->table_devices); 69086f1152bSBenjamin Marzinski } 69186f1152bSBenjamin Marzinski atomic_inc(&td->count); 69286f1152bSBenjamin Marzinski mutex_unlock(&md->table_devices_lock); 69386f1152bSBenjamin Marzinski 69486f1152bSBenjamin Marzinski *result = &td->dm_dev; 69586f1152bSBenjamin Marzinski return 0; 69686f1152bSBenjamin Marzinski } 69786f1152bSBenjamin Marzinski EXPORT_SYMBOL_GPL(dm_get_table_device); 69886f1152bSBenjamin Marzinski 69986f1152bSBenjamin Marzinski void dm_put_table_device(struct mapped_device *md, struct dm_dev *d) 70086f1152bSBenjamin Marzinski { 70186f1152bSBenjamin Marzinski struct table_device *td = container_of(d, struct table_device, dm_dev); 70286f1152bSBenjamin Marzinski 70386f1152bSBenjamin Marzinski mutex_lock(&md->table_devices_lock); 70486f1152bSBenjamin Marzinski if (atomic_dec_and_test(&td->count)) { 70586f1152bSBenjamin Marzinski close_table_device(td, md); 70686f1152bSBenjamin Marzinski list_del(&td->list); 70786f1152bSBenjamin Marzinski kfree(td); 70886f1152bSBenjamin Marzinski } 70986f1152bSBenjamin Marzinski mutex_unlock(&md->table_devices_lock); 71086f1152bSBenjamin Marzinski } 71186f1152bSBenjamin Marzinski EXPORT_SYMBOL(dm_put_table_device); 71286f1152bSBenjamin Marzinski 71386f1152bSBenjamin Marzinski static void free_table_devices(struct list_head *devices) 71486f1152bSBenjamin Marzinski { 71586f1152bSBenjamin Marzinski struct list_head *tmp, *next; 71686f1152bSBenjamin Marzinski 71786f1152bSBenjamin Marzinski list_for_each_safe(tmp, next, devices) { 71886f1152bSBenjamin Marzinski struct table_device *td = list_entry(tmp, struct table_device, list); 71986f1152bSBenjamin Marzinski 72086f1152bSBenjamin Marzinski DMWARN("dm_destroy: %s still exists with %d references", 72186f1152bSBenjamin Marzinski td->dm_dev.name, atomic_read(&td->count)); 72286f1152bSBenjamin Marzinski kfree(td); 72386f1152bSBenjamin Marzinski } 72486f1152bSBenjamin Marzinski } 72586f1152bSBenjamin Marzinski 72686f1152bSBenjamin Marzinski /* 7273ac51e74SDarrick J. Wong * Get the geometry associated with a dm device 7283ac51e74SDarrick J. Wong */ 7293ac51e74SDarrick J. Wong int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo) 7303ac51e74SDarrick J. Wong { 7313ac51e74SDarrick J. Wong *geo = md->geometry; 7323ac51e74SDarrick J. Wong 7333ac51e74SDarrick J. Wong return 0; 7343ac51e74SDarrick J. Wong } 7353ac51e74SDarrick J. Wong 7363ac51e74SDarrick J. Wong /* 7373ac51e74SDarrick J. Wong * Set the geometry of a device. 7383ac51e74SDarrick J. Wong */ 7393ac51e74SDarrick J. Wong int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo) 7403ac51e74SDarrick J. Wong { 7413ac51e74SDarrick J. Wong sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors; 7423ac51e74SDarrick J. Wong 7433ac51e74SDarrick J. Wong if (geo->start > sz) { 7443ac51e74SDarrick J. Wong DMWARN("Start sector is beyond the geometry limits."); 7453ac51e74SDarrick J. Wong return -EINVAL; 7463ac51e74SDarrick J. Wong } 7473ac51e74SDarrick J. Wong 7483ac51e74SDarrick J. Wong md->geometry = *geo; 7493ac51e74SDarrick J. Wong 7503ac51e74SDarrick J. Wong return 0; 7513ac51e74SDarrick J. Wong } 7523ac51e74SDarrick J. Wong 7531da177e4SLinus Torvalds /*----------------------------------------------------------------- 7541da177e4SLinus Torvalds * CRUD START: 7551da177e4SLinus Torvalds * A more elegant soln is in the works that uses the queue 7561da177e4SLinus Torvalds * merge fn, unfortunately there are a couple of changes to 7571da177e4SLinus Torvalds * the block layer that I want to make for this. So in the 7581da177e4SLinus Torvalds * interests of getting something for people to use I give 7591da177e4SLinus Torvalds * you this clearly demarcated crap. 7601da177e4SLinus Torvalds *---------------------------------------------------------------*/ 7611da177e4SLinus Torvalds 7622e93ccc1SKiyoshi Ueda static int __noflush_suspending(struct mapped_device *md) 7632e93ccc1SKiyoshi Ueda { 7642e93ccc1SKiyoshi Ueda return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 7652e93ccc1SKiyoshi Ueda } 7662e93ccc1SKiyoshi Ueda 7671da177e4SLinus Torvalds /* 7681da177e4SLinus Torvalds * Decrements the number of outstanding ios that a bio has been 7691da177e4SLinus Torvalds * cloned into, completing the original io if necc. 7701da177e4SLinus Torvalds */ 771858119e1SArjan van de Ven static void dec_pending(struct dm_io *io, int error) 7721da177e4SLinus Torvalds { 7732e93ccc1SKiyoshi Ueda unsigned long flags; 774b35f8caaSMilan Broz int io_error; 775b35f8caaSMilan Broz struct bio *bio; 776b35f8caaSMilan Broz struct mapped_device *md = io->md; 7772e93ccc1SKiyoshi Ueda 7782e93ccc1SKiyoshi Ueda /* Push-back supersedes any I/O errors */ 779f88fb981SKiyoshi Ueda if (unlikely(error)) { 780f88fb981SKiyoshi Ueda spin_lock_irqsave(&io->endio_lock, flags); 781f88fb981SKiyoshi Ueda if (!(io->error > 0 && __noflush_suspending(md))) 7821da177e4SLinus Torvalds io->error = error; 783f88fb981SKiyoshi Ueda spin_unlock_irqrestore(&io->endio_lock, flags); 784f88fb981SKiyoshi Ueda } 7851da177e4SLinus Torvalds 7861da177e4SLinus Torvalds if (atomic_dec_and_test(&io->io_count)) { 7872e93ccc1SKiyoshi Ueda if (io->error == DM_ENDIO_REQUEUE) { 7882e93ccc1SKiyoshi Ueda /* 7892e93ccc1SKiyoshi Ueda * Target requested pushing back the I/O. 7902e93ccc1SKiyoshi Ueda */ 791022c2611SMikulas Patocka spin_lock_irqsave(&md->deferred_lock, flags); 7926a8736d1STejun Heo if (__noflush_suspending(md)) 7936a8736d1STejun Heo bio_list_add_head(&md->deferred, io->bio); 7946a8736d1STejun Heo else 7952e93ccc1SKiyoshi Ueda /* noflush suspend was interrupted. */ 7962e93ccc1SKiyoshi Ueda io->error = -EIO; 797022c2611SMikulas Patocka spin_unlock_irqrestore(&md->deferred_lock, flags); 7982e93ccc1SKiyoshi Ueda } 7992e93ccc1SKiyoshi Ueda 800b35f8caaSMilan Broz io_error = io->error; 801b35f8caaSMilan Broz bio = io->bio; 802af7e466aSMikulas Patocka end_io_acct(io); 803a97f925aSMikulas Patocka free_io(md, io); 8041da177e4SLinus Torvalds 8056a8736d1STejun Heo if (io_error == DM_ENDIO_REQUEUE) 8066a8736d1STejun Heo return; 8076a8736d1STejun Heo 8081eff9d32SJens Axboe if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) { 8091da177e4SLinus Torvalds /* 8106a8736d1STejun Heo * Preflush done for flush with data, reissue 81128a8f0d3SMike Christie * without REQ_PREFLUSH. 8121da177e4SLinus Torvalds */ 8131eff9d32SJens Axboe bio->bi_opf &= ~REQ_PREFLUSH; 8146a8736d1STejun Heo queue_io(md, bio); 8155f3ea37cSArnaldo Carvalho de Melo } else { 816b372d360SMike Snitzer /* done with normal IO or empty flush */ 8174246a0b6SChristoph Hellwig bio->bi_error = io_error; 8184246a0b6SChristoph Hellwig bio_endio(bio); 8192e93ccc1SKiyoshi Ueda } 8201da177e4SLinus Torvalds } 821af7e466aSMikulas Patocka } 8221da177e4SLinus Torvalds 8234cc96131SMike Snitzer void disable_write_same(struct mapped_device *md) 8247eee4ae2SMike Snitzer { 8257eee4ae2SMike Snitzer struct queue_limits *limits = dm_get_queue_limits(md); 8267eee4ae2SMike Snitzer 8277eee4ae2SMike Snitzer /* device doesn't really support WRITE SAME, disable it */ 8287eee4ae2SMike Snitzer limits->max_write_same_sectors = 0; 8297eee4ae2SMike Snitzer } 8307eee4ae2SMike Snitzer 831ac62d620SChristoph Hellwig void disable_write_zeroes(struct mapped_device *md) 832ac62d620SChristoph Hellwig { 833ac62d620SChristoph Hellwig struct queue_limits *limits = dm_get_queue_limits(md); 834ac62d620SChristoph Hellwig 835ac62d620SChristoph Hellwig /* device doesn't really support WRITE ZEROES, disable it */ 836ac62d620SChristoph Hellwig limits->max_write_zeroes_sectors = 0; 837ac62d620SChristoph Hellwig } 838ac62d620SChristoph Hellwig 8394246a0b6SChristoph Hellwig static void clone_endio(struct bio *bio) 8401da177e4SLinus Torvalds { 8414246a0b6SChristoph Hellwig int error = bio->bi_error; 8425164beceSzhendong chen int r = error; 843bfc6d41cSMikulas Patocka struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); 844b35f8caaSMilan Broz struct dm_io *io = tio->io; 8459faf400fSStefan Bader struct mapped_device *md = tio->io->md; 8461da177e4SLinus Torvalds dm_endio_fn endio = tio->ti->type->end_io; 8471da177e4SLinus Torvalds 848*1be56909SChristoph Hellwig if (unlikely(error == -EREMOTEIO)) { 849ac62d620SChristoph Hellwig if (bio_op(bio) == REQ_OP_WRITE_SAME && 850ac62d620SChristoph Hellwig !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors) 8517eee4ae2SMike Snitzer disable_write_same(md); 852ac62d620SChristoph Hellwig if (bio_op(bio) == REQ_OP_WRITE_ZEROES && 853ac62d620SChristoph Hellwig !bdev_get_queue(bio->bi_bdev)->limits.max_write_zeroes_sectors) 854ac62d620SChristoph Hellwig disable_write_zeroes(md); 855ac62d620SChristoph Hellwig } 8567eee4ae2SMike Snitzer 857*1be56909SChristoph Hellwig if (endio) { 858*1be56909SChristoph Hellwig r = endio(tio->ti, bio, &error); 859*1be56909SChristoph Hellwig switch (r) { 860*1be56909SChristoph Hellwig case DM_ENDIO_REQUEUE: 861*1be56909SChristoph Hellwig error = DM_ENDIO_REQUEUE; 862*1be56909SChristoph Hellwig /*FALLTHRU*/ 863*1be56909SChristoph Hellwig case DM_ENDIO_DONE: 864*1be56909SChristoph Hellwig break; 865*1be56909SChristoph Hellwig case DM_ENDIO_INCOMPLETE: 866*1be56909SChristoph Hellwig /* The target will handle the io */ 867*1be56909SChristoph Hellwig return; 868*1be56909SChristoph Hellwig default: 869*1be56909SChristoph Hellwig DMWARN("unimplemented target endio return value: %d", r); 870*1be56909SChristoph Hellwig BUG(); 871*1be56909SChristoph Hellwig } 872*1be56909SChristoph Hellwig } 873*1be56909SChristoph Hellwig 874cfae7529SMike Snitzer free_tio(tio); 875b35f8caaSMilan Broz dec_pending(io, error); 8761da177e4SLinus Torvalds } 8771da177e4SLinus Torvalds 87878d8e58aSMike Snitzer /* 87956a67df7SMike Snitzer * Return maximum size of I/O possible at the supplied sector up to the current 88056a67df7SMike Snitzer * target boundary. 88156a67df7SMike Snitzer */ 88256a67df7SMike Snitzer static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti) 8831da177e4SLinus Torvalds { 88456a67df7SMike Snitzer sector_t target_offset = dm_target_offset(ti, sector); 88556a67df7SMike Snitzer 88656a67df7SMike Snitzer return ti->len - target_offset; 88756a67df7SMike Snitzer } 88856a67df7SMike Snitzer 88956a67df7SMike Snitzer static sector_t max_io_len(sector_t sector, struct dm_target *ti) 89056a67df7SMike Snitzer { 89156a67df7SMike Snitzer sector_t len = max_io_len_target_boundary(sector, ti); 892542f9038SMike Snitzer sector_t offset, max_len; 8931da177e4SLinus Torvalds 8941da177e4SLinus Torvalds /* 8951da177e4SLinus Torvalds * Does the target need to split even further? 8961da177e4SLinus Torvalds */ 897542f9038SMike Snitzer if (ti->max_io_len) { 898542f9038SMike Snitzer offset = dm_target_offset(ti, sector); 899542f9038SMike Snitzer if (unlikely(ti->max_io_len & (ti->max_io_len - 1))) 900542f9038SMike Snitzer max_len = sector_div(offset, ti->max_io_len); 901542f9038SMike Snitzer else 902542f9038SMike Snitzer max_len = offset & (ti->max_io_len - 1); 903542f9038SMike Snitzer max_len = ti->max_io_len - max_len; 904542f9038SMike Snitzer 905542f9038SMike Snitzer if (len > max_len) 906542f9038SMike Snitzer len = max_len; 9071da177e4SLinus Torvalds } 9081da177e4SLinus Torvalds 9091da177e4SLinus Torvalds return len; 9101da177e4SLinus Torvalds } 9111da177e4SLinus Torvalds 912542f9038SMike Snitzer int dm_set_target_max_io_len(struct dm_target *ti, sector_t len) 913542f9038SMike Snitzer { 914542f9038SMike Snitzer if (len > UINT_MAX) { 915542f9038SMike Snitzer DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)", 916542f9038SMike Snitzer (unsigned long long)len, UINT_MAX); 917542f9038SMike Snitzer ti->error = "Maximum size of target IO is too large"; 918542f9038SMike Snitzer return -EINVAL; 919542f9038SMike Snitzer } 920542f9038SMike Snitzer 921542f9038SMike Snitzer ti->max_io_len = (uint32_t) len; 922542f9038SMike Snitzer 923542f9038SMike Snitzer return 0; 924542f9038SMike Snitzer } 925542f9038SMike Snitzer EXPORT_SYMBOL_GPL(dm_set_target_max_io_len); 926542f9038SMike Snitzer 927f26c5719SDan Williams static struct dm_target *dm_dax_get_live_target(struct mapped_device *md, 928f26c5719SDan Williams sector_t sector, int *srcu_idx) 929545ed20eSToshi Kani { 930545ed20eSToshi Kani struct dm_table *map; 931545ed20eSToshi Kani struct dm_target *ti; 932545ed20eSToshi Kani 933f26c5719SDan Williams map = dm_get_live_table(md, srcu_idx); 934545ed20eSToshi Kani if (!map) 935f26c5719SDan Williams return NULL; 936545ed20eSToshi Kani 937545ed20eSToshi Kani ti = dm_table_find_target(map, sector); 938545ed20eSToshi Kani if (!dm_target_is_valid(ti)) 939f26c5719SDan Williams return NULL; 940f26c5719SDan Williams 941f26c5719SDan Williams return ti; 942f26c5719SDan Williams } 943f26c5719SDan Williams 944f26c5719SDan Williams static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, 945f26c5719SDan Williams long nr_pages, void **kaddr, pfn_t *pfn) 946f26c5719SDan Williams { 947f26c5719SDan Williams struct mapped_device *md = dax_get_private(dax_dev); 948f26c5719SDan Williams sector_t sector = pgoff * PAGE_SECTORS; 949f26c5719SDan Williams struct dm_target *ti; 950f26c5719SDan Williams long len, ret = -EIO; 951f26c5719SDan Williams int srcu_idx; 952f26c5719SDan Williams 953f26c5719SDan Williams ti = dm_dax_get_live_target(md, sector, &srcu_idx); 954f26c5719SDan Williams 955f26c5719SDan Williams if (!ti) 956545ed20eSToshi Kani goto out; 957f26c5719SDan Williams if (!ti->type->direct_access) 958f26c5719SDan Williams goto out; 959f26c5719SDan Williams len = max_io_len(sector, ti) / PAGE_SECTORS; 960f26c5719SDan Williams if (len < 1) 961f26c5719SDan Williams goto out; 962f26c5719SDan Williams nr_pages = min(len, nr_pages); 963545ed20eSToshi Kani if (ti->type->direct_access) 964817bf402SDan Williams ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn); 965817bf402SDan Williams 966545ed20eSToshi Kani out: 967545ed20eSToshi Kani dm_put_live_table(md, srcu_idx); 968f26c5719SDan Williams 969f26c5719SDan Williams return ret; 970545ed20eSToshi Kani } 971545ed20eSToshi Kani 9721dd40c3eSMikulas Patocka /* 9731dd40c3eSMikulas Patocka * A target may call dm_accept_partial_bio only from the map routine. It is 97428a8f0d3SMike Christie * allowed for all bio types except REQ_PREFLUSH. 9751dd40c3eSMikulas Patocka * 9761dd40c3eSMikulas Patocka * dm_accept_partial_bio informs the dm that the target only wants to process 9771dd40c3eSMikulas Patocka * additional n_sectors sectors of the bio and the rest of the data should be 9781dd40c3eSMikulas Patocka * sent in a next bio. 9791dd40c3eSMikulas Patocka * 9801dd40c3eSMikulas Patocka * A diagram that explains the arithmetics: 9811dd40c3eSMikulas Patocka * +--------------------+---------------+-------+ 9821dd40c3eSMikulas Patocka * | 1 | 2 | 3 | 9831dd40c3eSMikulas Patocka * +--------------------+---------------+-------+ 9841dd40c3eSMikulas Patocka * 9851dd40c3eSMikulas Patocka * <-------------- *tio->len_ptr ---------------> 9861dd40c3eSMikulas Patocka * <------- bi_size -------> 9871dd40c3eSMikulas Patocka * <-- n_sectors --> 9881dd40c3eSMikulas Patocka * 9891dd40c3eSMikulas Patocka * Region 1 was already iterated over with bio_advance or similar function. 9901dd40c3eSMikulas Patocka * (it may be empty if the target doesn't use bio_advance) 9911dd40c3eSMikulas Patocka * Region 2 is the remaining bio size that the target wants to process. 9921dd40c3eSMikulas Patocka * (it may be empty if region 1 is non-empty, although there is no reason 9931dd40c3eSMikulas Patocka * to make it empty) 9941dd40c3eSMikulas Patocka * The target requires that region 3 is to be sent in the next bio. 9951dd40c3eSMikulas Patocka * 9961dd40c3eSMikulas Patocka * If the target wants to receive multiple copies of the bio (via num_*bios, etc), 9971dd40c3eSMikulas Patocka * the partially processed part (the sum of regions 1+2) must be the same for all 9981dd40c3eSMikulas Patocka * copies of the bio. 9991dd40c3eSMikulas Patocka */ 10001dd40c3eSMikulas Patocka void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors) 10011dd40c3eSMikulas Patocka { 10021dd40c3eSMikulas Patocka struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); 10031dd40c3eSMikulas Patocka unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT; 10041eff9d32SJens Axboe BUG_ON(bio->bi_opf & REQ_PREFLUSH); 10051dd40c3eSMikulas Patocka BUG_ON(bi_size > *tio->len_ptr); 10061dd40c3eSMikulas Patocka BUG_ON(n_sectors > bi_size); 10071dd40c3eSMikulas Patocka *tio->len_ptr -= bi_size - n_sectors; 10081dd40c3eSMikulas Patocka bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT; 10091dd40c3eSMikulas Patocka } 10101dd40c3eSMikulas Patocka EXPORT_SYMBOL_GPL(dm_accept_partial_bio); 10111dd40c3eSMikulas Patocka 1012d67a5f4bSMikulas Patocka /* 1013d67a5f4bSMikulas Patocka * Flush current->bio_list when the target map method blocks. 1014d67a5f4bSMikulas Patocka * This fixes deadlocks in snapshot and possibly in other targets. 1015d67a5f4bSMikulas Patocka */ 1016d67a5f4bSMikulas Patocka struct dm_offload { 1017d67a5f4bSMikulas Patocka struct blk_plug plug; 1018d67a5f4bSMikulas Patocka struct blk_plug_cb cb; 1019d67a5f4bSMikulas Patocka }; 1020d67a5f4bSMikulas Patocka 1021d67a5f4bSMikulas Patocka static void flush_current_bio_list(struct blk_plug_cb *cb, bool from_schedule) 1022d67a5f4bSMikulas Patocka { 1023d67a5f4bSMikulas Patocka struct dm_offload *o = container_of(cb, struct dm_offload, cb); 1024d67a5f4bSMikulas Patocka struct bio_list list; 1025d67a5f4bSMikulas Patocka struct bio *bio; 1026f5fe1b51SNeilBrown int i; 1027d67a5f4bSMikulas Patocka 1028d67a5f4bSMikulas Patocka INIT_LIST_HEAD(&o->cb.list); 1029d67a5f4bSMikulas Patocka 1030d67a5f4bSMikulas Patocka if (unlikely(!current->bio_list)) 1031d67a5f4bSMikulas Patocka return; 1032d67a5f4bSMikulas Patocka 1033f5fe1b51SNeilBrown for (i = 0; i < 2; i++) { 1034f5fe1b51SNeilBrown list = current->bio_list[i]; 1035f5fe1b51SNeilBrown bio_list_init(¤t->bio_list[i]); 1036d67a5f4bSMikulas Patocka 1037d67a5f4bSMikulas Patocka while ((bio = bio_list_pop(&list))) { 1038d67a5f4bSMikulas Patocka struct bio_set *bs = bio->bi_pool; 1039d67a5f4bSMikulas Patocka if (unlikely(!bs) || bs == fs_bio_set) { 1040f5fe1b51SNeilBrown bio_list_add(¤t->bio_list[i], bio); 1041d67a5f4bSMikulas Patocka continue; 1042d67a5f4bSMikulas Patocka } 1043d67a5f4bSMikulas Patocka 1044d67a5f4bSMikulas Patocka spin_lock(&bs->rescue_lock); 1045d67a5f4bSMikulas Patocka bio_list_add(&bs->rescue_list, bio); 1046d67a5f4bSMikulas Patocka queue_work(bs->rescue_workqueue, &bs->rescue_work); 1047d67a5f4bSMikulas Patocka spin_unlock(&bs->rescue_lock); 1048d67a5f4bSMikulas Patocka } 1049d67a5f4bSMikulas Patocka } 1050f5fe1b51SNeilBrown } 1051d67a5f4bSMikulas Patocka 1052d67a5f4bSMikulas Patocka static void dm_offload_start(struct dm_offload *o) 1053d67a5f4bSMikulas Patocka { 1054d67a5f4bSMikulas Patocka blk_start_plug(&o->plug); 1055d67a5f4bSMikulas Patocka o->cb.callback = flush_current_bio_list; 1056d67a5f4bSMikulas Patocka list_add(&o->cb.list, ¤t->plug->cb_list); 1057d67a5f4bSMikulas Patocka } 1058d67a5f4bSMikulas Patocka 1059d67a5f4bSMikulas Patocka static void dm_offload_end(struct dm_offload *o) 1060d67a5f4bSMikulas Patocka { 1061d67a5f4bSMikulas Patocka list_del(&o->cb.list); 1062d67a5f4bSMikulas Patocka blk_finish_plug(&o->plug); 1063d67a5f4bSMikulas Patocka } 1064d67a5f4bSMikulas Patocka 1065bd2a49b8SAlasdair G Kergon static void __map_bio(struct dm_target_io *tio) 10661da177e4SLinus Torvalds { 10671da177e4SLinus Torvalds int r; 10682056a782SJens Axboe sector_t sector; 1069d67a5f4bSMikulas Patocka struct dm_offload o; 1070dba14160SMikulas Patocka struct bio *clone = &tio->clone; 1071bd2a49b8SAlasdair G Kergon struct dm_target *ti = tio->ti; 10721da177e4SLinus Torvalds 10731da177e4SLinus Torvalds clone->bi_end_io = clone_endio; 10741da177e4SLinus Torvalds 10751da177e4SLinus Torvalds /* 10761da177e4SLinus Torvalds * Map the clone. If r == 0 we don't need to do 10771da177e4SLinus Torvalds * anything, the target has assumed ownership of 10781da177e4SLinus Torvalds * this io. 10791da177e4SLinus Torvalds */ 10801da177e4SLinus Torvalds atomic_inc(&tio->io->io_count); 10814f024f37SKent Overstreet sector = clone->bi_iter.bi_sector; 1082d67a5f4bSMikulas Patocka 1083d67a5f4bSMikulas Patocka dm_offload_start(&o); 10847de3ee57SMikulas Patocka r = ti->type->map(ti, clone); 1085d67a5f4bSMikulas Patocka dm_offload_end(&o); 1086d67a5f4bSMikulas Patocka 1087846785e6SChristoph Hellwig switch (r) { 1088846785e6SChristoph Hellwig case DM_MAPIO_SUBMITTED: 1089846785e6SChristoph Hellwig break; 1090846785e6SChristoph Hellwig case DM_MAPIO_REMAPPED: 10911da177e4SLinus Torvalds /* the bio has been remapped so dispatch it */ 1092d07335e5SMike Snitzer trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone, 109322a7c31aSAlan D. Brunelle tio->io->bio->bi_bdev->bd_dev, sector); 10941da177e4SLinus Torvalds generic_make_request(clone); 1095846785e6SChristoph Hellwig break; 1096846785e6SChristoph Hellwig case DM_MAPIO_KILL: 1097846785e6SChristoph Hellwig r = -EIO; 1098846785e6SChristoph Hellwig /*FALLTHRU*/ 1099846785e6SChristoph Hellwig case DM_MAPIO_REQUEUE: 11002e93ccc1SKiyoshi Ueda /* error the io and bail out, or requeue it if needed */ 11019faf400fSStefan Bader dec_pending(tio->io, r); 1102cfae7529SMike Snitzer free_tio(tio); 1103846785e6SChristoph Hellwig break; 1104846785e6SChristoph Hellwig default: 110545cbcd79SKiyoshi Ueda DMWARN("unimplemented target map return value: %d", r); 110645cbcd79SKiyoshi Ueda BUG(); 11071da177e4SLinus Torvalds } 11081da177e4SLinus Torvalds } 11091da177e4SLinus Torvalds 11101da177e4SLinus Torvalds struct clone_info { 11111da177e4SLinus Torvalds struct mapped_device *md; 11121da177e4SLinus Torvalds struct dm_table *map; 11131da177e4SLinus Torvalds struct bio *bio; 11141da177e4SLinus Torvalds struct dm_io *io; 11151da177e4SLinus Torvalds sector_t sector; 1116e0d6609aSMikulas Patocka unsigned sector_count; 11171da177e4SLinus Torvalds }; 11181da177e4SLinus Torvalds 1119e0d6609aSMikulas Patocka static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len) 1120bd2a49b8SAlasdair G Kergon { 11214f024f37SKent Overstreet bio->bi_iter.bi_sector = sector; 11224f024f37SKent Overstreet bio->bi_iter.bi_size = to_bytes(len); 11231da177e4SLinus Torvalds } 11241da177e4SLinus Torvalds 11251da177e4SLinus Torvalds /* 11261da177e4SLinus Torvalds * Creates a bio that consists of range of complete bvecs. 11271da177e4SLinus Torvalds */ 1128c80914e8SMike Snitzer static int clone_bio(struct dm_target_io *tio, struct bio *bio, 11291c3b13e6SKent Overstreet sector_t sector, unsigned len) 11301da177e4SLinus Torvalds { 1131dba14160SMikulas Patocka struct bio *clone = &tio->clone; 11321da177e4SLinus Torvalds 11331c3b13e6SKent Overstreet __bio_clone_fast(clone, bio); 11349c47008dSMartin K. Petersen 1135e2460f2aSMikulas Patocka if (unlikely(bio_integrity(bio) != NULL)) { 1136e2460f2aSMikulas Patocka int r; 1137e2460f2aSMikulas Patocka 1138e2460f2aSMikulas Patocka if (unlikely(!dm_target_has_integrity(tio->ti->type) && 1139e2460f2aSMikulas Patocka !dm_target_passes_integrity(tio->ti->type))) { 1140e2460f2aSMikulas Patocka DMWARN("%s: the target %s doesn't support integrity data.", 1141e2460f2aSMikulas Patocka dm_device_name(tio->io->md), 1142e2460f2aSMikulas Patocka tio->ti->type->name); 1143e2460f2aSMikulas Patocka return -EIO; 1144e2460f2aSMikulas Patocka } 1145e2460f2aSMikulas Patocka 1146e2460f2aSMikulas Patocka r = bio_integrity_clone(clone, bio, GFP_NOIO); 1147c80914e8SMike Snitzer if (r < 0) 1148c80914e8SMike Snitzer return r; 1149c80914e8SMike Snitzer } 11501c3b13e6SKent Overstreet 11511c3b13e6SKent Overstreet bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector)); 11521c3b13e6SKent Overstreet clone->bi_iter.bi_size = to_bytes(len); 11531c3b13e6SKent Overstreet 1154e2460f2aSMikulas Patocka if (unlikely(bio_integrity(bio) != NULL)) 11551c3b13e6SKent Overstreet bio_integrity_trim(clone, 0, len); 1156c80914e8SMike Snitzer 1157c80914e8SMike Snitzer return 0; 11581da177e4SLinus Torvalds } 11591da177e4SLinus Torvalds 11609015df24SAlasdair G Kergon static struct dm_target_io *alloc_tio(struct clone_info *ci, 116199778273SJunichi Nomura struct dm_target *ti, 116255a62eefSAlasdair G Kergon unsigned target_bio_nr) 1163f9ab94ceSMikulas Patocka { 1164dba14160SMikulas Patocka struct dm_target_io *tio; 1165dba14160SMikulas Patocka struct bio *clone; 1166dba14160SMikulas Patocka 116799778273SJunichi Nomura clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs); 1168dba14160SMikulas Patocka tio = container_of(clone, struct dm_target_io, clone); 1169f9ab94ceSMikulas Patocka 1170f9ab94ceSMikulas Patocka tio->io = ci->io; 1171f9ab94ceSMikulas Patocka tio->ti = ti; 117255a62eefSAlasdair G Kergon tio->target_bio_nr = target_bio_nr; 11739015df24SAlasdair G Kergon 11749015df24SAlasdair G Kergon return tio; 11759015df24SAlasdair G Kergon } 11769015df24SAlasdair G Kergon 117714fe594dSAlasdair G Kergon static void __clone_and_map_simple_bio(struct clone_info *ci, 117814fe594dSAlasdair G Kergon struct dm_target *ti, 11791dd40c3eSMikulas Patocka unsigned target_bio_nr, unsigned *len) 11809015df24SAlasdair G Kergon { 118199778273SJunichi Nomura struct dm_target_io *tio = alloc_tio(ci, ti, target_bio_nr); 1182dba14160SMikulas Patocka struct bio *clone = &tio->clone; 11839015df24SAlasdair G Kergon 11841dd40c3eSMikulas Patocka tio->len_ptr = len; 11851dd40c3eSMikulas Patocka 11861c3b13e6SKent Overstreet __bio_clone_fast(clone, ci->bio); 1187bd2a49b8SAlasdair G Kergon if (len) 11881dd40c3eSMikulas Patocka bio_setup_sector(clone, ci->sector, *len); 1189f9ab94ceSMikulas Patocka 1190bd2a49b8SAlasdair G Kergon __map_bio(tio); 1191f9ab94ceSMikulas Patocka } 1192f9ab94ceSMikulas Patocka 119314fe594dSAlasdair G Kergon static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti, 11941dd40c3eSMikulas Patocka unsigned num_bios, unsigned *len) 119506a426ceSMike Snitzer { 119655a62eefSAlasdair G Kergon unsigned target_bio_nr; 119706a426ceSMike Snitzer 119855a62eefSAlasdair G Kergon for (target_bio_nr = 0; target_bio_nr < num_bios; target_bio_nr++) 119914fe594dSAlasdair G Kergon __clone_and_map_simple_bio(ci, ti, target_bio_nr, len); 120006a426ceSMike Snitzer } 120106a426ceSMike Snitzer 120214fe594dSAlasdair G Kergon static int __send_empty_flush(struct clone_info *ci) 1203f9ab94ceSMikulas Patocka { 120406a426ceSMike Snitzer unsigned target_nr = 0; 1205f9ab94ceSMikulas Patocka struct dm_target *ti; 1206f9ab94ceSMikulas Patocka 1207b372d360SMike Snitzer BUG_ON(bio_has_data(ci->bio)); 1208f9ab94ceSMikulas Patocka while ((ti = dm_table_get_target(ci->map, target_nr++))) 12091dd40c3eSMikulas Patocka __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL); 1210f9ab94ceSMikulas Patocka 1211f9ab94ceSMikulas Patocka return 0; 1212f9ab94ceSMikulas Patocka } 1213f9ab94ceSMikulas Patocka 1214c80914e8SMike Snitzer static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti, 12151dd40c3eSMikulas Patocka sector_t sector, unsigned *len) 12165ae89a87SMike Snitzer { 1217dba14160SMikulas Patocka struct bio *bio = ci->bio; 12185ae89a87SMike Snitzer struct dm_target_io *tio; 1219b0d8ed4dSAlasdair G Kergon unsigned target_bio_nr; 1220b0d8ed4dSAlasdair G Kergon unsigned num_target_bios = 1; 1221c80914e8SMike Snitzer int r = 0; 12225ae89a87SMike Snitzer 1223b0d8ed4dSAlasdair G Kergon /* 1224b0d8ed4dSAlasdair G Kergon * Does the target want to receive duplicate copies of the bio? 1225b0d8ed4dSAlasdair G Kergon */ 1226b0d8ed4dSAlasdair G Kergon if (bio_data_dir(bio) == WRITE && ti->num_write_bios) 1227b0d8ed4dSAlasdair G Kergon num_target_bios = ti->num_write_bios(ti, bio); 1228e4c93811SAlasdair G Kergon 1229b0d8ed4dSAlasdair G Kergon for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) { 123099778273SJunichi Nomura tio = alloc_tio(ci, ti, target_bio_nr); 12311dd40c3eSMikulas Patocka tio->len_ptr = len; 1232c80914e8SMike Snitzer r = clone_bio(tio, bio, sector, *len); 1233072623deSMikulas Patocka if (r < 0) { 1234cfae7529SMike Snitzer free_tio(tio); 1235c80914e8SMike Snitzer break; 1236072623deSMikulas Patocka } 1237bd2a49b8SAlasdair G Kergon __map_bio(tio); 12385ae89a87SMike Snitzer } 1239c80914e8SMike Snitzer 1240c80914e8SMike Snitzer return r; 1241b0d8ed4dSAlasdair G Kergon } 12425ae89a87SMike Snitzer 124355a62eefSAlasdair G Kergon typedef unsigned (*get_num_bios_fn)(struct dm_target *ti); 124423508a96SMike Snitzer 124555a62eefSAlasdair G Kergon static unsigned get_num_discard_bios(struct dm_target *ti) 124623508a96SMike Snitzer { 124755a62eefSAlasdair G Kergon return ti->num_discard_bios; 124823508a96SMike Snitzer } 124923508a96SMike Snitzer 125055a62eefSAlasdair G Kergon static unsigned get_num_write_same_bios(struct dm_target *ti) 125123508a96SMike Snitzer { 125255a62eefSAlasdair G Kergon return ti->num_write_same_bios; 125323508a96SMike Snitzer } 125423508a96SMike Snitzer 1255ac62d620SChristoph Hellwig static unsigned get_num_write_zeroes_bios(struct dm_target *ti) 1256ac62d620SChristoph Hellwig { 1257ac62d620SChristoph Hellwig return ti->num_write_zeroes_bios; 1258ac62d620SChristoph Hellwig } 1259ac62d620SChristoph Hellwig 126023508a96SMike Snitzer typedef bool (*is_split_required_fn)(struct dm_target *ti); 126123508a96SMike Snitzer 126223508a96SMike Snitzer static bool is_split_required_for_discard(struct dm_target *ti) 126323508a96SMike Snitzer { 126455a62eefSAlasdair G Kergon return ti->split_discard_bios; 126523508a96SMike Snitzer } 126623508a96SMike Snitzer 126714fe594dSAlasdair G Kergon static int __send_changing_extent_only(struct clone_info *ci, 126855a62eefSAlasdair G Kergon get_num_bios_fn get_num_bios, 126923508a96SMike Snitzer is_split_required_fn is_split_required) 12705ae89a87SMike Snitzer { 12715ae89a87SMike Snitzer struct dm_target *ti; 1272e0d6609aSMikulas Patocka unsigned len; 127355a62eefSAlasdair G Kergon unsigned num_bios; 12745ae89a87SMike Snitzer 1275a79245b3SMike Snitzer do { 12765ae89a87SMike Snitzer ti = dm_table_find_target(ci->map, ci->sector); 12775ae89a87SMike Snitzer if (!dm_target_is_valid(ti)) 12785ae89a87SMike Snitzer return -EIO; 12795ae89a87SMike Snitzer 12805ae89a87SMike Snitzer /* 128123508a96SMike Snitzer * Even though the device advertised support for this type of 128223508a96SMike Snitzer * request, that does not mean every target supports it, and 1283936688d7SMike Snitzer * reconfiguration might also have changed that since the 12845ae89a87SMike Snitzer * check was performed. 12855ae89a87SMike Snitzer */ 128655a62eefSAlasdair G Kergon num_bios = get_num_bios ? get_num_bios(ti) : 0; 128755a62eefSAlasdair G Kergon if (!num_bios) 12885ae89a87SMike Snitzer return -EOPNOTSUPP; 12895ae89a87SMike Snitzer 129023508a96SMike Snitzer if (is_split_required && !is_split_required(ti)) 1291e0d6609aSMikulas Patocka len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti)); 12927acf0277SMikulas Patocka else 1293e0d6609aSMikulas Patocka len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti)); 12945ae89a87SMike Snitzer 12951dd40c3eSMikulas Patocka __send_duplicate_bios(ci, ti, num_bios, &len); 12965ae89a87SMike Snitzer 1297a79245b3SMike Snitzer ci->sector += len; 1298a79245b3SMike Snitzer } while (ci->sector_count -= len); 12995ae89a87SMike Snitzer 13005ae89a87SMike Snitzer return 0; 13015ae89a87SMike Snitzer } 13025ae89a87SMike Snitzer 130314fe594dSAlasdair G Kergon static int __send_discard(struct clone_info *ci) 130423508a96SMike Snitzer { 130514fe594dSAlasdair G Kergon return __send_changing_extent_only(ci, get_num_discard_bios, 130623508a96SMike Snitzer is_split_required_for_discard); 130723508a96SMike Snitzer } 130823508a96SMike Snitzer 130914fe594dSAlasdair G Kergon static int __send_write_same(struct clone_info *ci) 131023508a96SMike Snitzer { 131114fe594dSAlasdair G Kergon return __send_changing_extent_only(ci, get_num_write_same_bios, NULL); 131223508a96SMike Snitzer } 131323508a96SMike Snitzer 1314ac62d620SChristoph Hellwig static int __send_write_zeroes(struct clone_info *ci) 1315ac62d620SChristoph Hellwig { 1316ac62d620SChristoph Hellwig return __send_changing_extent_only(ci, get_num_write_zeroes_bios, NULL); 1317ac62d620SChristoph Hellwig } 1318ac62d620SChristoph Hellwig 1319e4c93811SAlasdair G Kergon /* 1320e4c93811SAlasdair G Kergon * Select the correct strategy for processing a non-flush bio. 1321e4c93811SAlasdair G Kergon */ 1322e4c93811SAlasdair G Kergon static int __split_and_process_non_flush(struct clone_info *ci) 1323e4c93811SAlasdair G Kergon { 1324e4c93811SAlasdair G Kergon struct bio *bio = ci->bio; 1325e4c93811SAlasdair G Kergon struct dm_target *ti; 13261c3b13e6SKent Overstreet unsigned len; 1327c80914e8SMike Snitzer int r; 1328e4c93811SAlasdair G Kergon 1329e6047149SMike Christie if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) 1330e4c93811SAlasdair G Kergon return __send_discard(ci); 1331e6047149SMike Christie else if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME)) 1332e4c93811SAlasdair G Kergon return __send_write_same(ci); 1333ac62d620SChristoph Hellwig else if (unlikely(bio_op(bio) == REQ_OP_WRITE_ZEROES)) 1334ac62d620SChristoph Hellwig return __send_write_zeroes(ci); 1335e4c93811SAlasdair G Kergon 1336e4c93811SAlasdair G Kergon ti = dm_table_find_target(ci->map, ci->sector); 1337e4c93811SAlasdair G Kergon if (!dm_target_is_valid(ti)) 1338e4c93811SAlasdair G Kergon return -EIO; 1339e4c93811SAlasdair G Kergon 13401c3b13e6SKent Overstreet len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count); 1341e4c93811SAlasdair G Kergon 1342c80914e8SMike Snitzer r = __clone_and_map_data_bio(ci, ti, ci->sector, &len); 1343c80914e8SMike Snitzer if (r < 0) 1344c80914e8SMike Snitzer return r; 1345e4c93811SAlasdair G Kergon 1346e4c93811SAlasdair G Kergon ci->sector += len; 1347e4c93811SAlasdair G Kergon ci->sector_count -= len; 1348e4c93811SAlasdair G Kergon 1349e4c93811SAlasdair G Kergon return 0; 1350e4c93811SAlasdair G Kergon } 1351e4c93811SAlasdair G Kergon 1352e4c93811SAlasdair G Kergon /* 135314fe594dSAlasdair G Kergon * Entry point to split a bio into clones and submit them to the targets. 13541da177e4SLinus Torvalds */ 135583d5e5b0SMikulas Patocka static void __split_and_process_bio(struct mapped_device *md, 135683d5e5b0SMikulas Patocka struct dm_table *map, struct bio *bio) 13571da177e4SLinus Torvalds { 13581da177e4SLinus Torvalds struct clone_info ci; 1359512875bdSJun'ichi Nomura int error = 0; 13601da177e4SLinus Torvalds 136183d5e5b0SMikulas Patocka if (unlikely(!map)) { 1362f0b9a450SMikulas Patocka bio_io_error(bio); 1363f0b9a450SMikulas Patocka return; 1364f0b9a450SMikulas Patocka } 1365692d0eb9SMikulas Patocka 136683d5e5b0SMikulas Patocka ci.map = map; 13671da177e4SLinus Torvalds ci.md = md; 13681da177e4SLinus Torvalds ci.io = alloc_io(md); 13691da177e4SLinus Torvalds ci.io->error = 0; 13701da177e4SLinus Torvalds atomic_set(&ci.io->io_count, 1); 13711da177e4SLinus Torvalds ci.io->bio = bio; 13721da177e4SLinus Torvalds ci.io->md = md; 1373f88fb981SKiyoshi Ueda spin_lock_init(&ci.io->endio_lock); 13744f024f37SKent Overstreet ci.sector = bio->bi_iter.bi_sector; 13751da177e4SLinus Torvalds 13763eaf840eSJun'ichi "Nick" Nomura start_io_acct(ci.io); 1377bd2a49b8SAlasdair G Kergon 13781eff9d32SJens Axboe if (bio->bi_opf & REQ_PREFLUSH) { 1379b372d360SMike Snitzer ci.bio = &ci.md->flush_bio; 1380b372d360SMike Snitzer ci.sector_count = 0; 138114fe594dSAlasdair G Kergon error = __send_empty_flush(&ci); 1382b372d360SMike Snitzer /* dec_pending submits any data associated with flush */ 1383b372d360SMike Snitzer } else { 13846a8736d1STejun Heo ci.bio = bio; 1385f6fccb12SMilan Broz ci.sector_count = bio_sectors(bio); 1386512875bdSJun'ichi Nomura while (ci.sector_count && !error) 138714fe594dSAlasdair G Kergon error = __split_and_process_non_flush(&ci); 1388d87f4c14STejun Heo } 13891da177e4SLinus Torvalds 13901da177e4SLinus Torvalds /* drop the extra reference count */ 1391512875bdSJun'ichi Nomura dec_pending(ci.io, error); 13929e4e5f87SMilan Broz } 13939e4e5f87SMilan Broz /*----------------------------------------------------------------- 13941da177e4SLinus Torvalds * CRUD END 13951da177e4SLinus Torvalds *---------------------------------------------------------------*/ 13961da177e4SLinus Torvalds 13971da177e4SLinus Torvalds /* 13981da177e4SLinus Torvalds * The request function that just remaps the bio built up by 13991da177e4SLinus Torvalds * dm_merge_bvec. 14001da177e4SLinus Torvalds */ 1401dece1635SJens Axboe static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio) 14021da177e4SLinus Torvalds { 140312f03a49SKevin Corry int rw = bio_data_dir(bio); 14041da177e4SLinus Torvalds struct mapped_device *md = q->queuedata; 140583d5e5b0SMikulas Patocka int srcu_idx; 140683d5e5b0SMikulas Patocka struct dm_table *map; 14071da177e4SLinus Torvalds 140883d5e5b0SMikulas Patocka map = dm_get_live_table(md, &srcu_idx); 14091da177e4SLinus Torvalds 141018c0b223SGu Zheng generic_start_io_acct(rw, bio_sectors(bio), &dm_disk(md)->part0); 141112f03a49SKevin Corry 14126a8736d1STejun Heo /* if we're suspended, we have to queue this io for later */ 14136a8736d1STejun Heo if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { 141483d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 14151da177e4SLinus Torvalds 14161eff9d32SJens Axboe if (!(bio->bi_opf & REQ_RAHEAD)) 141792c63902SMikulas Patocka queue_io(md, bio); 14186a8736d1STejun Heo else 14196a8736d1STejun Heo bio_io_error(bio); 1420dece1635SJens Axboe return BLK_QC_T_NONE; 14211da177e4SLinus Torvalds } 14221da177e4SLinus Torvalds 142383d5e5b0SMikulas Patocka __split_and_process_bio(md, map, bio); 142483d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 1425dece1635SJens Axboe return BLK_QC_T_NONE; 1426cec47e3dSKiyoshi Ueda } 1427cec47e3dSKiyoshi Ueda 14281da177e4SLinus Torvalds static int dm_any_congested(void *congested_data, int bdi_bits) 14291da177e4SLinus Torvalds { 14308a57dfc6SChandra Seetharaman int r = bdi_bits; 14318a57dfc6SChandra Seetharaman struct mapped_device *md = congested_data; 14328a57dfc6SChandra Seetharaman struct dm_table *map; 14331da177e4SLinus Torvalds 14341eb787ecSAlasdair G Kergon if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 1435e522c039SMike Snitzer if (dm_request_based(md)) { 1436cec47e3dSKiyoshi Ueda /* 1437e522c039SMike Snitzer * With request-based DM we only need to check the 1438e522c039SMike Snitzer * top-level queue for congestion. 1439cec47e3dSKiyoshi Ueda */ 1440dc3b17ccSJan Kara r = md->queue->backing_dev_info->wb.state & bdi_bits; 1441e522c039SMike Snitzer } else { 1442e522c039SMike Snitzer map = dm_get_live_table_fast(md); 1443e522c039SMike Snitzer if (map) 14441da177e4SLinus Torvalds r = dm_table_any_congested(map, bdi_bits); 144583d5e5b0SMikulas Patocka dm_put_live_table_fast(md); 14468a57dfc6SChandra Seetharaman } 1447e522c039SMike Snitzer } 14488a57dfc6SChandra Seetharaman 14491da177e4SLinus Torvalds return r; 14501da177e4SLinus Torvalds } 14511da177e4SLinus Torvalds 14521da177e4SLinus Torvalds /*----------------------------------------------------------------- 14531da177e4SLinus Torvalds * An IDR is used to keep track of allocated minor numbers. 14541da177e4SLinus Torvalds *---------------------------------------------------------------*/ 14552b06cfffSAlasdair G Kergon static void free_minor(int minor) 14561da177e4SLinus Torvalds { 1457f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 14581da177e4SLinus Torvalds idr_remove(&_minor_idr, minor); 1459f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 14601da177e4SLinus Torvalds } 14611da177e4SLinus Torvalds 14621da177e4SLinus Torvalds /* 14631da177e4SLinus Torvalds * See if the device with a specific minor # is free. 14641da177e4SLinus Torvalds */ 1465cf13ab8eSFrederik Deweerdt static int specific_minor(int minor) 14661da177e4SLinus Torvalds { 1467c9d76be6STejun Heo int r; 14681da177e4SLinus Torvalds 14691da177e4SLinus Torvalds if (minor >= (1 << MINORBITS)) 14701da177e4SLinus Torvalds return -EINVAL; 14711da177e4SLinus Torvalds 1472c9d76be6STejun Heo idr_preload(GFP_KERNEL); 1473f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 14741da177e4SLinus Torvalds 1475c9d76be6STejun Heo r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT); 14761da177e4SLinus Torvalds 1477f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 1478c9d76be6STejun Heo idr_preload_end(); 1479c9d76be6STejun Heo if (r < 0) 1480c9d76be6STejun Heo return r == -ENOSPC ? -EBUSY : r; 1481c9d76be6STejun Heo return 0; 14821da177e4SLinus Torvalds } 14831da177e4SLinus Torvalds 1484cf13ab8eSFrederik Deweerdt static int next_free_minor(int *minor) 14851da177e4SLinus Torvalds { 1486c9d76be6STejun Heo int r; 14871da177e4SLinus Torvalds 1488c9d76be6STejun Heo idr_preload(GFP_KERNEL); 1489f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 14901da177e4SLinus Torvalds 1491c9d76be6STejun Heo r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT); 14921da177e4SLinus Torvalds 1493f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 1494c9d76be6STejun Heo idr_preload_end(); 1495c9d76be6STejun Heo if (r < 0) 14961da177e4SLinus Torvalds return r; 1497c9d76be6STejun Heo *minor = r; 1498c9d76be6STejun Heo return 0; 14991da177e4SLinus Torvalds } 15001da177e4SLinus Torvalds 150183d5cde4SAlexey Dobriyan static const struct block_device_operations dm_blk_dops; 1502f26c5719SDan Williams static const struct dax_operations dm_dax_ops; 15031da177e4SLinus Torvalds 150453d5914fSMikulas Patocka static void dm_wq_work(struct work_struct *work); 150553d5914fSMikulas Patocka 15064cc96131SMike Snitzer void dm_init_md_queue(struct mapped_device *md) 15074a0b4ddfSMike Snitzer { 15084a0b4ddfSMike Snitzer /* 15094a0b4ddfSMike Snitzer * Request-based dm devices cannot be stacked on top of bio-based dm 1510bfebd1cdSMike Snitzer * devices. The type of this dm device may not have been decided yet. 15114a0b4ddfSMike Snitzer * The type is decided at the first table loading time. 15124a0b4ddfSMike Snitzer * To prevent problematic device stacking, clear the queue flag 15134a0b4ddfSMike Snitzer * for request stacking support until then. 15144a0b4ddfSMike Snitzer * 15154a0b4ddfSMike Snitzer * This queue is new, so no concurrency on the queue_flags. 15164a0b4ddfSMike Snitzer */ 15174a0b4ddfSMike Snitzer queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue); 1518ad5f498fSMikulas Patocka 1519ad5f498fSMikulas Patocka /* 1520ad5f498fSMikulas Patocka * Initialize data that will only be used by a non-blk-mq DM queue 1521ad5f498fSMikulas Patocka * - must do so here (in alloc_dev callchain) before queue is used 1522ad5f498fSMikulas Patocka */ 1523ad5f498fSMikulas Patocka md->queue->queuedata = md; 1524dc3b17ccSJan Kara md->queue->backing_dev_info->congested_data = md; 1525bfebd1cdSMike Snitzer } 15264a0b4ddfSMike Snitzer 15274cc96131SMike Snitzer void dm_init_normal_md_queue(struct mapped_device *md) 1528bfebd1cdSMike Snitzer { 152917e149b8SMike Snitzer md->use_blk_mq = false; 1530bfebd1cdSMike Snitzer dm_init_md_queue(md); 1531bfebd1cdSMike Snitzer 1532bfebd1cdSMike Snitzer /* 1533bfebd1cdSMike Snitzer * Initialize aspects of queue that aren't relevant for blk-mq 1534bfebd1cdSMike Snitzer */ 1535dc3b17ccSJan Kara md->queue->backing_dev_info->congested_fn = dm_any_congested; 15364a0b4ddfSMike Snitzer blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY); 15374a0b4ddfSMike Snitzer } 15384a0b4ddfSMike Snitzer 15390f20972fSMike Snitzer static void cleanup_mapped_device(struct mapped_device *md) 15400f20972fSMike Snitzer { 15410f20972fSMike Snitzer if (md->wq) 15420f20972fSMike Snitzer destroy_workqueue(md->wq); 15430f20972fSMike Snitzer if (md->kworker_task) 15440f20972fSMike Snitzer kthread_stop(md->kworker_task); 15450f20972fSMike Snitzer mempool_destroy(md->io_pool); 15460f20972fSMike Snitzer if (md->bs) 15470f20972fSMike Snitzer bioset_free(md->bs); 15480f20972fSMike Snitzer 1549f26c5719SDan Williams if (md->dax_dev) { 1550f26c5719SDan Williams kill_dax(md->dax_dev); 1551f26c5719SDan Williams put_dax(md->dax_dev); 1552f26c5719SDan Williams md->dax_dev = NULL; 1553f26c5719SDan Williams } 1554f26c5719SDan Williams 15550f20972fSMike Snitzer if (md->disk) { 15560f20972fSMike Snitzer spin_lock(&_minor_lock); 15570f20972fSMike Snitzer md->disk->private_data = NULL; 15580f20972fSMike Snitzer spin_unlock(&_minor_lock); 15590f20972fSMike Snitzer del_gendisk(md->disk); 15600f20972fSMike Snitzer put_disk(md->disk); 15610f20972fSMike Snitzer } 15620f20972fSMike Snitzer 15630f20972fSMike Snitzer if (md->queue) 15640f20972fSMike Snitzer blk_cleanup_queue(md->queue); 15650f20972fSMike Snitzer 1566d09960b0STahsin Erdogan cleanup_srcu_struct(&md->io_barrier); 1567d09960b0STahsin Erdogan 15680f20972fSMike Snitzer if (md->bdev) { 15690f20972fSMike Snitzer bdput(md->bdev); 15700f20972fSMike Snitzer md->bdev = NULL; 15710f20972fSMike Snitzer } 15724cc96131SMike Snitzer 15734cc96131SMike Snitzer dm_mq_cleanup_mapped_device(md); 15740f20972fSMike Snitzer } 15750f20972fSMike Snitzer 15761da177e4SLinus Torvalds /* 15771da177e4SLinus Torvalds * Allocate and initialise a blank device with a given minor. 15781da177e4SLinus Torvalds */ 15792b06cfffSAlasdair G Kergon static struct mapped_device *alloc_dev(int minor) 15801da177e4SLinus Torvalds { 1581115485e8SMike Snitzer int r, numa_node_id = dm_get_numa_node(); 1582f26c5719SDan Williams struct dax_device *dax_dev; 1583115485e8SMike Snitzer struct mapped_device *md; 1584ba61fdd1SJeff Mahoney void *old_md; 15851da177e4SLinus Torvalds 1586115485e8SMike Snitzer md = kzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id); 15871da177e4SLinus Torvalds if (!md) { 15881da177e4SLinus Torvalds DMWARN("unable to allocate device, out of memory."); 15891da177e4SLinus Torvalds return NULL; 15901da177e4SLinus Torvalds } 15911da177e4SLinus Torvalds 159210da4f79SJeff Mahoney if (!try_module_get(THIS_MODULE)) 15936ed7ade8SMilan Broz goto bad_module_get; 159410da4f79SJeff Mahoney 15951da177e4SLinus Torvalds /* get a minor number for the dev */ 15962b06cfffSAlasdair G Kergon if (minor == DM_ANY_MINOR) 1597cf13ab8eSFrederik Deweerdt r = next_free_minor(&minor); 15982b06cfffSAlasdair G Kergon else 1599cf13ab8eSFrederik Deweerdt r = specific_minor(minor); 16001da177e4SLinus Torvalds if (r < 0) 16016ed7ade8SMilan Broz goto bad_minor; 16021da177e4SLinus Torvalds 160383d5e5b0SMikulas Patocka r = init_srcu_struct(&md->io_barrier); 160483d5e5b0SMikulas Patocka if (r < 0) 160583d5e5b0SMikulas Patocka goto bad_io_barrier; 160683d5e5b0SMikulas Patocka 1607115485e8SMike Snitzer md->numa_node_id = numa_node_id; 16084cc96131SMike Snitzer md->use_blk_mq = dm_use_blk_mq_default(); 1609591ddcfcSMike Snitzer md->init_tio_pdu = false; 1610a5664dadSMike Snitzer md->type = DM_TYPE_NONE; 1611e61290a4SDaniel Walker mutex_init(&md->suspend_lock); 1612a5664dadSMike Snitzer mutex_init(&md->type_lock); 161386f1152bSBenjamin Marzinski mutex_init(&md->table_devices_lock); 1614022c2611SMikulas Patocka spin_lock_init(&md->deferred_lock); 16151da177e4SLinus Torvalds atomic_set(&md->holders, 1); 16165c6bd75dSAlasdair G Kergon atomic_set(&md->open_count, 0); 16171da177e4SLinus Torvalds atomic_set(&md->event_nr, 0); 16187a8c3d3bSMike Anderson atomic_set(&md->uevent_seq, 0); 16197a8c3d3bSMike Anderson INIT_LIST_HEAD(&md->uevent_list); 162086f1152bSBenjamin Marzinski INIT_LIST_HEAD(&md->table_devices); 16217a8c3d3bSMike Anderson spin_lock_init(&md->uevent_lock); 16221da177e4SLinus Torvalds 1623115485e8SMike Snitzer md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id); 16241da177e4SLinus Torvalds if (!md->queue) 16250f20972fSMike Snitzer goto bad; 16261da177e4SLinus Torvalds 16274a0b4ddfSMike Snitzer dm_init_md_queue(md); 16289faf400fSStefan Bader 1629115485e8SMike Snitzer md->disk = alloc_disk_node(1, numa_node_id); 16301da177e4SLinus Torvalds if (!md->disk) 16310f20972fSMike Snitzer goto bad; 16321da177e4SLinus Torvalds 1633316d315bSNikanth Karthikesan atomic_set(&md->pending[0], 0); 1634316d315bSNikanth Karthikesan atomic_set(&md->pending[1], 0); 1635f0b04115SJeff Mahoney init_waitqueue_head(&md->wait); 163653d5914fSMikulas Patocka INIT_WORK(&md->work, dm_wq_work); 1637f0b04115SJeff Mahoney init_waitqueue_head(&md->eventq); 16382995fa78SMikulas Patocka init_completion(&md->kobj_holder.completion); 16392eb6e1e3SKeith Busch md->kworker_task = NULL; 1640f0b04115SJeff Mahoney 16411da177e4SLinus Torvalds md->disk->major = _major; 16421da177e4SLinus Torvalds md->disk->first_minor = minor; 16431da177e4SLinus Torvalds md->disk->fops = &dm_blk_dops; 16441da177e4SLinus Torvalds md->disk->queue = md->queue; 16451da177e4SLinus Torvalds md->disk->private_data = md; 16461da177e4SLinus Torvalds sprintf(md->disk->disk_name, "dm-%d", minor); 1647f26c5719SDan Williams 1648f26c5719SDan Williams dax_dev = alloc_dax(md, md->disk->disk_name, &dm_dax_ops); 1649f26c5719SDan Williams if (!dax_dev) 1650f26c5719SDan Williams goto bad; 1651f26c5719SDan Williams md->dax_dev = dax_dev; 1652f26c5719SDan Williams 16531da177e4SLinus Torvalds add_disk(md->disk); 16547e51f257SMike Anderson format_dev_t(md->name, MKDEV(_major, minor)); 16551da177e4SLinus Torvalds 1656670368a8STejun Heo md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0); 1657304f3f6aSMilan Broz if (!md->wq) 16580f20972fSMike Snitzer goto bad; 1659304f3f6aSMilan Broz 166032a926daSMikulas Patocka md->bdev = bdget_disk(md->disk, 0); 166132a926daSMikulas Patocka if (!md->bdev) 16620f20972fSMike Snitzer goto bad; 166332a926daSMikulas Patocka 16643a83f467SMing Lei bio_init(&md->flush_bio, NULL, 0); 16656a8736d1STejun Heo md->flush_bio.bi_bdev = md->bdev; 166670fd7614SChristoph Hellwig md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; 16676a8736d1STejun Heo 1668fd2ed4d2SMikulas Patocka dm_stats_init(&md->stats); 1669fd2ed4d2SMikulas Patocka 1670ba61fdd1SJeff Mahoney /* Populate the mapping, nobody knows we exist yet */ 1671f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 1672ba61fdd1SJeff Mahoney old_md = idr_replace(&_minor_idr, md, minor); 1673f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 1674ba61fdd1SJeff Mahoney 1675ba61fdd1SJeff Mahoney BUG_ON(old_md != MINOR_ALLOCED); 1676ba61fdd1SJeff Mahoney 16771da177e4SLinus Torvalds return md; 16781da177e4SLinus Torvalds 16790f20972fSMike Snitzer bad: 16800f20972fSMike Snitzer cleanup_mapped_device(md); 168183d5e5b0SMikulas Patocka bad_io_barrier: 16821da177e4SLinus Torvalds free_minor(minor); 16836ed7ade8SMilan Broz bad_minor: 168410da4f79SJeff Mahoney module_put(THIS_MODULE); 16856ed7ade8SMilan Broz bad_module_get: 16861da177e4SLinus Torvalds kfree(md); 16871da177e4SLinus Torvalds return NULL; 16881da177e4SLinus Torvalds } 16891da177e4SLinus Torvalds 1690ae9da83fSJun'ichi Nomura static void unlock_fs(struct mapped_device *md); 1691ae9da83fSJun'ichi Nomura 16921da177e4SLinus Torvalds static void free_dev(struct mapped_device *md) 16931da177e4SLinus Torvalds { 1694f331c029STejun Heo int minor = MINOR(disk_devt(md->disk)); 169563d94e48SJun'ichi Nomura 1696ae9da83fSJun'ichi Nomura unlock_fs(md); 16972eb6e1e3SKeith Busch 16980f20972fSMike Snitzer cleanup_mapped_device(md); 16990f20972fSMike Snitzer 17000f20972fSMike Snitzer free_table_devices(&md->table_devices); 17010f20972fSMike Snitzer dm_stats_cleanup(&md->stats); 170263a4f065SMike Snitzer free_minor(minor); 170363a4f065SMike Snitzer 170410da4f79SJeff Mahoney module_put(THIS_MODULE); 17051da177e4SLinus Torvalds kfree(md); 17061da177e4SLinus Torvalds } 17071da177e4SLinus Torvalds 1708e6ee8c0bSKiyoshi Ueda static void __bind_mempools(struct mapped_device *md, struct dm_table *t) 1709e6ee8c0bSKiyoshi Ueda { 1710c0820cf5SMikulas Patocka struct dm_md_mempools *p = dm_table_get_md_mempools(t); 1711e6ee8c0bSKiyoshi Ueda 17124e6e36c3SMike Snitzer if (md->bs) { 17134e6e36c3SMike Snitzer /* The md already has necessary mempools. */ 1714545ed20eSToshi Kani if (dm_table_bio_based(t)) { 1715c0820cf5SMikulas Patocka /* 171616245bdcSJun'ichi Nomura * Reload bioset because front_pad may have changed 171716245bdcSJun'ichi Nomura * because a different table was loaded. 1718c0820cf5SMikulas Patocka */ 1719c0820cf5SMikulas Patocka bioset_free(md->bs); 1720c0820cf5SMikulas Patocka md->bs = p->bs; 1721c0820cf5SMikulas Patocka p->bs = NULL; 1722c0820cf5SMikulas Patocka } 1723cbc4e3c1SMike Snitzer /* 17244e6e36c3SMike Snitzer * There's no need to reload with request-based dm 17254e6e36c3SMike Snitzer * because the size of front_pad doesn't change. 17264e6e36c3SMike Snitzer * Note for future: If you are to reload bioset, 17274e6e36c3SMike Snitzer * prep-ed requests in the queue may refer 17284e6e36c3SMike Snitzer * to bio from the old bioset, so you must walk 17294e6e36c3SMike Snitzer * through the queue to unprep. 1730cbc4e3c1SMike Snitzer */ 1731cbc4e3c1SMike Snitzer goto out; 1732cbc4e3c1SMike Snitzer } 1733cbc4e3c1SMike Snitzer 1734eb8db831SChristoph Hellwig BUG_ON(!p || md->io_pool || md->bs); 1735e6ee8c0bSKiyoshi Ueda 1736e6ee8c0bSKiyoshi Ueda md->io_pool = p->io_pool; 1737e6ee8c0bSKiyoshi Ueda p->io_pool = NULL; 1738e6ee8c0bSKiyoshi Ueda md->bs = p->bs; 1739e6ee8c0bSKiyoshi Ueda p->bs = NULL; 17404e6e36c3SMike Snitzer 1741e6ee8c0bSKiyoshi Ueda out: 174202233342SMike Snitzer /* mempool bind completed, no longer need any mempools in the table */ 1743e6ee8c0bSKiyoshi Ueda dm_table_free_md_mempools(t); 1744e6ee8c0bSKiyoshi Ueda } 1745e6ee8c0bSKiyoshi Ueda 17461da177e4SLinus Torvalds /* 17471da177e4SLinus Torvalds * Bind a table to the device. 17481da177e4SLinus Torvalds */ 17491da177e4SLinus Torvalds static void event_callback(void *context) 17501da177e4SLinus Torvalds { 17517a8c3d3bSMike Anderson unsigned long flags; 17527a8c3d3bSMike Anderson LIST_HEAD(uevents); 17531da177e4SLinus Torvalds struct mapped_device *md = (struct mapped_device *) context; 17541da177e4SLinus Torvalds 17557a8c3d3bSMike Anderson spin_lock_irqsave(&md->uevent_lock, flags); 17567a8c3d3bSMike Anderson list_splice_init(&md->uevent_list, &uevents); 17577a8c3d3bSMike Anderson spin_unlock_irqrestore(&md->uevent_lock, flags); 17587a8c3d3bSMike Anderson 1759ed9e1982STejun Heo dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); 17607a8c3d3bSMike Anderson 17611da177e4SLinus Torvalds atomic_inc(&md->event_nr); 17621da177e4SLinus Torvalds wake_up(&md->eventq); 17631da177e4SLinus Torvalds } 17641da177e4SLinus Torvalds 1765c217649bSMike Snitzer /* 1766c217649bSMike Snitzer * Protected by md->suspend_lock obtained by dm_swap_table(). 1767c217649bSMike Snitzer */ 17684e90188bSAlasdair G Kergon static void __set_size(struct mapped_device *md, sector_t size) 17691da177e4SLinus Torvalds { 17701ea0654eSBart Van Assche lockdep_assert_held(&md->suspend_lock); 17711ea0654eSBart Van Assche 17724e90188bSAlasdair G Kergon set_capacity(md->disk, size); 17731da177e4SLinus Torvalds 1774db8fef4fSMikulas Patocka i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT); 17751da177e4SLinus Torvalds } 17761da177e4SLinus Torvalds 1777042d2a9bSAlasdair G Kergon /* 1778042d2a9bSAlasdair G Kergon * Returns old map, which caller must destroy. 1779042d2a9bSAlasdair G Kergon */ 1780042d2a9bSAlasdair G Kergon static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, 1781754c5fc7SMike Snitzer struct queue_limits *limits) 17821da177e4SLinus Torvalds { 1783042d2a9bSAlasdair G Kergon struct dm_table *old_map; 1784165125e1SJens Axboe struct request_queue *q = md->queue; 17851da177e4SLinus Torvalds sector_t size; 17861da177e4SLinus Torvalds 17875a8f1f80SBart Van Assche lockdep_assert_held(&md->suspend_lock); 17885a8f1f80SBart Van Assche 17891da177e4SLinus Torvalds size = dm_table_get_size(t); 17903ac51e74SDarrick J. Wong 17913ac51e74SDarrick J. Wong /* 17923ac51e74SDarrick J. Wong * Wipe any geometry if the size of the table changed. 17933ac51e74SDarrick J. Wong */ 1794fd2ed4d2SMikulas Patocka if (size != dm_get_size(md)) 17953ac51e74SDarrick J. Wong memset(&md->geometry, 0, sizeof(md->geometry)); 17963ac51e74SDarrick J. Wong 17974e90188bSAlasdair G Kergon __set_size(md, size); 17981da177e4SLinus Torvalds 1799cf222b37SAlasdair G Kergon dm_table_event_callback(t, event_callback, md); 18002ca3310eSAlasdair G Kergon 1801e6ee8c0bSKiyoshi Ueda /* 1802e6ee8c0bSKiyoshi Ueda * The queue hasn't been stopped yet, if the old table type wasn't 1803e6ee8c0bSKiyoshi Ueda * for request-based during suspension. So stop it to prevent 1804e6ee8c0bSKiyoshi Ueda * I/O mapping before resume. 1805e6ee8c0bSKiyoshi Ueda * This must be done before setting the queue restrictions, 1806e6ee8c0bSKiyoshi Ueda * because request-based dm may be run just after the setting. 1807e6ee8c0bSKiyoshi Ueda */ 180816f12266SMike Snitzer if (dm_table_request_based(t)) { 1809eca7ee6dSMike Snitzer dm_stop_queue(q); 181016f12266SMike Snitzer /* 181116f12266SMike Snitzer * Leverage the fact that request-based DM targets are 181216f12266SMike Snitzer * immutable singletons and establish md->immutable_target 181316f12266SMike Snitzer * - used to optimize both dm_request_fn and dm_mq_queue_rq 181416f12266SMike Snitzer */ 181516f12266SMike Snitzer md->immutable_target = dm_table_get_immutable_target(t); 181616f12266SMike Snitzer } 1817e6ee8c0bSKiyoshi Ueda 1818e6ee8c0bSKiyoshi Ueda __bind_mempools(md, t); 1819e6ee8c0bSKiyoshi Ueda 1820a12f5d48SEric Dumazet old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 18211d3aa6f6SMike Snitzer rcu_assign_pointer(md->map, (void *)t); 182236a0456fSAlasdair G Kergon md->immutable_target_type = dm_table_get_immutable_target_type(t); 182336a0456fSAlasdair G Kergon 1824754c5fc7SMike Snitzer dm_table_set_restrictions(t, q, limits); 182541abc4e1SHannes Reinecke if (old_map) 182683d5e5b0SMikulas Patocka dm_sync_table(md); 18272ca3310eSAlasdair G Kergon 1828042d2a9bSAlasdair G Kergon return old_map; 18291da177e4SLinus Torvalds } 18301da177e4SLinus Torvalds 1831a7940155SAlasdair G Kergon /* 1832a7940155SAlasdair G Kergon * Returns unbound table for the caller to free. 1833a7940155SAlasdair G Kergon */ 1834a7940155SAlasdair G Kergon static struct dm_table *__unbind(struct mapped_device *md) 18351da177e4SLinus Torvalds { 1836a12f5d48SEric Dumazet struct dm_table *map = rcu_dereference_protected(md->map, 1); 18371da177e4SLinus Torvalds 18381da177e4SLinus Torvalds if (!map) 1839a7940155SAlasdair G Kergon return NULL; 18401da177e4SLinus Torvalds 18411da177e4SLinus Torvalds dm_table_event_callback(map, NULL, NULL); 18429cdb8520SMonam Agarwal RCU_INIT_POINTER(md->map, NULL); 184383d5e5b0SMikulas Patocka dm_sync_table(md); 1844a7940155SAlasdair G Kergon 1845a7940155SAlasdair G Kergon return map; 18461da177e4SLinus Torvalds } 18471da177e4SLinus Torvalds 18481da177e4SLinus Torvalds /* 18491da177e4SLinus Torvalds * Constructor for a new device. 18501da177e4SLinus Torvalds */ 18512b06cfffSAlasdair G Kergon int dm_create(int minor, struct mapped_device **result) 18521da177e4SLinus Torvalds { 18531da177e4SLinus Torvalds struct mapped_device *md; 18541da177e4SLinus Torvalds 18552b06cfffSAlasdair G Kergon md = alloc_dev(minor); 18561da177e4SLinus Torvalds if (!md) 18571da177e4SLinus Torvalds return -ENXIO; 18581da177e4SLinus Torvalds 1859784aae73SMilan Broz dm_sysfs_init(md); 1860784aae73SMilan Broz 18611da177e4SLinus Torvalds *result = md; 18621da177e4SLinus Torvalds return 0; 18631da177e4SLinus Torvalds } 18641da177e4SLinus Torvalds 1865a5664dadSMike Snitzer /* 1866a5664dadSMike Snitzer * Functions to manage md->type. 1867a5664dadSMike Snitzer * All are required to hold md->type_lock. 1868a5664dadSMike Snitzer */ 1869a5664dadSMike Snitzer void dm_lock_md_type(struct mapped_device *md) 1870a5664dadSMike Snitzer { 1871a5664dadSMike Snitzer mutex_lock(&md->type_lock); 1872a5664dadSMike Snitzer } 1873a5664dadSMike Snitzer 1874a5664dadSMike Snitzer void dm_unlock_md_type(struct mapped_device *md) 1875a5664dadSMike Snitzer { 1876a5664dadSMike Snitzer mutex_unlock(&md->type_lock); 1877a5664dadSMike Snitzer } 1878a5664dadSMike Snitzer 18797e0d574fSBart Van Assche void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type) 1880a5664dadSMike Snitzer { 188100c4fc3bSMike Snitzer BUG_ON(!mutex_is_locked(&md->type_lock)); 1882a5664dadSMike Snitzer md->type = type; 1883a5664dadSMike Snitzer } 1884a5664dadSMike Snitzer 18857e0d574fSBart Van Assche enum dm_queue_mode dm_get_md_type(struct mapped_device *md) 1886a5664dadSMike Snitzer { 1887a5664dadSMike Snitzer return md->type; 1888a5664dadSMike Snitzer } 1889a5664dadSMike Snitzer 189036a0456fSAlasdair G Kergon struct target_type *dm_get_immutable_target_type(struct mapped_device *md) 189136a0456fSAlasdair G Kergon { 189236a0456fSAlasdair G Kergon return md->immutable_target_type; 189336a0456fSAlasdair G Kergon } 189436a0456fSAlasdair G Kergon 18954a0b4ddfSMike Snitzer /* 1896f84cb8a4SMike Snitzer * The queue_limits are only valid as long as you have a reference 1897f84cb8a4SMike Snitzer * count on 'md'. 1898f84cb8a4SMike Snitzer */ 1899f84cb8a4SMike Snitzer struct queue_limits *dm_get_queue_limits(struct mapped_device *md) 1900f84cb8a4SMike Snitzer { 1901f84cb8a4SMike Snitzer BUG_ON(!atomic_read(&md->holders)); 1902f84cb8a4SMike Snitzer return &md->queue->limits; 1903f84cb8a4SMike Snitzer } 1904f84cb8a4SMike Snitzer EXPORT_SYMBOL_GPL(dm_get_queue_limits); 1905f84cb8a4SMike Snitzer 19064a0b4ddfSMike Snitzer /* 19074a0b4ddfSMike Snitzer * Setup the DM device's queue based on md's type 19084a0b4ddfSMike Snitzer */ 1909591ddcfcSMike Snitzer int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) 19104a0b4ddfSMike Snitzer { 1911bfebd1cdSMike Snitzer int r; 19127e0d574fSBart Van Assche enum dm_queue_mode type = dm_get_md_type(md); 1913bfebd1cdSMike Snitzer 1914545ed20eSToshi Kani switch (type) { 1915bfebd1cdSMike Snitzer case DM_TYPE_REQUEST_BASED: 1916eb8db831SChristoph Hellwig r = dm_old_init_request_queue(md, t); 1917bfebd1cdSMike Snitzer if (r) { 1918eca7ee6dSMike Snitzer DMERR("Cannot initialize queue for request-based mapped device"); 1919bfebd1cdSMike Snitzer return r; 19204a0b4ddfSMike Snitzer } 1921bfebd1cdSMike Snitzer break; 1922bfebd1cdSMike Snitzer case DM_TYPE_MQ_REQUEST_BASED: 1923e83068a5SMike Snitzer r = dm_mq_init_request_queue(md, t); 1924bfebd1cdSMike Snitzer if (r) { 1925eca7ee6dSMike Snitzer DMERR("Cannot initialize queue for request-based dm-mq mapped device"); 1926bfebd1cdSMike Snitzer return r; 1927bfebd1cdSMike Snitzer } 1928bfebd1cdSMike Snitzer break; 1929bfebd1cdSMike Snitzer case DM_TYPE_BIO_BASED: 1930545ed20eSToshi Kani case DM_TYPE_DAX_BIO_BASED: 1931eca7ee6dSMike Snitzer dm_init_normal_md_queue(md); 1932ff36ab34SMike Snitzer blk_queue_make_request(md->queue, dm_make_request); 1933dbba42d8SMikulas Patocka /* 1934dbba42d8SMikulas Patocka * DM handles splitting bios as needed. Free the bio_split bioset 1935dbba42d8SMikulas Patocka * since it won't be used (saves 1 process per bio-based DM device). 1936dbba42d8SMikulas Patocka */ 1937dbba42d8SMikulas Patocka bioset_free(md->queue->bio_split); 1938dbba42d8SMikulas Patocka md->queue->bio_split = NULL; 1939545ed20eSToshi Kani 1940545ed20eSToshi Kani if (type == DM_TYPE_DAX_BIO_BASED) 1941545ed20eSToshi Kani queue_flag_set_unlocked(QUEUE_FLAG_DAX, md->queue); 1942bfebd1cdSMike Snitzer break; 19437e0d574fSBart Van Assche case DM_TYPE_NONE: 19447e0d574fSBart Van Assche WARN_ON_ONCE(true); 19457e0d574fSBart Van Assche break; 1946ff36ab34SMike Snitzer } 19474a0b4ddfSMike Snitzer 19484a0b4ddfSMike Snitzer return 0; 19494a0b4ddfSMike Snitzer } 19504a0b4ddfSMike Snitzer 19512bec1f4aSMikulas Patocka struct mapped_device *dm_get_md(dev_t dev) 19521da177e4SLinus Torvalds { 19531da177e4SLinus Torvalds struct mapped_device *md; 19541da177e4SLinus Torvalds unsigned minor = MINOR(dev); 19551da177e4SLinus Torvalds 19561da177e4SLinus Torvalds if (MAJOR(dev) != _major || minor >= (1 << MINORBITS)) 19571da177e4SLinus Torvalds return NULL; 19581da177e4SLinus Torvalds 1959f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 19601da177e4SLinus Torvalds 19611da177e4SLinus Torvalds md = idr_find(&_minor_idr, minor); 19622bec1f4aSMikulas Patocka if (md) { 19632bec1f4aSMikulas Patocka if ((md == MINOR_ALLOCED || 1964f331c029STejun Heo (MINOR(disk_devt(dm_disk(md))) != minor) || 1965abdc568bSKiyoshi Ueda dm_deleting_md(md) || 1966fba9f90eSJeff Mahoney test_bit(DMF_FREEING, &md->flags))) { 1967637842cfSDavid Teigland md = NULL; 1968fba9f90eSJeff Mahoney goto out; 1969fba9f90eSJeff Mahoney } 19702bec1f4aSMikulas Patocka dm_get(md); 19712bec1f4aSMikulas Patocka } 19721da177e4SLinus Torvalds 1973fba9f90eSJeff Mahoney out: 1974f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 19751da177e4SLinus Torvalds 1976637842cfSDavid Teigland return md; 1977637842cfSDavid Teigland } 19783cf2e4baSAlasdair G Kergon EXPORT_SYMBOL_GPL(dm_get_md); 1979d229a958SDavid Teigland 19809ade92a9SAlasdair G Kergon void *dm_get_mdptr(struct mapped_device *md) 1981637842cfSDavid Teigland { 19829ade92a9SAlasdair G Kergon return md->interface_ptr; 19831da177e4SLinus Torvalds } 19841da177e4SLinus Torvalds 19851da177e4SLinus Torvalds void dm_set_mdptr(struct mapped_device *md, void *ptr) 19861da177e4SLinus Torvalds { 19871da177e4SLinus Torvalds md->interface_ptr = ptr; 19881da177e4SLinus Torvalds } 19891da177e4SLinus Torvalds 19901da177e4SLinus Torvalds void dm_get(struct mapped_device *md) 19911da177e4SLinus Torvalds { 19921da177e4SLinus Torvalds atomic_inc(&md->holders); 19933f77316dSKiyoshi Ueda BUG_ON(test_bit(DMF_FREEING, &md->flags)); 19941da177e4SLinus Torvalds } 19951da177e4SLinus Torvalds 199609ee96b2SMikulas Patocka int dm_hold(struct mapped_device *md) 199709ee96b2SMikulas Patocka { 199809ee96b2SMikulas Patocka spin_lock(&_minor_lock); 199909ee96b2SMikulas Patocka if (test_bit(DMF_FREEING, &md->flags)) { 200009ee96b2SMikulas Patocka spin_unlock(&_minor_lock); 200109ee96b2SMikulas Patocka return -EBUSY; 200209ee96b2SMikulas Patocka } 200309ee96b2SMikulas Patocka dm_get(md); 200409ee96b2SMikulas Patocka spin_unlock(&_minor_lock); 200509ee96b2SMikulas Patocka return 0; 200609ee96b2SMikulas Patocka } 200709ee96b2SMikulas Patocka EXPORT_SYMBOL_GPL(dm_hold); 200809ee96b2SMikulas Patocka 200972d94861SAlasdair G Kergon const char *dm_device_name(struct mapped_device *md) 201072d94861SAlasdair G Kergon { 201172d94861SAlasdair G Kergon return md->name; 201272d94861SAlasdair G Kergon } 201372d94861SAlasdair G Kergon EXPORT_SYMBOL_GPL(dm_device_name); 201472d94861SAlasdair G Kergon 20153f77316dSKiyoshi Ueda static void __dm_destroy(struct mapped_device *md, bool wait) 20161da177e4SLinus Torvalds { 20173b785fbcSBart Van Assche struct request_queue *q = dm_get_md_queue(md); 20181134e5aeSMike Anderson struct dm_table *map; 201983d5e5b0SMikulas Patocka int srcu_idx; 20201da177e4SLinus Torvalds 20213f77316dSKiyoshi Ueda might_sleep(); 2022fba9f90eSJeff Mahoney 202363a4f065SMike Snitzer spin_lock(&_minor_lock); 20243f77316dSKiyoshi Ueda idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); 2025fba9f90eSJeff Mahoney set_bit(DMF_FREEING, &md->flags); 2026f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 20273f77316dSKiyoshi Ueda 20282e91c369SBart Van Assche blk_set_queue_dying(q); 20293b785fbcSBart Van Assche 203002233342SMike Snitzer if (dm_request_based(md) && md->kworker_task) 20313989144fSPetr Mladek kthread_flush_worker(&md->kworker); 20322eb6e1e3SKeith Busch 2033ab7c7bb6SMikulas Patocka /* 2034ab7c7bb6SMikulas Patocka * Take suspend_lock so that presuspend and postsuspend methods 2035ab7c7bb6SMikulas Patocka * do not race with internal suspend. 2036ab7c7bb6SMikulas Patocka */ 2037ab7c7bb6SMikulas Patocka mutex_lock(&md->suspend_lock); 20382a708cffSJunichi Nomura map = dm_get_live_table(md, &srcu_idx); 20394f186f8bSKiyoshi Ueda if (!dm_suspended_md(md)) { 20401da177e4SLinus Torvalds dm_table_presuspend_targets(map); 20411da177e4SLinus Torvalds dm_table_postsuspend_targets(map); 20421da177e4SLinus Torvalds } 204383d5e5b0SMikulas Patocka /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ 204483d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 20452a708cffSJunichi Nomura mutex_unlock(&md->suspend_lock); 204683d5e5b0SMikulas Patocka 20473f77316dSKiyoshi Ueda /* 20483f77316dSKiyoshi Ueda * Rare, but there may be I/O requests still going to complete, 20493f77316dSKiyoshi Ueda * for example. Wait for all references to disappear. 20503f77316dSKiyoshi Ueda * No one should increment the reference count of the mapped_device, 20513f77316dSKiyoshi Ueda * after the mapped_device state becomes DMF_FREEING. 20523f77316dSKiyoshi Ueda */ 20533f77316dSKiyoshi Ueda if (wait) 20543f77316dSKiyoshi Ueda while (atomic_read(&md->holders)) 20553f77316dSKiyoshi Ueda msleep(1); 20563f77316dSKiyoshi Ueda else if (atomic_read(&md->holders)) 20573f77316dSKiyoshi Ueda DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)", 20583f77316dSKiyoshi Ueda dm_device_name(md), atomic_read(&md->holders)); 20593f77316dSKiyoshi Ueda 2060784aae73SMilan Broz dm_sysfs_exit(md); 2061a7940155SAlasdair G Kergon dm_table_destroy(__unbind(md)); 20621da177e4SLinus Torvalds free_dev(md); 20631da177e4SLinus Torvalds } 20643f77316dSKiyoshi Ueda 20653f77316dSKiyoshi Ueda void dm_destroy(struct mapped_device *md) 20663f77316dSKiyoshi Ueda { 20673f77316dSKiyoshi Ueda __dm_destroy(md, true); 20683f77316dSKiyoshi Ueda } 20693f77316dSKiyoshi Ueda 20703f77316dSKiyoshi Ueda void dm_destroy_immediate(struct mapped_device *md) 20713f77316dSKiyoshi Ueda { 20723f77316dSKiyoshi Ueda __dm_destroy(md, false); 20733f77316dSKiyoshi Ueda } 20743f77316dSKiyoshi Ueda 20753f77316dSKiyoshi Ueda void dm_put(struct mapped_device *md) 20763f77316dSKiyoshi Ueda { 20773f77316dSKiyoshi Ueda atomic_dec(&md->holders); 20781da177e4SLinus Torvalds } 207979eb885cSEdward Goggin EXPORT_SYMBOL_GPL(dm_put); 20801da177e4SLinus Torvalds 2081b48633f8SBart Van Assche static int dm_wait_for_completion(struct mapped_device *md, long task_state) 208246125c1cSMilan Broz { 208346125c1cSMilan Broz int r = 0; 20849f4c3f87SBart Van Assche DEFINE_WAIT(wait); 208546125c1cSMilan Broz 208646125c1cSMilan Broz while (1) { 20879f4c3f87SBart Van Assche prepare_to_wait(&md->wait, &wait, task_state); 208846125c1cSMilan Broz 2089b4324feeSKiyoshi Ueda if (!md_in_flight(md)) 209046125c1cSMilan Broz break; 209146125c1cSMilan Broz 2092e3fabdfdSBart Van Assche if (signal_pending_state(task_state, current)) { 209346125c1cSMilan Broz r = -EINTR; 209446125c1cSMilan Broz break; 209546125c1cSMilan Broz } 209646125c1cSMilan Broz 209746125c1cSMilan Broz io_schedule(); 209846125c1cSMilan Broz } 20999f4c3f87SBart Van Assche finish_wait(&md->wait, &wait); 2100b44ebeb0SMikulas Patocka 210146125c1cSMilan Broz return r; 210246125c1cSMilan Broz } 210346125c1cSMilan Broz 21041da177e4SLinus Torvalds /* 21051da177e4SLinus Torvalds * Process the deferred bios 21061da177e4SLinus Torvalds */ 2107ef208587SMikulas Patocka static void dm_wq_work(struct work_struct *work) 21081da177e4SLinus Torvalds { 2109ef208587SMikulas Patocka struct mapped_device *md = container_of(work, struct mapped_device, 2110ef208587SMikulas Patocka work); 21116d6f10dfSMilan Broz struct bio *c; 211283d5e5b0SMikulas Patocka int srcu_idx; 211383d5e5b0SMikulas Patocka struct dm_table *map; 21141da177e4SLinus Torvalds 211583d5e5b0SMikulas Patocka map = dm_get_live_table(md, &srcu_idx); 2116ef208587SMikulas Patocka 21173b00b203SMikulas Patocka while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 2118022c2611SMikulas Patocka spin_lock_irq(&md->deferred_lock); 2119022c2611SMikulas Patocka c = bio_list_pop(&md->deferred); 2120022c2611SMikulas Patocka spin_unlock_irq(&md->deferred_lock); 2121022c2611SMikulas Patocka 21226a8736d1STejun Heo if (!c) 2123df12ee99SAlasdair G Kergon break; 212473d410c0SMilan Broz 2125e6ee8c0bSKiyoshi Ueda if (dm_request_based(md)) 2126e6ee8c0bSKiyoshi Ueda generic_make_request(c); 2127af7e466aSMikulas Patocka else 212883d5e5b0SMikulas Patocka __split_and_process_bio(md, map, c); 2129e6ee8c0bSKiyoshi Ueda } 21303b00b203SMikulas Patocka 213183d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 21321da177e4SLinus Torvalds } 21331da177e4SLinus Torvalds 21349a1fb464SMikulas Patocka static void dm_queue_flush(struct mapped_device *md) 2135304f3f6aSMilan Broz { 21363b00b203SMikulas Patocka clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 21374e857c58SPeter Zijlstra smp_mb__after_atomic(); 213853d5914fSMikulas Patocka queue_work(md->wq, &md->work); 2139304f3f6aSMilan Broz } 2140304f3f6aSMilan Broz 21411da177e4SLinus Torvalds /* 2142042d2a9bSAlasdair G Kergon * Swap in a new table, returning the old one for the caller to destroy. 21431da177e4SLinus Torvalds */ 2144042d2a9bSAlasdair G Kergon struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) 21451da177e4SLinus Torvalds { 214687eb5b21SMike Christie struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL); 2147754c5fc7SMike Snitzer struct queue_limits limits; 2148042d2a9bSAlasdair G Kergon int r; 21491da177e4SLinus Torvalds 2150e61290a4SDaniel Walker mutex_lock(&md->suspend_lock); 21511da177e4SLinus Torvalds 21521da177e4SLinus Torvalds /* device must be suspended */ 21534f186f8bSKiyoshi Ueda if (!dm_suspended_md(md)) 215493c534aeSAlasdair G Kergon goto out; 21551da177e4SLinus Torvalds 21563ae70656SMike Snitzer /* 21573ae70656SMike Snitzer * If the new table has no data devices, retain the existing limits. 21583ae70656SMike Snitzer * This helps multipath with queue_if_no_path if all paths disappear, 21593ae70656SMike Snitzer * then new I/O is queued based on these limits, and then some paths 21603ae70656SMike Snitzer * reappear. 21613ae70656SMike Snitzer */ 21623ae70656SMike Snitzer if (dm_table_has_no_data_devices(table)) { 216383d5e5b0SMikulas Patocka live_map = dm_get_live_table_fast(md); 21643ae70656SMike Snitzer if (live_map) 21653ae70656SMike Snitzer limits = md->queue->limits; 216683d5e5b0SMikulas Patocka dm_put_live_table_fast(md); 21673ae70656SMike Snitzer } 21683ae70656SMike Snitzer 216987eb5b21SMike Christie if (!live_map) { 2170754c5fc7SMike Snitzer r = dm_calculate_queue_limits(table, &limits); 2171042d2a9bSAlasdair G Kergon if (r) { 2172042d2a9bSAlasdair G Kergon map = ERR_PTR(r); 2173754c5fc7SMike Snitzer goto out; 2174042d2a9bSAlasdair G Kergon } 217587eb5b21SMike Christie } 2176754c5fc7SMike Snitzer 2177042d2a9bSAlasdair G Kergon map = __bind(md, table, &limits); 21781da177e4SLinus Torvalds 217993c534aeSAlasdair G Kergon out: 2180e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock); 2181042d2a9bSAlasdair G Kergon return map; 21821da177e4SLinus Torvalds } 21831da177e4SLinus Torvalds 21841da177e4SLinus Torvalds /* 21851da177e4SLinus Torvalds * Functions to lock and unlock any filesystem running on the 21861da177e4SLinus Torvalds * device. 21871da177e4SLinus Torvalds */ 21882ca3310eSAlasdair G Kergon static int lock_fs(struct mapped_device *md) 21891da177e4SLinus Torvalds { 2190e39e2e95SAlasdair G Kergon int r; 21911da177e4SLinus Torvalds 21921da177e4SLinus Torvalds WARN_ON(md->frozen_sb); 2193dfbe03f6SAlasdair G Kergon 2194db8fef4fSMikulas Patocka md->frozen_sb = freeze_bdev(md->bdev); 2195dfbe03f6SAlasdair G Kergon if (IS_ERR(md->frozen_sb)) { 2196cf222b37SAlasdair G Kergon r = PTR_ERR(md->frozen_sb); 2197e39e2e95SAlasdair G Kergon md->frozen_sb = NULL; 2198e39e2e95SAlasdair G Kergon return r; 2199dfbe03f6SAlasdair G Kergon } 2200dfbe03f6SAlasdair G Kergon 2201aa8d7c2fSAlasdair G Kergon set_bit(DMF_FROZEN, &md->flags); 2202aa8d7c2fSAlasdair G Kergon 22031da177e4SLinus Torvalds return 0; 22041da177e4SLinus Torvalds } 22051da177e4SLinus Torvalds 22062ca3310eSAlasdair G Kergon static void unlock_fs(struct mapped_device *md) 22071da177e4SLinus Torvalds { 2208aa8d7c2fSAlasdair G Kergon if (!test_bit(DMF_FROZEN, &md->flags)) 2209aa8d7c2fSAlasdair G Kergon return; 2210aa8d7c2fSAlasdair G Kergon 2211db8fef4fSMikulas Patocka thaw_bdev(md->bdev, md->frozen_sb); 22121da177e4SLinus Torvalds md->frozen_sb = NULL; 2213aa8d7c2fSAlasdair G Kergon clear_bit(DMF_FROZEN, &md->flags); 22141da177e4SLinus Torvalds } 22151da177e4SLinus Torvalds 22161da177e4SLinus Torvalds /* 2217b48633f8SBart Van Assche * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG 2218b48633f8SBart Van Assche * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE 2219b48633f8SBart Van Assche * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY 2220b48633f8SBart Van Assche * 2221ffcc3936SMike Snitzer * If __dm_suspend returns 0, the device is completely quiescent 2222ffcc3936SMike Snitzer * now. There is no request-processing activity. All new requests 2223ffcc3936SMike Snitzer * are being added to md->deferred list. 2224cec47e3dSKiyoshi Ueda */ 2225ffcc3936SMike Snitzer static int __dm_suspend(struct mapped_device *md, struct dm_table *map, 2226b48633f8SBart Van Assche unsigned suspend_flags, long task_state, 2227eaf9a736SMike Snitzer int dmf_suspended_flag) 22281da177e4SLinus Torvalds { 2229ffcc3936SMike Snitzer bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG; 2230ffcc3936SMike Snitzer bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG; 2231ffcc3936SMike Snitzer int r; 2232cf222b37SAlasdair G Kergon 22335a8f1f80SBart Van Assche lockdep_assert_held(&md->suspend_lock); 22345a8f1f80SBart Van Assche 22352e93ccc1SKiyoshi Ueda /* 22362e93ccc1SKiyoshi Ueda * DMF_NOFLUSH_SUSPENDING must be set before presuspend. 22372e93ccc1SKiyoshi Ueda * This flag is cleared before dm_suspend returns. 22382e93ccc1SKiyoshi Ueda */ 22392e93ccc1SKiyoshi Ueda if (noflush) 22402e93ccc1SKiyoshi Ueda set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 224186331f39SBart Van Assche else 224286331f39SBart Van Assche pr_debug("%s: suspending with flush\n", dm_device_name(md)); 22432e93ccc1SKiyoshi Ueda 2244d67ee213SMike Snitzer /* 2245d67ee213SMike Snitzer * This gets reverted if there's an error later and the targets 2246d67ee213SMike Snitzer * provide the .presuspend_undo hook. 2247d67ee213SMike Snitzer */ 22481da177e4SLinus Torvalds dm_table_presuspend_targets(map); 22491da177e4SLinus Torvalds 22502e93ccc1SKiyoshi Ueda /* 22519f518b27SKiyoshi Ueda * Flush I/O to the device. 22529f518b27SKiyoshi Ueda * Any I/O submitted after lock_fs() may not be flushed. 22539f518b27SKiyoshi Ueda * noflush takes precedence over do_lockfs. 22549f518b27SKiyoshi Ueda * (lock_fs() flushes I/Os and waits for them to complete.) 22552e93ccc1SKiyoshi Ueda */ 225632a926daSMikulas Patocka if (!noflush && do_lockfs) { 22572ca3310eSAlasdair G Kergon r = lock_fs(md); 2258d67ee213SMike Snitzer if (r) { 2259d67ee213SMike Snitzer dm_table_presuspend_undo_targets(map); 2260ffcc3936SMike Snitzer return r; 2261aa8d7c2fSAlasdair G Kergon } 2262d67ee213SMike Snitzer } 22631da177e4SLinus Torvalds 22641da177e4SLinus Torvalds /* 22653b00b203SMikulas Patocka * Here we must make sure that no processes are submitting requests 22663b00b203SMikulas Patocka * to target drivers i.e. no one may be executing 22673b00b203SMikulas Patocka * __split_and_process_bio. This is called from dm_request and 22683b00b203SMikulas Patocka * dm_wq_work. 22693b00b203SMikulas Patocka * 22703b00b203SMikulas Patocka * To get all processes out of __split_and_process_bio in dm_request, 22713b00b203SMikulas Patocka * we take the write lock. To prevent any process from reentering 22726a8736d1STejun Heo * __split_and_process_bio from dm_request and quiesce the thread 22736a8736d1STejun Heo * (dm_wq_work), we set BMF_BLOCK_IO_FOR_SUSPEND and call 22746a8736d1STejun Heo * flush_workqueue(md->wq). 22751da177e4SLinus Torvalds */ 22761eb787ecSAlasdair G Kergon set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 227741abc4e1SHannes Reinecke if (map) 227883d5e5b0SMikulas Patocka synchronize_srcu(&md->io_barrier); 22791da177e4SLinus Torvalds 2280d0bcb878SKiyoshi Ueda /* 228129e4013dSTejun Heo * Stop md->queue before flushing md->wq in case request-based 228229e4013dSTejun Heo * dm defers requests to md->wq from md->queue. 2283d0bcb878SKiyoshi Ueda */ 22842eb6e1e3SKeith Busch if (dm_request_based(md)) { 2285eca7ee6dSMike Snitzer dm_stop_queue(md->queue); 228602233342SMike Snitzer if (md->kworker_task) 22873989144fSPetr Mladek kthread_flush_worker(&md->kworker); 22882eb6e1e3SKeith Busch } 2289cec47e3dSKiyoshi Ueda 2290d0bcb878SKiyoshi Ueda flush_workqueue(md->wq); 2291d0bcb878SKiyoshi Ueda 22921da177e4SLinus Torvalds /* 22933b00b203SMikulas Patocka * At this point no more requests are entering target request routines. 22943b00b203SMikulas Patocka * We call dm_wait_for_completion to wait for all existing requests 22953b00b203SMikulas Patocka * to finish. 22961da177e4SLinus Torvalds */ 2297b48633f8SBart Van Assche r = dm_wait_for_completion(md, task_state); 2298eaf9a736SMike Snitzer if (!r) 2299eaf9a736SMike Snitzer set_bit(dmf_suspended_flag, &md->flags); 23001da177e4SLinus Torvalds 23016d6f10dfSMilan Broz if (noflush) 2302022c2611SMikulas Patocka clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 230341abc4e1SHannes Reinecke if (map) 230483d5e5b0SMikulas Patocka synchronize_srcu(&md->io_barrier); 23052e93ccc1SKiyoshi Ueda 23061da177e4SLinus Torvalds /* were we interrupted ? */ 230746125c1cSMilan Broz if (r < 0) { 23089a1fb464SMikulas Patocka dm_queue_flush(md); 230973d410c0SMilan Broz 2310cec47e3dSKiyoshi Ueda if (dm_request_based(md)) 2311eca7ee6dSMike Snitzer dm_start_queue(md->queue); 2312cec47e3dSKiyoshi Ueda 23132ca3310eSAlasdair G Kergon unlock_fs(md); 2314d67ee213SMike Snitzer dm_table_presuspend_undo_targets(map); 2315ffcc3936SMike Snitzer /* pushback list is already flushed, so skip flush */ 2316ffcc3936SMike Snitzer } 2317ffcc3936SMike Snitzer 2318ffcc3936SMike Snitzer return r; 23192ca3310eSAlasdair G Kergon } 23202ca3310eSAlasdair G Kergon 23213b00b203SMikulas Patocka /* 2322ffcc3936SMike Snitzer * We need to be able to change a mapping table under a mounted 2323ffcc3936SMike Snitzer * filesystem. For example we might want to move some data in 2324ffcc3936SMike Snitzer * the background. Before the table can be swapped with 2325ffcc3936SMike Snitzer * dm_bind_table, dm_suspend must be called to flush any in 2326ffcc3936SMike Snitzer * flight bios and ensure that any further io gets deferred. 23273b00b203SMikulas Patocka */ 2328ffcc3936SMike Snitzer /* 2329ffcc3936SMike Snitzer * Suspend mechanism in request-based dm. 2330ffcc3936SMike Snitzer * 2331ffcc3936SMike Snitzer * 1. Flush all I/Os by lock_fs() if needed. 2332ffcc3936SMike Snitzer * 2. Stop dispatching any I/O by stopping the request_queue. 2333ffcc3936SMike Snitzer * 3. Wait for all in-flight I/Os to be completed or requeued. 2334ffcc3936SMike Snitzer * 2335ffcc3936SMike Snitzer * To abort suspend, start the request_queue. 2336ffcc3936SMike Snitzer */ 2337ffcc3936SMike Snitzer int dm_suspend(struct mapped_device *md, unsigned suspend_flags) 2338ffcc3936SMike Snitzer { 2339ffcc3936SMike Snitzer struct dm_table *map = NULL; 2340ffcc3936SMike Snitzer int r = 0; 2341ffcc3936SMike Snitzer 2342ffcc3936SMike Snitzer retry: 2343ffcc3936SMike Snitzer mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); 2344ffcc3936SMike Snitzer 2345ffcc3936SMike Snitzer if (dm_suspended_md(md)) { 2346ffcc3936SMike Snitzer r = -EINVAL; 2347ffcc3936SMike Snitzer goto out_unlock; 2348ffcc3936SMike Snitzer } 2349ffcc3936SMike Snitzer 2350ffcc3936SMike Snitzer if (dm_suspended_internally_md(md)) { 2351ffcc3936SMike Snitzer /* already internally suspended, wait for internal resume */ 2352ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 2353ffcc3936SMike Snitzer r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); 2354ffcc3936SMike Snitzer if (r) 2355ffcc3936SMike Snitzer return r; 2356ffcc3936SMike Snitzer goto retry; 2357ffcc3936SMike Snitzer } 2358ffcc3936SMike Snitzer 2359a12f5d48SEric Dumazet map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2360ffcc3936SMike Snitzer 2361eaf9a736SMike Snitzer r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED); 2362ffcc3936SMike Snitzer if (r) 2363ffcc3936SMike Snitzer goto out_unlock; 23643b00b203SMikulas Patocka 23654d4471cbSKiyoshi Ueda dm_table_postsuspend_targets(map); 23664d4471cbSKiyoshi Ueda 2367d287483dSAlasdair G Kergon out_unlock: 2368e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock); 2369cf222b37SAlasdair G Kergon return r; 23701da177e4SLinus Torvalds } 23711da177e4SLinus Torvalds 2372ffcc3936SMike Snitzer static int __dm_resume(struct mapped_device *md, struct dm_table *map) 23731da177e4SLinus Torvalds { 2374ffcc3936SMike Snitzer if (map) { 2375ffcc3936SMike Snitzer int r = dm_table_resume_targets(map); 23768757b776SMilan Broz if (r) 2377ffcc3936SMike Snitzer return r; 2378ffcc3936SMike Snitzer } 23792ca3310eSAlasdair G Kergon 23809a1fb464SMikulas Patocka dm_queue_flush(md); 23812ca3310eSAlasdair G Kergon 2382cec47e3dSKiyoshi Ueda /* 2383cec47e3dSKiyoshi Ueda * Flushing deferred I/Os must be done after targets are resumed 2384cec47e3dSKiyoshi Ueda * so that mapping of targets can work correctly. 2385cec47e3dSKiyoshi Ueda * Request-based dm is queueing the deferred I/Os in its request_queue. 2386cec47e3dSKiyoshi Ueda */ 2387cec47e3dSKiyoshi Ueda if (dm_request_based(md)) 2388eca7ee6dSMike Snitzer dm_start_queue(md->queue); 2389cec47e3dSKiyoshi Ueda 23902ca3310eSAlasdair G Kergon unlock_fs(md); 23912ca3310eSAlasdair G Kergon 2392ffcc3936SMike Snitzer return 0; 2393ffcc3936SMike Snitzer } 2394ffcc3936SMike Snitzer 2395ffcc3936SMike Snitzer int dm_resume(struct mapped_device *md) 2396ffcc3936SMike Snitzer { 23978dc23658SMinfei Huang int r; 2398ffcc3936SMike Snitzer struct dm_table *map = NULL; 2399ffcc3936SMike Snitzer 2400ffcc3936SMike Snitzer retry: 24018dc23658SMinfei Huang r = -EINVAL; 2402ffcc3936SMike Snitzer mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); 2403ffcc3936SMike Snitzer 2404ffcc3936SMike Snitzer if (!dm_suspended_md(md)) 2405ffcc3936SMike Snitzer goto out; 2406ffcc3936SMike Snitzer 2407ffcc3936SMike Snitzer if (dm_suspended_internally_md(md)) { 2408ffcc3936SMike Snitzer /* already internally suspended, wait for internal resume */ 2409ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 2410ffcc3936SMike Snitzer r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); 2411ffcc3936SMike Snitzer if (r) 2412ffcc3936SMike Snitzer return r; 2413ffcc3936SMike Snitzer goto retry; 2414ffcc3936SMike Snitzer } 2415ffcc3936SMike Snitzer 2416a12f5d48SEric Dumazet map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2417ffcc3936SMike Snitzer if (!map || !dm_table_get_size(map)) 2418ffcc3936SMike Snitzer goto out; 2419ffcc3936SMike Snitzer 2420ffcc3936SMike Snitzer r = __dm_resume(md, map); 2421ffcc3936SMike Snitzer if (r) 2422ffcc3936SMike Snitzer goto out; 2423ffcc3936SMike Snitzer 24242ca3310eSAlasdair G Kergon clear_bit(DMF_SUSPENDED, &md->flags); 2425cf222b37SAlasdair G Kergon out: 2426e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock); 24272ca3310eSAlasdair G Kergon 2428cf222b37SAlasdair G Kergon return r; 24291da177e4SLinus Torvalds } 24301da177e4SLinus Torvalds 2431fd2ed4d2SMikulas Patocka /* 2432fd2ed4d2SMikulas Patocka * Internal suspend/resume works like userspace-driven suspend. It waits 2433fd2ed4d2SMikulas Patocka * until all bios finish and prevents issuing new bios to the target drivers. 2434fd2ed4d2SMikulas Patocka * It may be used only from the kernel. 2435fd2ed4d2SMikulas Patocka */ 2436fd2ed4d2SMikulas Patocka 2437ffcc3936SMike Snitzer static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags) 2438ffcc3936SMike Snitzer { 2439ffcc3936SMike Snitzer struct dm_table *map = NULL; 2440ffcc3936SMike Snitzer 24411ea0654eSBart Van Assche lockdep_assert_held(&md->suspend_lock); 24421ea0654eSBart Van Assche 244396b26c8cSMikulas Patocka if (md->internal_suspend_count++) 2444ffcc3936SMike Snitzer return; /* nested internal suspend */ 2445ffcc3936SMike Snitzer 2446ffcc3936SMike Snitzer if (dm_suspended_md(md)) { 2447ffcc3936SMike Snitzer set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 2448ffcc3936SMike Snitzer return; /* nest suspend */ 2449ffcc3936SMike Snitzer } 2450ffcc3936SMike Snitzer 2451a12f5d48SEric Dumazet map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2452ffcc3936SMike Snitzer 2453ffcc3936SMike Snitzer /* 2454ffcc3936SMike Snitzer * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is 2455ffcc3936SMike Snitzer * supported. Properly supporting a TASK_INTERRUPTIBLE internal suspend 2456ffcc3936SMike Snitzer * would require changing .presuspend to return an error -- avoid this 2457ffcc3936SMike Snitzer * until there is a need for more elaborate variants of internal suspend. 2458ffcc3936SMike Snitzer */ 2459eaf9a736SMike Snitzer (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE, 2460eaf9a736SMike Snitzer DMF_SUSPENDED_INTERNALLY); 2461ffcc3936SMike Snitzer 2462ffcc3936SMike Snitzer dm_table_postsuspend_targets(map); 2463ffcc3936SMike Snitzer } 2464ffcc3936SMike Snitzer 2465ffcc3936SMike Snitzer static void __dm_internal_resume(struct mapped_device *md) 2466ffcc3936SMike Snitzer { 246796b26c8cSMikulas Patocka BUG_ON(!md->internal_suspend_count); 246896b26c8cSMikulas Patocka 246996b26c8cSMikulas Patocka if (--md->internal_suspend_count) 2470ffcc3936SMike Snitzer return; /* resume from nested internal suspend */ 2471ffcc3936SMike Snitzer 2472ffcc3936SMike Snitzer if (dm_suspended_md(md)) 2473ffcc3936SMike Snitzer goto done; /* resume from nested suspend */ 2474ffcc3936SMike Snitzer 2475ffcc3936SMike Snitzer /* 2476ffcc3936SMike Snitzer * NOTE: existing callers don't need to call dm_table_resume_targets 2477ffcc3936SMike Snitzer * (which may fail -- so best to avoid it for now by passing NULL map) 2478ffcc3936SMike Snitzer */ 2479ffcc3936SMike Snitzer (void) __dm_resume(md, NULL); 2480ffcc3936SMike Snitzer 2481ffcc3936SMike Snitzer done: 2482ffcc3936SMike Snitzer clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 2483ffcc3936SMike Snitzer smp_mb__after_atomic(); 2484ffcc3936SMike Snitzer wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY); 2485ffcc3936SMike Snitzer } 2486ffcc3936SMike Snitzer 2487ffcc3936SMike Snitzer void dm_internal_suspend_noflush(struct mapped_device *md) 2488fd2ed4d2SMikulas Patocka { 2489fd2ed4d2SMikulas Patocka mutex_lock(&md->suspend_lock); 2490ffcc3936SMike Snitzer __dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG); 2491ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 2492ffcc3936SMike Snitzer } 2493ffcc3936SMike Snitzer EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush); 2494ffcc3936SMike Snitzer 2495ffcc3936SMike Snitzer void dm_internal_resume(struct mapped_device *md) 2496ffcc3936SMike Snitzer { 2497ffcc3936SMike Snitzer mutex_lock(&md->suspend_lock); 2498ffcc3936SMike Snitzer __dm_internal_resume(md); 2499ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 2500ffcc3936SMike Snitzer } 2501ffcc3936SMike Snitzer EXPORT_SYMBOL_GPL(dm_internal_resume); 2502ffcc3936SMike Snitzer 2503ffcc3936SMike Snitzer /* 2504ffcc3936SMike Snitzer * Fast variants of internal suspend/resume hold md->suspend_lock, 2505ffcc3936SMike Snitzer * which prevents interaction with userspace-driven suspend. 2506ffcc3936SMike Snitzer */ 2507ffcc3936SMike Snitzer 2508ffcc3936SMike Snitzer void dm_internal_suspend_fast(struct mapped_device *md) 2509ffcc3936SMike Snitzer { 2510ffcc3936SMike Snitzer mutex_lock(&md->suspend_lock); 2511ffcc3936SMike Snitzer if (dm_suspended_md(md) || dm_suspended_internally_md(md)) 2512fd2ed4d2SMikulas Patocka return; 2513fd2ed4d2SMikulas Patocka 2514fd2ed4d2SMikulas Patocka set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 2515fd2ed4d2SMikulas Patocka synchronize_srcu(&md->io_barrier); 2516fd2ed4d2SMikulas Patocka flush_workqueue(md->wq); 2517fd2ed4d2SMikulas Patocka dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); 2518fd2ed4d2SMikulas Patocka } 2519b735fedeSMikulas Patocka EXPORT_SYMBOL_GPL(dm_internal_suspend_fast); 2520fd2ed4d2SMikulas Patocka 2521ffcc3936SMike Snitzer void dm_internal_resume_fast(struct mapped_device *md) 2522fd2ed4d2SMikulas Patocka { 2523ffcc3936SMike Snitzer if (dm_suspended_md(md) || dm_suspended_internally_md(md)) 2524fd2ed4d2SMikulas Patocka goto done; 2525fd2ed4d2SMikulas Patocka 2526fd2ed4d2SMikulas Patocka dm_queue_flush(md); 2527fd2ed4d2SMikulas Patocka 2528fd2ed4d2SMikulas Patocka done: 2529fd2ed4d2SMikulas Patocka mutex_unlock(&md->suspend_lock); 2530fd2ed4d2SMikulas Patocka } 2531b735fedeSMikulas Patocka EXPORT_SYMBOL_GPL(dm_internal_resume_fast); 2532fd2ed4d2SMikulas Patocka 25331da177e4SLinus Torvalds /*----------------------------------------------------------------- 25341da177e4SLinus Torvalds * Event notification. 25351da177e4SLinus Torvalds *---------------------------------------------------------------*/ 25363abf85b5SPeter Rajnoha int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, 253760935eb2SMilan Broz unsigned cookie) 253869267a30SAlasdair G Kergon { 253960935eb2SMilan Broz char udev_cookie[DM_COOKIE_LENGTH]; 254060935eb2SMilan Broz char *envp[] = { udev_cookie, NULL }; 254160935eb2SMilan Broz 254260935eb2SMilan Broz if (!cookie) 25433abf85b5SPeter Rajnoha return kobject_uevent(&disk_to_dev(md->disk)->kobj, action); 254460935eb2SMilan Broz else { 254560935eb2SMilan Broz snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u", 254660935eb2SMilan Broz DM_COOKIE_ENV_VAR_NAME, cookie); 25473abf85b5SPeter Rajnoha return kobject_uevent_env(&disk_to_dev(md->disk)->kobj, 25483abf85b5SPeter Rajnoha action, envp); 254960935eb2SMilan Broz } 255069267a30SAlasdair G Kergon } 255169267a30SAlasdair G Kergon 25527a8c3d3bSMike Anderson uint32_t dm_next_uevent_seq(struct mapped_device *md) 25537a8c3d3bSMike Anderson { 25547a8c3d3bSMike Anderson return atomic_add_return(1, &md->uevent_seq); 25557a8c3d3bSMike Anderson } 25567a8c3d3bSMike Anderson 25571da177e4SLinus Torvalds uint32_t dm_get_event_nr(struct mapped_device *md) 25581da177e4SLinus Torvalds { 25591da177e4SLinus Torvalds return atomic_read(&md->event_nr); 25601da177e4SLinus Torvalds } 25611da177e4SLinus Torvalds 25621da177e4SLinus Torvalds int dm_wait_event(struct mapped_device *md, int event_nr) 25631da177e4SLinus Torvalds { 25641da177e4SLinus Torvalds return wait_event_interruptible(md->eventq, 25651da177e4SLinus Torvalds (event_nr != atomic_read(&md->event_nr))); 25661da177e4SLinus Torvalds } 25671da177e4SLinus Torvalds 25687a8c3d3bSMike Anderson void dm_uevent_add(struct mapped_device *md, struct list_head *elist) 25697a8c3d3bSMike Anderson { 25707a8c3d3bSMike Anderson unsigned long flags; 25717a8c3d3bSMike Anderson 25727a8c3d3bSMike Anderson spin_lock_irqsave(&md->uevent_lock, flags); 25737a8c3d3bSMike Anderson list_add(elist, &md->uevent_list); 25747a8c3d3bSMike Anderson spin_unlock_irqrestore(&md->uevent_lock, flags); 25757a8c3d3bSMike Anderson } 25767a8c3d3bSMike Anderson 25771da177e4SLinus Torvalds /* 25781da177e4SLinus Torvalds * The gendisk is only valid as long as you have a reference 25791da177e4SLinus Torvalds * count on 'md'. 25801da177e4SLinus Torvalds */ 25811da177e4SLinus Torvalds struct gendisk *dm_disk(struct mapped_device *md) 25821da177e4SLinus Torvalds { 25831da177e4SLinus Torvalds return md->disk; 25841da177e4SLinus Torvalds } 258565ff5b7dSSami Tolvanen EXPORT_SYMBOL_GPL(dm_disk); 25861da177e4SLinus Torvalds 2587784aae73SMilan Broz struct kobject *dm_kobject(struct mapped_device *md) 2588784aae73SMilan Broz { 25892995fa78SMikulas Patocka return &md->kobj_holder.kobj; 2590784aae73SMilan Broz } 2591784aae73SMilan Broz 2592784aae73SMilan Broz struct mapped_device *dm_get_from_kobject(struct kobject *kobj) 2593784aae73SMilan Broz { 2594784aae73SMilan Broz struct mapped_device *md; 2595784aae73SMilan Broz 25962995fa78SMikulas Patocka md = container_of(kobj, struct mapped_device, kobj_holder.kobj); 2597784aae73SMilan Broz 25984d89b7b4SMilan Broz if (test_bit(DMF_FREEING, &md->flags) || 2599432a212cSMike Anderson dm_deleting_md(md)) 26004d89b7b4SMilan Broz return NULL; 26014d89b7b4SMilan Broz 2602784aae73SMilan Broz dm_get(md); 2603784aae73SMilan Broz return md; 2604784aae73SMilan Broz } 2605784aae73SMilan Broz 26064f186f8bSKiyoshi Ueda int dm_suspended_md(struct mapped_device *md) 26071da177e4SLinus Torvalds { 26081da177e4SLinus Torvalds return test_bit(DMF_SUSPENDED, &md->flags); 26091da177e4SLinus Torvalds } 26101da177e4SLinus Torvalds 2611ffcc3936SMike Snitzer int dm_suspended_internally_md(struct mapped_device *md) 2612ffcc3936SMike Snitzer { 2613ffcc3936SMike Snitzer return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 2614ffcc3936SMike Snitzer } 2615ffcc3936SMike Snitzer 26162c140a24SMikulas Patocka int dm_test_deferred_remove_flag(struct mapped_device *md) 26172c140a24SMikulas Patocka { 26182c140a24SMikulas Patocka return test_bit(DMF_DEFERRED_REMOVE, &md->flags); 26192c140a24SMikulas Patocka } 26202c140a24SMikulas Patocka 262164dbce58SKiyoshi Ueda int dm_suspended(struct dm_target *ti) 262264dbce58SKiyoshi Ueda { 2623ecdb2e25SKiyoshi Ueda return dm_suspended_md(dm_table_get_md(ti->table)); 262464dbce58SKiyoshi Ueda } 262564dbce58SKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_suspended); 262664dbce58SKiyoshi Ueda 26272e93ccc1SKiyoshi Ueda int dm_noflush_suspending(struct dm_target *ti) 26282e93ccc1SKiyoshi Ueda { 2629ecdb2e25SKiyoshi Ueda return __noflush_suspending(dm_table_get_md(ti->table)); 26302e93ccc1SKiyoshi Ueda } 26312e93ccc1SKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_noflush_suspending); 26322e93ccc1SKiyoshi Ueda 26337e0d574fSBart Van Assche struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type, 263430187e1dSMike Snitzer unsigned integrity, unsigned per_io_data_size) 2635e6ee8c0bSKiyoshi Ueda { 2636115485e8SMike Snitzer struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id); 263778d8e58aSMike Snitzer unsigned int pool_size = 0; 26385f015204SJun'ichi Nomura unsigned int front_pad; 2639e6ee8c0bSKiyoshi Ueda 2640e6ee8c0bSKiyoshi Ueda if (!pools) 26414e6e36c3SMike Snitzer return NULL; 2642e6ee8c0bSKiyoshi Ueda 264378d8e58aSMike Snitzer switch (type) { 264478d8e58aSMike Snitzer case DM_TYPE_BIO_BASED: 2645545ed20eSToshi Kani case DM_TYPE_DAX_BIO_BASED: 264678d8e58aSMike Snitzer pool_size = dm_get_reserved_bio_based_ios(); 264730187e1dSMike Snitzer front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone); 2648eb8db831SChristoph Hellwig 2649eb8db831SChristoph Hellwig pools->io_pool = mempool_create_slab_pool(pool_size, _io_cache); 2650eb8db831SChristoph Hellwig if (!pools->io_pool) 2651eb8db831SChristoph Hellwig goto out; 265278d8e58aSMike Snitzer break; 265378d8e58aSMike Snitzer case DM_TYPE_REQUEST_BASED: 265478d8e58aSMike Snitzer case DM_TYPE_MQ_REQUEST_BASED: 265578d8e58aSMike Snitzer pool_size = dm_get_reserved_rq_based_ios(); 265678d8e58aSMike Snitzer front_pad = offsetof(struct dm_rq_clone_bio_info, clone); 2657591ddcfcSMike Snitzer /* per_io_data_size is used for blk-mq pdu at queue allocation */ 265878d8e58aSMike Snitzer break; 265978d8e58aSMike Snitzer default: 266078d8e58aSMike Snitzer BUG(); 266178d8e58aSMike Snitzer } 266278d8e58aSMike Snitzer 26633d8aab2dSJunichi Nomura pools->bs = bioset_create_nobvec(pool_size, front_pad); 2664e6ee8c0bSKiyoshi Ueda if (!pools->bs) 26655f015204SJun'ichi Nomura goto out; 2666e6ee8c0bSKiyoshi Ueda 2667a91a2785SMartin K. Petersen if (integrity && bioset_integrity_create(pools->bs, pool_size)) 26685f015204SJun'ichi Nomura goto out; 2669a91a2785SMartin K. Petersen 2670e6ee8c0bSKiyoshi Ueda return pools; 267178d8e58aSMike Snitzer 26725f015204SJun'ichi Nomura out: 26735f015204SJun'ichi Nomura dm_free_md_mempools(pools); 2674e6ee8c0bSKiyoshi Ueda 26754e6e36c3SMike Snitzer return NULL; 2676e6ee8c0bSKiyoshi Ueda } 2677e6ee8c0bSKiyoshi Ueda 2678e6ee8c0bSKiyoshi Ueda void dm_free_md_mempools(struct dm_md_mempools *pools) 2679e6ee8c0bSKiyoshi Ueda { 2680e6ee8c0bSKiyoshi Ueda if (!pools) 2681e6ee8c0bSKiyoshi Ueda return; 2682e6ee8c0bSKiyoshi Ueda 2683e6ee8c0bSKiyoshi Ueda mempool_destroy(pools->io_pool); 26841ae49ea2SMike Snitzer 2685e6ee8c0bSKiyoshi Ueda if (pools->bs) 2686e6ee8c0bSKiyoshi Ueda bioset_free(pools->bs); 2687e6ee8c0bSKiyoshi Ueda 2688e6ee8c0bSKiyoshi Ueda kfree(pools); 2689e6ee8c0bSKiyoshi Ueda } 2690e6ee8c0bSKiyoshi Ueda 26919c72bad1SChristoph Hellwig struct dm_pr { 26929c72bad1SChristoph Hellwig u64 old_key; 26939c72bad1SChristoph Hellwig u64 new_key; 26949c72bad1SChristoph Hellwig u32 flags; 26959c72bad1SChristoph Hellwig bool fail_early; 26969c72bad1SChristoph Hellwig }; 26979c72bad1SChristoph Hellwig 26989c72bad1SChristoph Hellwig static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn, 26999c72bad1SChristoph Hellwig void *data) 27009c72bad1SChristoph Hellwig { 27019c72bad1SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data; 27029c72bad1SChristoph Hellwig struct dm_table *table; 27039c72bad1SChristoph Hellwig struct dm_target *ti; 27049c72bad1SChristoph Hellwig int ret = -ENOTTY, srcu_idx; 27059c72bad1SChristoph Hellwig 27069c72bad1SChristoph Hellwig table = dm_get_live_table(md, &srcu_idx); 27079c72bad1SChristoph Hellwig if (!table || !dm_table_get_size(table)) 27089c72bad1SChristoph Hellwig goto out; 27099c72bad1SChristoph Hellwig 27109c72bad1SChristoph Hellwig /* We only support devices that have a single target */ 27119c72bad1SChristoph Hellwig if (dm_table_get_num_targets(table) != 1) 27129c72bad1SChristoph Hellwig goto out; 27139c72bad1SChristoph Hellwig ti = dm_table_get_target(table, 0); 27149c72bad1SChristoph Hellwig 27159c72bad1SChristoph Hellwig ret = -EINVAL; 27169c72bad1SChristoph Hellwig if (!ti->type->iterate_devices) 27179c72bad1SChristoph Hellwig goto out; 27189c72bad1SChristoph Hellwig 27199c72bad1SChristoph Hellwig ret = ti->type->iterate_devices(ti, fn, data); 27209c72bad1SChristoph Hellwig out: 27219c72bad1SChristoph Hellwig dm_put_live_table(md, srcu_idx); 27229c72bad1SChristoph Hellwig return ret; 27239c72bad1SChristoph Hellwig } 27249c72bad1SChristoph Hellwig 27259c72bad1SChristoph Hellwig /* 27269c72bad1SChristoph Hellwig * For register / unregister we need to manually call out to every path. 27279c72bad1SChristoph Hellwig */ 27289c72bad1SChristoph Hellwig static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev, 27299c72bad1SChristoph Hellwig sector_t start, sector_t len, void *data) 27309c72bad1SChristoph Hellwig { 27319c72bad1SChristoph Hellwig struct dm_pr *pr = data; 27329c72bad1SChristoph Hellwig const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; 27339c72bad1SChristoph Hellwig 27349c72bad1SChristoph Hellwig if (!ops || !ops->pr_register) 27359c72bad1SChristoph Hellwig return -EOPNOTSUPP; 27369c72bad1SChristoph Hellwig return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags); 27379c72bad1SChristoph Hellwig } 27389c72bad1SChristoph Hellwig 273971cdb697SChristoph Hellwig static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key, 274071cdb697SChristoph Hellwig u32 flags) 274171cdb697SChristoph Hellwig { 27429c72bad1SChristoph Hellwig struct dm_pr pr = { 27439c72bad1SChristoph Hellwig .old_key = old_key, 27449c72bad1SChristoph Hellwig .new_key = new_key, 27459c72bad1SChristoph Hellwig .flags = flags, 27469c72bad1SChristoph Hellwig .fail_early = true, 27479c72bad1SChristoph Hellwig }; 27489c72bad1SChristoph Hellwig int ret; 274971cdb697SChristoph Hellwig 27509c72bad1SChristoph Hellwig ret = dm_call_pr(bdev, __dm_pr_register, &pr); 27519c72bad1SChristoph Hellwig if (ret && new_key) { 27529c72bad1SChristoph Hellwig /* unregister all paths if we failed to register any path */ 27539c72bad1SChristoph Hellwig pr.old_key = new_key; 27549c72bad1SChristoph Hellwig pr.new_key = 0; 27559c72bad1SChristoph Hellwig pr.flags = 0; 27569c72bad1SChristoph Hellwig pr.fail_early = false; 27579c72bad1SChristoph Hellwig dm_call_pr(bdev, __dm_pr_register, &pr); 27589c72bad1SChristoph Hellwig } 275971cdb697SChristoph Hellwig 27609c72bad1SChristoph Hellwig return ret; 276171cdb697SChristoph Hellwig } 276271cdb697SChristoph Hellwig 276371cdb697SChristoph Hellwig static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type, 276471cdb697SChristoph Hellwig u32 flags) 276571cdb697SChristoph Hellwig { 276671cdb697SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data; 276771cdb697SChristoph Hellwig const struct pr_ops *ops; 276871cdb697SChristoph Hellwig fmode_t mode; 2769956a4025SMike Snitzer int r; 277071cdb697SChristoph Hellwig 2771956a4025SMike Snitzer r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); 277271cdb697SChristoph Hellwig if (r < 0) 277371cdb697SChristoph Hellwig return r; 277471cdb697SChristoph Hellwig 277571cdb697SChristoph Hellwig ops = bdev->bd_disk->fops->pr_ops; 277671cdb697SChristoph Hellwig if (ops && ops->pr_reserve) 277771cdb697SChristoph Hellwig r = ops->pr_reserve(bdev, key, type, flags); 277871cdb697SChristoph Hellwig else 277971cdb697SChristoph Hellwig r = -EOPNOTSUPP; 278071cdb697SChristoph Hellwig 2781956a4025SMike Snitzer bdput(bdev); 278271cdb697SChristoph Hellwig return r; 278371cdb697SChristoph Hellwig } 278471cdb697SChristoph Hellwig 278571cdb697SChristoph Hellwig static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type) 278671cdb697SChristoph Hellwig { 278771cdb697SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data; 278871cdb697SChristoph Hellwig const struct pr_ops *ops; 278971cdb697SChristoph Hellwig fmode_t mode; 2790956a4025SMike Snitzer int r; 279171cdb697SChristoph Hellwig 2792956a4025SMike Snitzer r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); 279371cdb697SChristoph Hellwig if (r < 0) 279471cdb697SChristoph Hellwig return r; 279571cdb697SChristoph Hellwig 279671cdb697SChristoph Hellwig ops = bdev->bd_disk->fops->pr_ops; 279771cdb697SChristoph Hellwig if (ops && ops->pr_release) 279871cdb697SChristoph Hellwig r = ops->pr_release(bdev, key, type); 279971cdb697SChristoph Hellwig else 280071cdb697SChristoph Hellwig r = -EOPNOTSUPP; 280171cdb697SChristoph Hellwig 2802956a4025SMike Snitzer bdput(bdev); 280371cdb697SChristoph Hellwig return r; 280471cdb697SChristoph Hellwig } 280571cdb697SChristoph Hellwig 280671cdb697SChristoph Hellwig static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key, 280771cdb697SChristoph Hellwig enum pr_type type, bool abort) 280871cdb697SChristoph Hellwig { 280971cdb697SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data; 281071cdb697SChristoph Hellwig const struct pr_ops *ops; 281171cdb697SChristoph Hellwig fmode_t mode; 2812956a4025SMike Snitzer int r; 281371cdb697SChristoph Hellwig 2814956a4025SMike Snitzer r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); 281571cdb697SChristoph Hellwig if (r < 0) 281671cdb697SChristoph Hellwig return r; 281771cdb697SChristoph Hellwig 281871cdb697SChristoph Hellwig ops = bdev->bd_disk->fops->pr_ops; 281971cdb697SChristoph Hellwig if (ops && ops->pr_preempt) 282071cdb697SChristoph Hellwig r = ops->pr_preempt(bdev, old_key, new_key, type, abort); 282171cdb697SChristoph Hellwig else 282271cdb697SChristoph Hellwig r = -EOPNOTSUPP; 282371cdb697SChristoph Hellwig 2824956a4025SMike Snitzer bdput(bdev); 282571cdb697SChristoph Hellwig return r; 282671cdb697SChristoph Hellwig } 282771cdb697SChristoph Hellwig 282871cdb697SChristoph Hellwig static int dm_pr_clear(struct block_device *bdev, u64 key) 282971cdb697SChristoph Hellwig { 283071cdb697SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data; 283171cdb697SChristoph Hellwig const struct pr_ops *ops; 283271cdb697SChristoph Hellwig fmode_t mode; 2833956a4025SMike Snitzer int r; 283471cdb697SChristoph Hellwig 2835956a4025SMike Snitzer r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); 283671cdb697SChristoph Hellwig if (r < 0) 283771cdb697SChristoph Hellwig return r; 283871cdb697SChristoph Hellwig 283971cdb697SChristoph Hellwig ops = bdev->bd_disk->fops->pr_ops; 284071cdb697SChristoph Hellwig if (ops && ops->pr_clear) 284171cdb697SChristoph Hellwig r = ops->pr_clear(bdev, key); 284271cdb697SChristoph Hellwig else 284371cdb697SChristoph Hellwig r = -EOPNOTSUPP; 284471cdb697SChristoph Hellwig 2845956a4025SMike Snitzer bdput(bdev); 284671cdb697SChristoph Hellwig return r; 284771cdb697SChristoph Hellwig } 284871cdb697SChristoph Hellwig 284971cdb697SChristoph Hellwig static const struct pr_ops dm_pr_ops = { 285071cdb697SChristoph Hellwig .pr_register = dm_pr_register, 285171cdb697SChristoph Hellwig .pr_reserve = dm_pr_reserve, 285271cdb697SChristoph Hellwig .pr_release = dm_pr_release, 285371cdb697SChristoph Hellwig .pr_preempt = dm_pr_preempt, 285471cdb697SChristoph Hellwig .pr_clear = dm_pr_clear, 285571cdb697SChristoph Hellwig }; 285671cdb697SChristoph Hellwig 285783d5cde4SAlexey Dobriyan static const struct block_device_operations dm_blk_dops = { 28581da177e4SLinus Torvalds .open = dm_blk_open, 28591da177e4SLinus Torvalds .release = dm_blk_close, 2860aa129a22SMilan Broz .ioctl = dm_blk_ioctl, 28613ac51e74SDarrick J. Wong .getgeo = dm_blk_getgeo, 286271cdb697SChristoph Hellwig .pr_ops = &dm_pr_ops, 28631da177e4SLinus Torvalds .owner = THIS_MODULE 28641da177e4SLinus Torvalds }; 28651da177e4SLinus Torvalds 2866f26c5719SDan Williams static const struct dax_operations dm_dax_ops = { 2867f26c5719SDan Williams .direct_access = dm_dax_direct_access, 2868f26c5719SDan Williams }; 2869f26c5719SDan Williams 28701da177e4SLinus Torvalds /* 28711da177e4SLinus Torvalds * module hooks 28721da177e4SLinus Torvalds */ 28731da177e4SLinus Torvalds module_init(dm_init); 28741da177e4SLinus Torvalds module_exit(dm_exit); 28751da177e4SLinus Torvalds 28761da177e4SLinus Torvalds module_param(major, uint, 0); 28771da177e4SLinus Torvalds MODULE_PARM_DESC(major, "The major number of the device mapper"); 2878f4790826SMike Snitzer 2879e8603136SMike Snitzer module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR); 2880e8603136SMike Snitzer MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools"); 2881e8603136SMike Snitzer 2882115485e8SMike Snitzer module_param(dm_numa_node, int, S_IRUGO | S_IWUSR); 2883115485e8SMike Snitzer MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations"); 2884115485e8SMike Snitzer 28851da177e4SLinus Torvalds MODULE_DESCRIPTION(DM_NAME " driver"); 28861da177e4SLinus Torvalds MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); 28871da177e4SLinus Torvalds MODULE_LICENSE("GPL"); 2888