/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/pr.h>

#define DM_MSG_PREFIX "core"

#ifdef CONFIG_PRINTK
/*
 * ratelimit state to be used in DMXXX_LIMIT().
 */
DEFINE_RATELIMIT_STATE(dm_ratelimit_state,
                       DEFAULT_RATELIMIT_INTERVAL,
                       DEFAULT_RATELIMIT_BURST);
EXPORT_SYMBOL(dm_ratelimit_state);
#endif

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_IDR(_minor_idr);

static DEFINE_SPINLOCK(_minor_lock);

static void do_deferred_remove(struct work_struct *w);

static DECLARE_WORK(deferred_remove_work, do_deferred_remove);

static struct workqueue_struct *deferred_remove_workqueue;

/*
 * One of these is allocated per bio.
 */
struct dm_io {
        struct mapped_device *md;
        int error;
        atomic_t io_count;
        struct bio *bio;
        unsigned long start_time;
        spinlock_t endio_lock;
        struct dm_stats_aux stats_aux;
};

#define MINOR_ALLOCED ((void *)-1)
/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7

#define DM_NUMA_NODE NUMA_NO_NODE
static int dm_numa_node = DM_NUMA_NODE;

/*
 * For mempool pre-allocation at table load time.
 */
struct dm_md_mempools {
        mempool_t *io_pool;
        mempool_t *rq_pool;
        struct bio_set *bs;
};

struct table_device {
        struct list_head list;
        atomic_t count;
        struct dm_dev dm_dev;
};

static struct kmem_cache *_io_cache;
static struct kmem_cache *_rq_tio_cache;
static struct kmem_cache *_rq_cache;

/*
 * Bio-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_BIO_BASED_IOS 16
static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;

static int __dm_get_module_param_int(int *module_param, int min, int max)
{
        int param = ACCESS_ONCE(*module_param);
        int modified_param = 0;
        bool modified = true;

        if (param < min)
                modified_param = min;
        else if (param > max)
                modified_param = max;
        else
                modified = false;

        if (modified) {
                (void)cmpxchg(module_param, param, modified_param);
                param = modified_param;
        }

        return param;
}

unsigned __dm_get_module_param(unsigned *module_param,
                               unsigned def, unsigned max)
{
        unsigned param = ACCESS_ONCE(*module_param);
        unsigned modified_param = 0;

        if (!param)
                modified_param = def;
        else if (param > max)
                modified_param = max;

        if (modified_param) {
                (void)cmpxchg(module_param, param, modified_param);
                param = modified_param;
        }

        return param;
}
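/*
 * Illustrative example (not part of the driver): with the clamping
 * helpers above, a user writing 0 to the reserved_bio_based_ios module
 * parameter is treated as "use the default":
 *
 *   __dm_get_module_param(&reserved_bio_based_ios, 16, DM_RESERVED_MAX_IOS)
 *     -> returns 16 when the stored value is 0 (rewriting it via cmpxchg)
 *     -> returns DM_RESERVED_MAX_IOS when the stored value exceeds the max
 *
 * whereas __dm_get_module_param_int() clamps into [min, max] instead of
 * substituting a default.
 */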
unsigned dm_get_reserved_bio_based_ios(void)
{
        return __dm_get_module_param(&reserved_bio_based_ios,
                                     RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);

static unsigned dm_get_numa_node(void)
{
        return __dm_get_module_param_int(&dm_numa_node,
                                         DM_NUMA_NODE, num_online_nodes() - 1);
}

static int __init local_init(void)
{
        int r = -ENOMEM;

        /* allocate a slab for the dm_ios */
        _io_cache = KMEM_CACHE(dm_io, 0);
        if (!_io_cache)
                return r;

        _rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
        if (!_rq_tio_cache)
                goto out_free_io_cache;

        _rq_cache = kmem_cache_create("dm_old_clone_request", sizeof(struct request),
                                      __alignof__(struct request), 0, NULL);
        if (!_rq_cache)
                goto out_free_rq_tio_cache;

        r = dm_uevent_init();
        if (r)
                goto out_free_rq_cache;

        deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
        if (!deferred_remove_workqueue) {
                r = -ENOMEM;
                goto out_uevent_exit;
        }

        _major = major;
        r = register_blkdev(_major, _name);
        if (r < 0)
                goto out_free_workqueue;

        if (!_major)
                _major = r;

        return 0;

out_free_workqueue:
        destroy_workqueue(deferred_remove_workqueue);
out_uevent_exit:
        dm_uevent_exit();
out_free_rq_cache:
        kmem_cache_destroy(_rq_cache);
out_free_rq_tio_cache:
        kmem_cache_destroy(_rq_tio_cache);
out_free_io_cache:
        kmem_cache_destroy(_io_cache);

        return r;
}
static void local_exit(void)
{
        flush_scheduled_work();
        destroy_workqueue(deferred_remove_workqueue);

        kmem_cache_destroy(_rq_cache);
        kmem_cache_destroy(_rq_tio_cache);
        kmem_cache_destroy(_io_cache);
        unregister_blkdev(_major, _name);
        dm_uevent_exit();

        _major = 0;

        DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
        local_init,
        dm_target_init,
        dm_linear_init,
        dm_stripe_init,
        dm_io_init,
        dm_kcopyd_init,
        dm_interface_init,
        dm_statistics_init,
};

static void (*_exits[])(void) = {
        local_exit,
        dm_target_exit,
        dm_linear_exit,
        dm_stripe_exit,
        dm_io_exit,
        dm_kcopyd_exit,
        dm_interface_exit,
        dm_statistics_exit,
};

static int __init dm_init(void)
{
        const int count = ARRAY_SIZE(_inits);

        int r, i;

        for (i = 0; i < count; i++) {
                r = _inits[i]();
                if (r)
                        goto bad;
        }

        return 0;

bad:
        while (i--)
                _exits[i]();

        return r;
}
static void __exit dm_exit(void)
{
        int i = ARRAY_SIZE(_exits);

        while (i--)
                _exits[i]();

        /*
         * Should be empty by this point.
         */
        idr_destroy(&_minor_idr);
}

/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{
        return test_bit(DMF_DELETING, &md->flags);
}

static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
        struct mapped_device *md;

        spin_lock(&_minor_lock);

        md = bdev->bd_disk->private_data;
        if (!md)
                goto out;

        if (test_bit(DMF_FREEING, &md->flags) ||
            dm_deleting_md(md)) {
                md = NULL;
                goto out;
        }

        dm_get(md);
        atomic_inc(&md->open_count);
out:
        spin_unlock(&_minor_lock);

        return md ? 0 : -ENXIO;
}

static void dm_blk_close(struct gendisk *disk, fmode_t mode)
{
        struct mapped_device *md;

        spin_lock(&_minor_lock);

        md = disk->private_data;
        if (WARN_ON(!md))
                goto out;

        if (atomic_dec_and_test(&md->open_count) &&
            (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
                queue_work(deferred_remove_workqueue, &deferred_remove_work);

        dm_put(md);
out:
        spin_unlock(&_minor_lock);
}

int dm_open_count(struct mapped_device *md)
{
        return atomic_read(&md->open_count);
}
/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
{
        int r = 0;

        spin_lock(&_minor_lock);

        if (dm_open_count(md)) {
                r = -EBUSY;
                if (mark_deferred)
                        set_bit(DMF_DEFERRED_REMOVE, &md->flags);
        } else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
                r = -EEXIST;
        else
                set_bit(DMF_DELETING, &md->flags);

        spin_unlock(&_minor_lock);

        return r;
}

int dm_cancel_deferred_remove(struct mapped_device *md)
{
        int r = 0;

        spin_lock(&_minor_lock);

        if (test_bit(DMF_DELETING, &md->flags))
                r = -EBUSY;
        else
                clear_bit(DMF_DEFERRED_REMOVE, &md->flags);

        spin_unlock(&_minor_lock);

        return r;
}

static void do_deferred_remove(struct work_struct *w)
{
        dm_deferred_remove();
}

sector_t dm_get_size(struct mapped_device *md)
{
        return get_capacity(md->disk);
}

struct request_queue *dm_get_md_queue(struct mapped_device *md)
{
        return md->queue;
}

struct dm_stats *dm_get_stats(struct mapped_device *md)
{
        return &md->stats;
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        struct mapped_device *md = bdev->bd_disk->private_data;

        return dm_get_geometry(md, geo);
}
static int dm_grab_bdev_for_ioctl(struct mapped_device *md,
                                  struct block_device **bdev,
                                  fmode_t *mode)
{
        struct dm_target *tgt;
        struct dm_table *map;
        int srcu_idx, r;

retry:
        r = -ENOTTY;
        map = dm_get_live_table(md, &srcu_idx);
        if (!map || !dm_table_get_size(map))
                goto out;

        /* We only support devices that have a single target */
        if (dm_table_get_num_targets(map) != 1)
                goto out;

        tgt = dm_table_get_target(map, 0);
        if (!tgt->type->prepare_ioctl)
                goto out;

        if (dm_suspended_md(md)) {
                r = -EAGAIN;
                goto out;
        }

        r = tgt->type->prepare_ioctl(tgt, bdev, mode);
        if (r < 0)
                goto out;

        bdgrab(*bdev);
        dm_put_live_table(md, srcu_idx);
        return r;

out:
        dm_put_live_table(md, srcu_idx);
        if (r == -ENOTCONN && !fatal_signal_pending(current)) {
                msleep(10);
                goto retry;
        }
        return r;
}
static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
                        unsigned int cmd, unsigned long arg)
{
        struct mapped_device *md = bdev->bd_disk->private_data;
        int r;

        r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
        if (r < 0)
                return r;

        if (r > 0) {
                /*
                 * Target determined this ioctl is being issued against
                 * a logical partition of the parent bdev; so extra
                 * validation is needed.
                 */
                r = scsi_verify_blk_ioctl(NULL, cmd);
                if (r)
                        goto out;
        }

        r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
out:
        bdput(bdev);
        return r;
}

static struct dm_io *alloc_io(struct mapped_device *md)
{
        return mempool_alloc(md->io_pool, GFP_NOIO);
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
        mempool_free(io, md->io_pool);
}

static void free_tio(struct dm_target_io *tio)
{
        bio_put(&tio->clone);
}

int md_in_flight(struct mapped_device *md)
{
        return atomic_read(&md->pending[READ]) +
               atomic_read(&md->pending[WRITE]);
}

static void start_io_acct(struct dm_io *io)
{
        struct mapped_device *md = io->md;
        struct bio *bio = io->bio;
        int cpu;
        int rw = bio_data_dir(bio);

        io->start_time = jiffies;

        cpu = part_stat_lock();
        part_round_stats(cpu, &dm_disk(md)->part0);
        part_stat_unlock();
        atomic_set(&dm_disk(md)->part0.in_flight[rw],
                   atomic_inc_return(&md->pending[rw]));

        if (unlikely(dm_stats_used(&md->stats)))
                dm_stats_account_io(&md->stats, bio_data_dir(bio),
                                    bio->bi_iter.bi_sector, bio_sectors(bio),
                                    false, 0, &io->stats_aux);
}
static void end_io_acct(struct dm_io *io)
{
        struct mapped_device *md = io->md;
        struct bio *bio = io->bio;
        unsigned long duration = jiffies - io->start_time;
        int pending;
        int rw = bio_data_dir(bio);

        generic_end_io_acct(rw, &dm_disk(md)->part0, io->start_time);

        if (unlikely(dm_stats_used(&md->stats)))
                dm_stats_account_io(&md->stats, bio_data_dir(bio),
                                    bio->bi_iter.bi_sector, bio_sectors(bio),
                                    true, duration, &io->stats_aux);

        /*
         * After this is decremented the bio must not be touched if it is
         * a flush.
         */
        pending = atomic_dec_return(&md->pending[rw]);
        atomic_set(&dm_disk(md)->part0.in_flight[rw], pending);
        pending += atomic_read(&md->pending[rw^0x1]);

        /* nudge anyone waiting on suspend queue */
        if (!pending)
                wake_up(&md->wait);
}

/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
        unsigned long flags;

        spin_lock_irqsave(&md->deferred_lock, flags);
        bio_list_add(&md->deferred, bio);
        spin_unlock_irqrestore(&md->deferred_lock, flags);
        queue_work(md->wq, &md->work);
}

/*
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_put_live_table() when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
{
        *srcu_idx = srcu_read_lock(&md->io_barrier);

        return srcu_dereference(md->map, &md->io_barrier);
}

void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
{
        srcu_read_unlock(&md->io_barrier, srcu_idx);
}

void dm_sync_table(struct mapped_device *md)
{
        synchronize_srcu(&md->io_barrier);
        synchronize_rcu_expedited();
}

/*
 * A fast alternative to dm_get_live_table/dm_put_live_table.
 * The caller must not block between these two functions.
 */
static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
{
        rcu_read_lock();
        return rcu_dereference(md->map);
}

static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
{
        rcu_read_unlock();
}
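/*
 * Illustrative usage pattern for the accessors above (a sketch, not new
 * driver code): callers bracket any dereference of the live table with
 * get/put so that table swaps can wait out readers via dm_sync_table():
 *
 *   int srcu_idx;
 *   struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 *
 *   if (map)
 *           ... look up targets, issue I/O ...
 *   dm_put_live_table(md, srcu_idx);
 */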
/*
 * Open a table device so we can use it as a map destination.
 */
static int open_table_device(struct table_device *td, dev_t dev,
                             struct mapped_device *md)
{
        static char *_claim_ptr = "I belong to device-mapper";
        struct block_device *bdev;

        int r;

        BUG_ON(td->dm_dev.bdev);

        bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _claim_ptr);
        if (IS_ERR(bdev))
                return PTR_ERR(bdev);

        r = bd_link_disk_holder(bdev, dm_disk(md));
        if (r) {
                blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
                return r;
        }

        td->dm_dev.bdev = bdev;
        return 0;
}

/*
 * Close a table device that we've been using.
 */
static void close_table_device(struct table_device *td, struct mapped_device *md)
{
        if (!td->dm_dev.bdev)
                return;

        bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
        blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
        td->dm_dev.bdev = NULL;
}

static struct table_device *find_table_device(struct list_head *l, dev_t dev,
                                              fmode_t mode) {
        struct table_device *td;

        list_for_each_entry(td, l, list)
                if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
                        return td;

        return NULL;
}
int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
                        struct dm_dev **result) {
        int r;
        struct table_device *td;

        mutex_lock(&md->table_devices_lock);
        td = find_table_device(&md->table_devices, dev, mode);
        if (!td) {
                td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
                if (!td) {
                        mutex_unlock(&md->table_devices_lock);
                        return -ENOMEM;
                }

                td->dm_dev.mode = mode;
                td->dm_dev.bdev = NULL;

                if ((r = open_table_device(td, dev, md))) {
                        mutex_unlock(&md->table_devices_lock);
                        kfree(td);
                        return r;
                }

                format_dev_t(td->dm_dev.name, dev);

                atomic_set(&td->count, 0);
                list_add(&td->list, &md->table_devices);
        }
        atomic_inc(&td->count);
        mutex_unlock(&md->table_devices_lock);

        *result = &td->dm_dev;
        return 0;
}
EXPORT_SYMBOL_GPL(dm_get_table_device);

void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
{
        struct table_device *td = container_of(d, struct table_device, dm_dev);

        mutex_lock(&md->table_devices_lock);
        if (atomic_dec_and_test(&td->count)) {
                close_table_device(td, md);
                list_del(&td->list);
                kfree(td);
        }
        mutex_unlock(&md->table_devices_lock);
}
EXPORT_SYMBOL(dm_put_table_device);

static void free_table_devices(struct list_head *devices)
{
        struct list_head *tmp, *next;

        list_for_each_safe(tmp, next, devices) {
                struct table_device *td = list_entry(tmp, struct table_device, list);

                DMWARN("dm_destroy: %s still exists with %d references",
                       td->dm_dev.name, atomic_read(&td->count));
                kfree(td);
        }
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
        *geo = md->geometry;

        return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
        sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

        if (geo->start > sz) {
                DMWARN("Start sector is beyond the geometry limits.");
                return -EINVAL;
        }

        md->geometry = *geo;

        return 0;
}
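/*
 * Illustrative pairing for the reference-counted table devices above
 * (a sketch, not new driver code; the mode flags are just an example):
 * dm_get_table_device() either reuses an existing entry or opens the
 * bdev once, and must be balanced by dm_put_table_device(), which
 * closes the bdev when the count drops to zero:
 *
 *   struct dm_dev *d;
 *
 *   if (!dm_get_table_device(md, dev, FMODE_READ | FMODE_WRITE, &d)) {
 *           ... use d->bdev ...
 *           dm_put_table_device(md, d);
 *   }
 */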
/*-----------------------------------------------------------------
 * CRUD START:
 * A more elegant solution is in the works that uses the queue
 * merge fn, unfortunately there are a couple of changes to
 * the block layer that I want to make for this. So in the
 * interests of getting something for people to use I give
 * you this clearly demarcated crap.
 *---------------------------------------------------------------*/

static int __noflush_suspending(struct mapped_device *md)
{
        return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, int error)
{
        unsigned long flags;
        int io_error;
        struct bio *bio;
        struct mapped_device *md = io->md;

        /* Push-back supersedes any I/O errors */
        if (unlikely(error)) {
                spin_lock_irqsave(&io->endio_lock, flags);
                if (!(io->error > 0 && __noflush_suspending(md)))
                        io->error = error;
                spin_unlock_irqrestore(&io->endio_lock, flags);
        }

        if (atomic_dec_and_test(&io->io_count)) {
                if (io->error == DM_ENDIO_REQUEUE) {
                        /*
                         * Target requested pushing back the I/O.
                         */
                        spin_lock_irqsave(&md->deferred_lock, flags);
                        if (__noflush_suspending(md))
                                bio_list_add_head(&md->deferred, io->bio);
                        else
                                /* noflush suspend was interrupted. */
                                io->error = -EIO;
                        spin_unlock_irqrestore(&md->deferred_lock, flags);
                }

                io_error = io->error;
                bio = io->bio;
                end_io_acct(io);
                free_io(md, io);

                if (io_error == DM_ENDIO_REQUEUE)
                        return;

                if ((bio->bi_rw & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
                        /*
                         * Preflush done for flush with data, reissue
                         * without REQ_PREFLUSH.
                         */
                        bio->bi_rw &= ~REQ_PREFLUSH;
                        queue_io(md, bio);
                } else {
                        /* done with normal IO or empty flush */
                        trace_block_bio_complete(md->queue, bio, io_error);
                        bio->bi_error = io_error;
                        bio_endio(bio);
                }
        }
}
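/*
 * Lifecycle note (an illustrative summary, not new behaviour): a struct
 * dm_io starts with io_count == 1 in __split_and_process_bio(); __map_bio()
 * takes an extra reference for each clone it dispatches, and clone_endio()
 * drops them through dec_pending(). The original bio therefore completes
 * exactly once, when the initial reference is dropped after all clones
 * have finished.
 */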
void disable_write_same(struct mapped_device *md)
{
        struct queue_limits *limits = dm_get_queue_limits(md);

        /* device doesn't really support WRITE SAME, disable it */
        limits->max_write_same_sectors = 0;
}

static void clone_endio(struct bio *bio)
{
        int error = bio->bi_error;
        int r = error;
        struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
        struct dm_io *io = tio->io;
        struct mapped_device *md = tio->io->md;
        dm_endio_fn endio = tio->ti->type->end_io;

        if (endio) {
                r = endio(tio->ti, bio, error);
                if (r < 0 || r == DM_ENDIO_REQUEUE)
                        /*
                         * error and requeue request are handled
                         * in dec_pending().
                         */
                        error = r;
                else if (r == DM_ENDIO_INCOMPLETE)
                        /* The target will handle the io */
                        return;
                else if (r) {
                        DMWARN("unimplemented target endio return value: %d", r);
                        BUG();
                }
        }

        if (unlikely(r == -EREMOTEIO && (bio_op(bio) == REQ_OP_WRITE_SAME) &&
                     !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors))
                disable_write_same(md);

        free_tio(tio);
        dec_pending(io, error);
}

/*
 * Return maximum size of I/O possible at the supplied sector up to the current
 * target boundary.
 */
static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
{
        sector_t target_offset = dm_target_offset(ti, sector);

        return ti->len - target_offset;
}
static sector_t max_io_len(sector_t sector, struct dm_target *ti)
{
        sector_t len = max_io_len_target_boundary(sector, ti);
        sector_t offset, max_len;

        /*
         * Does the target need to split even further?
         */
        if (ti->max_io_len) {
                offset = dm_target_offset(ti, sector);
                if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
                        max_len = sector_div(offset, ti->max_io_len);
                else
                        max_len = offset & (ti->max_io_len - 1);
                max_len = ti->max_io_len - max_len;

                if (len > max_len)
                        len = max_len;
        }

        return len;
}

int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
{
        if (len > UINT_MAX) {
                DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
                      (unsigned long long)len, UINT_MAX);
                ti->error = "Maximum size of target IO is too large";
                return -EINVAL;
        }

        ti->max_io_len = (uint32_t) len;

        return 0;
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);

static long dm_blk_direct_access(struct block_device *bdev, sector_t sector,
                                 void **kaddr, pfn_t *pfn, long size)
{
        struct mapped_device *md = bdev->bd_disk->private_data;
        struct dm_table *map;
        struct dm_target *ti;
        int srcu_idx;
        long len, ret = -EIO;

        map = dm_get_live_table(md, &srcu_idx);
        if (!map)
                goto out;

        ti = dm_table_find_target(map, sector);
        if (!dm_target_is_valid(ti))
                goto out;

        len = max_io_len(sector, ti) << SECTOR_SHIFT;
        size = min(len, size);

        if (ti->type->direct_access)
                ret = ti->type->direct_access(ti, sector, kaddr, pfn, size);
out:
        dm_put_live_table(md, srcu_idx);
        return min(ret, size);
}
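/*
 * Illustrative use of dm_set_target_max_io_len() (a sketch; the target
 * name and chunk size below are made up): a chunked target caps individual
 * I/Os at its chunk size from its constructor, so max_io_len() above will
 * split bios on chunk boundaries:
 *
 *   static int example_ctr(struct dm_target *ti, unsigned argc, char **argv)
 *   {
 *           sector_t chunk_sectors = 128;    // hypothetical chunk size
 *
 *           ...
 *           return dm_set_target_max_io_len(ti, chunk_sectors);
 *   }
 */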
/*
 * A target may call dm_accept_partial_bio only from the map routine. It is
 * allowed for all bio types except REQ_PREFLUSH.
 *
 * dm_accept_partial_bio informs the dm that the target only wants to process
 * additional n_sectors sectors of the bio and the rest of the data should be
 * sent in the next bio.
 *
 * A diagram that explains the arithmetic:
 * +--------------------+---------------+-------+
 * |         1          |       2       |   3   |
 * +--------------------+---------------+-------+
 *
 * <-------------- *tio->len_ptr --------------->
 *                      <------- bi_size ------->
 *                      <-- n_sectors -->
 *
 * Region 1 was already iterated over with bio_advance or similar function.
 *  (it may be empty if the target doesn't use bio_advance)
 * Region 2 is the remaining bio size that the target wants to process.
 *  (it may be empty if region 1 is non-empty, although there is no reason
 *   to make it empty)
 * The target requires that region 3 is to be sent in the next bio.
 *
 * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
 * the partially processed part (the sum of regions 1+2) must be the same for all
 * copies of the bio.
 */
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
{
        struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
        unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
        BUG_ON(bio->bi_rw & REQ_PREFLUSH);
        BUG_ON(bi_size > *tio->len_ptr);
        BUG_ON(n_sectors > bi_size);
        *tio->len_ptr -= bi_size - n_sectors;
        bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
}
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
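/*
 * Illustrative map-time usage (a sketch; the target internals are made up):
 * a target that can only handle the first part of an incoming bio trims it
 * and lets dm resubmit the remainder in a follow-up bio:
 *
 *   static int example_map(struct dm_target *ti, struct bio *bio)
 *   {
 *           unsigned n_sectors = ...;  // how much this target can take now
 *
 *           if (n_sectors < bio_sectors(bio))
 *                   dm_accept_partial_bio(bio, n_sectors);
 *           bio->bi_bdev = ...;        // remap as usual
 *           return DM_MAPIO_REMAPPED;
 *   }
 */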
static void __map_bio(struct dm_target_io *tio)
{
        int r;
        sector_t sector;
        struct bio *clone = &tio->clone;
        struct dm_target *ti = tio->ti;

        clone->bi_end_io = clone_endio;

        /*
         * Map the clone. If r == 0 we don't need to do
         * anything, the target has assumed ownership of
         * this io.
         */
        atomic_inc(&tio->io->io_count);
        sector = clone->bi_iter.bi_sector;
        r = ti->type->map(ti, clone);
        if (r == DM_MAPIO_REMAPPED) {
                /* the bio has been remapped so dispatch it */

                trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone,
                                      tio->io->bio->bi_bdev->bd_dev, sector);

                generic_make_request(clone);
        } else if (r < 0 || r == DM_MAPIO_REQUEUE) {
                /* error the io and bail out, or requeue it if needed */
                dec_pending(tio->io, r);
                free_tio(tio);
        } else if (r != DM_MAPIO_SUBMITTED) {
                DMWARN("unimplemented target map return value: %d", r);
                BUG();
        }
}

struct clone_info {
        struct mapped_device *md;
        struct dm_table *map;
        struct bio *bio;
        struct dm_io *io;
        sector_t sector;
        unsigned sector_count;
};

static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
{
        bio->bi_iter.bi_sector = sector;
        bio->bi_iter.bi_size = to_bytes(len);
}
/*
 * Creates a bio that consists of a range of complete bvecs.
 */
static int clone_bio(struct dm_target_io *tio, struct bio *bio,
                     sector_t sector, unsigned len)
{
        struct bio *clone = &tio->clone;

        __bio_clone_fast(clone, bio);

        if (bio_integrity(bio)) {
                int r = bio_integrity_clone(clone, bio, GFP_NOIO);
                if (r < 0)
                        return r;
        }

        bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
        clone->bi_iter.bi_size = to_bytes(len);

        if (bio_integrity(bio))
                bio_integrity_trim(clone, 0, len);

        return 0;
}

static struct dm_target_io *alloc_tio(struct clone_info *ci,
                                      struct dm_target *ti,
                                      unsigned target_bio_nr)
{
        struct dm_target_io *tio;
        struct bio *clone;

        clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs);
        tio = container_of(clone, struct dm_target_io, clone);

        tio->io = ci->io;
        tio->ti = ti;
        tio->target_bio_nr = target_bio_nr;

        return tio;
}

static void __clone_and_map_simple_bio(struct clone_info *ci,
                                       struct dm_target *ti,
                                       unsigned target_bio_nr, unsigned *len)
{
        struct dm_target_io *tio = alloc_tio(ci, ti, target_bio_nr);
        struct bio *clone = &tio->clone;

        tio->len_ptr = len;

        __bio_clone_fast(clone, ci->bio);
        if (len)
                bio_setup_sector(clone, ci->sector, *len);

        __map_bio(tio);
}

static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
                                  unsigned num_bios, unsigned *len)
{
        unsigned target_bio_nr;

        for (target_bio_nr = 0; target_bio_nr < num_bios; target_bio_nr++)
                __clone_and_map_simple_bio(ci, ti, target_bio_nr, len);
}

static int __send_empty_flush(struct clone_info *ci)
{
        unsigned target_nr = 0;
        struct dm_target *ti;

        BUG_ON(bio_has_data(ci->bio));
        while ((ti = dm_table_get_target(ci->map, target_nr++)))
                __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);

        return 0;
}
static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
                                    sector_t sector, unsigned *len)
{
        struct bio *bio = ci->bio;
        struct dm_target_io *tio;
        unsigned target_bio_nr;
        unsigned num_target_bios = 1;
        int r = 0;

        /*
         * Does the target want to receive duplicate copies of the bio?
         */
        if (bio_data_dir(bio) == WRITE && ti->num_write_bios)
                num_target_bios = ti->num_write_bios(ti, bio);

        for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) {
                tio = alloc_tio(ci, ti, target_bio_nr);
                tio->len_ptr = len;
                r = clone_bio(tio, bio, sector, *len);
                if (r < 0) {
                        free_tio(tio);
                        break;
                }
                __map_bio(tio);
        }

        return r;
}

typedef unsigned (*get_num_bios_fn)(struct dm_target *ti);

static unsigned get_num_discard_bios(struct dm_target *ti)
{
        return ti->num_discard_bios;
}

static unsigned get_num_write_same_bios(struct dm_target *ti)
{
        return ti->num_write_same_bios;
}

typedef bool (*is_split_required_fn)(struct dm_target *ti);

static bool is_split_required_for_discard(struct dm_target *ti)
{
        return ti->split_discard_bios;
}
get_num_bios(ti) : 0; 117155a62eefSAlasdair G Kergon if (!num_bios) 11725ae89a87SMike Snitzer return -EOPNOTSUPP; 11735ae89a87SMike Snitzer 117423508a96SMike Snitzer if (is_split_required && !is_split_required(ti)) 1175e0d6609aSMikulas Patocka len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti)); 11767acf0277SMikulas Patocka else 1177e0d6609aSMikulas Patocka len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti)); 11785ae89a87SMike Snitzer 11791dd40c3eSMikulas Patocka __send_duplicate_bios(ci, ti, num_bios, &len); 11805ae89a87SMike Snitzer 1181a79245b3SMike Snitzer ci->sector += len; 1182a79245b3SMike Snitzer } while (ci->sector_count -= len); 11835ae89a87SMike Snitzer 11845ae89a87SMike Snitzer return 0; 11855ae89a87SMike Snitzer } 11865ae89a87SMike Snitzer 118714fe594dSAlasdair G Kergon static int __send_discard(struct clone_info *ci) 118823508a96SMike Snitzer { 118914fe594dSAlasdair G Kergon return __send_changing_extent_only(ci, get_num_discard_bios, 119023508a96SMike Snitzer is_split_required_for_discard); 119123508a96SMike Snitzer } 119223508a96SMike Snitzer 119314fe594dSAlasdair G Kergon static int __send_write_same(struct clone_info *ci) 119423508a96SMike Snitzer { 119514fe594dSAlasdair G Kergon return __send_changing_extent_only(ci, get_num_write_same_bios, NULL); 119623508a96SMike Snitzer } 119723508a96SMike Snitzer 1198e4c93811SAlasdair G Kergon /* 1199e4c93811SAlasdair G Kergon * Select the correct strategy for processing a non-flush bio. 1200e4c93811SAlasdair G Kergon */ 1201e4c93811SAlasdair G Kergon static int __split_and_process_non_flush(struct clone_info *ci) 1202e4c93811SAlasdair G Kergon { 1203e4c93811SAlasdair G Kergon struct bio *bio = ci->bio; 1204e4c93811SAlasdair G Kergon struct dm_target *ti; 12051c3b13e6SKent Overstreet unsigned len; 1206c80914e8SMike Snitzer int r; 1207e4c93811SAlasdair G Kergon 1208e6047149SMike Christie if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) 1209e4c93811SAlasdair G Kergon return __send_discard(ci); 1210e6047149SMike Christie else if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME)) 1211e4c93811SAlasdair G Kergon return __send_write_same(ci); 1212e4c93811SAlasdair G Kergon 1213e4c93811SAlasdair G Kergon ti = dm_table_find_target(ci->map, ci->sector); 1214e4c93811SAlasdair G Kergon if (!dm_target_is_valid(ti)) 1215e4c93811SAlasdair G Kergon return -EIO; 1216e4c93811SAlasdair G Kergon 12171c3b13e6SKent Overstreet len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count); 1218e4c93811SAlasdair G Kergon 1219c80914e8SMike Snitzer r = __clone_and_map_data_bio(ci, ti, ci->sector, &len); 1220c80914e8SMike Snitzer if (r < 0) 1221c80914e8SMike Snitzer return r; 1222e4c93811SAlasdair G Kergon 1223e4c93811SAlasdair G Kergon ci->sector += len; 1224e4c93811SAlasdair G Kergon ci->sector_count -= len; 1225e4c93811SAlasdair G Kergon 1226e4c93811SAlasdair G Kergon return 0; 1227e4c93811SAlasdair G Kergon } 1228e4c93811SAlasdair G Kergon 1229e4c93811SAlasdair G Kergon /* 123014fe594dSAlasdair G Kergon * Entry point to split a bio into clones and submit them to the targets. 
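 *
 * To make the loop below concrete (sizes purely illustrative): a 1 MiB
 * write to a target whose max_io_len() works out to 256 KiB is handled
 * in four passes, each pass cloning and mapping one 256 KiB slice and
 * advancing ci.sector before the next iteration:
 *
 *	while (ci.sector_count && !error)
 *		error = __split_and_process_non_flush(&ci);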
12311da177e4SLinus Torvalds */ 123283d5e5b0SMikulas Patocka static void __split_and_process_bio(struct mapped_device *md, 123383d5e5b0SMikulas Patocka struct dm_table *map, struct bio *bio) 12341da177e4SLinus Torvalds { 12351da177e4SLinus Torvalds struct clone_info ci; 1236512875bdSJun'ichi Nomura int error = 0; 12371da177e4SLinus Torvalds 123883d5e5b0SMikulas Patocka if (unlikely(!map)) { 1239f0b9a450SMikulas Patocka bio_io_error(bio); 1240f0b9a450SMikulas Patocka return; 1241f0b9a450SMikulas Patocka } 1242692d0eb9SMikulas Patocka 124383d5e5b0SMikulas Patocka ci.map = map; 12441da177e4SLinus Torvalds ci.md = md; 12451da177e4SLinus Torvalds ci.io = alloc_io(md); 12461da177e4SLinus Torvalds ci.io->error = 0; 12471da177e4SLinus Torvalds atomic_set(&ci.io->io_count, 1); 12481da177e4SLinus Torvalds ci.io->bio = bio; 12491da177e4SLinus Torvalds ci.io->md = md; 1250f88fb981SKiyoshi Ueda spin_lock_init(&ci.io->endio_lock); 12514f024f37SKent Overstreet ci.sector = bio->bi_iter.bi_sector; 12521da177e4SLinus Torvalds 12533eaf840eSJun'ichi "Nick" Nomura start_io_acct(ci.io); 1254bd2a49b8SAlasdair G Kergon 125528a8f0d3SMike Christie if (bio->bi_rw & REQ_PREFLUSH) { 1256b372d360SMike Snitzer ci.bio = &ci.md->flush_bio; 1257b372d360SMike Snitzer ci.sector_count = 0; 125814fe594dSAlasdair G Kergon error = __send_empty_flush(&ci); 1259b372d360SMike Snitzer /* dec_pending submits any data associated with flush */ 1260b372d360SMike Snitzer } else { 12616a8736d1STejun Heo ci.bio = bio; 1262f6fccb12SMilan Broz ci.sector_count = bio_sectors(bio); 1263512875bdSJun'ichi Nomura while (ci.sector_count && !error) 126414fe594dSAlasdair G Kergon error = __split_and_process_non_flush(&ci); 1265d87f4c14STejun Heo } 12661da177e4SLinus Torvalds 12671da177e4SLinus Torvalds /* drop the extra reference count */ 1268512875bdSJun'ichi Nomura dec_pending(ci.io, error); 12699e4e5f87SMilan Broz } 12709e4e5f87SMilan Broz /*----------------------------------------------------------------- 12711da177e4SLinus Torvalds * CRUD END 12721da177e4SLinus Torvalds *---------------------------------------------------------------*/ 12731da177e4SLinus Torvalds 12741da177e4SLinus Torvalds /* 12751da177e4SLinus Torvalds * The request function that just remaps the bio built up by 12761da177e4SLinus Torvalds * dm_merge_bvec. 
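 *
 * (dm_merge_bvec no longer exists; splitting happens in
 * __split_and_process_bio above.)  While suspended, bios must not be
 * remapped, so the function defers them instead, roughly:
 *
 *	if (test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
 *		if (bio->bi_rw & REQ_RAHEAD)	// read-ahead: best effort
 *			bio_io_error(bio);	// fail it; reads can retry
 *		else
 *			queue_io(md, bio);	// replayed by dm_wq_work()
 *	}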
12771da177e4SLinus Torvalds */ 1278dece1635SJens Axboe static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio) 12791da177e4SLinus Torvalds { 128012f03a49SKevin Corry int rw = bio_data_dir(bio); 12811da177e4SLinus Torvalds struct mapped_device *md = q->queuedata; 128283d5e5b0SMikulas Patocka int srcu_idx; 128383d5e5b0SMikulas Patocka struct dm_table *map; 12841da177e4SLinus Torvalds 128583d5e5b0SMikulas Patocka map = dm_get_live_table(md, &srcu_idx); 12861da177e4SLinus Torvalds 128718c0b223SGu Zheng generic_start_io_acct(rw, bio_sectors(bio), &dm_disk(md)->part0); 128812f03a49SKevin Corry 12896a8736d1STejun Heo /* if we're suspended, we have to queue this io for later */ 12906a8736d1STejun Heo if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { 129183d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 12921da177e4SLinus Torvalds 129370246286SChristoph Hellwig if (!(bio->bi_rw & REQ_RAHEAD)) 129492c63902SMikulas Patocka queue_io(md, bio); 12956a8736d1STejun Heo else 12966a8736d1STejun Heo bio_io_error(bio); 1297dece1635SJens Axboe return BLK_QC_T_NONE; 12981da177e4SLinus Torvalds } 12991da177e4SLinus Torvalds 130083d5e5b0SMikulas Patocka __split_and_process_bio(md, map, bio); 130183d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 1302dece1635SJens Axboe return BLK_QC_T_NONE; 1303cec47e3dSKiyoshi Ueda } 1304cec47e3dSKiyoshi Ueda 13051da177e4SLinus Torvalds static int dm_any_congested(void *congested_data, int bdi_bits) 13061da177e4SLinus Torvalds { 13078a57dfc6SChandra Seetharaman int r = bdi_bits; 13088a57dfc6SChandra Seetharaman struct mapped_device *md = congested_data; 13098a57dfc6SChandra Seetharaman struct dm_table *map; 13101da177e4SLinus Torvalds 13111eb787ecSAlasdair G Kergon if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 1312e522c039SMike Snitzer if (dm_request_based(md)) { 1313cec47e3dSKiyoshi Ueda /* 1314e522c039SMike Snitzer * With request-based DM we only need to check the 1315e522c039SMike Snitzer * top-level queue for congestion. 1316cec47e3dSKiyoshi Ueda */ 1317e522c039SMike Snitzer r = md->queue->backing_dev_info.wb.state & bdi_bits; 1318e522c039SMike Snitzer } else { 1319e522c039SMike Snitzer map = dm_get_live_table_fast(md); 1320e522c039SMike Snitzer if (map) 13211da177e4SLinus Torvalds r = dm_table_any_congested(map, bdi_bits); 132283d5e5b0SMikulas Patocka dm_put_live_table_fast(md); 13238a57dfc6SChandra Seetharaman } 1324e522c039SMike Snitzer } 13258a57dfc6SChandra Seetharaman 13261da177e4SLinus Torvalds return r; 13271da177e4SLinus Torvalds } 13281da177e4SLinus Torvalds 13291da177e4SLinus Torvalds /*----------------------------------------------------------------- 13301da177e4SLinus Torvalds * An IDR is used to keep track of allocated minor numbers. 13311da177e4SLinus Torvalds *---------------------------------------------------------------*/ 13322b06cfffSAlasdair G Kergon static void free_minor(int minor) 13331da177e4SLinus Torvalds { 1334f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 13351da177e4SLinus Torvalds idr_remove(&_minor_idr, minor); 1336f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 13371da177e4SLinus Torvalds } 13381da177e4SLinus Torvalds 13391da177e4SLinus Torvalds /* 13401da177e4SLinus Torvalds * See if the device with a specific minor # is free. 
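 *
 * Both allocators below use the same idr pattern: preallocate with
 * idr_preload() while sleeping is still allowed, then perform the
 * actual allocation under the _minor_lock spinlock with GFP_NOWAIT:
 *
 *	idr_preload(GFP_KERNEL);	// may sleep; lock not yet held
 *	spin_lock(&_minor_lock);
 *	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);
 *	spin_unlock(&_minor_lock);
 *	idr_preload_end();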
13411da177e4SLinus Torvalds */ 1342cf13ab8eSFrederik Deweerdt static int specific_minor(int minor) 13431da177e4SLinus Torvalds { 1344c9d76be6STejun Heo int r; 13451da177e4SLinus Torvalds 13461da177e4SLinus Torvalds if (minor >= (1 << MINORBITS)) 13471da177e4SLinus Torvalds return -EINVAL; 13481da177e4SLinus Torvalds 1349c9d76be6STejun Heo idr_preload(GFP_KERNEL); 1350f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 13511da177e4SLinus Torvalds 1352c9d76be6STejun Heo r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT); 13531da177e4SLinus Torvalds 1354f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 1355c9d76be6STejun Heo idr_preload_end(); 1356c9d76be6STejun Heo if (r < 0) 1357c9d76be6STejun Heo return r == -ENOSPC ? -EBUSY : r; 1358c9d76be6STejun Heo return 0; 13591da177e4SLinus Torvalds } 13601da177e4SLinus Torvalds 1361cf13ab8eSFrederik Deweerdt static int next_free_minor(int *minor) 13621da177e4SLinus Torvalds { 1363c9d76be6STejun Heo int r; 13641da177e4SLinus Torvalds 1365c9d76be6STejun Heo idr_preload(GFP_KERNEL); 1366f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 13671da177e4SLinus Torvalds 1368c9d76be6STejun Heo r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT); 13691da177e4SLinus Torvalds 1370f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 1371c9d76be6STejun Heo idr_preload_end(); 1372c9d76be6STejun Heo if (r < 0) 13731da177e4SLinus Torvalds return r; 1374c9d76be6STejun Heo *minor = r; 1375c9d76be6STejun Heo return 0; 13761da177e4SLinus Torvalds } 13771da177e4SLinus Torvalds 137883d5cde4SAlexey Dobriyan static const struct block_device_operations dm_blk_dops; 13791da177e4SLinus Torvalds 138053d5914fSMikulas Patocka static void dm_wq_work(struct work_struct *work); 138153d5914fSMikulas Patocka 13824cc96131SMike Snitzer void dm_init_md_queue(struct mapped_device *md) 13834a0b4ddfSMike Snitzer { 13844a0b4ddfSMike Snitzer /* 13854a0b4ddfSMike Snitzer * Request-based dm devices cannot be stacked on top of bio-based dm 1386bfebd1cdSMike Snitzer * devices. The type of this dm device may not have been decided yet. 13874a0b4ddfSMike Snitzer * The type is decided at the first table loading time. 13884a0b4ddfSMike Snitzer * To prevent problematic device stacking, clear the queue flag 13894a0b4ddfSMike Snitzer * for request stacking support until then. 13904a0b4ddfSMike Snitzer * 13914a0b4ddfSMike Snitzer * This queue is new, so no concurrency on the queue_flags. 
13924a0b4ddfSMike Snitzer */ 13934a0b4ddfSMike Snitzer queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue); 1394ad5f498fSMikulas Patocka 1395ad5f498fSMikulas Patocka /* 1396ad5f498fSMikulas Patocka * Initialize data that will only be used by a non-blk-mq DM queue 1397ad5f498fSMikulas Patocka * - must do so here (in alloc_dev callchain) before queue is used 1398ad5f498fSMikulas Patocka */ 1399ad5f498fSMikulas Patocka md->queue->queuedata = md; 1400ad5f498fSMikulas Patocka md->queue->backing_dev_info.congested_data = md; 1401bfebd1cdSMike Snitzer } 14024a0b4ddfSMike Snitzer 14034cc96131SMike Snitzer void dm_init_normal_md_queue(struct mapped_device *md) 1404bfebd1cdSMike Snitzer { 140517e149b8SMike Snitzer md->use_blk_mq = false; 1406bfebd1cdSMike Snitzer dm_init_md_queue(md); 1407bfebd1cdSMike Snitzer 1408bfebd1cdSMike Snitzer /* 1409bfebd1cdSMike Snitzer * Initialize aspects of queue that aren't relevant for blk-mq 1410bfebd1cdSMike Snitzer */ 14114a0b4ddfSMike Snitzer md->queue->backing_dev_info.congested_fn = dm_any_congested; 14124a0b4ddfSMike Snitzer blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY); 14134a0b4ddfSMike Snitzer } 14144a0b4ddfSMike Snitzer 14150f20972fSMike Snitzer static void cleanup_mapped_device(struct mapped_device *md) 14160f20972fSMike Snitzer { 14170f20972fSMike Snitzer if (md->wq) 14180f20972fSMike Snitzer destroy_workqueue(md->wq); 14190f20972fSMike Snitzer if (md->kworker_task) 14200f20972fSMike Snitzer kthread_stop(md->kworker_task); 14210f20972fSMike Snitzer mempool_destroy(md->io_pool); 14220f20972fSMike Snitzer mempool_destroy(md->rq_pool); 14230f20972fSMike Snitzer if (md->bs) 14240f20972fSMike Snitzer bioset_free(md->bs); 14250f20972fSMike Snitzer 1426b06075a9SMikulas Patocka cleanup_srcu_struct(&md->io_barrier); 1427b06075a9SMikulas Patocka 14280f20972fSMike Snitzer if (md->disk) { 14290f20972fSMike Snitzer spin_lock(&_minor_lock); 14300f20972fSMike Snitzer md->disk->private_data = NULL; 14310f20972fSMike Snitzer spin_unlock(&_minor_lock); 14320f20972fSMike Snitzer del_gendisk(md->disk); 14330f20972fSMike Snitzer put_disk(md->disk); 14340f20972fSMike Snitzer } 14350f20972fSMike Snitzer 14360f20972fSMike Snitzer if (md->queue) 14370f20972fSMike Snitzer blk_cleanup_queue(md->queue); 14380f20972fSMike Snitzer 14390f20972fSMike Snitzer if (md->bdev) { 14400f20972fSMike Snitzer bdput(md->bdev); 14410f20972fSMike Snitzer md->bdev = NULL; 14420f20972fSMike Snitzer } 14434cc96131SMike Snitzer 14444cc96131SMike Snitzer dm_mq_cleanup_mapped_device(md); 14450f20972fSMike Snitzer } 14460f20972fSMike Snitzer 14471da177e4SLinus Torvalds /* 14481da177e4SLinus Torvalds * Allocate and initialise a blank device with a given minor. 
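 *
 * Failure paths unwind through the labels at the bottom. Everything
 * allocated after the SRCU barrier simply uses "goto bad", because
 * cleanup_mapped_device() checks each member for NULL before tearing
 * it down, e.g.:
 *
 *	md->disk = alloc_disk_node(1, numa_node_id);
 *	if (!md->disk)
 *		goto bad;	// a partially built md is safe to clean up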
14491da177e4SLinus Torvalds */ 14502b06cfffSAlasdair G Kergon static struct mapped_device *alloc_dev(int minor) 14511da177e4SLinus Torvalds { 1452115485e8SMike Snitzer int r, numa_node_id = dm_get_numa_node(); 1453115485e8SMike Snitzer struct mapped_device *md; 1454ba61fdd1SJeff Mahoney void *old_md; 14551da177e4SLinus Torvalds 1456115485e8SMike Snitzer md = kzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id); 14571da177e4SLinus Torvalds if (!md) { 14581da177e4SLinus Torvalds DMWARN("unable to allocate device, out of memory."); 14591da177e4SLinus Torvalds return NULL; 14601da177e4SLinus Torvalds } 14611da177e4SLinus Torvalds 146210da4f79SJeff Mahoney if (!try_module_get(THIS_MODULE)) 14636ed7ade8SMilan Broz goto bad_module_get; 146410da4f79SJeff Mahoney 14651da177e4SLinus Torvalds /* get a minor number for the dev */ 14662b06cfffSAlasdair G Kergon if (minor == DM_ANY_MINOR) 1467cf13ab8eSFrederik Deweerdt r = next_free_minor(&minor); 14682b06cfffSAlasdair G Kergon else 1469cf13ab8eSFrederik Deweerdt r = specific_minor(minor); 14701da177e4SLinus Torvalds if (r < 0) 14716ed7ade8SMilan Broz goto bad_minor; 14721da177e4SLinus Torvalds 147383d5e5b0SMikulas Patocka r = init_srcu_struct(&md->io_barrier); 147483d5e5b0SMikulas Patocka if (r < 0) 147583d5e5b0SMikulas Patocka goto bad_io_barrier; 147683d5e5b0SMikulas Patocka 1477115485e8SMike Snitzer md->numa_node_id = numa_node_id; 14784cc96131SMike Snitzer md->use_blk_mq = dm_use_blk_mq_default(); 1479591ddcfcSMike Snitzer md->init_tio_pdu = false; 1480a5664dadSMike Snitzer md->type = DM_TYPE_NONE; 1481e61290a4SDaniel Walker mutex_init(&md->suspend_lock); 1482a5664dadSMike Snitzer mutex_init(&md->type_lock); 148386f1152bSBenjamin Marzinski mutex_init(&md->table_devices_lock); 1484022c2611SMikulas Patocka spin_lock_init(&md->deferred_lock); 14851da177e4SLinus Torvalds atomic_set(&md->holders, 1); 14865c6bd75dSAlasdair G Kergon atomic_set(&md->open_count, 0); 14871da177e4SLinus Torvalds atomic_set(&md->event_nr, 0); 14887a8c3d3bSMike Anderson atomic_set(&md->uevent_seq, 0); 14897a8c3d3bSMike Anderson INIT_LIST_HEAD(&md->uevent_list); 149086f1152bSBenjamin Marzinski INIT_LIST_HEAD(&md->table_devices); 14917a8c3d3bSMike Anderson spin_lock_init(&md->uevent_lock); 14921da177e4SLinus Torvalds 1493115485e8SMike Snitzer md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id); 14941da177e4SLinus Torvalds if (!md->queue) 14950f20972fSMike Snitzer goto bad; 14961da177e4SLinus Torvalds 14974a0b4ddfSMike Snitzer dm_init_md_queue(md); 14989faf400fSStefan Bader 1499115485e8SMike Snitzer md->disk = alloc_disk_node(1, numa_node_id); 15001da177e4SLinus Torvalds if (!md->disk) 15010f20972fSMike Snitzer goto bad; 15021da177e4SLinus Torvalds 1503316d315bSNikanth Karthikesan atomic_set(&md->pending[0], 0); 1504316d315bSNikanth Karthikesan atomic_set(&md->pending[1], 0); 1505f0b04115SJeff Mahoney init_waitqueue_head(&md->wait); 150653d5914fSMikulas Patocka INIT_WORK(&md->work, dm_wq_work); 1507f0b04115SJeff Mahoney init_waitqueue_head(&md->eventq); 15082995fa78SMikulas Patocka init_completion(&md->kobj_holder.completion); 15092eb6e1e3SKeith Busch md->kworker_task = NULL; 1510f0b04115SJeff Mahoney 15111da177e4SLinus Torvalds md->disk->major = _major; 15121da177e4SLinus Torvalds md->disk->first_minor = minor; 15131da177e4SLinus Torvalds md->disk->fops = &dm_blk_dops; 15141da177e4SLinus Torvalds md->disk->queue = md->queue; 15151da177e4SLinus Torvalds md->disk->private_data = md; 15161da177e4SLinus Torvalds sprintf(md->disk->disk_name, "dm-%d", minor); 15171da177e4SLinus 
Torvalds add_disk(md->disk); 15187e51f257SMike Anderson format_dev_t(md->name, MKDEV(_major, minor)); 15191da177e4SLinus Torvalds 1520670368a8STejun Heo md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0); 1521304f3f6aSMilan Broz if (!md->wq) 15220f20972fSMike Snitzer goto bad; 1523304f3f6aSMilan Broz 152432a926daSMikulas Patocka md->bdev = bdget_disk(md->disk, 0); 152532a926daSMikulas Patocka if (!md->bdev) 15260f20972fSMike Snitzer goto bad; 152732a926daSMikulas Patocka 15286a8736d1STejun Heo bio_init(&md->flush_bio); 15296a8736d1STejun Heo md->flush_bio.bi_bdev = md->bdev; 1530e6047149SMike Christie bio_set_op_attrs(&md->flush_bio, REQ_OP_WRITE, WRITE_FLUSH); 15316a8736d1STejun Heo 1532fd2ed4d2SMikulas Patocka dm_stats_init(&md->stats); 1533fd2ed4d2SMikulas Patocka 1534ba61fdd1SJeff Mahoney /* Populate the mapping, nobody knows we exist yet */ 1535f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 1536ba61fdd1SJeff Mahoney old_md = idr_replace(&_minor_idr, md, minor); 1537f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 1538ba61fdd1SJeff Mahoney 1539ba61fdd1SJeff Mahoney BUG_ON(old_md != MINOR_ALLOCED); 1540ba61fdd1SJeff Mahoney 15411da177e4SLinus Torvalds return md; 15421da177e4SLinus Torvalds 15430f20972fSMike Snitzer bad: 15440f20972fSMike Snitzer cleanup_mapped_device(md); 154583d5e5b0SMikulas Patocka bad_io_barrier: 15461da177e4SLinus Torvalds free_minor(minor); 15476ed7ade8SMilan Broz bad_minor: 154810da4f79SJeff Mahoney module_put(THIS_MODULE); 15496ed7ade8SMilan Broz bad_module_get: 15501da177e4SLinus Torvalds kfree(md); 15511da177e4SLinus Torvalds return NULL; 15521da177e4SLinus Torvalds } 15531da177e4SLinus Torvalds 1554ae9da83fSJun'ichi Nomura static void unlock_fs(struct mapped_device *md); 1555ae9da83fSJun'ichi Nomura 15561da177e4SLinus Torvalds static void free_dev(struct mapped_device *md) 15571da177e4SLinus Torvalds { 1558f331c029STejun Heo int minor = MINOR(disk_devt(md->disk)); 155963d94e48SJun'ichi Nomura 1560ae9da83fSJun'ichi Nomura unlock_fs(md); 15612eb6e1e3SKeith Busch 15620f20972fSMike Snitzer cleanup_mapped_device(md); 15630f20972fSMike Snitzer 15640f20972fSMike Snitzer free_table_devices(&md->table_devices); 15650f20972fSMike Snitzer dm_stats_cleanup(&md->stats); 156663a4f065SMike Snitzer free_minor(minor); 156763a4f065SMike Snitzer 156810da4f79SJeff Mahoney module_put(THIS_MODULE); 15691da177e4SLinus Torvalds kfree(md); 15701da177e4SLinus Torvalds } 15711da177e4SLinus Torvalds 1572e6ee8c0bSKiyoshi Ueda static void __bind_mempools(struct mapped_device *md, struct dm_table *t) 1573e6ee8c0bSKiyoshi Ueda { 1574c0820cf5SMikulas Patocka struct dm_md_mempools *p = dm_table_get_md_mempools(t); 1575e6ee8c0bSKiyoshi Ueda 15764e6e36c3SMike Snitzer if (md->bs) { 15774e6e36c3SMike Snitzer /* The md already has necessary mempools. */ 1578545ed20eSToshi Kani if (dm_table_bio_based(t)) { 1579c0820cf5SMikulas Patocka /* 158016245bdcSJun'ichi Nomura * Reload bioset because front_pad may have changed 158116245bdcSJun'ichi Nomura * because a different table was loaded. 1582c0820cf5SMikulas Patocka */ 1583c0820cf5SMikulas Patocka bioset_free(md->bs); 1584c0820cf5SMikulas Patocka md->bs = p->bs; 1585c0820cf5SMikulas Patocka p->bs = NULL; 1586c0820cf5SMikulas Patocka } 1587cbc4e3c1SMike Snitzer /* 15884e6e36c3SMike Snitzer * There's no need to reload with request-based dm 15894e6e36c3SMike Snitzer * because the size of front_pad doesn't change. 
15904e6e36c3SMike Snitzer * Note for future: If you are to reload bioset, 15914e6e36c3SMike Snitzer * prep-ed requests in the queue may refer 15924e6e36c3SMike Snitzer * to bio from the old bioset, so you must walk 15934e6e36c3SMike Snitzer * through the queue to unprep. 1594cbc4e3c1SMike Snitzer */ 1595cbc4e3c1SMike Snitzer goto out; 1596cbc4e3c1SMike Snitzer } 1597cbc4e3c1SMike Snitzer 1598cbc4e3c1SMike Snitzer BUG_ON(!p || md->io_pool || md->rq_pool || md->bs); 1599e6ee8c0bSKiyoshi Ueda 1600e6ee8c0bSKiyoshi Ueda md->io_pool = p->io_pool; 1601e6ee8c0bSKiyoshi Ueda p->io_pool = NULL; 16021ae49ea2SMike Snitzer md->rq_pool = p->rq_pool; 16031ae49ea2SMike Snitzer p->rq_pool = NULL; 1604e6ee8c0bSKiyoshi Ueda md->bs = p->bs; 1605e6ee8c0bSKiyoshi Ueda p->bs = NULL; 16064e6e36c3SMike Snitzer 1607e6ee8c0bSKiyoshi Ueda out: 160802233342SMike Snitzer /* mempool bind completed, no longer need any mempools in the table */ 1609e6ee8c0bSKiyoshi Ueda dm_table_free_md_mempools(t); 1610e6ee8c0bSKiyoshi Ueda } 1611e6ee8c0bSKiyoshi Ueda 16121da177e4SLinus Torvalds /* 16131da177e4SLinus Torvalds * Bind a table to the device. 16141da177e4SLinus Torvalds */ 16151da177e4SLinus Torvalds static void event_callback(void *context) 16161da177e4SLinus Torvalds { 16177a8c3d3bSMike Anderson unsigned long flags; 16187a8c3d3bSMike Anderson LIST_HEAD(uevents); 16191da177e4SLinus Torvalds struct mapped_device *md = (struct mapped_device *) context; 16201da177e4SLinus Torvalds 16217a8c3d3bSMike Anderson spin_lock_irqsave(&md->uevent_lock, flags); 16227a8c3d3bSMike Anderson list_splice_init(&md->uevent_list, &uevents); 16237a8c3d3bSMike Anderson spin_unlock_irqrestore(&md->uevent_lock, flags); 16247a8c3d3bSMike Anderson 1625ed9e1982STejun Heo dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); 16267a8c3d3bSMike Anderson 16271da177e4SLinus Torvalds atomic_inc(&md->event_nr); 16281da177e4SLinus Torvalds wake_up(&md->eventq); 16291da177e4SLinus Torvalds } 16301da177e4SLinus Torvalds 1631c217649bSMike Snitzer /* 1632c217649bSMike Snitzer * Protected by md->suspend_lock obtained by dm_swap_table(). 1633c217649bSMike Snitzer */ 16344e90188bSAlasdair G Kergon static void __set_size(struct mapped_device *md, sector_t size) 16351da177e4SLinus Torvalds { 16364e90188bSAlasdair G Kergon set_capacity(md->disk, size); 16371da177e4SLinus Torvalds 1638db8fef4fSMikulas Patocka i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT); 16391da177e4SLinus Torvalds } 16401da177e4SLinus Torvalds 1641042d2a9bSAlasdair G Kergon /* 1642042d2a9bSAlasdair G Kergon * Returns old map, which caller must destroy. 1643042d2a9bSAlasdair G Kergon */ 1644042d2a9bSAlasdair G Kergon static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, 1645754c5fc7SMike Snitzer struct queue_limits *limits) 16461da177e4SLinus Torvalds { 1647042d2a9bSAlasdair G Kergon struct dm_table *old_map; 1648165125e1SJens Axboe struct request_queue *q = md->queue; 16491da177e4SLinus Torvalds sector_t size; 16501da177e4SLinus Torvalds 16511da177e4SLinus Torvalds size = dm_table_get_size(t); 16523ac51e74SDarrick J. Wong 16533ac51e74SDarrick J. Wong /* 16543ac51e74SDarrick J. Wong * Wipe any geometry if the size of the table changed. 16553ac51e74SDarrick J. Wong */ 1656fd2ed4d2SMikulas Patocka if (size != dm_get_size(md)) 16573ac51e74SDarrick J. Wong memset(&md->geometry, 0, sizeof(md->geometry)); 16583ac51e74SDarrick J. 
Wong 16594e90188bSAlasdair G Kergon __set_size(md, size); 16601da177e4SLinus Torvalds 1661cf222b37SAlasdair G Kergon dm_table_event_callback(t, event_callback, md); 16622ca3310eSAlasdair G Kergon 1663e6ee8c0bSKiyoshi Ueda /* 1664e6ee8c0bSKiyoshi Ueda * The queue hasn't been stopped yet, if the old table type wasn't 1665e6ee8c0bSKiyoshi Ueda * for request-based during suspension. So stop it to prevent 1666e6ee8c0bSKiyoshi Ueda * I/O mapping before resume. 1667e6ee8c0bSKiyoshi Ueda * This must be done before setting the queue restrictions, 1668e6ee8c0bSKiyoshi Ueda * because request-based dm may be run just after the setting. 1669e6ee8c0bSKiyoshi Ueda */ 167016f12266SMike Snitzer if (dm_table_request_based(t)) { 1671eca7ee6dSMike Snitzer dm_stop_queue(q); 167216f12266SMike Snitzer /* 167316f12266SMike Snitzer * Leverage the fact that request-based DM targets are 167416f12266SMike Snitzer * immutable singletons and establish md->immutable_target 167516f12266SMike Snitzer * - used to optimize both dm_request_fn and dm_mq_queue_rq 167616f12266SMike Snitzer */ 167716f12266SMike Snitzer md->immutable_target = dm_table_get_immutable_target(t); 167816f12266SMike Snitzer } 1679e6ee8c0bSKiyoshi Ueda 1680e6ee8c0bSKiyoshi Ueda __bind_mempools(md, t); 1681e6ee8c0bSKiyoshi Ueda 1682a12f5d48SEric Dumazet old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 16831d3aa6f6SMike Snitzer rcu_assign_pointer(md->map, (void *)t); 168436a0456fSAlasdair G Kergon md->immutable_target_type = dm_table_get_immutable_target_type(t); 168536a0456fSAlasdair G Kergon 1686754c5fc7SMike Snitzer dm_table_set_restrictions(t, q, limits); 168741abc4e1SHannes Reinecke if (old_map) 168883d5e5b0SMikulas Patocka dm_sync_table(md); 16892ca3310eSAlasdair G Kergon 1690042d2a9bSAlasdair G Kergon return old_map; 16911da177e4SLinus Torvalds } 16921da177e4SLinus Torvalds 1693a7940155SAlasdair G Kergon /* 1694a7940155SAlasdair G Kergon * Returns unbound table for the caller to free. 1695a7940155SAlasdair G Kergon */ 1696a7940155SAlasdair G Kergon static struct dm_table *__unbind(struct mapped_device *md) 16971da177e4SLinus Torvalds { 1698a12f5d48SEric Dumazet struct dm_table *map = rcu_dereference_protected(md->map, 1); 16991da177e4SLinus Torvalds 17001da177e4SLinus Torvalds if (!map) 1701a7940155SAlasdair G Kergon return NULL; 17021da177e4SLinus Torvalds 17031da177e4SLinus Torvalds dm_table_event_callback(map, NULL, NULL); 17049cdb8520SMonam Agarwal RCU_INIT_POINTER(md->map, NULL); 170583d5e5b0SMikulas Patocka dm_sync_table(md); 1706a7940155SAlasdair G Kergon 1707a7940155SAlasdair G Kergon return map; 17081da177e4SLinus Torvalds } 17091da177e4SLinus Torvalds 17101da177e4SLinus Torvalds /* 17111da177e4SLinus Torvalds * Constructor for a new device. 17121da177e4SLinus Torvalds */ 17132b06cfffSAlasdair G Kergon int dm_create(int minor, struct mapped_device **result) 17141da177e4SLinus Torvalds { 17151da177e4SLinus Torvalds struct mapped_device *md; 17161da177e4SLinus Torvalds 17172b06cfffSAlasdair G Kergon md = alloc_dev(minor); 17181da177e4SLinus Torvalds if (!md) 17191da177e4SLinus Torvalds return -ENXIO; 17201da177e4SLinus Torvalds 1721784aae73SMilan Broz dm_sysfs_init(md); 1722784aae73SMilan Broz 17231da177e4SLinus Torvalds *result = md; 17241da177e4SLinus Torvalds return 0; 17251da177e4SLinus Torvalds } 17261da177e4SLinus Torvalds 1727a5664dadSMike Snitzer /* 1728a5664dadSMike Snitzer * Functions to manage md->type. 1729a5664dadSMike Snitzer * All are required to hold md->type_lock. 
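 *
 * A typical caller (a simplified sketch of the table-load ioctl path)
 * brackets the check-then-set like this:
 *
 *	dm_lock_md_type(md);
 *	if (dm_get_md_type(md) == DM_TYPE_NONE)
 *		dm_set_md_type(md, dm_table_get_type(t));
 *	dm_unlock_md_type(md);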
1730a5664dadSMike Snitzer */ 1731a5664dadSMike Snitzer void dm_lock_md_type(struct mapped_device *md) 1732a5664dadSMike Snitzer { 1733a5664dadSMike Snitzer mutex_lock(&md->type_lock); 1734a5664dadSMike Snitzer } 1735a5664dadSMike Snitzer 1736a5664dadSMike Snitzer void dm_unlock_md_type(struct mapped_device *md) 1737a5664dadSMike Snitzer { 1738a5664dadSMike Snitzer mutex_unlock(&md->type_lock); 1739a5664dadSMike Snitzer } 1740a5664dadSMike Snitzer 1741a5664dadSMike Snitzer void dm_set_md_type(struct mapped_device *md, unsigned type) 1742a5664dadSMike Snitzer { 174300c4fc3bSMike Snitzer BUG_ON(!mutex_is_locked(&md->type_lock)); 1744a5664dadSMike Snitzer md->type = type; 1745a5664dadSMike Snitzer } 1746a5664dadSMike Snitzer 1747a5664dadSMike Snitzer unsigned dm_get_md_type(struct mapped_device *md) 1748a5664dadSMike Snitzer { 1749a5664dadSMike Snitzer return md->type; 1750a5664dadSMike Snitzer } 1751a5664dadSMike Snitzer 175236a0456fSAlasdair G Kergon struct target_type *dm_get_immutable_target_type(struct mapped_device *md) 175336a0456fSAlasdair G Kergon { 175436a0456fSAlasdair G Kergon return md->immutable_target_type; 175536a0456fSAlasdair G Kergon } 175636a0456fSAlasdair G Kergon 17574a0b4ddfSMike Snitzer /* 1758f84cb8a4SMike Snitzer * The queue_limits are only valid as long as you have a reference 1759f84cb8a4SMike Snitzer * count on 'md'. 1760f84cb8a4SMike Snitzer */ 1761f84cb8a4SMike Snitzer struct queue_limits *dm_get_queue_limits(struct mapped_device *md) 1762f84cb8a4SMike Snitzer { 1763f84cb8a4SMike Snitzer BUG_ON(!atomic_read(&md->holders)); 1764f84cb8a4SMike Snitzer return &md->queue->limits; 1765f84cb8a4SMike Snitzer } 1766f84cb8a4SMike Snitzer EXPORT_SYMBOL_GPL(dm_get_queue_limits); 1767f84cb8a4SMike Snitzer 17684a0b4ddfSMike Snitzer /* 17694a0b4ddfSMike Snitzer * Setup the DM device's queue based on md's type 17704a0b4ddfSMike Snitzer */ 1771591ddcfcSMike Snitzer int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) 17724a0b4ddfSMike Snitzer { 1773bfebd1cdSMike Snitzer int r; 1774545ed20eSToshi Kani unsigned type = dm_get_md_type(md); 1775bfebd1cdSMike Snitzer 1776545ed20eSToshi Kani switch (type) { 1777bfebd1cdSMike Snitzer case DM_TYPE_REQUEST_BASED: 1778eca7ee6dSMike Snitzer r = dm_old_init_request_queue(md); 1779bfebd1cdSMike Snitzer if (r) { 1780eca7ee6dSMike Snitzer DMERR("Cannot initialize queue for request-based mapped device"); 1781bfebd1cdSMike Snitzer return r; 17824a0b4ddfSMike Snitzer } 1783bfebd1cdSMike Snitzer break; 1784bfebd1cdSMike Snitzer case DM_TYPE_MQ_REQUEST_BASED: 1785e83068a5SMike Snitzer r = dm_mq_init_request_queue(md, t); 1786bfebd1cdSMike Snitzer if (r) { 1787eca7ee6dSMike Snitzer DMERR("Cannot initialize queue for request-based dm-mq mapped device"); 1788bfebd1cdSMike Snitzer return r; 1789bfebd1cdSMike Snitzer } 1790bfebd1cdSMike Snitzer break; 1791bfebd1cdSMike Snitzer case DM_TYPE_BIO_BASED: 1792545ed20eSToshi Kani case DM_TYPE_DAX_BIO_BASED: 1793eca7ee6dSMike Snitzer dm_init_normal_md_queue(md); 1794ff36ab34SMike Snitzer blk_queue_make_request(md->queue, dm_make_request); 1795dbba42d8SMikulas Patocka /* 1796dbba42d8SMikulas Patocka * DM handles splitting bios as needed. Free the bio_split bioset 1797dbba42d8SMikulas Patocka * since it won't be used (saves 1 process per bio-based DM device). 
1798dbba42d8SMikulas Patocka */ 1799dbba42d8SMikulas Patocka bioset_free(md->queue->bio_split); 1800dbba42d8SMikulas Patocka md->queue->bio_split = NULL; 1801545ed20eSToshi Kani 1802545ed20eSToshi Kani if (type == DM_TYPE_DAX_BIO_BASED) 1803545ed20eSToshi Kani queue_flag_set_unlocked(QUEUE_FLAG_DAX, md->queue); 1804bfebd1cdSMike Snitzer break; 1805ff36ab34SMike Snitzer } 18064a0b4ddfSMike Snitzer 18074a0b4ddfSMike Snitzer return 0; 18084a0b4ddfSMike Snitzer } 18094a0b4ddfSMike Snitzer 18102bec1f4aSMikulas Patocka struct mapped_device *dm_get_md(dev_t dev) 18111da177e4SLinus Torvalds { 18121da177e4SLinus Torvalds struct mapped_device *md; 18131da177e4SLinus Torvalds unsigned minor = MINOR(dev); 18141da177e4SLinus Torvalds 18151da177e4SLinus Torvalds if (MAJOR(dev) != _major || minor >= (1 << MINORBITS)) 18161da177e4SLinus Torvalds return NULL; 18171da177e4SLinus Torvalds 1818f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 18191da177e4SLinus Torvalds 18201da177e4SLinus Torvalds md = idr_find(&_minor_idr, minor); 18212bec1f4aSMikulas Patocka if (md) { 18222bec1f4aSMikulas Patocka if ((md == MINOR_ALLOCED || 1823f331c029STejun Heo (MINOR(disk_devt(dm_disk(md))) != minor) || 1824abdc568bSKiyoshi Ueda dm_deleting_md(md) || 1825fba9f90eSJeff Mahoney test_bit(DMF_FREEING, &md->flags))) { 1826637842cfSDavid Teigland md = NULL; 1827fba9f90eSJeff Mahoney goto out; 1828fba9f90eSJeff Mahoney } 18292bec1f4aSMikulas Patocka dm_get(md); 18302bec1f4aSMikulas Patocka } 18311da177e4SLinus Torvalds 1832fba9f90eSJeff Mahoney out: 1833f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 18341da177e4SLinus Torvalds 1835637842cfSDavid Teigland return md; 1836637842cfSDavid Teigland } 18373cf2e4baSAlasdair G Kergon EXPORT_SYMBOL_GPL(dm_get_md); 1838d229a958SDavid Teigland 18399ade92a9SAlasdair G Kergon void *dm_get_mdptr(struct mapped_device *md) 1840637842cfSDavid Teigland { 18419ade92a9SAlasdair G Kergon return md->interface_ptr; 18421da177e4SLinus Torvalds } 18431da177e4SLinus Torvalds 18441da177e4SLinus Torvalds void dm_set_mdptr(struct mapped_device *md, void *ptr) 18451da177e4SLinus Torvalds { 18461da177e4SLinus Torvalds md->interface_ptr = ptr; 18471da177e4SLinus Torvalds } 18481da177e4SLinus Torvalds 18491da177e4SLinus Torvalds void dm_get(struct mapped_device *md) 18501da177e4SLinus Torvalds { 18511da177e4SLinus Torvalds atomic_inc(&md->holders); 18523f77316dSKiyoshi Ueda BUG_ON(test_bit(DMF_FREEING, &md->flags)); 18531da177e4SLinus Torvalds } 18541da177e4SLinus Torvalds 185509ee96b2SMikulas Patocka int dm_hold(struct mapped_device *md) 185609ee96b2SMikulas Patocka { 185709ee96b2SMikulas Patocka spin_lock(&_minor_lock); 185809ee96b2SMikulas Patocka if (test_bit(DMF_FREEING, &md->flags)) { 185909ee96b2SMikulas Patocka spin_unlock(&_minor_lock); 186009ee96b2SMikulas Patocka return -EBUSY; 186109ee96b2SMikulas Patocka } 186209ee96b2SMikulas Patocka dm_get(md); 186309ee96b2SMikulas Patocka spin_unlock(&_minor_lock); 186409ee96b2SMikulas Patocka return 0; 186509ee96b2SMikulas Patocka } 186609ee96b2SMikulas Patocka EXPORT_SYMBOL_GPL(dm_hold); 186709ee96b2SMikulas Patocka 186872d94861SAlasdair G Kergon const char *dm_device_name(struct mapped_device *md) 186972d94861SAlasdair G Kergon { 187072d94861SAlasdair G Kergon return md->name; 187172d94861SAlasdair G Kergon } 187272d94861SAlasdair G Kergon EXPORT_SYMBOL_GPL(dm_device_name); 187372d94861SAlasdair G Kergon 18743f77316dSKiyoshi Ueda static void __dm_destroy(struct mapped_device *md, bool wait) 18751da177e4SLinus Torvalds { 18761134e5aeSMike Anderson struct 
dm_table *map;
187783d5e5b0SMikulas Patocka int srcu_idx;
18781da177e4SLinus Torvalds 
18793f77316dSKiyoshi Ueda might_sleep();
1880fba9f90eSJeff Mahoney 
188163a4f065SMike Snitzer spin_lock(&_minor_lock);
18823f77316dSKiyoshi Ueda idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
1883fba9f90eSJeff Mahoney set_bit(DMF_FREEING, &md->flags);
1884f32c10b0SJeff Mahoney spin_unlock(&_minor_lock);
18853f77316dSKiyoshi Ueda 
188602233342SMike Snitzer if (dm_request_based(md) && md->kworker_task)
18872eb6e1e3SKeith Busch flush_kthread_worker(&md->kworker);
18882eb6e1e3SKeith Busch 
1889ab7c7bb6SMikulas Patocka /*
1890ab7c7bb6SMikulas Patocka * Take suspend_lock so that presuspend and postsuspend methods
1891ab7c7bb6SMikulas Patocka * do not race with internal suspend.
1892ab7c7bb6SMikulas Patocka */
1893ab7c7bb6SMikulas Patocka mutex_lock(&md->suspend_lock);
18942a708cffSJunichi Nomura map = dm_get_live_table(md, &srcu_idx);
18954f186f8bSKiyoshi Ueda if (!dm_suspended_md(md)) {
18961da177e4SLinus Torvalds dm_table_presuspend_targets(map);
18971da177e4SLinus Torvalds dm_table_postsuspend_targets(map);
18981da177e4SLinus Torvalds }
189983d5e5b0SMikulas Patocka /* dm_put_live_table must be before msleep, otherwise deadlock is possible */
190083d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx);
19012a708cffSJunichi Nomura mutex_unlock(&md->suspend_lock);
190283d5e5b0SMikulas Patocka 
19033f77316dSKiyoshi Ueda /*
19043f77316dSKiyoshi Ueda * Rare, but there may be I/O requests still going to complete.
19053f77316dSKiyoshi Ueda * Wait for all references to disappear.
19063f77316dSKiyoshi Ueda * No one should increment the reference count of the mapped_device
19073f77316dSKiyoshi Ueda * after its state becomes DMF_FREEING.
19083f77316dSKiyoshi Ueda */
19093f77316dSKiyoshi Ueda if (wait)
19103f77316dSKiyoshi Ueda while (atomic_read(&md->holders))
19113f77316dSKiyoshi Ueda msleep(1);
19123f77316dSKiyoshi Ueda else if (atomic_read(&md->holders))
19133f77316dSKiyoshi Ueda DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
19143f77316dSKiyoshi Ueda dm_device_name(md), atomic_read(&md->holders));
19153f77316dSKiyoshi Ueda 
1916784aae73SMilan Broz dm_sysfs_exit(md);
1917a7940155SAlasdair G Kergon dm_table_destroy(__unbind(md));
19181da177e4SLinus Torvalds free_dev(md);
19191da177e4SLinus Torvalds }
19203f77316dSKiyoshi Ueda 
19213f77316dSKiyoshi Ueda void dm_destroy(struct mapped_device *md)
19223f77316dSKiyoshi Ueda {
19233f77316dSKiyoshi Ueda __dm_destroy(md, true);
19243f77316dSKiyoshi Ueda }
19253f77316dSKiyoshi Ueda 
19263f77316dSKiyoshi Ueda void dm_destroy_immediate(struct mapped_device *md)
19273f77316dSKiyoshi Ueda {
19283f77316dSKiyoshi Ueda __dm_destroy(md, false);
19293f77316dSKiyoshi Ueda }
19303f77316dSKiyoshi Ueda 
19313f77316dSKiyoshi Ueda void dm_put(struct mapped_device *md)
19323f77316dSKiyoshi Ueda {
19333f77316dSKiyoshi Ueda atomic_dec(&md->holders);
19341da177e4SLinus Torvalds }
193579eb885cSEdward Goggin EXPORT_SYMBOL_GPL(dm_put);
19361da177e4SLinus Torvalds 
1937401600dfSMikulas Patocka static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
193846125c1cSMilan Broz {
193946125c1cSMilan Broz int r = 0;
1940b44ebeb0SMikulas Patocka DECLARE_WAITQUEUE(wait, current);
1941b44ebeb0SMikulas Patocka 
1942b44ebeb0SMikulas Patocka add_wait_queue(&md->wait, &wait);
194346125c1cSMilan Broz 
194446125c1cSMilan Broz while (1) {
1945401600dfSMikulas Patocka set_current_state(interruptible);
194646125c1cSMilan Broz 
1947b4324feeSKiyoshi Ueda if (!md_in_flight(md))
194846125c1cSMilan Broz break;
194946125c1cSMilan Broz 
1950401600dfSMikulas Patocka if (interruptible == TASK_INTERRUPTIBLE &&
1951401600dfSMikulas Patocka signal_pending(current)) {
195246125c1cSMilan Broz r = -EINTR;
195346125c1cSMilan Broz break;
195446125c1cSMilan Broz }
195546125c1cSMilan Broz 
195646125c1cSMilan Broz io_schedule();
195746125c1cSMilan Broz }
195846125c1cSMilan Broz set_current_state(TASK_RUNNING);
195946125c1cSMilan Broz 
1960b44ebeb0SMikulas Patocka remove_wait_queue(&md->wait, &wait);
1961b44ebeb0SMikulas Patocka 
196246125c1cSMilan Broz return r;
196346125c1cSMilan Broz }
196446125c1cSMilan Broz 
19651da177e4SLinus Torvalds /*
19661da177e4SLinus Torvalds * Process the deferred bios
19671da177e4SLinus Torvalds */
1968ef208587SMikulas Patocka static void dm_wq_work(struct work_struct *work)
19691da177e4SLinus Torvalds {
1970ef208587SMikulas Patocka struct mapped_device *md = container_of(work, struct mapped_device,
1971ef208587SMikulas Patocka work);
19726d6f10dfSMilan Broz struct bio *c;
197383d5e5b0SMikulas Patocka int srcu_idx;
197483d5e5b0SMikulas Patocka struct dm_table *map;
19751da177e4SLinus Torvalds 
197683d5e5b0SMikulas Patocka map = dm_get_live_table(md, &srcu_idx);
1977ef208587SMikulas Patocka 
19783b00b203SMikulas Patocka while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
1979022c2611SMikulas Patocka spin_lock_irq(&md->deferred_lock);
1980022c2611SMikulas Patocka c = bio_list_pop(&md->deferred);
1981022c2611SMikulas Patocka spin_unlock_irq(&md->deferred_lock);
1982022c2611SMikulas Patocka 
19836a8736d1STejun Heo if (!c)
1984df12ee99SAlasdair G Kergon break;
198573d410c0SMilan Broz 
1986e6ee8c0bSKiyoshi Ueda if (dm_request_based(md))
1987e6ee8c0bSKiyoshi Ueda generic_make_request(c);
1988af7e466aSMikulas Patocka else
198983d5e5b0SMikulas Patocka __split_and_process_bio(md, map, c);
1990e6ee8c0bSKiyoshi Ueda }
19913b00b203SMikulas Patocka 
199283d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx);
19931da177e4SLinus Torvalds }
19941da177e4SLinus Torvalds 
19959a1fb464SMikulas
Patocka static void dm_queue_flush(struct mapped_device *md) 1996304f3f6aSMilan Broz { 19973b00b203SMikulas Patocka clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 19984e857c58SPeter Zijlstra smp_mb__after_atomic(); 199953d5914fSMikulas Patocka queue_work(md->wq, &md->work); 2000304f3f6aSMilan Broz } 2001304f3f6aSMilan Broz 20021da177e4SLinus Torvalds /* 2003042d2a9bSAlasdair G Kergon * Swap in a new table, returning the old one for the caller to destroy. 20041da177e4SLinus Torvalds */ 2005042d2a9bSAlasdair G Kergon struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) 20061da177e4SLinus Torvalds { 200787eb5b21SMike Christie struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL); 2008754c5fc7SMike Snitzer struct queue_limits limits; 2009042d2a9bSAlasdair G Kergon int r; 20101da177e4SLinus Torvalds 2011e61290a4SDaniel Walker mutex_lock(&md->suspend_lock); 20121da177e4SLinus Torvalds 20131da177e4SLinus Torvalds /* device must be suspended */ 20144f186f8bSKiyoshi Ueda if (!dm_suspended_md(md)) 201593c534aeSAlasdair G Kergon goto out; 20161da177e4SLinus Torvalds 20173ae70656SMike Snitzer /* 20183ae70656SMike Snitzer * If the new table has no data devices, retain the existing limits. 20193ae70656SMike Snitzer * This helps multipath with queue_if_no_path if all paths disappear, 20203ae70656SMike Snitzer * then new I/O is queued based on these limits, and then some paths 20213ae70656SMike Snitzer * reappear. 20223ae70656SMike Snitzer */ 20233ae70656SMike Snitzer if (dm_table_has_no_data_devices(table)) { 202483d5e5b0SMikulas Patocka live_map = dm_get_live_table_fast(md); 20253ae70656SMike Snitzer if (live_map) 20263ae70656SMike Snitzer limits = md->queue->limits; 202783d5e5b0SMikulas Patocka dm_put_live_table_fast(md); 20283ae70656SMike Snitzer } 20293ae70656SMike Snitzer 203087eb5b21SMike Christie if (!live_map) { 2031754c5fc7SMike Snitzer r = dm_calculate_queue_limits(table, &limits); 2032042d2a9bSAlasdair G Kergon if (r) { 2033042d2a9bSAlasdair G Kergon map = ERR_PTR(r); 2034754c5fc7SMike Snitzer goto out; 2035042d2a9bSAlasdair G Kergon } 203687eb5b21SMike Christie } 2037754c5fc7SMike Snitzer 2038042d2a9bSAlasdair G Kergon map = __bind(md, table, &limits); 20391da177e4SLinus Torvalds 204093c534aeSAlasdair G Kergon out: 2041e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock); 2042042d2a9bSAlasdair G Kergon return map; 20431da177e4SLinus Torvalds } 20441da177e4SLinus Torvalds 20451da177e4SLinus Torvalds /* 20461da177e4SLinus Torvalds * Functions to lock and unlock any filesystem running on the 20471da177e4SLinus Torvalds * device. 
20481da177e4SLinus Torvalds */ 20492ca3310eSAlasdair G Kergon static int lock_fs(struct mapped_device *md) 20501da177e4SLinus Torvalds { 2051e39e2e95SAlasdair G Kergon int r; 20521da177e4SLinus Torvalds 20531da177e4SLinus Torvalds WARN_ON(md->frozen_sb); 2054dfbe03f6SAlasdair G Kergon 2055db8fef4fSMikulas Patocka md->frozen_sb = freeze_bdev(md->bdev); 2056dfbe03f6SAlasdair G Kergon if (IS_ERR(md->frozen_sb)) { 2057cf222b37SAlasdair G Kergon r = PTR_ERR(md->frozen_sb); 2058e39e2e95SAlasdair G Kergon md->frozen_sb = NULL; 2059e39e2e95SAlasdair G Kergon return r; 2060dfbe03f6SAlasdair G Kergon } 2061dfbe03f6SAlasdair G Kergon 2062aa8d7c2fSAlasdair G Kergon set_bit(DMF_FROZEN, &md->flags); 2063aa8d7c2fSAlasdair G Kergon 20641da177e4SLinus Torvalds return 0; 20651da177e4SLinus Torvalds } 20661da177e4SLinus Torvalds 20672ca3310eSAlasdair G Kergon static void unlock_fs(struct mapped_device *md) 20681da177e4SLinus Torvalds { 2069aa8d7c2fSAlasdair G Kergon if (!test_bit(DMF_FROZEN, &md->flags)) 2070aa8d7c2fSAlasdair G Kergon return; 2071aa8d7c2fSAlasdair G Kergon 2072db8fef4fSMikulas Patocka thaw_bdev(md->bdev, md->frozen_sb); 20731da177e4SLinus Torvalds md->frozen_sb = NULL; 2074aa8d7c2fSAlasdair G Kergon clear_bit(DMF_FROZEN, &md->flags); 20751da177e4SLinus Torvalds } 20761da177e4SLinus Torvalds 20771da177e4SLinus Torvalds /* 2078ffcc3936SMike Snitzer * If __dm_suspend returns 0, the device is completely quiescent 2079ffcc3936SMike Snitzer * now. There is no request-processing activity. All new requests 2080ffcc3936SMike Snitzer * are being added to md->deferred list. 2081cec47e3dSKiyoshi Ueda * 2082ffcc3936SMike Snitzer * Caller must hold md->suspend_lock 2083cec47e3dSKiyoshi Ueda */ 2084ffcc3936SMike Snitzer static int __dm_suspend(struct mapped_device *md, struct dm_table *map, 2085*eaf9a736SMike Snitzer unsigned suspend_flags, int interruptible, 2086*eaf9a736SMike Snitzer int dmf_suspended_flag) 20871da177e4SLinus Torvalds { 2088ffcc3936SMike Snitzer bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG; 2089ffcc3936SMike Snitzer bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG; 2090ffcc3936SMike Snitzer int r; 2091cf222b37SAlasdair G Kergon 20922e93ccc1SKiyoshi Ueda /* 20932e93ccc1SKiyoshi Ueda * DMF_NOFLUSH_SUSPENDING must be set before presuspend. 20942e93ccc1SKiyoshi Ueda * This flag is cleared before dm_suspend returns. 20952e93ccc1SKiyoshi Ueda */ 20962e93ccc1SKiyoshi Ueda if (noflush) 20972e93ccc1SKiyoshi Ueda set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 20982e93ccc1SKiyoshi Ueda 2099d67ee213SMike Snitzer /* 2100d67ee213SMike Snitzer * This gets reverted if there's an error later and the targets 2101d67ee213SMike Snitzer * provide the .presuspend_undo hook. 2102d67ee213SMike Snitzer */ 21031da177e4SLinus Torvalds dm_table_presuspend_targets(map); 21041da177e4SLinus Torvalds 21052e93ccc1SKiyoshi Ueda /* 21069f518b27SKiyoshi Ueda * Flush I/O to the device. 21079f518b27SKiyoshi Ueda * Any I/O submitted after lock_fs() may not be flushed. 21089f518b27SKiyoshi Ueda * noflush takes precedence over do_lockfs. 21099f518b27SKiyoshi Ueda * (lock_fs() flushes I/Os and waits for them to complete.) 
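 *
 * Concretely (illustrative): a default "dmsetup suspend" passes
 * DM_SUSPEND_LOCKFS_FLAG, so a mounted filesystem is frozen before we
 * wait; "dmsetup suspend --noflush" sets DM_SUSPEND_NOFLUSH_FLAG
 * instead, which skips lock_fs() so that targets such as multipath
 * can requeue in-flight I/O rather than flush it to the device.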
21102e93ccc1SKiyoshi Ueda */
211132a926daSMikulas Patocka if (!noflush && do_lockfs) {
21122ca3310eSAlasdair G Kergon r = lock_fs(md);
2113d67ee213SMike Snitzer if (r) {
2114d67ee213SMike Snitzer dm_table_presuspend_undo_targets(map);
2115ffcc3936SMike Snitzer return r;
2116aa8d7c2fSAlasdair G Kergon }
2117d67ee213SMike Snitzer }
21181da177e4SLinus Torvalds 
21191da177e4SLinus Torvalds /*
21203b00b203SMikulas Patocka * Here we must make sure that no processes are submitting requests
21213b00b203SMikulas Patocka * to target drivers i.e. no one may be executing
21223b00b203SMikulas Patocka * __split_and_process_bio. This is called from dm_request and
21233b00b203SMikulas Patocka * dm_wq_work.
21243b00b203SMikulas Patocka *
21253b00b203SMikulas Patocka * To get all processes out of __split_and_process_bio in dm_request,
21263b00b203SMikulas Patocka * we take the write lock. To prevent any process from reentering
21276a8736d1STejun Heo * __split_and_process_bio from dm_request and quiesce the thread
21286a8736d1STejun Heo * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
21296a8736d1STejun Heo * flush_workqueue(md->wq).
21301da177e4SLinus Torvalds */
21311eb787ecSAlasdair G Kergon set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
213241abc4e1SHannes Reinecke if (map)
213383d5e5b0SMikulas Patocka synchronize_srcu(&md->io_barrier);
21341da177e4SLinus Torvalds 
2135d0bcb878SKiyoshi Ueda /*
213629e4013dSTejun Heo * Stop md->queue before flushing md->wq in case request-based
213729e4013dSTejun Heo * dm defers requests to md->wq from md->queue.
2138d0bcb878SKiyoshi Ueda */
21392eb6e1e3SKeith Busch if (dm_request_based(md)) {
2140eca7ee6dSMike Snitzer dm_stop_queue(md->queue);
214102233342SMike Snitzer if (md->kworker_task)
21422eb6e1e3SKeith Busch flush_kthread_worker(&md->kworker);
21432eb6e1e3SKeith Busch }
2144cec47e3dSKiyoshi Ueda 
2145d0bcb878SKiyoshi Ueda flush_workqueue(md->wq);
2146d0bcb878SKiyoshi Ueda 
21471da177e4SLinus Torvalds /*
21483b00b203SMikulas Patocka * At this point no more requests are entering target request routines.
21493b00b203SMikulas Patocka * We call dm_wait_for_completion to wait for all existing requests
21503b00b203SMikulas Patocka * to finish.
21511da177e4SLinus Torvalds */
2152ffcc3936SMike Snitzer r = dm_wait_for_completion(md, interruptible);
2153*eaf9a736SMike Snitzer if (!r)
2154*eaf9a736SMike Snitzer set_bit(dmf_suspended_flag, &md->flags);
21551da177e4SLinus Torvalds 
21566d6f10dfSMilan Broz if (noflush)
2157022c2611SMikulas Patocka clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
215841abc4e1SHannes Reinecke if (map)
215983d5e5b0SMikulas Patocka synchronize_srcu(&md->io_barrier);
21602e93ccc1SKiyoshi Ueda 
21611da177e4SLinus Torvalds /* were we interrupted? */
216246125c1cSMilan Broz if (r < 0) {
21639a1fb464SMikulas Patocka dm_queue_flush(md);
216473d410c0SMilan Broz 
2165cec47e3dSKiyoshi Ueda if (dm_request_based(md))
2166eca7ee6dSMike Snitzer dm_start_queue(md->queue);
2167cec47e3dSKiyoshi Ueda 
21682ca3310eSAlasdair G Kergon unlock_fs(md);
2169d67ee213SMike Snitzer dm_table_presuspend_undo_targets(map);
2170ffcc3936SMike Snitzer /* pushback list is already flushed, so skip flush */
2171ffcc3936SMike Snitzer }
2172ffcc3936SMike Snitzer 
2173ffcc3936SMike Snitzer return r;
21742ca3310eSAlasdair G Kergon }
21752ca3310eSAlasdair G Kergon 
21763b00b203SMikulas Patocka /*
2177ffcc3936SMike Snitzer * We need to be able to change a mapping table under a mounted
2178ffcc3936SMike Snitzer * filesystem.
For example we might want to move some data in 2179ffcc3936SMike Snitzer * the background. Before the table can be swapped with 2180ffcc3936SMike Snitzer * dm_bind_table, dm_suspend must be called to flush any in 2181ffcc3936SMike Snitzer * flight bios and ensure that any further io gets deferred. 21823b00b203SMikulas Patocka */ 2183ffcc3936SMike Snitzer /* 2184ffcc3936SMike Snitzer * Suspend mechanism in request-based dm. 2185ffcc3936SMike Snitzer * 2186ffcc3936SMike Snitzer * 1. Flush all I/Os by lock_fs() if needed. 2187ffcc3936SMike Snitzer * 2. Stop dispatching any I/O by stopping the request_queue. 2188ffcc3936SMike Snitzer * 3. Wait for all in-flight I/Os to be completed or requeued. 2189ffcc3936SMike Snitzer * 2190ffcc3936SMike Snitzer * To abort suspend, start the request_queue. 2191ffcc3936SMike Snitzer */ 2192ffcc3936SMike Snitzer int dm_suspend(struct mapped_device *md, unsigned suspend_flags) 2193ffcc3936SMike Snitzer { 2194ffcc3936SMike Snitzer struct dm_table *map = NULL; 2195ffcc3936SMike Snitzer int r = 0; 2196ffcc3936SMike Snitzer 2197ffcc3936SMike Snitzer retry: 2198ffcc3936SMike Snitzer mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); 2199ffcc3936SMike Snitzer 2200ffcc3936SMike Snitzer if (dm_suspended_md(md)) { 2201ffcc3936SMike Snitzer r = -EINVAL; 2202ffcc3936SMike Snitzer goto out_unlock; 2203ffcc3936SMike Snitzer } 2204ffcc3936SMike Snitzer 2205ffcc3936SMike Snitzer if (dm_suspended_internally_md(md)) { 2206ffcc3936SMike Snitzer /* already internally suspended, wait for internal resume */ 2207ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 2208ffcc3936SMike Snitzer r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); 2209ffcc3936SMike Snitzer if (r) 2210ffcc3936SMike Snitzer return r; 2211ffcc3936SMike Snitzer goto retry; 2212ffcc3936SMike Snitzer } 2213ffcc3936SMike Snitzer 2214a12f5d48SEric Dumazet map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2215ffcc3936SMike Snitzer 2216*eaf9a736SMike Snitzer r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED); 2217ffcc3936SMike Snitzer if (r) 2218ffcc3936SMike Snitzer goto out_unlock; 22193b00b203SMikulas Patocka 22204d4471cbSKiyoshi Ueda dm_table_postsuspend_targets(map); 22214d4471cbSKiyoshi Ueda 2222d287483dSAlasdair G Kergon out_unlock: 2223e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock); 2224cf222b37SAlasdair G Kergon return r; 22251da177e4SLinus Torvalds } 22261da177e4SLinus Torvalds 2227ffcc3936SMike Snitzer static int __dm_resume(struct mapped_device *md, struct dm_table *map) 22281da177e4SLinus Torvalds { 2229ffcc3936SMike Snitzer if (map) { 2230ffcc3936SMike Snitzer int r = dm_table_resume_targets(map); 22318757b776SMilan Broz if (r) 2232ffcc3936SMike Snitzer return r; 2233ffcc3936SMike Snitzer } 22342ca3310eSAlasdair G Kergon 22359a1fb464SMikulas Patocka dm_queue_flush(md); 22362ca3310eSAlasdair G Kergon 2237cec47e3dSKiyoshi Ueda /* 2238cec47e3dSKiyoshi Ueda * Flushing deferred I/Os must be done after targets are resumed 2239cec47e3dSKiyoshi Ueda * so that mapping of targets can work correctly. 2240cec47e3dSKiyoshi Ueda * Request-based dm is queueing the deferred I/Os in its request_queue. 
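 *
 * The resume sequence below is therefore suspend in reverse:
 *
 *	dm_table_resume_targets(map);	// targets may map again
 *	dm_queue_flush(md);		// replay bios from md->deferred
 *	dm_start_queue(md->queue);	// request-based only
 *	unlock_fs(md);			// thaw, if lock_fs() froze it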
2241cec47e3dSKiyoshi Ueda */ 2242cec47e3dSKiyoshi Ueda if (dm_request_based(md)) 2243eca7ee6dSMike Snitzer dm_start_queue(md->queue); 2244cec47e3dSKiyoshi Ueda 22452ca3310eSAlasdair G Kergon unlock_fs(md); 22462ca3310eSAlasdair G Kergon 2247ffcc3936SMike Snitzer return 0; 2248ffcc3936SMike Snitzer } 2249ffcc3936SMike Snitzer 2250ffcc3936SMike Snitzer int dm_resume(struct mapped_device *md) 2251ffcc3936SMike Snitzer { 2252ffcc3936SMike Snitzer int r = -EINVAL; 2253ffcc3936SMike Snitzer struct dm_table *map = NULL; 2254ffcc3936SMike Snitzer 2255ffcc3936SMike Snitzer retry: 2256ffcc3936SMike Snitzer mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); 2257ffcc3936SMike Snitzer 2258ffcc3936SMike Snitzer if (!dm_suspended_md(md)) 2259ffcc3936SMike Snitzer goto out; 2260ffcc3936SMike Snitzer 2261ffcc3936SMike Snitzer if (dm_suspended_internally_md(md)) { 2262ffcc3936SMike Snitzer /* already internally suspended, wait for internal resume */ 2263ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 2264ffcc3936SMike Snitzer r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); 2265ffcc3936SMike Snitzer if (r) 2266ffcc3936SMike Snitzer return r; 2267ffcc3936SMike Snitzer goto retry; 2268ffcc3936SMike Snitzer } 2269ffcc3936SMike Snitzer 2270a12f5d48SEric Dumazet map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2271ffcc3936SMike Snitzer if (!map || !dm_table_get_size(map)) 2272ffcc3936SMike Snitzer goto out; 2273ffcc3936SMike Snitzer 2274ffcc3936SMike Snitzer r = __dm_resume(md, map); 2275ffcc3936SMike Snitzer if (r) 2276ffcc3936SMike Snitzer goto out; 2277ffcc3936SMike Snitzer 22782ca3310eSAlasdair G Kergon clear_bit(DMF_SUSPENDED, &md->flags); 22792ca3310eSAlasdair G Kergon 2280cf222b37SAlasdair G Kergon r = 0; 2281cf222b37SAlasdair G Kergon out: 2282e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock); 22832ca3310eSAlasdair G Kergon 2284cf222b37SAlasdair G Kergon return r; 22851da177e4SLinus Torvalds } 22861da177e4SLinus Torvalds 2287fd2ed4d2SMikulas Patocka /* 2288fd2ed4d2SMikulas Patocka * Internal suspend/resume works like userspace-driven suspend. It waits 2289fd2ed4d2SMikulas Patocka * until all bios finish and prevents issuing new bios to the target drivers. 2290fd2ed4d2SMikulas Patocka * It may be used only from the kernel. 2291fd2ed4d2SMikulas Patocka */ 2292fd2ed4d2SMikulas Patocka 2293ffcc3936SMike Snitzer static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags) 2294ffcc3936SMike Snitzer { 2295ffcc3936SMike Snitzer struct dm_table *map = NULL; 2296ffcc3936SMike Snitzer 229796b26c8cSMikulas Patocka if (md->internal_suspend_count++) 2298ffcc3936SMike Snitzer return; /* nested internal suspend */ 2299ffcc3936SMike Snitzer 2300ffcc3936SMike Snitzer if (dm_suspended_md(md)) { 2301ffcc3936SMike Snitzer set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 2302ffcc3936SMike Snitzer return; /* nest suspend */ 2303ffcc3936SMike Snitzer } 2304ffcc3936SMike Snitzer 2305a12f5d48SEric Dumazet map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2306ffcc3936SMike Snitzer 2307ffcc3936SMike Snitzer /* 2308ffcc3936SMike Snitzer * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is 2309ffcc3936SMike Snitzer * supported. Properly supporting a TASK_INTERRUPTIBLE internal suspend 2310ffcc3936SMike Snitzer * would require changing .presuspend to return an error -- avoid this 2311ffcc3936SMike Snitzer * until there is a need for more elaborate variants of internal suspend. 
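 *
 * Internal suspends nest via md->internal_suspend_count, e.g. for two
 * hypothetical in-kernel callers:
 *
 *	dm_internal_suspend_noflush(md);  // count 0 -> 1: really suspends
 *	dm_internal_suspend_noflush(md);  // count 1 -> 2: no-op
 *	dm_internal_resume(md);           // count 2 -> 1: stays suspended
 *	dm_internal_resume(md);           // count 1 -> 0: really resumes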
2312ffcc3936SMike Snitzer */ 2313*eaf9a736SMike Snitzer (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE, 2314*eaf9a736SMike Snitzer DMF_SUSPENDED_INTERNALLY); 2315ffcc3936SMike Snitzer 2316ffcc3936SMike Snitzer dm_table_postsuspend_targets(map); 2317ffcc3936SMike Snitzer } 2318ffcc3936SMike Snitzer 2319ffcc3936SMike Snitzer static void __dm_internal_resume(struct mapped_device *md) 2320ffcc3936SMike Snitzer { 232196b26c8cSMikulas Patocka BUG_ON(!md->internal_suspend_count); 232296b26c8cSMikulas Patocka 232396b26c8cSMikulas Patocka if (--md->internal_suspend_count) 2324ffcc3936SMike Snitzer return; /* resume from nested internal suspend */ 2325ffcc3936SMike Snitzer 2326ffcc3936SMike Snitzer if (dm_suspended_md(md)) 2327ffcc3936SMike Snitzer goto done; /* resume from nested suspend */ 2328ffcc3936SMike Snitzer 2329ffcc3936SMike Snitzer /* 2330ffcc3936SMike Snitzer * NOTE: existing callers don't need to call dm_table_resume_targets 2331ffcc3936SMike Snitzer * (which may fail -- so best to avoid it for now by passing NULL map) 2332ffcc3936SMike Snitzer */ 2333ffcc3936SMike Snitzer (void) __dm_resume(md, NULL); 2334ffcc3936SMike Snitzer 2335ffcc3936SMike Snitzer done: 2336ffcc3936SMike Snitzer clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 2337ffcc3936SMike Snitzer smp_mb__after_atomic(); 2338ffcc3936SMike Snitzer wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY); 2339ffcc3936SMike Snitzer } 2340ffcc3936SMike Snitzer 2341ffcc3936SMike Snitzer void dm_internal_suspend_noflush(struct mapped_device *md) 2342fd2ed4d2SMikulas Patocka { 2343fd2ed4d2SMikulas Patocka mutex_lock(&md->suspend_lock); 2344ffcc3936SMike Snitzer __dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG); 2345ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 2346ffcc3936SMike Snitzer } 2347ffcc3936SMike Snitzer EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush); 2348ffcc3936SMike Snitzer 2349ffcc3936SMike Snitzer void dm_internal_resume(struct mapped_device *md) 2350ffcc3936SMike Snitzer { 2351ffcc3936SMike Snitzer mutex_lock(&md->suspend_lock); 2352ffcc3936SMike Snitzer __dm_internal_resume(md); 2353ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 2354ffcc3936SMike Snitzer } 2355ffcc3936SMike Snitzer EXPORT_SYMBOL_GPL(dm_internal_resume); 2356ffcc3936SMike Snitzer 2357ffcc3936SMike Snitzer /* 2358ffcc3936SMike Snitzer * Fast variants of internal suspend/resume hold md->suspend_lock, 2359ffcc3936SMike Snitzer * which prevents interaction with userspace-driven suspend. 
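 *
 * The two calls must therefore be paired, with the lock held in
 * between (hypothetical caller, for illustration):
 *
 *	dm_internal_suspend_fast(md);	// returns with suspend_lock held
 *	...operate on the quiesced device...
 *	dm_internal_resume_fast(md);	// drops suspend_lock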

/*
 * Fast variants of internal suspend/resume hold md->suspend_lock,
 * which prevents interaction with userspace-driven suspend.
 */

void dm_internal_suspend_fast(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
		return;

	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	synchronize_srcu(&md->io_barrier);
	flush_workqueue(md->wq);
	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);

void dm_internal_resume_fast(struct mapped_device *md)
{
	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
		goto done;

	dm_queue_flush(md);

done:
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
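
/*
 * Illustrative sketch, not part of the driver: dm_internal_suspend_fast()
 * returns with md->suspend_lock still held and dm_internal_resume_fast()
 * releases it, so the two calls must be strictly paired on the same path.
 */
static void __maybe_unused dm_example_fast_quiesce(struct mapped_device *md)
{
	dm_internal_suspend_fast(md);	/* acquires md->suspend_lock */
	/* ... short critical section with I/O quiesced ... */
	dm_internal_resume_fast(md);	/* drops md->suspend_lock */
}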

/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
		      unsigned cookie)
{
	char udev_cookie[DM_COOKIE_LENGTH];
	char *envp[] = { udev_cookie, NULL };

	if (!cookie)
		return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
	else {
		snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
			 DM_COOKIE_ENV_VAR_NAME, cookie);
		return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
					  action, envp);
	}
}

uint32_t dm_next_uevent_seq(struct mapped_device *md)
{
	return atomic_add_return(1, &md->uevent_seq);
}

uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}

void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
{
	unsigned long flags;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_add(elist, &md->uevent_list);
	spin_unlock_irqrestore(&md->uevent_lock, flags);
}
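
/*
 * Illustrative sketch, not part of the driver: the ioctl layer polls for
 * table events by sampling the event counter and then sleeping until it
 * changes; dm_wait_event() returns -ERESTARTSYS if a signal arrives first.
 */
static int __maybe_unused dm_example_wait_for_next_event(struct mapped_device *md)
{
	int event_nr = dm_get_event_nr(md);

	return dm_wait_event(md, event_nr);
}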

/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}
EXPORT_SYMBOL_GPL(dm_disk);

struct kobject *dm_kobject(struct mapped_device *md)
{
	return &md->kobj_holder.kobj;
}

struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{
	struct mapped_device *md;

	md = container_of(kobj, struct mapped_device, kobj_holder.kobj);

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md))
		return NULL;

	dm_get(md);
	return md;
}

int dm_suspended_md(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}

int dm_suspended_internally_md(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
}

int dm_test_deferred_remove_flag(struct mapped_device *md)
{
	return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
}

int dm_suspended(struct dm_target *ti)
{
	return dm_suspended_md(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_suspended);

int dm_noflush_suspending(struct dm_target *ti)
{
	return __noflush_suspending(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);

struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned type,
					    unsigned integrity, unsigned per_io_data_size)
{
	struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
	struct kmem_cache *cachep = NULL;
	unsigned int pool_size = 0;
	unsigned int front_pad;

	if (!pools)
		return NULL;

	switch (type) {
	case DM_TYPE_BIO_BASED:
	case DM_TYPE_DAX_BIO_BASED:
		cachep = _io_cache;
		pool_size = dm_get_reserved_bio_based_ios();
		front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
		break;
	case DM_TYPE_REQUEST_BASED:
		cachep = _rq_tio_cache;
		pool_size = dm_get_reserved_rq_based_ios();
		pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache);
		if (!pools->rq_pool)
			goto out;
		/* fall through to set up the remaining rq-based pools */
	case DM_TYPE_MQ_REQUEST_BASED:
		if (!pool_size)
			pool_size = dm_get_reserved_rq_based_ios();
		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
		/* per_io_data_size is used for blk-mq pdu at queue allocation */
		break;
	default:
		BUG();
	}

	if (cachep) {
		pools->io_pool = mempool_create_slab_pool(pool_size, cachep);
		if (!pools->io_pool)
			goto out;
	}

	pools->bs = bioset_create_nobvec(pool_size, front_pad);
	if (!pools->bs)
		goto out;

	if (integrity && bioset_integrity_create(pools->bs, pool_size))
		goto out;

	return pools;

out:
	dm_free_md_mempools(pools);

	return NULL;
}
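
/*
 * Illustrative sketch, not part of the driver: the bio-based front_pad above
 * reserves room in front of each clone bio for the dm_target_io plus the
 * target's per-bio data, which is why per-bio data is reachable with plain
 * pointer arithmetic (this mirrors dm_per_bio_data() in device-mapper.h).
 */
static void * __maybe_unused dm_example_per_bio_data(struct bio *clone,
						     size_t data_size)
{
	return (char *)clone - offsetof(struct dm_target_io, clone) - data_size;
}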

void dm_free_md_mempools(struct dm_md_mempools *pools)
{
	if (!pools)
		return;

	mempool_destroy(pools->io_pool);
	mempool_destroy(pools->rq_pool);

	if (pools->bs)
		bioset_free(pools->bs);

	kfree(pools);
}

struct dm_pr {
	u64	old_key;
	u64	new_key;
	u32	flags;
	bool	fail_early;
};

static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn,
		      void *data)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	struct dm_table *table;
	struct dm_target *ti;
	int ret = -ENOTTY, srcu_idx;

	table = dm_get_live_table(md, &srcu_idx);
	if (!table || !dm_table_get_size(table))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(table) != 1)
		goto out;
	ti = dm_table_get_target(table, 0);

	ret = -EINVAL;
	if (!ti->type->iterate_devices)
		goto out;

	ret = ti->type->iterate_devices(ti, fn, data);
out:
	dm_put_live_table(md, srcu_idx);
	return ret;
}
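
/*
 * Illustrative sketch, not part of the driver: dm_call_pr() above feeds each
 * underlying device of the (single) target to an iterate_devices callback;
 * a hypothetical capability probe would look like this.
 */
static int __maybe_unused __dm_example_pr_capable(struct dm_target *ti,
						  struct dm_dev *dev,
						  sector_t start, sector_t len,
						  void *data)
{
	const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;

	return (ops && ops->pr_register) ? 0 : -EOPNOTSUPP;
}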

/*
 * For register / unregister we need to manually call out to every path.
 */
static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev,
			    sector_t start, sector_t len, void *data)
{
	struct dm_pr *pr = data;
	const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;

	if (!ops || !ops->pr_register)
		return -EOPNOTSUPP;
	return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags);
}

static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
			  u32 flags)
{
	struct dm_pr pr = {
		.old_key	= old_key,
		.new_key	= new_key,
		.flags		= flags,
		.fail_early	= true,
	};
	int ret;

	ret = dm_call_pr(bdev, __dm_pr_register, &pr);
	if (ret && new_key) {
		/* unregister all paths if we failed to register any path */
		pr.old_key = new_key;
		pr.new_key = 0;
		pr.flags = 0;
		pr.fail_early = false;
		dm_call_pr(bdev, __dm_pr_register, &pr);
	}

	return ret;
}

static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
			 u32 flags)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	fmode_t mode;
	int r;

	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
	if (r < 0)
		return r;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_reserve)
		r = ops->pr_reserve(bdev, key, type, flags);
	else
		r = -EOPNOTSUPP;

	bdput(bdev);
	return r;
}

static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	fmode_t mode;
	int r;

	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
	if (r < 0)
		return r;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_release)
		r = ops->pr_release(bdev, key, type);
	else
		r = -EOPNOTSUPP;

	bdput(bdev);
	return r;
}

static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
			 enum pr_type type, bool abort)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	fmode_t mode;
	int r;

	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
	if (r < 0)
		return r;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_preempt)
		r = ops->pr_preempt(bdev, old_key, new_key, type, abort);
	else
		r = -EOPNOTSUPP;

	bdput(bdev);
	return r;
}

static int dm_pr_clear(struct block_device *bdev, u64 key)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	fmode_t mode;
	int r;

	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
	if (r < 0)
		return r;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_clear)
		r = ops->pr_clear(bdev, key);
	else
		r = -EOPNOTSUPP;

	bdput(bdev);
	return r;
}

static const struct pr_ops dm_pr_ops = {
	.pr_register	= dm_pr_register,
	.pr_reserve	= dm_pr_reserve,
	.pr_release	= dm_pr_release,
	.pr_preempt	= dm_pr_preempt,
	.pr_clear	= dm_pr_clear,
};

static const struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.direct_access = dm_blk_direct_access,
	.getgeo = dm_blk_getgeo,
	.pr_ops = &dm_pr_ops,
	.owner = THIS_MODULE
};

/*
 * module hooks
 */
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");

module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");

module_param(dm_numa_node, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");

MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
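
/*
 * Illustrative usage, not part of the driver: with .pr_ops wired into
 * dm_blk_dops, userspace can drive persistent reservations on the mapped
 * device through the generic block-layer ioctls from <linux/pr.h>, e.g.:
 *
 *	struct pr_registration reg = { .new_key = 0xabc123 };
 *	int fd = open("/dev/dm-0", O_RDWR);
 *
 *	if (fd >= 0 && ioctl(fd, IOC_PR_REGISTER, &reg))
 *		perror("IOC_PR_REGISTER");
 */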
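
/*
 * Illustrative usage, not part of the driver: the writable module parameters
 * declared above are exposed under /sys/module/dm_mod/parameters/, e.g.:
 *
 *	echo 32 > /sys/module/dm_mod/parameters/reserved_bio_based_ios
 *	cat /sys/module/dm_mod/parameters/dm_numa_node
 */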