/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/kthread.h>
#include <linux/ktime.h>
#include <linux/elevator.h> /* for rq_end_sector() */
#include <linux/blk-mq.h>
#include <linux/pr.h>

#include <trace/events/block.h>

#define DM_MSG_PREFIX "core"

#ifdef CONFIG_PRINTK
/*
 * ratelimit state to be used in DMXXX_LIMIT().
 */
DEFINE_RATELIMIT_STATE(dm_ratelimit_state,
		       DEFAULT_RATELIMIT_INTERVAL,
		       DEFAULT_RATELIMIT_BURST);
EXPORT_SYMBOL(dm_ratelimit_state);
#endif

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_IDR(_minor_idr);

static DEFINE_SPINLOCK(_minor_lock);

static void do_deferred_remove(struct work_struct *w);

static DECLARE_WORK(deferred_remove_work, do_deferred_remove);

static struct workqueue_struct *deferred_remove_workqueue;

/*
 * For bio-based dm.
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	atomic_t io_count;
	struct bio *bio;
	unsigned long start_time;
	spinlock_t endio_lock;
	struct dm_stats_aux stats_aux;
};

/*
 * For request-based dm.
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
	struct mapped_device *md;
	struct dm_target *ti;
	struct request *orig, *clone;
	struct kthread_work work;
	int error;
	union map_info info;
	struct dm_stats_aux stats_aux;
	unsigned long duration_jiffies;
	unsigned n_sectors;
};

/*
 * For request-based dm - the bio clones we allocate are embedded in these
 * structs.
 *
 * We allocate these with bio_alloc_bioset, using the front_pad parameter when
 * the bioset is created - this means the bio has to come at the end of the
 * struct.
 */
struct dm_rq_clone_bio_info {
	struct bio *orig;
	struct dm_rq_target_io *tio;
	struct bio clone;
};

union map_info *dm_get_rq_mapinfo(struct request *rq)
{
	if (rq && rq->end_io_data)
		return &((struct dm_rq_target_io *)rq->end_io_data)->info;
	return NULL;
}
EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);

#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7

/*
 * A dummy definition to make RCU happy.
 * struct dm_table should never be dereferenced in this file.
 */
struct dm_table {
	int undefined__;
};

/*
 * Work processed by per-device workqueue.
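 * (dm_wq_work(), defined later in this file: it re-issues bios that were
 * deferred via queue_io() while the device was suspended.)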
 */
struct mapped_device {
	struct srcu_struct io_barrier;
	struct mutex suspend_lock;
	atomic_t holders;
	atomic_t open_count;

	/*
	 * The current mapping.
	 * Use dm_get_live_table{_fast} or take suspend_lock for
	 * dereference.
	 */
	struct dm_table __rcu *map;

	struct list_head table_devices;
	struct mutex table_devices_lock;

	unsigned long flags;

	struct request_queue *queue;
	unsigned type;
	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;

	struct target_type *immutable_target_type;

	struct gendisk *disk;
	char name[16];

	void *interface_ptr;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	atomic_t pending[2];
	wait_queue_head_t wait;
	struct work_struct work;
	struct bio_list deferred;
	spinlock_t deferred_lock;

	/*
	 * Processing queue (flush)
	 */
	struct workqueue_struct *wq;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;
	mempool_t *rq_pool;

	struct bio_set *bs;

	/*
	 * Event handling.
	 */
	atomic_t event_nr;
	wait_queue_head_t eventq;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/*
	 * freeze/thaw support requires holding onto a super block
	 */
	struct super_block *frozen_sb;
	struct block_device *bdev;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/* kobject and completion */
	struct dm_kobject_holder kobj_holder;

	/* zero-length flush that will be cloned and submitted to targets */
	struct bio flush_bio;

	/* the number of internal suspends */
	unsigned internal_suspend_count;

	struct dm_stats stats;

	struct kthread_worker kworker;
	struct task_struct *kworker_task;

	/* for request-based merge heuristic in dm_request_fn() */
	unsigned seq_rq_merge_deadline_usecs;
	int last_rq_rw;
	sector_t last_rq_pos;
	ktime_t last_rq_start_time;

	/* for blk-mq request-based DM support */
	struct blk_mq_tag_set tag_set;
	bool use_blk_mq;
};

#ifdef CONFIG_DM_MQ_DEFAULT
static bool use_blk_mq = true;
#else
static bool use_blk_mq = false;
#endif

bool dm_use_blk_mq(struct mapped_device *md)
{
	return md->use_blk_mq;
}

/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
	mempool_t *io_pool;
	mempool_t *rq_pool;
	struct bio_set *bs;
};

struct table_device {
	struct list_head list;
	atomic_t count;
	struct dm_dev dm_dev;
};

#define RESERVED_BIO_BASED_IOS		16
#define RESERVED_REQUEST_BASED_IOS	256
#define RESERVED_MAX_IOS		1024
static struct kmem_cache *_io_cache;
static struct kmem_cache *_rq_tio_cache;
static struct kmem_cache *_rq_cache;

/*
 * Bio-based DM's mempools' reserved IOs set by the user.
 */
static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;

/*
 * Request-based DM's mempools' reserved IOs set by the user.
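 * Both limits are clamped to RESERVED_MAX_IOS by __dm_get_module_param()
 * below before use.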
 */
static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;

static unsigned __dm_get_module_param(unsigned *module_param,
				      unsigned def, unsigned max)
{
	unsigned param = ACCESS_ONCE(*module_param);
	unsigned modified_param = 0;

	if (!param)
		modified_param = def;
	else if (param > max)
		modified_param = max;

	if (modified_param) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned dm_get_reserved_bio_based_ios(void)
{
	return __dm_get_module_param(&reserved_bio_based_ios,
				     RESERVED_BIO_BASED_IOS, RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);

unsigned dm_get_reserved_rq_based_ios(void)
{
	return __dm_get_module_param(&reserved_rq_based_ios,
				     RESERVED_REQUEST_BASED_IOS, RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);

static int __init local_init(void)
{
	int r = -ENOMEM;

	/* allocate a slab for the dm_ios */
	_io_cache = KMEM_CACHE(dm_io, 0);
	if (!_io_cache)
		return r;

	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
	if (!_rq_tio_cache)
		goto out_free_io_cache;

	_rq_cache = kmem_cache_create("dm_clone_request", sizeof(struct request),
				      __alignof__(struct request), 0, NULL);
	if (!_rq_cache)
		goto out_free_rq_tio_cache;

	r = dm_uevent_init();
	if (r)
		goto out_free_rq_cache;

	deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
	if (!deferred_remove_workqueue) {
		r = -ENOMEM;
		goto out_uevent_exit;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_free_workqueue;

	if (!_major)
		_major = r;

	return 0;
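
	/* Error paths: undo the setup steps above in reverse order. */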
out_free_workqueue:
	destroy_workqueue(deferred_remove_workqueue);
out_uevent_exit:
	dm_uevent_exit();
out_free_rq_cache:
	kmem_cache_destroy(_rq_cache);
out_free_rq_tio_cache:
	kmem_cache_destroy(_rq_tio_cache);
out_free_io_cache:
	kmem_cache_destroy(_io_cache);

	return r;
}

static void local_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(deferred_remove_workqueue);

	kmem_cache_destroy(_rq_cache);
	kmem_cache_destroy(_rq_tio_cache);
	kmem_cache_destroy(_io_cache);
	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_io_init,
	dm_kcopyd_init,
	dm_interface_init,
	dm_statistics_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_io_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
	dm_statistics_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();

	/*
	 * Should be empty by this point.
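 * (A non-empty minor IDR here would mean a mapped_device was leaked.)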
 */
	idr_destroy(&_minor_idr);
}

/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{
	return test_bit(DMF_DELETING, &md->flags);
}

static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);
out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}

static void dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = disk->private_data;
	if (WARN_ON(!md))
		goto out;

	if (atomic_dec_and_test(&md->open_count) &&
	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
		queue_work(deferred_remove_workqueue, &deferred_remove_work);

	dm_put(md);
out:
	spin_unlock(&_minor_lock);
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md)) {
		r = -EBUSY;
		if (mark_deferred)
			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
		r = -EEXIST;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

int dm_cancel_deferred_remove(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (test_bit(DMF_DELETING, &md->flags))
		r = -EBUSY;
	else
		clear_bit(DMF_DEFERRED_REMOVE, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static void do_deferred_remove(struct work_struct *w)
{
	dm_deferred_remove();
}

sector_t dm_get_size(struct mapped_device *md)
{
	return get_capacity(md->disk);
}

struct request_queue *dm_get_md_queue(struct mapped_device *md)
{
	return md->queue;
}

struct dm_stats *dm_get_stats(struct mapped_device *md)
{
	return &md->stats;
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}
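
/*
 * Resolve the single target of the live table and ask it, via
 * ->prepare_ioctl(), which block device an ioctl should be forwarded to.
 * A return value > 0 means the ioctl is aimed at a partition and needs
 * extra validation in dm_blk_ioctl() below.
 */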
static int dm_get_live_table_for_ioctl(struct mapped_device *md,
		struct dm_target **tgt, struct block_device **bdev,
		fmode_t *mode, int *srcu_idx)
{
	struct dm_table *map;
	int r;

retry:
	r = -ENOTTY;
	map = dm_get_live_table(md, srcu_idx);
	if (!map || !dm_table_get_size(map))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		goto out;

	*tgt = dm_table_get_target(map, 0);

	if (!(*tgt)->type->prepare_ioctl)
		goto out;

	if (dm_suspended_md(md)) {
		r = -EAGAIN;
		goto out;
	}

	r = (*tgt)->type->prepare_ioctl(*tgt, bdev, mode);
	if (r < 0)
		goto out;

	return r;

out:
	dm_put_live_table(md, *srcu_idx);
	if (r == -ENOTCONN) {
		msleep(10);
		goto retry;
	}
	return r;
}

static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	struct dm_target *tgt;
	int srcu_idx, r;

	r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx);
	if (r < 0)
		return r;

	if (r > 0) {
		/*
		 * Target determined this ioctl is being issued against
		 * a logical partition of the parent bdev; so extra
		 * validation is needed.
		 */
		r = scsi_verify_blk_ioctl(NULL, cmd);
		if (r)
			goto out;
	}

	r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
out:
	dm_put_live_table(md, srcu_idx);
	return r;
}

static struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
	bio_put(&tio->clone);
}

static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
					    gfp_t gfp_mask)
{
	return mempool_alloc(md->io_pool, gfp_mask);
}

static void free_rq_tio(struct dm_rq_target_io *tio)
{
	mempool_free(tio, tio->md->io_pool);
}

static struct request *alloc_clone_request(struct mapped_device *md,
					   gfp_t gfp_mask)
{
	return mempool_alloc(md->rq_pool, gfp_mask);
}

static void free_clone_request(struct mapped_device *md, struct request *rq)
{
	mempool_free(rq, md->rq_pool);
}

static int md_in_flight(struct mapped_device *md)
{
	return atomic_read(&md->pending[READ]) +
	       atomic_read(&md->pending[WRITE]);
}

static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	int cpu;
	int rw = bio_data_dir(bio);

	io->start_time = jiffies;

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_unlock();
	atomic_set(&dm_disk(md)->part0.in_flight[rw],
		   atomic_inc_return(&md->pending[rw]));

	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
				    bio_sectors(bio), false, 0, &io->stats_aux);
}
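
/*
 * The counterpart of start_io_acct(): updates the generic disk statistics,
 * feeds dm-stats if enabled, and wakes anyone waiting in the suspend path
 * once no bios remain in flight.
 */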
static void end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	unsigned long duration = jiffies - io->start_time;
	int pending;
	int rw = bio_data_dir(bio);

	generic_end_io_acct(rw, &dm_disk(md)->part0, io->start_time);

	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
				    bio_sectors(bio), true, duration, &io->stats_aux);

	/*
	 * After this is decremented the bio must not be touched if it is
	 * a flush.
	 */
	pending = atomic_dec_return(&md->pending[rw]);
	atomic_set(&dm_disk(md)->part0.in_flight[rw], pending);
	pending += atomic_read(&md->pending[rw^0x1]);

	/* nudge anyone waiting on suspend queue */
	if (!pending)
		wake_up(&md->wait);
}

/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&md->deferred_lock, flags);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irqrestore(&md->deferred_lock, flags);
	queue_work(md->wq, &md->work);
}

/*
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_put_live_table() when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
{
	*srcu_idx = srcu_read_lock(&md->io_barrier);

	return srcu_dereference(md->map, &md->io_barrier);
}

void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
{
	srcu_read_unlock(&md->io_barrier, srcu_idx);
}

void dm_sync_table(struct mapped_device *md)
{
	synchronize_srcu(&md->io_barrier);
	synchronize_rcu_expedited();
}

/*
 * A fast alternative to dm_get_live_table/dm_put_live_table.
 * The caller must not block between these two functions.
 */
static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
{
	rcu_read_lock();
	return rcu_dereference(md->map);
}

static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
{
	rcu_read_unlock();
}

/*
 * Open a table device so we can use it as a map destination.
 */
static int open_table_device(struct table_device *td, dev_t dev,
			     struct mapped_device *md)
{
	static char *_claim_ptr = "I belong to device-mapper";
	struct block_device *bdev;

	int r;

	BUG_ON(td->dm_dev.bdev);

	bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _claim_ptr);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	r = bd_link_disk_holder(bdev, dm_disk(md));
	if (r) {
		blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
		return r;
	}

	td->dm_dev.bdev = bdev;
	return 0;
}

/*
 * Close a table device that we've been using.
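 * (The counterpart of open_table_device(); the caller holds
 * table_devices_lock.)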
 */
static void close_table_device(struct table_device *td, struct mapped_device *md)
{
	if (!td->dm_dev.bdev)
		return;

	bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
	blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
	td->dm_dev.bdev = NULL;
}

static struct table_device *find_table_device(struct list_head *l, dev_t dev,
					      fmode_t mode) {
	struct table_device *td;

	list_for_each_entry(td, l, list)
		if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
			return td;

	return NULL;
}

int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
			struct dm_dev **result) {
	int r;
	struct table_device *td;

	mutex_lock(&md->table_devices_lock);
	td = find_table_device(&md->table_devices, dev, mode);
	if (!td) {
		td = kmalloc(sizeof(*td), GFP_KERNEL);
		if (!td) {
			mutex_unlock(&md->table_devices_lock);
			return -ENOMEM;
		}

		td->dm_dev.mode = mode;
		td->dm_dev.bdev = NULL;

		if ((r = open_table_device(td, dev, md))) {
			mutex_unlock(&md->table_devices_lock);
			kfree(td);
			return r;
		}

		format_dev_t(td->dm_dev.name, dev);

		atomic_set(&td->count, 0);
		list_add(&td->list, &md->table_devices);
	}
	atomic_inc(&td->count);
	mutex_unlock(&md->table_devices_lock);

	*result = &td->dm_dev;
	return 0;
}
EXPORT_SYMBOL_GPL(dm_get_table_device);

void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
{
	struct table_device *td = container_of(d, struct table_device, dm_dev);

	mutex_lock(&md->table_devices_lock);
	if (atomic_dec_and_test(&td->count)) {
		close_table_device(td, md);
		list_del(&td->list);
		kfree(td);
	}
	mutex_unlock(&md->table_devices_lock);
}
EXPORT_SYMBOL(dm_put_table_device);

static void free_table_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct table_device *td = list_entry(tmp, struct table_device, list);

		DMWARN("dm_destroy: %s still exists with %d references",
		       td->dm_dev.name, atomic_read(&td->count));
		kfree(td);
	}
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

/*-----------------------------------------------------------------
 * CRUD START:
 * A more elegant solution is in the works that uses the queue
 * merge fn, unfortunately there are a couple of changes to
 * the block layer that I want to make for this. So in the
 * interests of getting something for people to use I give
 * you this clearly demarcated crap.
 *---------------------------------------------------------------*/

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
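 * The clone count lives in io->io_count; errors are recorded under
 * io->endio_lock, and DM_ENDIO_REQUEUE pushes the bio back onto the
 * deferred list while a noflush suspend is in progress.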
 */
static void dec_pending(struct dm_io *io, int error)
{
	unsigned long flags;
	int io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;

	/* Push-back supersedes any I/O errors */
	if (unlikely(error)) {
		spin_lock_irqsave(&io->endio_lock, flags);
		if (!(io->error > 0 && __noflush_suspending(md)))
			io->error = error;
		spin_unlock_irqrestore(&io->endio_lock, flags);
	}

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->error == DM_ENDIO_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md))
				bio_list_add_head(&md->deferred, io->bio);
			else
				/* noflush suspend was interrupted. */
				io->error = -EIO;
			spin_unlock_irqrestore(&md->deferred_lock, flags);
		}

		io_error = io->error;
		bio = io->bio;
		end_io_acct(io);
		free_io(md, io);

		if (io_error == DM_ENDIO_REQUEUE)
			return;

		if ((bio->bi_rw & REQ_FLUSH) && bio->bi_iter.bi_size) {
			/*
			 * Preflush done for flush with data, reissue
			 * without REQ_FLUSH.
			 */
			bio->bi_rw &= ~REQ_FLUSH;
			queue_io(md, bio);
		} else {
			/* done with normal IO or empty flush */
			trace_block_bio_complete(md->queue, bio, io_error);
			bio->bi_error = io_error;
			bio_endio(bio);
		}
	}
}

static void disable_write_same(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE SAME, disable it */
	limits->max_write_same_sectors = 0;
}

static void clone_endio(struct bio *bio)
{
	int error = bio->bi_error;
	int r = error;
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (endio) {
		r = endio(tio->ti, bio, error);
		if (r < 0 || r == DM_ENDIO_REQUEUE)
			/*
			 * error and requeue request are handled
			 * in dec_pending().
			 */
			error = r;
		else if (r == DM_ENDIO_INCOMPLETE)
			/* The target will handle the io */
			return;
		else if (r) {
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	if (unlikely(r == -EREMOTEIO && (bio->bi_rw & REQ_WRITE_SAME) &&
		     !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors))
		disable_write_same(md);

	free_tio(md, tio);
	dec_pending(io, error);
}

/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone)
{
	struct dm_rq_clone_bio_info *info =
		container_of(clone, struct dm_rq_clone_bio_info, clone);
	struct dm_rq_target_io *tio = info->tio;
	struct bio *bio = info->orig;
	unsigned int nr_bytes = info->orig->bi_iter.bi_size;

	bio_put(clone);

	if (tio->error)
		/*
		 * An error has already been detected on the request.
		 * Once an error has occurred, just let clone->end_io()
		 * handle the remainder.
		 */
		return;
	else if (bio->bi_error) {
		/*
		 * Don't notify the upper layer of the error yet.
		 * The error handling decision is made by the target driver,
		 * when the request is completed.
		 */
		tio->error = bio->bi_error;
		return;
	}

	/*
	 * I/O for the bio successfully completed.
	 * Notify the upper layer of the data completion.
	 */

	/*
	 * bios are processed from the head of the list.
	 * So the completing bio should always be rq->bio.
	 * If it's not, something wrong is happening.
	 */
	if (tio->orig->bio != bio)
		DMERR("bio completion is going in the middle of the request");

	/*
	 * Update the original request.
	 * Do not use blk_end_request() here, because it may complete
	 * the original request before the clone, and break the ordering.
	 */
	blk_update_request(tio->orig, 0, nr_bytes);
}

static struct dm_rq_target_io *tio_from_request(struct request *rq)
{
	return (rq->q->mq_ops ? blk_mq_rq_to_pdu(rq) : rq->special);
}

static void rq_end_stats(struct mapped_device *md, struct request *orig)
{
	if (unlikely(dm_stats_used(&md->stats))) {
		struct dm_rq_target_io *tio = tio_from_request(orig);
		tio->duration_jiffies = jiffies - tio->duration_jiffies;
		dm_stats_account_io(&md->stats, orig->cmd_flags, blk_rq_pos(orig),
				    tio->n_sectors, true, tio->duration_jiffies,
				    &tio->stats_aux);
	}
}

/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Or do dm_get() before calling this function and dm_put() later.
 */
static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
{
	atomic_dec(&md->pending[rw]);

	/* nudge anyone waiting on suspend queue */
	if (!md_in_flight(md))
		wake_up(&md->wait);

	/*
	 * Run this off this callpath, as drivers could invoke end_io while
	 * inside their request_fn (and holding the queue lock). Calling
	 * back into ->request_fn() could deadlock attempting to grab the
	 * queue lock again.
	 */
	if (run_queue) {
		if (md->queue->mq_ops)
			blk_mq_run_hw_queues(md->queue, true);
		else
			blk_run_queue_async(md->queue);
	}

	/*
	 * dm_put() must be at the end of this function. See the comment above
	 */
	dm_put(md);
}

static void free_rq_clone(struct request *clone)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;

	blk_rq_unprep_clone(clone);

	if (md->type == DM_TYPE_MQ_REQUEST_BASED)
		/* stacked on blk-mq queue(s) */
		tio->ti->type->release_clone_rq(clone);
	else if (!md->queue->mq_ops)
		/* request_fn queue stacked on request_fn queue(s) */
		free_clone_request(md, clone);
	/*
	 * NOTE: for the blk-mq queue stacked on request_fn queue(s) case:
	 * no need to call free_clone_request() because we leverage blk-mq by
	 * allocating the clone at the end of the blk-mq pdu (see: clone_rq)
	 */

	if (!md->queue->mq_ops)
		free_rq_tio(tio);
}

/*
 * Complete the clone and the original request.
 * Must be called without clone's queue lock held,
 * see end_clone_request() for more details.
 */
static void dm_end_request(struct request *clone, int error)
{
	int rw = rq_data_dir(clone);
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;

	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
		rq->errors = clone->errors;
		rq->resid_len = clone->resid_len;

		if (rq->sense)
			/*
			 * We are using the sense buffer of the original
			 * request.
			 * So setting the length of the sense data is enough.
			 */
			rq->sense_len = clone->sense_len;
	}

	free_rq_clone(clone);
	rq_end_stats(md, rq);
	if (!rq->q->mq_ops)
		blk_end_request_all(rq, error);
	else
		blk_mq_end_request(rq, error);
	rq_completed(md, rw, true);
}

static void dm_unprep_request(struct request *rq)
{
	struct dm_rq_target_io *tio = tio_from_request(rq);
	struct request *clone = tio->clone;

	if (!rq->q->mq_ops) {
		rq->special = NULL;
		rq->cmd_flags &= ~REQ_DONTPREP;
	}

	if (clone)
		free_rq_clone(clone);
}

/*
 * Requeue the original request of a clone.
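 * Handles both queueing models: blk_requeue_request() on the legacy
 * request_fn path, blk_mq_requeue_request() on the blk-mq path.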
1196cec47e3dSKiyoshi Ueda */ 1197bfebd1cdSMike Snitzer static void old_requeue_request(struct request *rq) 1198cec47e3dSKiyoshi Ueda { 1199cec47e3dSKiyoshi Ueda struct request_queue *q = rq->q; 1200cec47e3dSKiyoshi Ueda unsigned long flags; 1201cec47e3dSKiyoshi Ueda 1202cec47e3dSKiyoshi Ueda spin_lock_irqsave(q->queue_lock, flags); 1203cec47e3dSKiyoshi Ueda blk_requeue_request(q, rq); 12044ae9944dSJunichi Nomura blk_run_queue_async(q); 1205cec47e3dSKiyoshi Ueda spin_unlock_irqrestore(q->queue_lock, flags); 1206bfebd1cdSMike Snitzer } 1207bfebd1cdSMike Snitzer 12082d76fff1SMike Snitzer static void dm_requeue_original_request(struct mapped_device *md, 1209bfebd1cdSMike Snitzer struct request *rq) 1210bfebd1cdSMike Snitzer { 1211bfebd1cdSMike Snitzer int rw = rq_data_dir(rq); 1212bfebd1cdSMike Snitzer 1213bfebd1cdSMike Snitzer dm_unprep_request(rq); 1214bfebd1cdSMike Snitzer 1215e262f347SMikulas Patocka rq_end_stats(md, rq); 1216bfebd1cdSMike Snitzer if (!rq->q->mq_ops) 1217bfebd1cdSMike Snitzer old_requeue_request(rq); 1218bfebd1cdSMike Snitzer else { 1219bfebd1cdSMike Snitzer blk_mq_requeue_request(rq); 1220bfebd1cdSMike Snitzer blk_mq_kick_requeue_list(rq->q); 1221bfebd1cdSMike Snitzer } 1222cec47e3dSKiyoshi Ueda 1223466d89a6SKeith Busch rq_completed(md, rw, false); 1224cec47e3dSKiyoshi Ueda } 1225466d89a6SKeith Busch 1226bfebd1cdSMike Snitzer static void old_stop_queue(struct request_queue *q) 1227cec47e3dSKiyoshi Ueda { 1228bfebd1cdSMike Snitzer unsigned long flags; 1229bfebd1cdSMike Snitzer 1230bfebd1cdSMike Snitzer if (blk_queue_stopped(q)) 1231bfebd1cdSMike Snitzer return; 1232bfebd1cdSMike Snitzer 1233bfebd1cdSMike Snitzer spin_lock_irqsave(q->queue_lock, flags); 1234cec47e3dSKiyoshi Ueda blk_stop_queue(q); 1235bfebd1cdSMike Snitzer spin_unlock_irqrestore(q->queue_lock, flags); 1236cec47e3dSKiyoshi Ueda } 1237cec47e3dSKiyoshi Ueda 1238cec47e3dSKiyoshi Ueda static void stop_queue(struct request_queue *q) 1239cec47e3dSKiyoshi Ueda { 1240bfebd1cdSMike Snitzer if (!q->mq_ops) 1241bfebd1cdSMike Snitzer old_stop_queue(q); 1242bfebd1cdSMike Snitzer else 1243bfebd1cdSMike Snitzer blk_mq_stop_hw_queues(q); 1244bfebd1cdSMike Snitzer } 1245bfebd1cdSMike Snitzer 1246bfebd1cdSMike Snitzer static void old_start_queue(struct request_queue *q) 1247bfebd1cdSMike Snitzer { 1248cec47e3dSKiyoshi Ueda unsigned long flags; 1249cec47e3dSKiyoshi Ueda 1250cec47e3dSKiyoshi Ueda spin_lock_irqsave(q->queue_lock, flags); 1251cec47e3dSKiyoshi Ueda if (blk_queue_stopped(q)) 1252cec47e3dSKiyoshi Ueda blk_start_queue(q); 1253bfebd1cdSMike Snitzer spin_unlock_irqrestore(q->queue_lock, flags); 1254cec47e3dSKiyoshi Ueda } 1255cec47e3dSKiyoshi Ueda 1256cec47e3dSKiyoshi Ueda static void start_queue(struct request_queue *q) 1257cec47e3dSKiyoshi Ueda { 1258bfebd1cdSMike Snitzer if (!q->mq_ops) 1259bfebd1cdSMike Snitzer old_start_queue(q); 1260bfebd1cdSMike Snitzer else 1261bfebd1cdSMike Snitzer blk_mq_start_stopped_hw_queues(q, true); 1262cec47e3dSKiyoshi Ueda } 1263cec47e3dSKiyoshi Ueda 126411a68244SKiyoshi Ueda static void dm_done(struct request *clone, int error, bool mapped) 126511a68244SKiyoshi Ueda { 126611a68244SKiyoshi Ueda int r = error; 126711a68244SKiyoshi Ueda struct dm_rq_target_io *tio = clone->end_io_data; 1268ba1cbad9SMike Snitzer dm_request_endio_fn rq_end_io = NULL; 1269ba1cbad9SMike Snitzer 1270ba1cbad9SMike Snitzer if (tio->ti) { 1271ba1cbad9SMike Snitzer rq_end_io = tio->ti->type->rq_end_io; 127211a68244SKiyoshi Ueda 127311a68244SKiyoshi Ueda if (mapped && rq_end_io) 127411a68244SKiyoshi Ueda r = 
rq_end_io(tio->ti, clone, error, &tio->info); 1275ba1cbad9SMike Snitzer } 127611a68244SKiyoshi Ueda 12777eee4ae2SMike Snitzer if (unlikely(r == -EREMOTEIO && (clone->cmd_flags & REQ_WRITE_SAME) && 12787eee4ae2SMike Snitzer !clone->q->limits.max_write_same_sectors)) 12797eee4ae2SMike Snitzer disable_write_same(tio->md); 12807eee4ae2SMike Snitzer 128111a68244SKiyoshi Ueda if (r <= 0) 128211a68244SKiyoshi Ueda /* The target wants to complete the I/O */ 128311a68244SKiyoshi Ueda dm_end_request(clone, r); 128411a68244SKiyoshi Ueda else if (r == DM_ENDIO_INCOMPLETE) 128511a68244SKiyoshi Ueda /* The target will handle the I/O */ 128611a68244SKiyoshi Ueda return; 128711a68244SKiyoshi Ueda else if (r == DM_ENDIO_REQUEUE) 128811a68244SKiyoshi Ueda /* The target wants to requeue the I/O */ 12892d76fff1SMike Snitzer dm_requeue_original_request(tio->md, tio->orig); 129011a68244SKiyoshi Ueda else { 129111a68244SKiyoshi Ueda DMWARN("unimplemented target endio return value: %d", r); 129211a68244SKiyoshi Ueda BUG(); 129311a68244SKiyoshi Ueda } 129411a68244SKiyoshi Ueda } 129511a68244SKiyoshi Ueda 1296cec47e3dSKiyoshi Ueda /* 1297cec47e3dSKiyoshi Ueda * Request completion handler for request-based dm 1298cec47e3dSKiyoshi Ueda */ 1299cec47e3dSKiyoshi Ueda static void dm_softirq_done(struct request *rq) 1300cec47e3dSKiyoshi Ueda { 130111a68244SKiyoshi Ueda bool mapped = true; 1302bfebd1cdSMike Snitzer struct dm_rq_target_io *tio = tio_from_request(rq); 1303466d89a6SKeith Busch struct request *clone = tio->clone; 1304bfebd1cdSMike Snitzer int rw; 1305cec47e3dSKiyoshi Ueda 1306e5863d9aSMike Snitzer if (!clone) { 1307e262f347SMikulas Patocka rq_end_stats(tio->md, rq); 1308bfebd1cdSMike Snitzer rw = rq_data_dir(rq); 1309bfebd1cdSMike Snitzer if (!rq->q->mq_ops) { 1310e5863d9aSMike Snitzer blk_end_request_all(rq, tio->error); 1311bfebd1cdSMike Snitzer rq_completed(tio->md, rw, false); 1312e5863d9aSMike Snitzer free_rq_tio(tio); 1313bfebd1cdSMike Snitzer } else { 1314bfebd1cdSMike Snitzer blk_mq_end_request(rq, tio->error); 1315bfebd1cdSMike Snitzer rq_completed(tio->md, rw, false); 1316bfebd1cdSMike Snitzer } 1317e5863d9aSMike Snitzer return; 1318e5863d9aSMike Snitzer } 1319cec47e3dSKiyoshi Ueda 132011a68244SKiyoshi Ueda if (rq->cmd_flags & REQ_FAILED) 132111a68244SKiyoshi Ueda mapped = false; 1322cec47e3dSKiyoshi Ueda 132311a68244SKiyoshi Ueda dm_done(clone, tio->error, mapped); 1324cec47e3dSKiyoshi Ueda } 1325cec47e3dSKiyoshi Ueda 1326cec47e3dSKiyoshi Ueda /* 1327cec47e3dSKiyoshi Ueda * Complete the clone and the original request with the error status 1328cec47e3dSKiyoshi Ueda * through softirq context. 1329cec47e3dSKiyoshi Ueda */ 1330466d89a6SKeith Busch static void dm_complete_request(struct request *rq, int error) 1331cec47e3dSKiyoshi Ueda { 1332bfebd1cdSMike Snitzer struct dm_rq_target_io *tio = tio_from_request(rq); 1333cec47e3dSKiyoshi Ueda 1334cec47e3dSKiyoshi Ueda tio->error = error; 1335cec47e3dSKiyoshi Ueda blk_complete_request(rq); 1336cec47e3dSKiyoshi Ueda } 1337cec47e3dSKiyoshi Ueda 1338cec47e3dSKiyoshi Ueda /* 1339cec47e3dSKiyoshi Ueda * Complete the not-mapped clone and the original request with the error status 1340cec47e3dSKiyoshi Ueda * through softirq context. 1341cec47e3dSKiyoshi Ueda * Target's rq_end_io() function isn't called. 1342e5863d9aSMike Snitzer * This may be used when the target's map_rq() or clone_and_map_rq() functions fail. 
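 * A typical call site, as in map_request() below:
 *
 *	r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
 *	if (r < 0)
 *		dm_kill_unmapped_request(rq, r);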
1343cec47e3dSKiyoshi Ueda */ 1344466d89a6SKeith Busch static void dm_kill_unmapped_request(struct request *rq, int error) 1345cec47e3dSKiyoshi Ueda { 1346cec47e3dSKiyoshi Ueda rq->cmd_flags |= REQ_FAILED; 1347466d89a6SKeith Busch dm_complete_request(rq, error); 1348cec47e3dSKiyoshi Ueda } 1349cec47e3dSKiyoshi Ueda 1350cec47e3dSKiyoshi Ueda /* 1351bfebd1cdSMike Snitzer * Called with the clone's queue lock held (for non-blk-mq) 1352cec47e3dSKiyoshi Ueda */ 1353cec47e3dSKiyoshi Ueda static void end_clone_request(struct request *clone, int error) 1354cec47e3dSKiyoshi Ueda { 1355466d89a6SKeith Busch struct dm_rq_target_io *tio = clone->end_io_data; 1356466d89a6SKeith Busch 1357e5863d9aSMike Snitzer if (!clone->q->mq_ops) { 1358cec47e3dSKiyoshi Ueda /* 1359cec47e3dSKiyoshi Ueda * This just cleans up queue-level bookkeeping in the queue to 1360cec47e3dSKiyoshi Ueda * which the clone was dispatched. 1361e5863d9aSMike Snitzer * The clone is *not* actually freed here because it was allocated 1362e5863d9aSMike Snitzer * from dm's own mempool (REQ_ALLOCED isn't set). 1363cec47e3dSKiyoshi Ueda */ 1364cec47e3dSKiyoshi Ueda __blk_put_request(clone->q, clone); 1365e5863d9aSMike Snitzer } 1366cec47e3dSKiyoshi Ueda 1367cec47e3dSKiyoshi Ueda /* 1368cec47e3dSKiyoshi Ueda * Actual request completion is done in a softirq context which doesn't 1369466d89a6SKeith Busch * hold the clone's queue lock. Otherwise, deadlock could occur because: 1370cec47e3dSKiyoshi Ueda * - another request may be submitted by the upper-level driver 1371cec47e3dSKiyoshi Ueda * of the stack during the completion 1372cec47e3dSKiyoshi Ueda * - a submission that requires the queue lock may be made 1373466d89a6SKeith Busch * against this clone's queue 1374cec47e3dSKiyoshi Ueda */ 1375466d89a6SKeith Busch dm_complete_request(tio->orig, error); 1376cec47e3dSKiyoshi Ueda } 1377cec47e3dSKiyoshi Ueda 137856a67df7SMike Snitzer /* 137956a67df7SMike Snitzer * Return the maximum size of I/O possible at the supplied sector, up to the current 138056a67df7SMike Snitzer * target boundary. 138156a67df7SMike Snitzer */ 138256a67df7SMike Snitzer static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti) 13831da177e4SLinus Torvalds { 138456a67df7SMike Snitzer sector_t target_offset = dm_target_offset(ti, sector); 138556a67df7SMike Snitzer 138656a67df7SMike Snitzer return ti->len - target_offset; 138756a67df7SMike Snitzer } 138856a67df7SMike Snitzer 138956a67df7SMike Snitzer static sector_t max_io_len(sector_t sector, struct dm_target *ti) 139056a67df7SMike Snitzer { 139156a67df7SMike Snitzer sector_t len = max_io_len_target_boundary(sector, ti); 1392542f9038SMike Snitzer sector_t offset, max_len; 13931da177e4SLinus Torvalds 13941da177e4SLinus Torvalds /* 13951da177e4SLinus Torvalds * Does the target need to split even further?
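 * Worked example: with ti->max_io_len == 128 (a power of two) and a
 * target-relative offset of 200 sectors, max_len = 128 - (200 & 127) = 56,
 * so the I/O is capped to the 56 sectors remaining before the next
 * max_io_len boundary.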
13961da177e4SLinus Torvalds */ 1397542f9038SMike Snitzer if (ti->max_io_len) { 1398542f9038SMike Snitzer offset = dm_target_offset(ti, sector); 1399542f9038SMike Snitzer if (unlikely(ti->max_io_len & (ti->max_io_len - 1))) 1400542f9038SMike Snitzer max_len = sector_div(offset, ti->max_io_len); 1401542f9038SMike Snitzer else 1402542f9038SMike Snitzer max_len = offset & (ti->max_io_len - 1); 1403542f9038SMike Snitzer max_len = ti->max_io_len - max_len; 1404542f9038SMike Snitzer 1405542f9038SMike Snitzer if (len > max_len) 1406542f9038SMike Snitzer len = max_len; 14071da177e4SLinus Torvalds } 14081da177e4SLinus Torvalds 14091da177e4SLinus Torvalds return len; 14101da177e4SLinus Torvalds } 14111da177e4SLinus Torvalds 1412542f9038SMike Snitzer int dm_set_target_max_io_len(struct dm_target *ti, sector_t len) 1413542f9038SMike Snitzer { 1414542f9038SMike Snitzer if (len > UINT_MAX) { 1415542f9038SMike Snitzer DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)", 1416542f9038SMike Snitzer (unsigned long long)len, UINT_MAX); 1417542f9038SMike Snitzer ti->error = "Maximum size of target IO is too large"; 1418542f9038SMike Snitzer return -EINVAL; 1419542f9038SMike Snitzer } 1420542f9038SMike Snitzer 1421542f9038SMike Snitzer ti->max_io_len = (uint32_t) len; 1422542f9038SMike Snitzer 1423542f9038SMike Snitzer return 0; 1424542f9038SMike Snitzer } 1425542f9038SMike Snitzer EXPORT_SYMBOL_GPL(dm_set_target_max_io_len); 1426542f9038SMike Snitzer 14271dd40c3eSMikulas Patocka /* 14281dd40c3eSMikulas Patocka * A target may call dm_accept_partial_bio only from the map routine. It is 14291dd40c3eSMikulas Patocka * allowed for all bio types except REQ_FLUSH. 14301dd40c3eSMikulas Patocka * 14311dd40c3eSMikulas Patocka * dm_accept_partial_bio informs dm that the target wants to process only 14321dd40c3eSMikulas Patocka * a further n_sectors sectors of the bio, and that the rest of the data should be 14331dd40c3eSMikulas Patocka * sent in a subsequent bio. 14341dd40c3eSMikulas Patocka * 14351dd40c3eSMikulas Patocka * A diagram explaining the arithmetic: 14361dd40c3eSMikulas Patocka * +--------------------+---------------+-------+ 14371dd40c3eSMikulas Patocka * | 1 | 2 | 3 | 14381dd40c3eSMikulas Patocka * +--------------------+---------------+-------+ 14391dd40c3eSMikulas Patocka * 14401dd40c3eSMikulas Patocka * <-------------- *tio->len_ptr ---------------> 14411dd40c3eSMikulas Patocka * <------- bi_size -------> 14421dd40c3eSMikulas Patocka * <-- n_sectors --> 14431dd40c3eSMikulas Patocka * 14441dd40c3eSMikulas Patocka * Region 1 has already been iterated over with bio_advance or a similar function. 14451dd40c3eSMikulas Patocka * (it may be empty if the target doesn't use bio_advance) 14461dd40c3eSMikulas Patocka * Region 2 is the remaining bio size that the target wants to process. 14471dd40c3eSMikulas Patocka * (it may be empty if region 1 is non-empty, although there is no reason 14481dd40c3eSMikulas Patocka * to make it empty) 14491dd40c3eSMikulas Patocka * The target requires that region 3 be sent in the next bio. 14501dd40c3eSMikulas Patocka * 14511dd40c3eSMikulas Patocka * If the target wants to receive multiple copies of the bio (via num_*bios, etc), 14521dd40c3eSMikulas Patocka * the partially processed part (the sum of regions 1+2) must be the same for all 14531dd40c3eSMikulas Patocka * copies of the bio.
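 * A minimal usage sketch from a hypothetical target's map function (the
 * function name and the 8-sector limit are purely illustrative):
 *
 *	static int my_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		if (bio_sectors(bio) > 8)
 *			dm_accept_partial_bio(bio, 8);
 *		... remap bio->bi_bdev and bio->bi_iter.bi_sector here ...
 *		return DM_MAPIO_REMAPPED;
 *	}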
14541dd40c3eSMikulas Patocka */ 14551dd40c3eSMikulas Patocka void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors) 14561dd40c3eSMikulas Patocka { 14571dd40c3eSMikulas Patocka struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); 14581dd40c3eSMikulas Patocka unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT; 14591dd40c3eSMikulas Patocka BUG_ON(bio->bi_rw & REQ_FLUSH); 14601dd40c3eSMikulas Patocka BUG_ON(bi_size > *tio->len_ptr); 14611dd40c3eSMikulas Patocka BUG_ON(n_sectors > bi_size); 14621dd40c3eSMikulas Patocka *tio->len_ptr -= bi_size - n_sectors; 14631dd40c3eSMikulas Patocka bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT; 14641dd40c3eSMikulas Patocka } 14651dd40c3eSMikulas Patocka EXPORT_SYMBOL_GPL(dm_accept_partial_bio); 14661dd40c3eSMikulas Patocka 1467bd2a49b8SAlasdair G Kergon static void __map_bio(struct dm_target_io *tio) 14681da177e4SLinus Torvalds { 14691da177e4SLinus Torvalds int r; 14702056a782SJens Axboe sector_t sector; 14719faf400fSStefan Bader struct mapped_device *md; 1472dba14160SMikulas Patocka struct bio *clone = &tio->clone; 1473bd2a49b8SAlasdair G Kergon struct dm_target *ti = tio->ti; 14741da177e4SLinus Torvalds 14751da177e4SLinus Torvalds clone->bi_end_io = clone_endio; 14761da177e4SLinus Torvalds 14771da177e4SLinus Torvalds /* 14781da177e4SLinus Torvalds * Map the clone. If r == 0 we don't need to do 14791da177e4SLinus Torvalds * anything, the target has assumed ownership of 14801da177e4SLinus Torvalds * this io. 14811da177e4SLinus Torvalds */ 14821da177e4SLinus Torvalds atomic_inc(&tio->io->io_count); 14834f024f37SKent Overstreet sector = clone->bi_iter.bi_sector; 14847de3ee57SMikulas Patocka r = ti->type->map(ti, clone); 148545cbcd79SKiyoshi Ueda if (r == DM_MAPIO_REMAPPED) { 14861da177e4SLinus Torvalds /* the bio has been remapped so dispatch it */ 14872056a782SJens Axboe 1488d07335e5SMike Snitzer trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone, 148922a7c31aSAlan D. Brunelle tio->io->bio->bi_bdev->bd_dev, sector); 14902056a782SJens Axboe 14911da177e4SLinus Torvalds generic_make_request(clone); 14922e93ccc1SKiyoshi Ueda } else if (r < 0 || r == DM_MAPIO_REQUEUE) { 14932e93ccc1SKiyoshi Ueda /* error the io and bail out, or requeue it if needed */ 14949faf400fSStefan Bader md = tio->io->md; 14959faf400fSStefan Bader dec_pending(tio->io, r); 14969faf400fSStefan Bader free_tio(md, tio); 1497ab37844dSMikulas Patocka } else if (r != DM_MAPIO_SUBMITTED) { 149845cbcd79SKiyoshi Ueda DMWARN("unimplemented target map return value: %d", r); 149945cbcd79SKiyoshi Ueda BUG(); 15001da177e4SLinus Torvalds } 15011da177e4SLinus Torvalds } 15021da177e4SLinus Torvalds 15031da177e4SLinus Torvalds struct clone_info { 15041da177e4SLinus Torvalds struct mapped_device *md; 15051da177e4SLinus Torvalds struct dm_table *map; 15061da177e4SLinus Torvalds struct bio *bio; 15071da177e4SLinus Torvalds struct dm_io *io; 15081da177e4SLinus Torvalds sector_t sector; 1509e0d6609aSMikulas Patocka unsigned sector_count; 15101da177e4SLinus Torvalds }; 15111da177e4SLinus Torvalds 1512e0d6609aSMikulas Patocka static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len) 1513bd2a49b8SAlasdair G Kergon { 15144f024f37SKent Overstreet bio->bi_iter.bi_sector = sector; 15154f024f37SKent Overstreet bio->bi_iter.bi_size = to_bytes(len); 15161da177e4SLinus Torvalds } 15171da177e4SLinus Torvalds 15181da177e4SLinus Torvalds /* 15191da177e4SLinus Torvalds * Creates a bio that consists of range of complete bvecs. 
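 * Note that __bio_clone_fast() shares the original bio's bvec array rather
 * than copying it; the clone is then trimmed purely by advancing and
 * resizing its iterator.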
15201da177e4SLinus Torvalds */ 1521dba14160SMikulas Patocka static void clone_bio(struct dm_target_io *tio, struct bio *bio, 15221c3b13e6SKent Overstreet sector_t sector, unsigned len) 15231da177e4SLinus Torvalds { 1524dba14160SMikulas Patocka struct bio *clone = &tio->clone; 15251da177e4SLinus Torvalds 15261c3b13e6SKent Overstreet __bio_clone_fast(clone, bio); 15279c47008dSMartin K. Petersen 15281c3b13e6SKent Overstreet if (bio_integrity(bio)) 15291c3b13e6SKent Overstreet bio_integrity_clone(clone, bio, GFP_NOIO); 15301c3b13e6SKent Overstreet 15311c3b13e6SKent Overstreet bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector)); 15321c3b13e6SKent Overstreet clone->bi_iter.bi_size = to_bytes(len); 15331c3b13e6SKent Overstreet 15341c3b13e6SKent Overstreet if (bio_integrity(bio)) 15351c3b13e6SKent Overstreet bio_integrity_trim(clone, 0, len); 15361da177e4SLinus Torvalds } 15371da177e4SLinus Torvalds 15389015df24SAlasdair G Kergon static struct dm_target_io *alloc_tio(struct clone_info *ci, 153999778273SJunichi Nomura struct dm_target *ti, 154055a62eefSAlasdair G Kergon unsigned target_bio_nr) 1541f9ab94ceSMikulas Patocka { 1542dba14160SMikulas Patocka struct dm_target_io *tio; 1543dba14160SMikulas Patocka struct bio *clone; 1544dba14160SMikulas Patocka 154599778273SJunichi Nomura clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs); 1546dba14160SMikulas Patocka tio = container_of(clone, struct dm_target_io, clone); 1547f9ab94ceSMikulas Patocka 1548f9ab94ceSMikulas Patocka tio->io = ci->io; 1549f9ab94ceSMikulas Patocka tio->ti = ti; 155055a62eefSAlasdair G Kergon tio->target_bio_nr = target_bio_nr; 15519015df24SAlasdair G Kergon 15529015df24SAlasdair G Kergon return tio; 15539015df24SAlasdair G Kergon } 15549015df24SAlasdair G Kergon 155514fe594dSAlasdair G Kergon static void __clone_and_map_simple_bio(struct clone_info *ci, 155614fe594dSAlasdair G Kergon struct dm_target *ti, 15571dd40c3eSMikulas Patocka unsigned target_bio_nr, unsigned *len) 15589015df24SAlasdair G Kergon { 155999778273SJunichi Nomura struct dm_target_io *tio = alloc_tio(ci, ti, target_bio_nr); 1560dba14160SMikulas Patocka struct bio *clone = &tio->clone; 15619015df24SAlasdair G Kergon 15621dd40c3eSMikulas Patocka tio->len_ptr = len; 15631dd40c3eSMikulas Patocka 15641c3b13e6SKent Overstreet __bio_clone_fast(clone, ci->bio); 1565bd2a49b8SAlasdair G Kergon if (len) 15661dd40c3eSMikulas Patocka bio_setup_sector(clone, ci->sector, *len); 1567f9ab94ceSMikulas Patocka 1568bd2a49b8SAlasdair G Kergon __map_bio(tio); 1569f9ab94ceSMikulas Patocka } 1570f9ab94ceSMikulas Patocka 157114fe594dSAlasdair G Kergon static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti, 15721dd40c3eSMikulas Patocka unsigned num_bios, unsigned *len) 157306a426ceSMike Snitzer { 157455a62eefSAlasdair G Kergon unsigned target_bio_nr; 157506a426ceSMike Snitzer 157655a62eefSAlasdair G Kergon for (target_bio_nr = 0; target_bio_nr < num_bios; target_bio_nr++) 157714fe594dSAlasdair G Kergon __clone_and_map_simple_bio(ci, ti, target_bio_nr, len); 157806a426ceSMike Snitzer } 157906a426ceSMike Snitzer 158014fe594dSAlasdair G Kergon static int __send_empty_flush(struct clone_info *ci) 1581f9ab94ceSMikulas Patocka { 158206a426ceSMike Snitzer unsigned target_nr = 0; 1583f9ab94ceSMikulas Patocka struct dm_target *ti; 1584f9ab94ceSMikulas Patocka 1585b372d360SMike Snitzer BUG_ON(bio_has_data(ci->bio)); 1586f9ab94ceSMikulas Patocka while ((ti = dm_table_get_target(ci->map, target_nr++))) 15871dd40c3eSMikulas Patocka __send_duplicate_bios(ci, ti, 
ti->num_flush_bios, NULL); 1588f9ab94ceSMikulas Patocka 1589f9ab94ceSMikulas Patocka return 0; 1590f9ab94ceSMikulas Patocka } 1591f9ab94ceSMikulas Patocka 1592e4c93811SAlasdair G Kergon static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti, 15931dd40c3eSMikulas Patocka sector_t sector, unsigned *len) 15945ae89a87SMike Snitzer { 1595dba14160SMikulas Patocka struct bio *bio = ci->bio; 15965ae89a87SMike Snitzer struct dm_target_io *tio; 1597b0d8ed4dSAlasdair G Kergon unsigned target_bio_nr; 1598b0d8ed4dSAlasdair G Kergon unsigned num_target_bios = 1; 15995ae89a87SMike Snitzer 1600b0d8ed4dSAlasdair G Kergon /* 1601b0d8ed4dSAlasdair G Kergon * Does the target want to receive duplicate copies of the bio? 1602b0d8ed4dSAlasdair G Kergon */ 1603b0d8ed4dSAlasdair G Kergon if (bio_data_dir(bio) == WRITE && ti->num_write_bios) 1604b0d8ed4dSAlasdair G Kergon num_target_bios = ti->num_write_bios(ti, bio); 1605e4c93811SAlasdair G Kergon 1606b0d8ed4dSAlasdair G Kergon for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) { 160799778273SJunichi Nomura tio = alloc_tio(ci, ti, target_bio_nr); 16081dd40c3eSMikulas Patocka tio->len_ptr = len; 16091dd40c3eSMikulas Patocka clone_bio(tio, bio, sector, *len); 1610bd2a49b8SAlasdair G Kergon __map_bio(tio); 16115ae89a87SMike Snitzer } 1612b0d8ed4dSAlasdair G Kergon } 16135ae89a87SMike Snitzer 161455a62eefSAlasdair G Kergon typedef unsigned (*get_num_bios_fn)(struct dm_target *ti); 161523508a96SMike Snitzer 161655a62eefSAlasdair G Kergon static unsigned get_num_discard_bios(struct dm_target *ti) 161723508a96SMike Snitzer { 161855a62eefSAlasdair G Kergon return ti->num_discard_bios; 161923508a96SMike Snitzer } 162023508a96SMike Snitzer 162155a62eefSAlasdair G Kergon static unsigned get_num_write_same_bios(struct dm_target *ti) 162223508a96SMike Snitzer { 162355a62eefSAlasdair G Kergon return ti->num_write_same_bios; 162423508a96SMike Snitzer } 162523508a96SMike Snitzer 162623508a96SMike Snitzer typedef bool (*is_split_required_fn)(struct dm_target *ti); 162723508a96SMike Snitzer 162823508a96SMike Snitzer static bool is_split_required_for_discard(struct dm_target *ti) 162923508a96SMike Snitzer { 163055a62eefSAlasdair G Kergon return ti->split_discard_bios; 163123508a96SMike Snitzer } 163223508a96SMike Snitzer 163314fe594dSAlasdair G Kergon static int __send_changing_extent_only(struct clone_info *ci, 163455a62eefSAlasdair G Kergon get_num_bios_fn get_num_bios, 163523508a96SMike Snitzer is_split_required_fn is_split_required) 16365ae89a87SMike Snitzer { 16375ae89a87SMike Snitzer struct dm_target *ti; 1638e0d6609aSMikulas Patocka unsigned len; 163955a62eefSAlasdair G Kergon unsigned num_bios; 16405ae89a87SMike Snitzer 1641a79245b3SMike Snitzer do { 16425ae89a87SMike Snitzer ti = dm_table_find_target(ci->map, ci->sector); 16435ae89a87SMike Snitzer if (!dm_target_is_valid(ti)) 16445ae89a87SMike Snitzer return -EIO; 16455ae89a87SMike Snitzer 16465ae89a87SMike Snitzer /* 164723508a96SMike Snitzer * Even though the device advertised support for this type of 164823508a96SMike Snitzer * request, that does not mean every target supports it, and 1649936688d7SMike Snitzer * reconfiguration might also have changed that since the 16505ae89a87SMike Snitzer * check was performed. 16515ae89a87SMike Snitzer */ 165255a62eefSAlasdair G Kergon num_bios = get_num_bios ? 
get_num_bios(ti) : 0; 165355a62eefSAlasdair G Kergon if (!num_bios) 16545ae89a87SMike Snitzer return -EOPNOTSUPP; 16555ae89a87SMike Snitzer 165623508a96SMike Snitzer if (is_split_required && !is_split_required(ti)) 1657e0d6609aSMikulas Patocka len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti)); 16587acf0277SMikulas Patocka else 1659e0d6609aSMikulas Patocka len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti)); 16605ae89a87SMike Snitzer 16611dd40c3eSMikulas Patocka __send_duplicate_bios(ci, ti, num_bios, &len); 16625ae89a87SMike Snitzer 1663a79245b3SMike Snitzer ci->sector += len; 1664a79245b3SMike Snitzer } while (ci->sector_count -= len); 16655ae89a87SMike Snitzer 16665ae89a87SMike Snitzer return 0; 16675ae89a87SMike Snitzer } 16685ae89a87SMike Snitzer 166914fe594dSAlasdair G Kergon static int __send_discard(struct clone_info *ci) 167023508a96SMike Snitzer { 167114fe594dSAlasdair G Kergon return __send_changing_extent_only(ci, get_num_discard_bios, 167223508a96SMike Snitzer is_split_required_for_discard); 167323508a96SMike Snitzer } 167423508a96SMike Snitzer 167514fe594dSAlasdair G Kergon static int __send_write_same(struct clone_info *ci) 167623508a96SMike Snitzer { 167714fe594dSAlasdair G Kergon return __send_changing_extent_only(ci, get_num_write_same_bios, NULL); 167823508a96SMike Snitzer } 167923508a96SMike Snitzer 1680e4c93811SAlasdair G Kergon /* 1681e4c93811SAlasdair G Kergon * Select the correct strategy for processing a non-flush bio. 1682e4c93811SAlasdair G Kergon */ 1683e4c93811SAlasdair G Kergon static int __split_and_process_non_flush(struct clone_info *ci) 1684e4c93811SAlasdair G Kergon { 1685e4c93811SAlasdair G Kergon struct bio *bio = ci->bio; 1686e4c93811SAlasdair G Kergon struct dm_target *ti; 16871c3b13e6SKent Overstreet unsigned len; 1688e4c93811SAlasdair G Kergon 1689e4c93811SAlasdair G Kergon if (unlikely(bio->bi_rw & REQ_DISCARD)) 1690e4c93811SAlasdair G Kergon return __send_discard(ci); 1691e4c93811SAlasdair G Kergon else if (unlikely(bio->bi_rw & REQ_WRITE_SAME)) 1692e4c93811SAlasdair G Kergon return __send_write_same(ci); 1693e4c93811SAlasdair G Kergon 1694e4c93811SAlasdair G Kergon ti = dm_table_find_target(ci->map, ci->sector); 1695e4c93811SAlasdair G Kergon if (!dm_target_is_valid(ti)) 1696e4c93811SAlasdair G Kergon return -EIO; 1697e4c93811SAlasdair G Kergon 16981c3b13e6SKent Overstreet len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count); 1699e4c93811SAlasdair G Kergon 17001dd40c3eSMikulas Patocka __clone_and_map_data_bio(ci, ti, ci->sector, &len); 1701e4c93811SAlasdair G Kergon 1702e4c93811SAlasdair G Kergon ci->sector += len; 1703e4c93811SAlasdair G Kergon ci->sector_count -= len; 1704e4c93811SAlasdair G Kergon 1705e4c93811SAlasdair G Kergon return 0; 1706e4c93811SAlasdair G Kergon } 1707e4c93811SAlasdair G Kergon 1708e4c93811SAlasdair G Kergon /* 170914fe594dSAlasdair G Kergon * Entry point to split a bio into clones and submit them to the targets. 
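 * Flush bios (REQ_FLUSH) are turned into one empty flush per target via
 * __send_empty_flush(); all other bios are walked range by range with
 * __split_and_process_non_flush() until ci.sector_count is exhausted.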
17101da177e4SLinus Torvalds */ 171183d5e5b0SMikulas Patocka static void __split_and_process_bio(struct mapped_device *md, 171283d5e5b0SMikulas Patocka struct dm_table *map, struct bio *bio) 17131da177e4SLinus Torvalds { 17141da177e4SLinus Torvalds struct clone_info ci; 1715512875bdSJun'ichi Nomura int error = 0; 17161da177e4SLinus Torvalds 171783d5e5b0SMikulas Patocka if (unlikely(!map)) { 1718f0b9a450SMikulas Patocka bio_io_error(bio); 1719f0b9a450SMikulas Patocka return; 1720f0b9a450SMikulas Patocka } 1721692d0eb9SMikulas Patocka 172283d5e5b0SMikulas Patocka ci.map = map; 17231da177e4SLinus Torvalds ci.md = md; 17241da177e4SLinus Torvalds ci.io = alloc_io(md); 17251da177e4SLinus Torvalds ci.io->error = 0; 17261da177e4SLinus Torvalds atomic_set(&ci.io->io_count, 1); 17271da177e4SLinus Torvalds ci.io->bio = bio; 17281da177e4SLinus Torvalds ci.io->md = md; 1729f88fb981SKiyoshi Ueda spin_lock_init(&ci.io->endio_lock); 17304f024f37SKent Overstreet ci.sector = bio->bi_iter.bi_sector; 17311da177e4SLinus Torvalds 17323eaf840eSJun'ichi "Nick" Nomura start_io_acct(ci.io); 1733bd2a49b8SAlasdair G Kergon 1734b372d360SMike Snitzer if (bio->bi_rw & REQ_FLUSH) { 1735b372d360SMike Snitzer ci.bio = &ci.md->flush_bio; 1736b372d360SMike Snitzer ci.sector_count = 0; 173714fe594dSAlasdair G Kergon error = __send_empty_flush(&ci); 1738b372d360SMike Snitzer /* dec_pending submits any data associated with flush */ 1739b372d360SMike Snitzer } else { 17406a8736d1STejun Heo ci.bio = bio; 1741f6fccb12SMilan Broz ci.sector_count = bio_sectors(bio); 1742512875bdSJun'ichi Nomura while (ci.sector_count && !error) 174314fe594dSAlasdair G Kergon error = __split_and_process_non_flush(&ci); 1744d87f4c14STejun Heo } 17451da177e4SLinus Torvalds 17461da177e4SLinus Torvalds /* drop the extra reference count */ 1747512875bdSJun'ichi Nomura dec_pending(ci.io, error); 17489e4e5f87SMilan Broz } 17499e4e5f87SMilan Broz /*----------------------------------------------------------------- 17501da177e4SLinus Torvalds * CRUD END 17511da177e4SLinus Torvalds *---------------------------------------------------------------*/ 17521da177e4SLinus Torvalds 17531da177e4SLinus Torvalds /* 17541da177e4SLinus Torvalds * The request function that just remaps the bio built up by 17551da177e4SLinus Torvalds * dm_merge_bvec. 
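 * If the device is suspended, readahead bios are failed immediately and
 * all other bios are queued for the resume path via queue_io().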
17561da177e4SLinus Torvalds */ 1757ff36ab34SMike Snitzer static void dm_make_request(struct request_queue *q, struct bio *bio) 17581da177e4SLinus Torvalds { 175912f03a49SKevin Corry int rw = bio_data_dir(bio); 17601da177e4SLinus Torvalds struct mapped_device *md = q->queuedata; 176183d5e5b0SMikulas Patocka int srcu_idx; 176283d5e5b0SMikulas Patocka struct dm_table *map; 17631da177e4SLinus Torvalds 176483d5e5b0SMikulas Patocka map = dm_get_live_table(md, &srcu_idx); 17651da177e4SLinus Torvalds 176618c0b223SGu Zheng generic_start_io_acct(rw, bio_sectors(bio), &dm_disk(md)->part0); 176712f03a49SKevin Corry 17686a8736d1STejun Heo /* if we're suspended, we have to queue this io for later */ 17696a8736d1STejun Heo if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { 177083d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 17711da177e4SLinus Torvalds 17726a8736d1STejun Heo if (bio_rw(bio) != READA) 177392c63902SMikulas Patocka queue_io(md, bio); 17746a8736d1STejun Heo else 17756a8736d1STejun Heo bio_io_error(bio); 17765a7bbad2SChristoph Hellwig return; 17771da177e4SLinus Torvalds } 17781da177e4SLinus Torvalds 177983d5e5b0SMikulas Patocka __split_and_process_bio(md, map, bio); 178083d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 17815a7bbad2SChristoph Hellwig return; 1782cec47e3dSKiyoshi Ueda } 1783cec47e3dSKiyoshi Ueda 1784fd2ed4d2SMikulas Patocka int dm_request_based(struct mapped_device *md) 1785cec47e3dSKiyoshi Ueda { 1786cec47e3dSKiyoshi Ueda return blk_queue_stackable(md->queue); 1787cec47e3dSKiyoshi Ueda } 1788cec47e3dSKiyoshi Ueda 1789466d89a6SKeith Busch static void dm_dispatch_clone_request(struct request *clone, struct request *rq) 1790cec47e3dSKiyoshi Ueda { 1791cec47e3dSKiyoshi Ueda int r; 1792cec47e3dSKiyoshi Ueda 1793466d89a6SKeith Busch if (blk_queue_io_stat(clone->q)) 1794466d89a6SKeith Busch clone->cmd_flags |= REQ_IO_STAT; 1795cec47e3dSKiyoshi Ueda 1796466d89a6SKeith Busch clone->start_time = jiffies; 1797466d89a6SKeith Busch r = blk_insert_cloned_request(clone->q, clone); 1798cec47e3dSKiyoshi Ueda if (r) 1799466d89a6SKeith Busch /* must complete clone in terms of original request */ 1800cec47e3dSKiyoshi Ueda dm_complete_request(rq, r); 1801cec47e3dSKiyoshi Ueda } 1802cec47e3dSKiyoshi Ueda 180378d8e58aSMike Snitzer static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig, 180478d8e58aSMike Snitzer void *data) 1805cec47e3dSKiyoshi Ueda { 180678d8e58aSMike Snitzer struct dm_rq_target_io *tio = data; 180778d8e58aSMike Snitzer struct dm_rq_clone_bio_info *info = 180878d8e58aSMike Snitzer container_of(bio, struct dm_rq_clone_bio_info, clone); 180978d8e58aSMike Snitzer 181078d8e58aSMike Snitzer info->orig = bio_orig; 181178d8e58aSMike Snitzer info->tio = tio; 181278d8e58aSMike Snitzer bio->bi_end_io = end_clone_bio; 181378d8e58aSMike Snitzer 181478d8e58aSMike Snitzer return 0; 181578d8e58aSMike Snitzer } 181678d8e58aSMike Snitzer 181778d8e58aSMike Snitzer static int setup_clone(struct request *clone, struct request *rq, 181878d8e58aSMike Snitzer struct dm_rq_target_io *tio, gfp_t gfp_mask) 181978d8e58aSMike Snitzer { 182078d8e58aSMike Snitzer int r; 182178d8e58aSMike Snitzer 182278d8e58aSMike Snitzer r = blk_rq_prep_clone(clone, rq, tio->md->bs, gfp_mask, 182378d8e58aSMike Snitzer dm_rq_bio_constructor, tio); 182478d8e58aSMike Snitzer if (r) 182578d8e58aSMike Snitzer return r; 182678d8e58aSMike Snitzer 182778d8e58aSMike Snitzer clone->cmd = rq->cmd; 182878d8e58aSMike Snitzer clone->cmd_len = rq->cmd_len; 182978d8e58aSMike Snitzer clone->sense = 
rq->sense; 1830cec47e3dSKiyoshi Ueda clone->end_io = end_clone_request; 1831cec47e3dSKiyoshi Ueda clone->end_io_data = tio; 183278d8e58aSMike Snitzer 18331ae49ea2SMike Snitzer tio->clone = clone; 183478d8e58aSMike Snitzer 183578d8e58aSMike Snitzer return 0; 1836cec47e3dSKiyoshi Ueda } 1837cec47e3dSKiyoshi Ueda 18386facdaffSKiyoshi Ueda static struct request *clone_rq(struct request *rq, struct mapped_device *md, 18391ae49ea2SMike Snitzer struct dm_rq_target_io *tio, gfp_t gfp_mask) 18406facdaffSKiyoshi Ueda { 184102233342SMike Snitzer /* 184202233342SMike Snitzer * Do not allocate a clone if tio->clone was already set 184302233342SMike Snitzer * (see: dm_mq_queue_rq). 184402233342SMike Snitzer */ 184502233342SMike Snitzer bool alloc_clone = !tio->clone; 184602233342SMike Snitzer struct request *clone; 18471ae49ea2SMike Snitzer 184802233342SMike Snitzer if (alloc_clone) { 184902233342SMike Snitzer clone = alloc_clone_request(md, gfp_mask); 18501ae49ea2SMike Snitzer if (!clone) 18511ae49ea2SMike Snitzer return NULL; 185202233342SMike Snitzer } else 185302233342SMike Snitzer clone = tio->clone; 18541ae49ea2SMike Snitzer 18551ae49ea2SMike Snitzer blk_rq_init(NULL, clone); 185678d8e58aSMike Snitzer if (setup_clone(clone, rq, tio, gfp_mask)) { 185778d8e58aSMike Snitzer /* -ENOMEM */ 185878d8e58aSMike Snitzer if (alloc_clone) 185978d8e58aSMike Snitzer free_clone_request(md, clone); 186078d8e58aSMike Snitzer return NULL; 186178d8e58aSMike Snitzer } 18621ae49ea2SMike Snitzer 18631ae49ea2SMike Snitzer return clone; 18641ae49ea2SMike Snitzer } 18651ae49ea2SMike Snitzer 18662eb6e1e3SKeith Busch static void map_tio_request(struct kthread_work *work); 18672eb6e1e3SKeith Busch 1868bfebd1cdSMike Snitzer static void init_tio(struct dm_rq_target_io *tio, struct request *rq, 1869bfebd1cdSMike Snitzer struct mapped_device *md) 1870bfebd1cdSMike Snitzer { 1871bfebd1cdSMike Snitzer tio->md = md; 1872bfebd1cdSMike Snitzer tio->ti = NULL; 1873bfebd1cdSMike Snitzer tio->clone = NULL; 1874bfebd1cdSMike Snitzer tio->orig = rq; 1875bfebd1cdSMike Snitzer tio->error = 0; 1876bfebd1cdSMike Snitzer memset(&tio->info, 0, sizeof(tio->info)); 187702233342SMike Snitzer if (md->kworker_task) 1878bfebd1cdSMike Snitzer init_kthread_work(&tio->work, map_tio_request); 1879bfebd1cdSMike Snitzer } 1880bfebd1cdSMike Snitzer 1881466d89a6SKeith Busch static struct dm_rq_target_io *prep_tio(struct request *rq, 1882466d89a6SKeith Busch struct mapped_device *md, gfp_t gfp_mask) 18836facdaffSKiyoshi Ueda { 18846facdaffSKiyoshi Ueda struct dm_rq_target_io *tio; 1885e5863d9aSMike Snitzer int srcu_idx; 1886e5863d9aSMike Snitzer struct dm_table *table; 18876facdaffSKiyoshi Ueda 18886facdaffSKiyoshi Ueda tio = alloc_rq_tio(md, gfp_mask); 18896facdaffSKiyoshi Ueda if (!tio) 18906facdaffSKiyoshi Ueda return NULL; 18916facdaffSKiyoshi Ueda 1892bfebd1cdSMike Snitzer init_tio(tio, rq, md); 18936facdaffSKiyoshi Ueda 1894e5863d9aSMike Snitzer table = dm_get_live_table(md, &srcu_idx); 1895e5863d9aSMike Snitzer if (!dm_table_mq_request_based(table)) { 1896466d89a6SKeith Busch if (!clone_rq(rq, md, tio, gfp_mask)) { 1897e5863d9aSMike Snitzer dm_put_live_table(md, srcu_idx); 18986facdaffSKiyoshi Ueda free_rq_tio(tio); 18996facdaffSKiyoshi Ueda return NULL; 19006facdaffSKiyoshi Ueda } 1901e5863d9aSMike Snitzer } 1902e5863d9aSMike Snitzer dm_put_live_table(md, srcu_idx); 19036facdaffSKiyoshi Ueda 1904466d89a6SKeith Busch return tio; 19056facdaffSKiyoshi Ueda } 19066facdaffSKiyoshi Ueda 1907cec47e3dSKiyoshi Ueda /* 1908cec47e3dSKiyoshi Ueda * Called with 
the queue lock held. 1909cec47e3dSKiyoshi Ueda */ 1910cec47e3dSKiyoshi Ueda static int dm_prep_fn(struct request_queue *q, struct request *rq) 1911cec47e3dSKiyoshi Ueda { 1912cec47e3dSKiyoshi Ueda struct mapped_device *md = q->queuedata; 1913466d89a6SKeith Busch struct dm_rq_target_io *tio; 1914cec47e3dSKiyoshi Ueda 1915cec47e3dSKiyoshi Ueda if (unlikely(rq->special)) { 1916cec47e3dSKiyoshi Ueda DMWARN("Already has something in rq->special."); 1917cec47e3dSKiyoshi Ueda return BLKPREP_KILL; 1918cec47e3dSKiyoshi Ueda } 1919cec47e3dSKiyoshi Ueda 1920466d89a6SKeith Busch tio = prep_tio(rq, md, GFP_ATOMIC); 1921466d89a6SKeith Busch if (!tio) 1922cec47e3dSKiyoshi Ueda return BLKPREP_DEFER; 1923cec47e3dSKiyoshi Ueda 1924466d89a6SKeith Busch rq->special = tio; 1925cec47e3dSKiyoshi Ueda rq->cmd_flags |= REQ_DONTPREP; 1926cec47e3dSKiyoshi Ueda 1927cec47e3dSKiyoshi Ueda return BLKPREP_OK; 1928cec47e3dSKiyoshi Ueda } 1929cec47e3dSKiyoshi Ueda 19309eef87daSKiyoshi Ueda /* 19319eef87daSKiyoshi Ueda * Returns: 1932e5863d9aSMike Snitzer * 0 : the request has been processed 1933e5863d9aSMike Snitzer * DM_MAPIO_REQUEUE : the original request needs to be requeued 1934e5863d9aSMike Snitzer * < 0 : the request was completed due to failure 19359eef87daSKiyoshi Ueda */ 1936bfebd1cdSMike Snitzer static int map_request(struct dm_rq_target_io *tio, struct request *rq, 1937cec47e3dSKiyoshi Ueda struct mapped_device *md) 1938cec47e3dSKiyoshi Ueda { 1939e5863d9aSMike Snitzer int r; 1940bfebd1cdSMike Snitzer struct dm_target *ti = tio->ti; 1941e5863d9aSMike Snitzer struct request *clone = NULL; 1942cec47e3dSKiyoshi Ueda 1943e5863d9aSMike Snitzer if (tio->clone) { 1944e5863d9aSMike Snitzer clone = tio->clone; 1945cec47e3dSKiyoshi Ueda r = ti->type->map_rq(ti, clone, &tio->info); 1946e5863d9aSMike Snitzer } else { 1947e5863d9aSMike Snitzer r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone); 1948e5863d9aSMike Snitzer if (r < 0) { 1949e5863d9aSMike Snitzer /* The target wants to complete the I/O */ 1950e5863d9aSMike Snitzer dm_kill_unmapped_request(rq, r); 1951e5863d9aSMike Snitzer return r; 1952e5863d9aSMike Snitzer } 19533a140755SJunichi Nomura if (r != DM_MAPIO_REMAPPED) 19543a140755SJunichi Nomura return r; 195578d8e58aSMike Snitzer if (setup_clone(clone, rq, tio, GFP_ATOMIC)) { 195678d8e58aSMike Snitzer /* -ENOMEM */ 195778d8e58aSMike Snitzer ti->type->release_clone_rq(clone); 195878d8e58aSMike Snitzer return DM_MAPIO_REQUEUE; 195978d8e58aSMike Snitzer } 1960e5863d9aSMike Snitzer } 1961e5863d9aSMike Snitzer 1962cec47e3dSKiyoshi Ueda switch (r) { 1963cec47e3dSKiyoshi Ueda case DM_MAPIO_SUBMITTED: 1964cec47e3dSKiyoshi Ueda /* The target has taken the I/O to submit by itself later */ 1965cec47e3dSKiyoshi Ueda break; 1966cec47e3dSKiyoshi Ueda case DM_MAPIO_REMAPPED: 1967cec47e3dSKiyoshi Ueda /* The target has remapped the I/O so dispatch it */ 19686db4ccd6SJun'ichi Nomura trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)), 1969466d89a6SKeith Busch blk_rq_pos(rq)); 1970466d89a6SKeith Busch dm_dispatch_clone_request(clone, rq); 1971cec47e3dSKiyoshi Ueda break; 1972cec47e3dSKiyoshi Ueda case DM_MAPIO_REQUEUE: 1973cec47e3dSKiyoshi Ueda /* The target wants to requeue the I/O */ 19742d76fff1SMike Snitzer dm_requeue_original_request(md, tio->orig); 1975cec47e3dSKiyoshi Ueda break; 1976cec47e3dSKiyoshi Ueda default: 1977cec47e3dSKiyoshi Ueda if (r > 0) { 1978cec47e3dSKiyoshi Ueda DMWARN("unimplemented target map return value: %d", r); 1979cec47e3dSKiyoshi Ueda BUG(); 1980cec47e3dSKiyoshi Ueda } 
1981cec47e3dSKiyoshi Ueda 1982cec47e3dSKiyoshi Ueda /* The target wants to complete the I/O */ 1983466d89a6SKeith Busch dm_kill_unmapped_request(rq, r); 1984e5863d9aSMike Snitzer return r; 1985cec47e3dSKiyoshi Ueda } 19869eef87daSKiyoshi Ueda 1987e5863d9aSMike Snitzer return 0; 1988cec47e3dSKiyoshi Ueda } 1989cec47e3dSKiyoshi Ueda 19902eb6e1e3SKeith Busch static void map_tio_request(struct kthread_work *work) 1991ba1cbad9SMike Snitzer { 19922eb6e1e3SKeith Busch struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work); 1993e5863d9aSMike Snitzer struct request *rq = tio->orig; 1994e5863d9aSMike Snitzer struct mapped_device *md = tio->md; 1995ba1cbad9SMike Snitzer 1996bfebd1cdSMike Snitzer if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) 19972d76fff1SMike Snitzer dm_requeue_original_request(md, rq); 19982eb6e1e3SKeith Busch } 19992eb6e1e3SKeith Busch 2000466d89a6SKeith Busch static void dm_start_request(struct mapped_device *md, struct request *orig) 2001ba1cbad9SMike Snitzer { 2002bfebd1cdSMike Snitzer if (!orig->q->mq_ops) 2003ba1cbad9SMike Snitzer blk_start_request(orig); 2004bfebd1cdSMike Snitzer else 2005bfebd1cdSMike Snitzer blk_mq_start_request(orig); 2006466d89a6SKeith Busch atomic_inc(&md->pending[rq_data_dir(orig)]); 2007ba1cbad9SMike Snitzer 20080ce65797SMike Snitzer if (md->seq_rq_merge_deadline_usecs) { 2009de3ec86dSMike Snitzer md->last_rq_pos = rq_end_sector(orig); 2010de3ec86dSMike Snitzer md->last_rq_rw = rq_data_dir(orig); 20110ce65797SMike Snitzer md->last_rq_start_time = ktime_get(); 20120ce65797SMike Snitzer } 2013de3ec86dSMike Snitzer 2014e262f347SMikulas Patocka if (unlikely(dm_stats_used(&md->stats))) { 2015e262f347SMikulas Patocka struct dm_rq_target_io *tio = tio_from_request(orig); 2016e262f347SMikulas Patocka tio->duration_jiffies = jiffies; 2017e262f347SMikulas Patocka tio->n_sectors = blk_rq_sectors(orig); 2018e262f347SMikulas Patocka dm_stats_account_io(&md->stats, orig->cmd_flags, blk_rq_pos(orig), 2019e262f347SMikulas Patocka tio->n_sectors, false, 0, &tio->stats_aux); 2020e262f347SMikulas Patocka } 2021e262f347SMikulas Patocka 2022ba1cbad9SMike Snitzer /* 2023ba1cbad9SMike Snitzer * Hold the md reference here for the in-flight I/O. 2024ba1cbad9SMike Snitzer * We can't rely on the reference count by device opener, 2025ba1cbad9SMike Snitzer * because the device may be closed during the request completion 2026ba1cbad9SMike Snitzer * when all bios are completed. 2027ba1cbad9SMike Snitzer * See the comment in rq_completed() too. 
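 * The dm_get() below is paired with the dm_put() at the end of
 * rq_completed().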
2028ba1cbad9SMike Snitzer */ 2029ba1cbad9SMike Snitzer dm_get(md); 2030ba1cbad9SMike Snitzer } 2031ba1cbad9SMike Snitzer 20320ce65797SMike Snitzer #define MAX_SEQ_RQ_MERGE_DEADLINE_USECS 100000 20330ce65797SMike Snitzer 20340ce65797SMike Snitzer ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf) 20350ce65797SMike Snitzer { 20360ce65797SMike Snitzer return sprintf(buf, "%u\n", md->seq_rq_merge_deadline_usecs); 20370ce65797SMike Snitzer } 20380ce65797SMike Snitzer 20390ce65797SMike Snitzer ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md, 20400ce65797SMike Snitzer const char *buf, size_t count) 20410ce65797SMike Snitzer { 20420ce65797SMike Snitzer unsigned deadline; 20430ce65797SMike Snitzer 204417e149b8SMike Snitzer if (!dm_request_based(md) || md->use_blk_mq) 20450ce65797SMike Snitzer return count; 20460ce65797SMike Snitzer 20470ce65797SMike Snitzer if (kstrtouint(buf, 10, &deadline)) 20480ce65797SMike Snitzer return -EINVAL; 20490ce65797SMike Snitzer 20500ce65797SMike Snitzer if (deadline > MAX_SEQ_RQ_MERGE_DEADLINE_USECS) 20510ce65797SMike Snitzer deadline = MAX_SEQ_RQ_MERGE_DEADLINE_USECS; 20520ce65797SMike Snitzer 20530ce65797SMike Snitzer md->seq_rq_merge_deadline_usecs = deadline; 20540ce65797SMike Snitzer 20550ce65797SMike Snitzer return count; 20560ce65797SMike Snitzer } 20570ce65797SMike Snitzer 20580ce65797SMike Snitzer static bool dm_request_peeked_before_merge_deadline(struct mapped_device *md) 20590ce65797SMike Snitzer { 20600ce65797SMike Snitzer ktime_t kt_deadline; 20610ce65797SMike Snitzer 20620ce65797SMike Snitzer if (!md->seq_rq_merge_deadline_usecs) 20630ce65797SMike Snitzer return false; 20640ce65797SMike Snitzer 20650ce65797SMike Snitzer kt_deadline = ns_to_ktime((u64)md->seq_rq_merge_deadline_usecs * NSEC_PER_USEC); 20660ce65797SMike Snitzer kt_deadline = ktime_add_safe(md->last_rq_start_time, kt_deadline); 20670ce65797SMike Snitzer 20680ce65797SMike Snitzer return !ktime_after(ktime_get(), kt_deadline); 20690ce65797SMike Snitzer } 20700ce65797SMike Snitzer 2071cec47e3dSKiyoshi Ueda /* 2072cec47e3dSKiyoshi Ueda * q->request_fn for request-based dm. 2073cec47e3dSKiyoshi Ueda * Called with the queue lock held. 2074cec47e3dSKiyoshi Ueda */ 2075cec47e3dSKiyoshi Ueda static void dm_request_fn(struct request_queue *q) 2076cec47e3dSKiyoshi Ueda { 2077cec47e3dSKiyoshi Ueda struct mapped_device *md = q->queuedata; 207883d5e5b0SMikulas Patocka int srcu_idx; 207983d5e5b0SMikulas Patocka struct dm_table *map = dm_get_live_table(md, &srcu_idx); 2080cec47e3dSKiyoshi Ueda struct dm_target *ti; 2081466d89a6SKeith Busch struct request *rq; 20822eb6e1e3SKeith Busch struct dm_rq_target_io *tio; 208329e4013dSTejun Heo sector_t pos; 2084cec47e3dSKiyoshi Ueda 2085cec47e3dSKiyoshi Ueda /* 2086b4324feeSKiyoshi Ueda * For suspend, check blk_queue_stopped() and increment 2087b4324feeSKiyoshi Ueda * ->pending within a single queue_lock critical section, so that 2088b4324feeSKiyoshi Ueda * the number of in-flight I/Os cannot rise after the queue is stopped in 2089b4324feeSKiyoshi Ueda * dm_suspend().
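 * Each iteration peeks a request, validates the target, and hands the
 * actual mapping off to the kthread worker (map_tio_request).  Sequential
 * I/O may briefly be left on the queue, per
 * dm_request_peeked_before_merge_deadline() above, to give it a chance to
 * merge; the deadline is configured in microseconds (capped at 100000) via
 * the rq_based_seq_io_merge_deadline attribute, which is wired up to sysfs
 * outside this file.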
2090cec47e3dSKiyoshi Ueda */ 20917eaceaccSJens Axboe while (!blk_queue_stopped(q)) { 2092cec47e3dSKiyoshi Ueda rq = blk_peek_request(q); 2093cec47e3dSKiyoshi Ueda if (!rq) 20949d1deb83SMike Snitzer goto out; 2095cec47e3dSKiyoshi Ueda 209629e4013dSTejun Heo /* always use block 0 to find the target for flushes for now */ 209729e4013dSTejun Heo pos = 0; 209829e4013dSTejun Heo if (!(rq->cmd_flags & REQ_FLUSH)) 209929e4013dSTejun Heo pos = blk_rq_pos(rq); 2100d0bcb878SKiyoshi Ueda 210129e4013dSTejun Heo ti = dm_table_find_target(map, pos); 2102ba1cbad9SMike Snitzer if (!dm_target_is_valid(ti)) { 2103ba1cbad9SMike Snitzer /* 2104466d89a6SKeith Busch * Must perform the setup that rq_completed() requires 2105ba1cbad9SMike Snitzer * before calling dm_kill_unmapped_request 2106ba1cbad9SMike Snitzer */ 2107ba1cbad9SMike Snitzer DMERR_LIMIT("request attempted access beyond the end of device"); 2108466d89a6SKeith Busch dm_start_request(md, rq); 2109466d89a6SKeith Busch dm_kill_unmapped_request(rq, -EIO); 2110ba1cbad9SMike Snitzer continue; 2111ba1cbad9SMike Snitzer } 211229e4013dSTejun Heo 21130ce65797SMike Snitzer if (dm_request_peeked_before_merge_deadline(md) && 21140ce65797SMike Snitzer md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 && 2115de3ec86dSMike Snitzer md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq)) 2116de3ec86dSMike Snitzer goto delay_and_out; 2117de3ec86dSMike Snitzer 2118cec47e3dSKiyoshi Ueda if (ti->type->busy && ti->type->busy(ti)) 21197eaceaccSJens Axboe goto delay_and_out; 2120cec47e3dSKiyoshi Ueda 2121466d89a6SKeith Busch dm_start_request(md, rq); 2122b4324feeSKiyoshi Ueda 2123bfebd1cdSMike Snitzer tio = tio_from_request(rq); 21242eb6e1e3SKeith Busch /* Establish tio->ti before queuing work (map_tio_request) */ 21252eb6e1e3SKeith Busch tio->ti = ti; 21262eb6e1e3SKeith Busch queue_kthread_work(&md->kworker, &tio->work); 2127052189a2SKiyoshi Ueda BUG_ON(!irqs_disabled()); 2128cec47e3dSKiyoshi Ueda } 2129cec47e3dSKiyoshi Ueda 2130cec47e3dSKiyoshi Ueda goto out; 2131cec47e3dSKiyoshi Ueda 21327eaceaccSJens Axboe delay_and_out: 2133d548b34bSMike Snitzer blk_delay_queue(q, HZ / 100); 2134cec47e3dSKiyoshi Ueda out: 213583d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 2136cec47e3dSKiyoshi Ueda } 2137cec47e3dSKiyoshi Ueda 21381da177e4SLinus Torvalds static int dm_any_congested(void *congested_data, int bdi_bits) 21391da177e4SLinus Torvalds { 21408a57dfc6SChandra Seetharaman int r = bdi_bits; 21418a57dfc6SChandra Seetharaman struct mapped_device *md = congested_data; 21428a57dfc6SChandra Seetharaman struct dm_table *map; 21431da177e4SLinus Torvalds 21441eb787ecSAlasdair G Kergon if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 214583d5e5b0SMikulas Patocka map = dm_get_live_table_fast(md); 21468a57dfc6SChandra Seetharaman if (map) { 2147cec47e3dSKiyoshi Ueda /* 2148cec47e3dSKiyoshi Ueda * Request-based dm cares only about its own queue when 2149cec47e3dSKiyoshi Ueda * queried for the congestion status of the request_queue 2150cec47e3dSKiyoshi Ueda */ 2151cec47e3dSKiyoshi Ueda if (dm_request_based(md)) 21524452226eSTejun Heo r = md->queue->backing_dev_info.wb.state & 2153cec47e3dSKiyoshi Ueda bdi_bits; 2154cec47e3dSKiyoshi Ueda else 21551da177e4SLinus Torvalds r = dm_table_any_congested(map, bdi_bits); 21568a57dfc6SChandra Seetharaman } 215783d5e5b0SMikulas Patocka dm_put_live_table_fast(md); 21588a57dfc6SChandra Seetharaman } 21598a57dfc6SChandra Seetharaman 21601da177e4SLinus Torvalds return r; 21611da177e4SLinus Torvalds } 21621da177e4SLinus Torvalds 21631da177e4SLinus 
Torvalds /*----------------------------------------------------------------- 21641da177e4SLinus Torvalds * An IDR is used to keep track of allocated minor numbers. 21651da177e4SLinus Torvalds *---------------------------------------------------------------*/ 21662b06cfffSAlasdair G Kergon static void free_minor(int minor) 21671da177e4SLinus Torvalds { 2168f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 21691da177e4SLinus Torvalds idr_remove(&_minor_idr, minor); 2170f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 21711da177e4SLinus Torvalds } 21721da177e4SLinus Torvalds 21731da177e4SLinus Torvalds /* 21741da177e4SLinus Torvalds * See if the device with a specific minor # is free. 21751da177e4SLinus Torvalds */ 2176cf13ab8eSFrederik Deweerdt static int specific_minor(int minor) 21771da177e4SLinus Torvalds { 2178c9d76be6STejun Heo int r; 21791da177e4SLinus Torvalds 21801da177e4SLinus Torvalds if (minor >= (1 << MINORBITS)) 21811da177e4SLinus Torvalds return -EINVAL; 21821da177e4SLinus Torvalds 2183c9d76be6STejun Heo idr_preload(GFP_KERNEL); 2184f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 21851da177e4SLinus Torvalds 2186c9d76be6STejun Heo r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT); 21871da177e4SLinus Torvalds 2188f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 2189c9d76be6STejun Heo idr_preload_end(); 2190c9d76be6STejun Heo if (r < 0) 2191c9d76be6STejun Heo return r == -ENOSPC ? -EBUSY : r; 2192c9d76be6STejun Heo return 0; 21931da177e4SLinus Torvalds } 21941da177e4SLinus Torvalds 2195cf13ab8eSFrederik Deweerdt static int next_free_minor(int *minor) 21961da177e4SLinus Torvalds { 2197c9d76be6STejun Heo int r; 21981da177e4SLinus Torvalds 2199c9d76be6STejun Heo idr_preload(GFP_KERNEL); 2200f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 22011da177e4SLinus Torvalds 2202c9d76be6STejun Heo r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT); 22031da177e4SLinus Torvalds 2204f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 2205c9d76be6STejun Heo idr_preload_end(); 2206c9d76be6STejun Heo if (r < 0) 22071da177e4SLinus Torvalds return r; 2208c9d76be6STejun Heo *minor = r; 2209c9d76be6STejun Heo return 0; 22101da177e4SLinus Torvalds } 22111da177e4SLinus Torvalds 221283d5cde4SAlexey Dobriyan static const struct block_device_operations dm_blk_dops; 22131da177e4SLinus Torvalds 221453d5914fSMikulas Patocka static void dm_wq_work(struct work_struct *work); 221553d5914fSMikulas Patocka 22164a0b4ddfSMike Snitzer static void dm_init_md_queue(struct mapped_device *md) 22174a0b4ddfSMike Snitzer { 22184a0b4ddfSMike Snitzer /* 22194a0b4ddfSMike Snitzer * Request-based dm devices cannot be stacked on top of bio-based dm 2220bfebd1cdSMike Snitzer * devices. The type of this dm device may not have been decided yet. 22214a0b4ddfSMike Snitzer * The type is decided at the first table loading time. 22224a0b4ddfSMike Snitzer * To prevent problematic device stacking, clear the queue flag 22234a0b4ddfSMike Snitzer * for request stacking support until then. 22244a0b4ddfSMike Snitzer * 22254a0b4ddfSMike Snitzer * This queue is new, so no concurrency on the queue_flags. 
22264a0b4ddfSMike Snitzer */ 22274a0b4ddfSMike Snitzer queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue); 2228ad5f498fSMikulas Patocka 2229ad5f498fSMikulas Patocka /* 2230ad5f498fSMikulas Patocka * Initialize data that will only be used by a non-blk-mq DM queue 2231ad5f498fSMikulas Patocka * - must do so here (in alloc_dev callchain) before queue is used 2232ad5f498fSMikulas Patocka */ 2233ad5f498fSMikulas Patocka md->queue->queuedata = md; 2234ad5f498fSMikulas Patocka md->queue->backing_dev_info.congested_data = md; 2235bfebd1cdSMike Snitzer } 22364a0b4ddfSMike Snitzer 2237bfebd1cdSMike Snitzer static void dm_init_old_md_queue(struct mapped_device *md) 2238bfebd1cdSMike Snitzer { 223917e149b8SMike Snitzer md->use_blk_mq = false; 2240bfebd1cdSMike Snitzer dm_init_md_queue(md); 2241bfebd1cdSMike Snitzer 2242bfebd1cdSMike Snitzer /* 2243bfebd1cdSMike Snitzer * Initialize aspects of queue that aren't relevant for blk-mq 2244bfebd1cdSMike Snitzer */ 22454a0b4ddfSMike Snitzer md->queue->backing_dev_info.congested_fn = dm_any_congested; 22464a0b4ddfSMike Snitzer blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY); 22474a0b4ddfSMike Snitzer } 22484a0b4ddfSMike Snitzer 22490f20972fSMike Snitzer static void cleanup_mapped_device(struct mapped_device *md) 22500f20972fSMike Snitzer { 22510f20972fSMike Snitzer if (md->wq) 22520f20972fSMike Snitzer destroy_workqueue(md->wq); 22530f20972fSMike Snitzer if (md->kworker_task) 22540f20972fSMike Snitzer kthread_stop(md->kworker_task); 22550f20972fSMike Snitzer mempool_destroy(md->io_pool); 22560f20972fSMike Snitzer mempool_destroy(md->rq_pool); 22570f20972fSMike Snitzer if (md->bs) 22580f20972fSMike Snitzer bioset_free(md->bs); 22590f20972fSMike Snitzer 2260b06075a9SMikulas Patocka cleanup_srcu_struct(&md->io_barrier); 2261b06075a9SMikulas Patocka 22620f20972fSMike Snitzer if (md->disk) { 22630f20972fSMike Snitzer spin_lock(&_minor_lock); 22640f20972fSMike Snitzer md->disk->private_data = NULL; 22650f20972fSMike Snitzer spin_unlock(&_minor_lock); 22660f20972fSMike Snitzer if (blk_get_integrity(md->disk)) 22670f20972fSMike Snitzer blk_integrity_unregister(md->disk); 22680f20972fSMike Snitzer del_gendisk(md->disk); 22690f20972fSMike Snitzer put_disk(md->disk); 22700f20972fSMike Snitzer } 22710f20972fSMike Snitzer 22720f20972fSMike Snitzer if (md->queue) 22730f20972fSMike Snitzer blk_cleanup_queue(md->queue); 22740f20972fSMike Snitzer 22750f20972fSMike Snitzer if (md->bdev) { 22760f20972fSMike Snitzer bdput(md->bdev); 22770f20972fSMike Snitzer md->bdev = NULL; 22780f20972fSMike Snitzer } 22790f20972fSMike Snitzer } 22800f20972fSMike Snitzer 22811da177e4SLinus Torvalds /* 22821da177e4SLinus Torvalds * Allocate and initialise a blank device with a given minor. 
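 * On failure, the bad* labels below unwind in reverse order of
 * construction: cleanup_mapped_device() tears down whatever was set up,
 * then the minor number and the module reference are released.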
22831da177e4SLinus Torvalds */ 22842b06cfffSAlasdair G Kergon static struct mapped_device *alloc_dev(int minor) 22851da177e4SLinus Torvalds { 22861da177e4SLinus Torvalds int r; 2287cf13ab8eSFrederik Deweerdt struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL); 2288ba61fdd1SJeff Mahoney void *old_md; 22891da177e4SLinus Torvalds 22901da177e4SLinus Torvalds if (!md) { 22911da177e4SLinus Torvalds DMWARN("unable to allocate device, out of memory."); 22921da177e4SLinus Torvalds return NULL; 22931da177e4SLinus Torvalds } 22941da177e4SLinus Torvalds 229510da4f79SJeff Mahoney if (!try_module_get(THIS_MODULE)) 22966ed7ade8SMilan Broz goto bad_module_get; 229710da4f79SJeff Mahoney 22981da177e4SLinus Torvalds /* get a minor number for the dev */ 22992b06cfffSAlasdair G Kergon if (minor == DM_ANY_MINOR) 2300cf13ab8eSFrederik Deweerdt r = next_free_minor(&minor); 23012b06cfffSAlasdair G Kergon else 2302cf13ab8eSFrederik Deweerdt r = specific_minor(minor); 23031da177e4SLinus Torvalds if (r < 0) 23046ed7ade8SMilan Broz goto bad_minor; 23051da177e4SLinus Torvalds 230683d5e5b0SMikulas Patocka r = init_srcu_struct(&md->io_barrier); 230783d5e5b0SMikulas Patocka if (r < 0) 230883d5e5b0SMikulas Patocka goto bad_io_barrier; 230983d5e5b0SMikulas Patocka 231017e149b8SMike Snitzer md->use_blk_mq = use_blk_mq; 2311a5664dadSMike Snitzer md->type = DM_TYPE_NONE; 2312e61290a4SDaniel Walker mutex_init(&md->suspend_lock); 2313a5664dadSMike Snitzer mutex_init(&md->type_lock); 231486f1152bSBenjamin Marzinski mutex_init(&md->table_devices_lock); 2315022c2611SMikulas Patocka spin_lock_init(&md->deferred_lock); 23161da177e4SLinus Torvalds atomic_set(&md->holders, 1); 23175c6bd75dSAlasdair G Kergon atomic_set(&md->open_count, 0); 23181da177e4SLinus Torvalds atomic_set(&md->event_nr, 0); 23197a8c3d3bSMike Anderson atomic_set(&md->uevent_seq, 0); 23207a8c3d3bSMike Anderson INIT_LIST_HEAD(&md->uevent_list); 232186f1152bSBenjamin Marzinski INIT_LIST_HEAD(&md->table_devices); 23227a8c3d3bSMike Anderson spin_lock_init(&md->uevent_lock); 23231da177e4SLinus Torvalds 23244a0b4ddfSMike Snitzer md->queue = blk_alloc_queue(GFP_KERNEL); 23251da177e4SLinus Torvalds if (!md->queue) 23260f20972fSMike Snitzer goto bad; 23271da177e4SLinus Torvalds 23284a0b4ddfSMike Snitzer dm_init_md_queue(md); 23299faf400fSStefan Bader 23301da177e4SLinus Torvalds md->disk = alloc_disk(1); 23311da177e4SLinus Torvalds if (!md->disk) 23320f20972fSMike Snitzer goto bad; 23331da177e4SLinus Torvalds 2334316d315bSNikanth Karthikesan atomic_set(&md->pending[0], 0); 2335316d315bSNikanth Karthikesan atomic_set(&md->pending[1], 0); 2336f0b04115SJeff Mahoney init_waitqueue_head(&md->wait); 233753d5914fSMikulas Patocka INIT_WORK(&md->work, dm_wq_work); 2338f0b04115SJeff Mahoney init_waitqueue_head(&md->eventq); 23392995fa78SMikulas Patocka init_completion(&md->kobj_holder.completion); 23402eb6e1e3SKeith Busch md->kworker_task = NULL; 2341f0b04115SJeff Mahoney 23421da177e4SLinus Torvalds md->disk->major = _major; 23431da177e4SLinus Torvalds md->disk->first_minor = minor; 23441da177e4SLinus Torvalds md->disk->fops = &dm_blk_dops; 23451da177e4SLinus Torvalds md->disk->queue = md->queue; 23461da177e4SLinus Torvalds md->disk->private_data = md; 23471da177e4SLinus Torvalds sprintf(md->disk->disk_name, "dm-%d", minor); 23481da177e4SLinus Torvalds add_disk(md->disk); 23497e51f257SMike Anderson format_dev_t(md->name, MKDEV(_major, minor)); 23501da177e4SLinus Torvalds 2351670368a8STejun Heo md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0); 2352304f3f6aSMilan Broz if 
(!md->wq) 23530f20972fSMike Snitzer goto bad; 2354304f3f6aSMilan Broz 235532a926daSMikulas Patocka md->bdev = bdget_disk(md->disk, 0); 235632a926daSMikulas Patocka if (!md->bdev) 23570f20972fSMike Snitzer goto bad; 235832a926daSMikulas Patocka 23596a8736d1STejun Heo bio_init(&md->flush_bio); 23606a8736d1STejun Heo md->flush_bio.bi_bdev = md->bdev; 23616a8736d1STejun Heo md->flush_bio.bi_rw = WRITE_FLUSH; 23626a8736d1STejun Heo 2363fd2ed4d2SMikulas Patocka dm_stats_init(&md->stats); 2364fd2ed4d2SMikulas Patocka 2365ba61fdd1SJeff Mahoney /* Populate the mapping, nobody knows we exist yet */ 2366f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 2367ba61fdd1SJeff Mahoney old_md = idr_replace(&_minor_idr, md, minor); 2368f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 2369ba61fdd1SJeff Mahoney 2370ba61fdd1SJeff Mahoney BUG_ON(old_md != MINOR_ALLOCED); 2371ba61fdd1SJeff Mahoney 23721da177e4SLinus Torvalds return md; 23731da177e4SLinus Torvalds 23740f20972fSMike Snitzer bad: 23750f20972fSMike Snitzer cleanup_mapped_device(md); 237683d5e5b0SMikulas Patocka bad_io_barrier: 23771da177e4SLinus Torvalds free_minor(minor); 23786ed7ade8SMilan Broz bad_minor: 237910da4f79SJeff Mahoney module_put(THIS_MODULE); 23806ed7ade8SMilan Broz bad_module_get: 23811da177e4SLinus Torvalds kfree(md); 23821da177e4SLinus Torvalds return NULL; 23831da177e4SLinus Torvalds } 23841da177e4SLinus Torvalds 2385ae9da83fSJun'ichi Nomura static void unlock_fs(struct mapped_device *md); 2386ae9da83fSJun'ichi Nomura 23871da177e4SLinus Torvalds static void free_dev(struct mapped_device *md) 23881da177e4SLinus Torvalds { 2389f331c029STejun Heo int minor = MINOR(disk_devt(md->disk)); 239063d94e48SJun'ichi Nomura 2391ae9da83fSJun'ichi Nomura unlock_fs(md); 23922eb6e1e3SKeith Busch 23930f20972fSMike Snitzer cleanup_mapped_device(md); 239417e149b8SMike Snitzer if (md->use_blk_mq) 2395bfebd1cdSMike Snitzer blk_mq_free_tag_set(&md->tag_set); 23960f20972fSMike Snitzer 23970f20972fSMike Snitzer free_table_devices(&md->table_devices); 23980f20972fSMike Snitzer dm_stats_cleanup(&md->stats); 239963a4f065SMike Snitzer free_minor(minor); 240063a4f065SMike Snitzer 240110da4f79SJeff Mahoney module_put(THIS_MODULE); 24021da177e4SLinus Torvalds kfree(md); 24031da177e4SLinus Torvalds } 24041da177e4SLinus Torvalds 2405e6ee8c0bSKiyoshi Ueda static void __bind_mempools(struct mapped_device *md, struct dm_table *t) 2406e6ee8c0bSKiyoshi Ueda { 2407c0820cf5SMikulas Patocka struct dm_md_mempools *p = dm_table_get_md_mempools(t); 2408e6ee8c0bSKiyoshi Ueda 24094e6e36c3SMike Snitzer if (md->bs) { 24104e6e36c3SMike Snitzer /* The md already has necessary mempools. */ 24114e6e36c3SMike Snitzer if (dm_table_get_type(t) == DM_TYPE_BIO_BASED) { 2412c0820cf5SMikulas Patocka /* 241316245bdcSJun'ichi Nomura * Reload bioset because front_pad may have changed 241416245bdcSJun'ichi Nomura * because a different table was loaded. 2415c0820cf5SMikulas Patocka */ 2416c0820cf5SMikulas Patocka bioset_free(md->bs); 2417c0820cf5SMikulas Patocka md->bs = p->bs; 2418c0820cf5SMikulas Patocka p->bs = NULL; 2419c0820cf5SMikulas Patocka } 2420cbc4e3c1SMike Snitzer /* 24214e6e36c3SMike Snitzer * There's no need to reload with request-based dm 24224e6e36c3SMike Snitzer * because the size of front_pad doesn't change. 24234e6e36c3SMike Snitzer * Note for future: If you are to reload bioset, 24244e6e36c3SMike Snitzer * prep-ed requests in the queue may refer 24254e6e36c3SMike Snitzer * to bio from the old bioset, so you must walk 24264e6e36c3SMike Snitzer * through the queue to unprep. 
2427cbc4e3c1SMike Snitzer */ 2428cbc4e3c1SMike Snitzer goto out; 2429cbc4e3c1SMike Snitzer } 2430cbc4e3c1SMike Snitzer 2431cbc4e3c1SMike Snitzer BUG_ON(!p || md->io_pool || md->rq_pool || md->bs); 2432e6ee8c0bSKiyoshi Ueda 2433e6ee8c0bSKiyoshi Ueda md->io_pool = p->io_pool; 2434e6ee8c0bSKiyoshi Ueda p->io_pool = NULL; 24351ae49ea2SMike Snitzer md->rq_pool = p->rq_pool; 24361ae49ea2SMike Snitzer p->rq_pool = NULL; 2437e6ee8c0bSKiyoshi Ueda md->bs = p->bs; 2438e6ee8c0bSKiyoshi Ueda p->bs = NULL; 24394e6e36c3SMike Snitzer 2440e6ee8c0bSKiyoshi Ueda out: 244102233342SMike Snitzer /* mempool bind completed, no longer need any mempools in the table */ 2442e6ee8c0bSKiyoshi Ueda dm_table_free_md_mempools(t); 2443e6ee8c0bSKiyoshi Ueda } 2444e6ee8c0bSKiyoshi Ueda 24451da177e4SLinus Torvalds /* 24461da177e4SLinus Torvalds * Handle a table event: send queued uevents and wake dm_wait_event() waiters. 24471da177e4SLinus Torvalds */ 24481da177e4SLinus Torvalds static void event_callback(void *context) 24491da177e4SLinus Torvalds { 24507a8c3d3bSMike Anderson unsigned long flags; 24517a8c3d3bSMike Anderson LIST_HEAD(uevents); 24521da177e4SLinus Torvalds struct mapped_device *md = (struct mapped_device *) context; 24531da177e4SLinus Torvalds 24547a8c3d3bSMike Anderson spin_lock_irqsave(&md->uevent_lock, flags); 24557a8c3d3bSMike Anderson list_splice_init(&md->uevent_list, &uevents); 24567a8c3d3bSMike Anderson spin_unlock_irqrestore(&md->uevent_lock, flags); 24577a8c3d3bSMike Anderson 2458ed9e1982STejun Heo dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); 24597a8c3d3bSMike Anderson 24601da177e4SLinus Torvalds atomic_inc(&md->event_nr); 24611da177e4SLinus Torvalds wake_up(&md->eventq); 24621da177e4SLinus Torvalds } 24631da177e4SLinus Torvalds 2464c217649bSMike Snitzer /* 2465c217649bSMike Snitzer * Protected by md->suspend_lock obtained by dm_swap_table(). 2466c217649bSMike Snitzer */ 24674e90188bSAlasdair G Kergon static void __set_size(struct mapped_device *md, sector_t size) 24681da177e4SLinus Torvalds { 24694e90188bSAlasdair G Kergon set_capacity(md->disk, size); 24701da177e4SLinus Torvalds 2471db8fef4fSMikulas Patocka i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT); 24721da177e4SLinus Torvalds } 24731da177e4SLinus Torvalds 2474042d2a9bSAlasdair G Kergon /* 2475042d2a9bSAlasdair G Kergon * Returns old map, which caller must destroy. 2476042d2a9bSAlasdair G Kergon */ 2477042d2a9bSAlasdair G Kergon static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, 2478754c5fc7SMike Snitzer struct queue_limits *limits) 24791da177e4SLinus Torvalds { 2480042d2a9bSAlasdair G Kergon struct dm_table *old_map; 2481165125e1SJens Axboe struct request_queue *q = md->queue; 24821da177e4SLinus Torvalds sector_t size; 24831da177e4SLinus Torvalds 24841da177e4SLinus Torvalds size = dm_table_get_size(t); 24853ac51e74SDarrick J. Wong 24863ac51e74SDarrick J. Wong /* 24873ac51e74SDarrick J. Wong * Wipe any geometry if the size of the table changed. 24883ac51e74SDarrick J. Wong */ 2489fd2ed4d2SMikulas Patocka if (size != dm_get_size(md)) 24903ac51e74SDarrick J. Wong memset(&md->geometry, 0, sizeof(md->geometry)); 24913ac51e74SDarrick J. Wong 24924e90188bSAlasdair G Kergon __set_size(md, size); 24931da177e4SLinus Torvalds 2494cf222b37SAlasdair G Kergon dm_table_event_callback(t, event_callback, md); 24952ca3310eSAlasdair G Kergon 2496e6ee8c0bSKiyoshi Ueda /* 2497e6ee8c0bSKiyoshi Ueda * The queue hasn't been stopped yet if the old table type wasn't 2498e6ee8c0bSKiyoshi Ueda * request-based during suspension.
So stop it to prevent 2499e6ee8c0bSKiyoshi Ueda * I/O mapping before resume. 2500e6ee8c0bSKiyoshi Ueda * This must be done before setting the queue restrictions, 2501e6ee8c0bSKiyoshi Ueda * because request-based dm may be run just after the setting. 2502e6ee8c0bSKiyoshi Ueda */ 2503bfebd1cdSMike Snitzer if (dm_table_request_based(t)) 2504e6ee8c0bSKiyoshi Ueda stop_queue(q); 2505e6ee8c0bSKiyoshi Ueda 2506e6ee8c0bSKiyoshi Ueda __bind_mempools(md, t); 2507e6ee8c0bSKiyoshi Ueda 2508a12f5d48SEric Dumazet old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 250983d5e5b0SMikulas Patocka rcu_assign_pointer(md->map, t); 251036a0456fSAlasdair G Kergon md->immutable_target_type = dm_table_get_immutable_target_type(t); 251136a0456fSAlasdair G Kergon 2512754c5fc7SMike Snitzer dm_table_set_restrictions(t, q, limits); 251341abc4e1SHannes Reinecke if (old_map) 251483d5e5b0SMikulas Patocka dm_sync_table(md); 25152ca3310eSAlasdair G Kergon 2516042d2a9bSAlasdair G Kergon return old_map; 25171da177e4SLinus Torvalds } 25181da177e4SLinus Torvalds 2519a7940155SAlasdair G Kergon /* 2520a7940155SAlasdair G Kergon * Returns unbound table for the caller to free. 2521a7940155SAlasdair G Kergon */ 2522a7940155SAlasdair G Kergon static struct dm_table *__unbind(struct mapped_device *md) 25231da177e4SLinus Torvalds { 2524a12f5d48SEric Dumazet struct dm_table *map = rcu_dereference_protected(md->map, 1); 25251da177e4SLinus Torvalds 25261da177e4SLinus Torvalds if (!map) 2527a7940155SAlasdair G Kergon return NULL; 25281da177e4SLinus Torvalds 25291da177e4SLinus Torvalds dm_table_event_callback(map, NULL, NULL); 25309cdb8520SMonam Agarwal RCU_INIT_POINTER(md->map, NULL); 253183d5e5b0SMikulas Patocka dm_sync_table(md); 2532a7940155SAlasdair G Kergon 2533a7940155SAlasdair G Kergon return map; 25341da177e4SLinus Torvalds } 25351da177e4SLinus Torvalds 25361da177e4SLinus Torvalds /* 25371da177e4SLinus Torvalds * Constructor for a new device. 25381da177e4SLinus Torvalds */ 25392b06cfffSAlasdair G Kergon int dm_create(int minor, struct mapped_device **result) 25401da177e4SLinus Torvalds { 25411da177e4SLinus Torvalds struct mapped_device *md; 25421da177e4SLinus Torvalds 25432b06cfffSAlasdair G Kergon md = alloc_dev(minor); 25441da177e4SLinus Torvalds if (!md) 25451da177e4SLinus Torvalds return -ENXIO; 25461da177e4SLinus Torvalds 2547784aae73SMilan Broz dm_sysfs_init(md); 2548784aae73SMilan Broz 25491da177e4SLinus Torvalds *result = md; 25501da177e4SLinus Torvalds return 0; 25511da177e4SLinus Torvalds } 25521da177e4SLinus Torvalds 2553a5664dadSMike Snitzer /* 2554a5664dadSMike Snitzer * Functions to manage md->type. 2555a5664dadSMike Snitzer * All are required to hold md->type_lock. 
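 *
 * A caller-side sketch (illustrative only, not quoted from dm-ioctl):
 *
 *	dm_lock_md_type(md);
 *	if (dm_get_md_type(md) == DM_TYPE_NONE)
 *		dm_set_md_type(md, dm_table_get_type(t));
 *	dm_unlock_md_type(md);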
2556a5664dadSMike Snitzer */ 2557a5664dadSMike Snitzer void dm_lock_md_type(struct mapped_device *md) 2558a5664dadSMike Snitzer { 2559a5664dadSMike Snitzer mutex_lock(&md->type_lock); 2560a5664dadSMike Snitzer } 2561a5664dadSMike Snitzer 2562a5664dadSMike Snitzer void dm_unlock_md_type(struct mapped_device *md) 2563a5664dadSMike Snitzer { 2564a5664dadSMike Snitzer mutex_unlock(&md->type_lock); 2565a5664dadSMike Snitzer } 2566a5664dadSMike Snitzer 2567a5664dadSMike Snitzer void dm_set_md_type(struct mapped_device *md, unsigned type) 2568a5664dadSMike Snitzer { 256900c4fc3bSMike Snitzer BUG_ON(!mutex_is_locked(&md->type_lock)); 2570a5664dadSMike Snitzer md->type = type; 2571a5664dadSMike Snitzer } 2572a5664dadSMike Snitzer 2573a5664dadSMike Snitzer unsigned dm_get_md_type(struct mapped_device *md) 2574a5664dadSMike Snitzer { 257500c4fc3bSMike Snitzer BUG_ON(!mutex_is_locked(&md->type_lock)); 2576a5664dadSMike Snitzer return md->type; 2577a5664dadSMike Snitzer } 2578a5664dadSMike Snitzer 257936a0456fSAlasdair G Kergon struct target_type *dm_get_immutable_target_type(struct mapped_device *md) 258036a0456fSAlasdair G Kergon { 258136a0456fSAlasdair G Kergon return md->immutable_target_type; 258236a0456fSAlasdair G Kergon } 258336a0456fSAlasdair G Kergon 25844a0b4ddfSMike Snitzer /* 2585f84cb8a4SMike Snitzer * The queue_limits are only valid as long as you have a reference 2586f84cb8a4SMike Snitzer * count on 'md'. 2587f84cb8a4SMike Snitzer */ 2588f84cb8a4SMike Snitzer struct queue_limits *dm_get_queue_limits(struct mapped_device *md) 2589f84cb8a4SMike Snitzer { 2590f84cb8a4SMike Snitzer BUG_ON(!atomic_read(&md->holders)); 2591f84cb8a4SMike Snitzer return &md->queue->limits; 2592f84cb8a4SMike Snitzer } 2593f84cb8a4SMike Snitzer EXPORT_SYMBOL_GPL(dm_get_queue_limits); 2594f84cb8a4SMike Snitzer 2595bfebd1cdSMike Snitzer static void init_rq_based_worker_thread(struct mapped_device *md) 2596bfebd1cdSMike Snitzer { 2597bfebd1cdSMike Snitzer /* Initialize the request-based DM worker thread */ 2598bfebd1cdSMike Snitzer init_kthread_worker(&md->kworker); 2599bfebd1cdSMike Snitzer md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker, 2600bfebd1cdSMike Snitzer "kdmwork-%s", dm_device_name(md)); 2601bfebd1cdSMike Snitzer } 2602bfebd1cdSMike Snitzer 2603f84cb8a4SMike Snitzer /* 26044a0b4ddfSMike Snitzer * Fully initialize a request-based queue (->elevator, ->request_fn, etc). 
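 * Called from dm_setup_md_queue() for DM_TYPE_REQUEST_BASED devices;
 * returns 0 on success or -EINVAL if blk_init_allocated_queue() fails.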
26054a0b4ddfSMike Snitzer */ 26064a0b4ddfSMike Snitzer static int dm_init_request_based_queue(struct mapped_device *md) 26074a0b4ddfSMike Snitzer { 26084a0b4ddfSMike Snitzer struct request_queue *q = NULL; 26094a0b4ddfSMike Snitzer 26104a0b4ddfSMike Snitzer /* Fully initialize the queue */ 26114a0b4ddfSMike Snitzer q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL); 26124a0b4ddfSMike Snitzer if (!q) 2613bfebd1cdSMike Snitzer return -EINVAL; 26144a0b4ddfSMike Snitzer 26150ce65797SMike Snitzer /* disable dm_request_fn's merge heuristic by default */ 26160ce65797SMike Snitzer md->seq_rq_merge_deadline_usecs = 0; 26170ce65797SMike Snitzer 26184a0b4ddfSMike Snitzer md->queue = q; 2619bfebd1cdSMike Snitzer dm_init_old_md_queue(md); 26204a0b4ddfSMike Snitzer blk_queue_softirq_done(md->queue, dm_softirq_done); 26214a0b4ddfSMike Snitzer blk_queue_prep_rq(md->queue, dm_prep_fn); 26224a0b4ddfSMike Snitzer 2623bfebd1cdSMike Snitzer init_rq_based_worker_thread(md); 26242eb6e1e3SKeith Busch 26254a0b4ddfSMike Snitzer elv_register_queue(md->queue); 26264a0b4ddfSMike Snitzer 2627bfebd1cdSMike Snitzer return 0; 2628bfebd1cdSMike Snitzer } 2629bfebd1cdSMike Snitzer 2630bfebd1cdSMike Snitzer static int dm_mq_init_request(void *data, struct request *rq, 2631bfebd1cdSMike Snitzer unsigned int hctx_idx, unsigned int request_idx, 2632bfebd1cdSMike Snitzer unsigned int numa_node) 2633bfebd1cdSMike Snitzer { 2634bfebd1cdSMike Snitzer struct mapped_device *md = data; 2635bfebd1cdSMike Snitzer struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq); 2636bfebd1cdSMike Snitzer 2637bfebd1cdSMike Snitzer /* 2638bfebd1cdSMike Snitzer * Must initialize md member of tio, otherwise it won't 2639bfebd1cdSMike Snitzer * be available in dm_mq_queue_rq. 2640bfebd1cdSMike Snitzer */ 2641bfebd1cdSMike Snitzer tio->md = md; 2642bfebd1cdSMike Snitzer 2643bfebd1cdSMike Snitzer return 0; 2644bfebd1cdSMike Snitzer } 2645bfebd1cdSMike Snitzer 2646bfebd1cdSMike Snitzer static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx, 2647bfebd1cdSMike Snitzer const struct blk_mq_queue_data *bd) 2648bfebd1cdSMike Snitzer { 2649bfebd1cdSMike Snitzer struct request *rq = bd->rq; 2650bfebd1cdSMike Snitzer struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq); 2651bfebd1cdSMike Snitzer struct mapped_device *md = tio->md; 2652bfebd1cdSMike Snitzer int srcu_idx; 2653bfebd1cdSMike Snitzer struct dm_table *map = dm_get_live_table(md, &srcu_idx); 2654bfebd1cdSMike Snitzer struct dm_target *ti; 2655bfebd1cdSMike Snitzer sector_t pos; 2656bfebd1cdSMike Snitzer 2657bfebd1cdSMike Snitzer /* always use block 0 to find the target for flushes for now */ 2658bfebd1cdSMike Snitzer pos = 0; 2659bfebd1cdSMike Snitzer if (!(rq->cmd_flags & REQ_FLUSH)) 2660bfebd1cdSMike Snitzer pos = blk_rq_pos(rq); 2661bfebd1cdSMike Snitzer 2662bfebd1cdSMike Snitzer ti = dm_table_find_target(map, pos); 2663bfebd1cdSMike Snitzer if (!dm_target_is_valid(ti)) { 2664bfebd1cdSMike Snitzer dm_put_live_table(md, srcu_idx); 2665bfebd1cdSMike Snitzer DMERR_LIMIT("request attempted access beyond the end of device"); 2666bfebd1cdSMike Snitzer /* 2667bfebd1cdSMike Snitzer * Must perform setup, that rq_completed() requires, 2668bfebd1cdSMike Snitzer * before returning BLK_MQ_RQ_QUEUE_ERROR 2669bfebd1cdSMike Snitzer */ 2670bfebd1cdSMike Snitzer dm_start_request(md, rq); 2671bfebd1cdSMike Snitzer return BLK_MQ_RQ_QUEUE_ERROR; 2672bfebd1cdSMike Snitzer } 2673bfebd1cdSMike Snitzer dm_put_live_table(md, srcu_idx); 2674bfebd1cdSMike Snitzer 2675bfebd1cdSMike Snitzer if (ti->type->busy && 
ti->type->busy(ti)) 2676bfebd1cdSMike Snitzer return BLK_MQ_RQ_QUEUE_BUSY; 2677bfebd1cdSMike Snitzer 2678bfebd1cdSMike Snitzer dm_start_request(md, rq); 2679bfebd1cdSMike Snitzer 2680bfebd1cdSMike Snitzer /* Init tio using md established in .init_request */ 2681bfebd1cdSMike Snitzer init_tio(tio, rq, md); 2682bfebd1cdSMike Snitzer 268302233342SMike Snitzer /* 268402233342SMike Snitzer * Establish tio->ti before queuing work (map_tio_request) 268502233342SMike Snitzer * or making direct call to map_request(). 268602233342SMike Snitzer */ 2687bfebd1cdSMike Snitzer tio->ti = ti; 268802233342SMike Snitzer 268902233342SMike Snitzer /* Clone the request if underlying devices aren't blk-mq */ 269002233342SMike Snitzer if (dm_table_get_type(map) == DM_TYPE_REQUEST_BASED) { 269102233342SMike Snitzer /* clone request is allocated at the end of the pdu */ 269202233342SMike Snitzer tio->clone = (void *)blk_mq_rq_to_pdu(rq) + sizeof(struct dm_rq_target_io); 269345714fbeSMike Snitzer (void) clone_rq(rq, md, tio, GFP_ATOMIC); 2694bfebd1cdSMike Snitzer queue_kthread_work(&md->kworker, &tio->work); 269502233342SMike Snitzer } else { 269602233342SMike Snitzer /* Direct call is fine since .queue_rq allows allocations */ 269745714fbeSMike Snitzer if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) { 269845714fbeSMike Snitzer /* Undo dm_start_request() before requeuing */ 2699e262f347SMikulas Patocka rq_end_stats(md, rq); 270045714fbeSMike Snitzer rq_completed(md, rq_data_dir(rq), false); 270145714fbeSMike Snitzer return BLK_MQ_RQ_QUEUE_BUSY; 270245714fbeSMike Snitzer } 270302233342SMike Snitzer } 2704bfebd1cdSMike Snitzer 2705bfebd1cdSMike Snitzer return BLK_MQ_RQ_QUEUE_OK; 2706bfebd1cdSMike Snitzer } 2707bfebd1cdSMike Snitzer 2708bfebd1cdSMike Snitzer static struct blk_mq_ops dm_mq_ops = { 2709bfebd1cdSMike Snitzer .queue_rq = dm_mq_queue_rq, 2710bfebd1cdSMike Snitzer .map_queue = blk_mq_map_queue, 2711bfebd1cdSMike Snitzer .complete = dm_softirq_done, 2712bfebd1cdSMike Snitzer .init_request = dm_mq_init_request, 2713bfebd1cdSMike Snitzer }; 2714bfebd1cdSMike Snitzer 2715bfebd1cdSMike Snitzer static int dm_init_request_based_blk_mq_queue(struct mapped_device *md) 2716bfebd1cdSMike Snitzer { 271702233342SMike Snitzer unsigned md_type = dm_get_md_type(md); 2718bfebd1cdSMike Snitzer struct request_queue *q; 2719bfebd1cdSMike Snitzer int err; 2720bfebd1cdSMike Snitzer 2721bfebd1cdSMike Snitzer memset(&md->tag_set, 0, sizeof(md->tag_set)); 2722bfebd1cdSMike Snitzer md->tag_set.ops = &dm_mq_ops; 2723bfebd1cdSMike Snitzer md->tag_set.queue_depth = BLKDEV_MAX_RQ; 2724bfebd1cdSMike Snitzer md->tag_set.numa_node = NUMA_NO_NODE; 2725bfebd1cdSMike Snitzer md->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE; 2726bfebd1cdSMike Snitzer md->tag_set.nr_hw_queues = 1; 272702233342SMike Snitzer if (md_type == DM_TYPE_REQUEST_BASED) { 272802233342SMike Snitzer /* make the memory for non-blk-mq clone part of the pdu */ 272902233342SMike Snitzer md->tag_set.cmd_size = sizeof(struct dm_rq_target_io) + sizeof(struct request); 273002233342SMike Snitzer } else 2731bfebd1cdSMike Snitzer md->tag_set.cmd_size = sizeof(struct dm_rq_target_io); 2732bfebd1cdSMike Snitzer md->tag_set.driver_data = md; 2733bfebd1cdSMike Snitzer 2734bfebd1cdSMike Snitzer err = blk_mq_alloc_tag_set(&md->tag_set); 2735bfebd1cdSMike Snitzer if (err) 2736bfebd1cdSMike Snitzer return err; 2737bfebd1cdSMike Snitzer 2738bfebd1cdSMike Snitzer q = blk_mq_init_allocated_queue(&md->tag_set, md->queue); 2739bfebd1cdSMike Snitzer if (IS_ERR(q)) { 2740bfebd1cdSMike 
Snitzer err = PTR_ERR(q); 2741bfebd1cdSMike Snitzer goto out_tag_set; 2742bfebd1cdSMike Snitzer } 2743bfebd1cdSMike Snitzer md->queue = q; 2744bfebd1cdSMike Snitzer dm_init_md_queue(md); 2745bfebd1cdSMike Snitzer 2746bfebd1cdSMike Snitzer /* backfill 'mq' sysfs registration normally done in blk_register_queue */ 2747bfebd1cdSMike Snitzer blk_mq_register_disk(md->disk); 2748bfebd1cdSMike Snitzer 274902233342SMike Snitzer if (md_type == DM_TYPE_REQUEST_BASED) 2750bfebd1cdSMike Snitzer init_rq_based_worker_thread(md); 2751bfebd1cdSMike Snitzer 2752bfebd1cdSMike Snitzer return 0; 2753bfebd1cdSMike Snitzer 2754bfebd1cdSMike Snitzer out_tag_set: 2755bfebd1cdSMike Snitzer blk_mq_free_tag_set(&md->tag_set); 2756bfebd1cdSMike Snitzer return err; 27574a0b4ddfSMike Snitzer } 27584a0b4ddfSMike Snitzer 27594e6e36c3SMike Snitzer static unsigned filter_md_type(unsigned type, struct mapped_device *md) 27604e6e36c3SMike Snitzer { 27614e6e36c3SMike Snitzer if (type == DM_TYPE_BIO_BASED) 27624e6e36c3SMike Snitzer return type; 27634e6e36c3SMike Snitzer 27644e6e36c3SMike Snitzer return !md->use_blk_mq ? DM_TYPE_REQUEST_BASED : DM_TYPE_MQ_REQUEST_BASED; 27654e6e36c3SMike Snitzer } 27664e6e36c3SMike Snitzer 27674a0b4ddfSMike Snitzer /* 27684a0b4ddfSMike Snitzer * Setup the DM device's queue based on md's type 27694a0b4ddfSMike Snitzer */ 27704a0b4ddfSMike Snitzer int dm_setup_md_queue(struct mapped_device *md) 27714a0b4ddfSMike Snitzer { 2772bfebd1cdSMike Snitzer int r; 277317e149b8SMike Snitzer unsigned md_type = filter_md_type(dm_get_md_type(md), md); 2774bfebd1cdSMike Snitzer 2775bfebd1cdSMike Snitzer switch (md_type) { 2776bfebd1cdSMike Snitzer case DM_TYPE_REQUEST_BASED: 2777bfebd1cdSMike Snitzer r = dm_init_request_based_queue(md); 2778bfebd1cdSMike Snitzer if (r) { 27794a0b4ddfSMike Snitzer DMWARN("Cannot initialize queue for request-based mapped device"); 2780bfebd1cdSMike Snitzer return r; 27814a0b4ddfSMike Snitzer } 2782bfebd1cdSMike Snitzer break; 2783bfebd1cdSMike Snitzer case DM_TYPE_MQ_REQUEST_BASED: 2784bfebd1cdSMike Snitzer r = dm_init_request_based_blk_mq_queue(md); 2785bfebd1cdSMike Snitzer if (r) { 2786bfebd1cdSMike Snitzer DMWARN("Cannot initialize queue for request-based blk-mq mapped device"); 2787bfebd1cdSMike Snitzer return r; 2788bfebd1cdSMike Snitzer } 2789bfebd1cdSMike Snitzer break; 2790bfebd1cdSMike Snitzer case DM_TYPE_BIO_BASED: 2791bfebd1cdSMike Snitzer dm_init_old_md_queue(md); 2792ff36ab34SMike Snitzer blk_queue_make_request(md->queue, dm_make_request); 2793*dbba42d8SMikulas Patocka /* 2794*dbba42d8SMikulas Patocka * DM handles splitting bios as needed. Free the bio_split bioset 2795*dbba42d8SMikulas Patocka * since it won't be used (saves 1 process per bio-based DM device). 
2796*dbba42d8SMikulas Patocka */ 2797*dbba42d8SMikulas Patocka bioset_free(md->queue->bio_split); 2798*dbba42d8SMikulas Patocka md->queue->bio_split = NULL; 2799bfebd1cdSMike Snitzer break; 2800ff36ab34SMike Snitzer } 28014a0b4ddfSMike Snitzer 28024a0b4ddfSMike Snitzer return 0; 28034a0b4ddfSMike Snitzer } 28044a0b4ddfSMike Snitzer 28052bec1f4aSMikulas Patocka struct mapped_device *dm_get_md(dev_t dev) 28061da177e4SLinus Torvalds { 28071da177e4SLinus Torvalds struct mapped_device *md; 28081da177e4SLinus Torvalds unsigned minor = MINOR(dev); 28091da177e4SLinus Torvalds 28101da177e4SLinus Torvalds if (MAJOR(dev) != _major || minor >= (1 << MINORBITS)) 28111da177e4SLinus Torvalds return NULL; 28121da177e4SLinus Torvalds 2813f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 28141da177e4SLinus Torvalds 28151da177e4SLinus Torvalds md = idr_find(&_minor_idr, minor); 28162bec1f4aSMikulas Patocka if (md) { 28172bec1f4aSMikulas Patocka if ((md == MINOR_ALLOCED || 2818f331c029STejun Heo (MINOR(disk_devt(dm_disk(md))) != minor) || 2819abdc568bSKiyoshi Ueda dm_deleting_md(md) || 2820fba9f90eSJeff Mahoney test_bit(DMF_FREEING, &md->flags))) { 2821637842cfSDavid Teigland md = NULL; 2822fba9f90eSJeff Mahoney goto out; 2823fba9f90eSJeff Mahoney } 28242bec1f4aSMikulas Patocka dm_get(md); 28252bec1f4aSMikulas Patocka } 28261da177e4SLinus Torvalds 2827fba9f90eSJeff Mahoney out: 2828f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 28291da177e4SLinus Torvalds 2830637842cfSDavid Teigland return md; 2831637842cfSDavid Teigland } 28323cf2e4baSAlasdair G Kergon EXPORT_SYMBOL_GPL(dm_get_md); 2833d229a958SDavid Teigland 28349ade92a9SAlasdair G Kergon void *dm_get_mdptr(struct mapped_device *md) 2835637842cfSDavid Teigland { 28369ade92a9SAlasdair G Kergon return md->interface_ptr; 28371da177e4SLinus Torvalds } 28381da177e4SLinus Torvalds 28391da177e4SLinus Torvalds void dm_set_mdptr(struct mapped_device *md, void *ptr) 28401da177e4SLinus Torvalds { 28411da177e4SLinus Torvalds md->interface_ptr = ptr; 28421da177e4SLinus Torvalds } 28431da177e4SLinus Torvalds 28441da177e4SLinus Torvalds void dm_get(struct mapped_device *md) 28451da177e4SLinus Torvalds { 28461da177e4SLinus Torvalds atomic_inc(&md->holders); 28473f77316dSKiyoshi Ueda BUG_ON(test_bit(DMF_FREEING, &md->flags)); 28481da177e4SLinus Torvalds } 28491da177e4SLinus Torvalds 285009ee96b2SMikulas Patocka int dm_hold(struct mapped_device *md) 285109ee96b2SMikulas Patocka { 285209ee96b2SMikulas Patocka spin_lock(&_minor_lock); 285309ee96b2SMikulas Patocka if (test_bit(DMF_FREEING, &md->flags)) { 285409ee96b2SMikulas Patocka spin_unlock(&_minor_lock); 285509ee96b2SMikulas Patocka return -EBUSY; 285609ee96b2SMikulas Patocka } 285709ee96b2SMikulas Patocka dm_get(md); 285809ee96b2SMikulas Patocka spin_unlock(&_minor_lock); 285909ee96b2SMikulas Patocka return 0; 286009ee96b2SMikulas Patocka } 286109ee96b2SMikulas Patocka EXPORT_SYMBOL_GPL(dm_hold); 286209ee96b2SMikulas Patocka 286372d94861SAlasdair G Kergon const char *dm_device_name(struct mapped_device *md) 286472d94861SAlasdair G Kergon { 286572d94861SAlasdair G Kergon return md->name; 286672d94861SAlasdair G Kergon } 286772d94861SAlasdair G Kergon EXPORT_SYMBOL_GPL(dm_device_name); 286872d94861SAlasdair G Kergon 28693f77316dSKiyoshi Ueda static void __dm_destroy(struct mapped_device *md, bool wait) 28701da177e4SLinus Torvalds { 28711134e5aeSMike Anderson struct dm_table *map; 287283d5e5b0SMikulas Patocka int srcu_idx; 28731da177e4SLinus Torvalds 28743f77316dSKiyoshi Ueda might_sleep(); 2875fba9f90eSJeff Mahoney 
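	/*
	 * Grab the live table, then mark the device DMF_FREEING under
	 * _minor_lock so no new references can be taken via the minor idr.
	 */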
287683d5e5b0SMikulas Patocka map = dm_get_live_table(md, &srcu_idx); 287763a4f065SMike Snitzer 287863a4f065SMike Snitzer spin_lock(&_minor_lock); 28793f77316dSKiyoshi Ueda idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); 2880fba9f90eSJeff Mahoney set_bit(DMF_FREEING, &md->flags); 2881f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 28823f77316dSKiyoshi Ueda 288302233342SMike Snitzer if (dm_request_based(md) && md->kworker_task) 28842eb6e1e3SKeith Busch flush_kthread_worker(&md->kworker); 28852eb6e1e3SKeith Busch 2886ab7c7bb6SMikulas Patocka /* 2887ab7c7bb6SMikulas Patocka * Take suspend_lock so that presuspend and postsuspend methods 2888ab7c7bb6SMikulas Patocka * do not race with internal suspend. 2889ab7c7bb6SMikulas Patocka */ 2890ab7c7bb6SMikulas Patocka mutex_lock(&md->suspend_lock); 28914f186f8bSKiyoshi Ueda if (!dm_suspended_md(md)) { 28921da177e4SLinus Torvalds dm_table_presuspend_targets(map); 28931da177e4SLinus Torvalds dm_table_postsuspend_targets(map); 28941da177e4SLinus Torvalds } 2895ab7c7bb6SMikulas Patocka mutex_unlock(&md->suspend_lock); 28963f77316dSKiyoshi Ueda 289783d5e5b0SMikulas Patocka /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ 289883d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 289983d5e5b0SMikulas Patocka 29003f77316dSKiyoshi Ueda /* 29013f77316dSKiyoshi Ueda * Rare, but there may be I/O requests still going to complete, 29023f77316dSKiyoshi Ueda * for example. Wait for all references to disappear. 29033f77316dSKiyoshi Ueda * No one should increment the reference count of the mapped_device, 29043f77316dSKiyoshi Ueda * after the mapped_device state becomes DMF_FREEING. 29053f77316dSKiyoshi Ueda */ 29063f77316dSKiyoshi Ueda if (wait) 29073f77316dSKiyoshi Ueda while (atomic_read(&md->holders)) 29083f77316dSKiyoshi Ueda msleep(1); 29093f77316dSKiyoshi Ueda else if (atomic_read(&md->holders)) 29103f77316dSKiyoshi Ueda DMWARN("%s: Forcibly removing mapped_device still in use! 
(%d users)", 29113f77316dSKiyoshi Ueda dm_device_name(md), atomic_read(&md->holders)); 29123f77316dSKiyoshi Ueda 2913784aae73SMilan Broz dm_sysfs_exit(md); 2914a7940155SAlasdair G Kergon dm_table_destroy(__unbind(md)); 29151da177e4SLinus Torvalds free_dev(md); 29161da177e4SLinus Torvalds } 29173f77316dSKiyoshi Ueda 29183f77316dSKiyoshi Ueda void dm_destroy(struct mapped_device *md) 29193f77316dSKiyoshi Ueda { 29203f77316dSKiyoshi Ueda __dm_destroy(md, true); 29213f77316dSKiyoshi Ueda } 29223f77316dSKiyoshi Ueda 29233f77316dSKiyoshi Ueda void dm_destroy_immediate(struct mapped_device *md) 29243f77316dSKiyoshi Ueda { 29253f77316dSKiyoshi Ueda __dm_destroy(md, false); 29263f77316dSKiyoshi Ueda } 29273f77316dSKiyoshi Ueda 29283f77316dSKiyoshi Ueda void dm_put(struct mapped_device *md) 29293f77316dSKiyoshi Ueda { 29303f77316dSKiyoshi Ueda atomic_dec(&md->holders); 29311da177e4SLinus Torvalds } 293279eb885cSEdward Goggin EXPORT_SYMBOL_GPL(dm_put); 29331da177e4SLinus Torvalds 2934401600dfSMikulas Patocka static int dm_wait_for_completion(struct mapped_device *md, int interruptible) 293546125c1cSMilan Broz { 293646125c1cSMilan Broz int r = 0; 2937b44ebeb0SMikulas Patocka DECLARE_WAITQUEUE(wait, current); 2938b44ebeb0SMikulas Patocka 2939b44ebeb0SMikulas Patocka add_wait_queue(&md->wait, &wait); 294046125c1cSMilan Broz 294146125c1cSMilan Broz while (1) { 2942401600dfSMikulas Patocka set_current_state(interruptible); 294346125c1cSMilan Broz 2944b4324feeSKiyoshi Ueda if (!md_in_flight(md)) 294546125c1cSMilan Broz break; 294646125c1cSMilan Broz 2947401600dfSMikulas Patocka if (interruptible == TASK_INTERRUPTIBLE && 2948401600dfSMikulas Patocka signal_pending(current)) { 294946125c1cSMilan Broz r = -EINTR; 295046125c1cSMilan Broz break; 295146125c1cSMilan Broz } 295246125c1cSMilan Broz 295346125c1cSMilan Broz io_schedule(); 295446125c1cSMilan Broz } 295546125c1cSMilan Broz set_current_state(TASK_RUNNING); 295646125c1cSMilan Broz 2957b44ebeb0SMikulas Patocka remove_wait_queue(&md->wait, &wait); 2958b44ebeb0SMikulas Patocka 295946125c1cSMilan Broz return r; 296046125c1cSMilan Broz } 296146125c1cSMilan Broz 29621da177e4SLinus Torvalds /* 29631da177e4SLinus Torvalds * Process the deferred bios 29641da177e4SLinus Torvalds */ 2965ef208587SMikulas Patocka static void dm_wq_work(struct work_struct *work) 29661da177e4SLinus Torvalds { 2967ef208587SMikulas Patocka struct mapped_device *md = container_of(work, struct mapped_device, 2968ef208587SMikulas Patocka work); 29696d6f10dfSMilan Broz struct bio *c; 297083d5e5b0SMikulas Patocka int srcu_idx; 297183d5e5b0SMikulas Patocka struct dm_table *map; 29721da177e4SLinus Torvalds 297383d5e5b0SMikulas Patocka map = dm_get_live_table(md, &srcu_idx); 2974ef208587SMikulas Patocka 29753b00b203SMikulas Patocka while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 2976022c2611SMikulas Patocka spin_lock_irq(&md->deferred_lock); 2977022c2611SMikulas Patocka c = bio_list_pop(&md->deferred); 2978022c2611SMikulas Patocka spin_unlock_irq(&md->deferred_lock); 2979022c2611SMikulas Patocka 29806a8736d1STejun Heo if (!c) 2981df12ee99SAlasdair G Kergon break; 298273d410c0SMilan Broz 2983e6ee8c0bSKiyoshi Ueda if (dm_request_based(md)) 2984e6ee8c0bSKiyoshi Ueda generic_make_request(c); 2985af7e466aSMikulas Patocka else 298683d5e5b0SMikulas Patocka __split_and_process_bio(md, map, c); 2987e6ee8c0bSKiyoshi Ueda } 29883b00b203SMikulas Patocka 298983d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 29901da177e4SLinus Torvalds } 29911da177e4SLinus Torvalds 29929a1fb464SMikulas 
Patocka static void dm_queue_flush(struct mapped_device *md) 2993304f3f6aSMilan Broz { 29943b00b203SMikulas Patocka clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 29954e857c58SPeter Zijlstra smp_mb__after_atomic(); 299653d5914fSMikulas Patocka queue_work(md->wq, &md->work); 2997304f3f6aSMilan Broz } 2998304f3f6aSMilan Broz 29991da177e4SLinus Torvalds /* 3000042d2a9bSAlasdair G Kergon * Swap in a new table, returning the old one for the caller to destroy. 30011da177e4SLinus Torvalds */ 3002042d2a9bSAlasdair G Kergon struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) 30031da177e4SLinus Torvalds { 300487eb5b21SMike Christie struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL); 3005754c5fc7SMike Snitzer struct queue_limits limits; 3006042d2a9bSAlasdair G Kergon int r; 30071da177e4SLinus Torvalds 3008e61290a4SDaniel Walker mutex_lock(&md->suspend_lock); 30091da177e4SLinus Torvalds 30101da177e4SLinus Torvalds /* device must be suspended */ 30114f186f8bSKiyoshi Ueda if (!dm_suspended_md(md)) 301293c534aeSAlasdair G Kergon goto out; 30131da177e4SLinus Torvalds 30143ae70656SMike Snitzer /* 30153ae70656SMike Snitzer * If the new table has no data devices, retain the existing limits. 30163ae70656SMike Snitzer * This helps multipath with queue_if_no_path if all paths disappear, 30173ae70656SMike Snitzer * then new I/O is queued based on these limits, and then some paths 30183ae70656SMike Snitzer * reappear. 30193ae70656SMike Snitzer */ 30203ae70656SMike Snitzer if (dm_table_has_no_data_devices(table)) { 302183d5e5b0SMikulas Patocka live_map = dm_get_live_table_fast(md); 30223ae70656SMike Snitzer if (live_map) 30233ae70656SMike Snitzer limits = md->queue->limits; 302483d5e5b0SMikulas Patocka dm_put_live_table_fast(md); 30253ae70656SMike Snitzer } 30263ae70656SMike Snitzer 302787eb5b21SMike Christie if (!live_map) { 3028754c5fc7SMike Snitzer r = dm_calculate_queue_limits(table, &limits); 3029042d2a9bSAlasdair G Kergon if (r) { 3030042d2a9bSAlasdair G Kergon map = ERR_PTR(r); 3031754c5fc7SMike Snitzer goto out; 3032042d2a9bSAlasdair G Kergon } 303387eb5b21SMike Christie } 3034754c5fc7SMike Snitzer 3035042d2a9bSAlasdair G Kergon map = __bind(md, table, &limits); 30361da177e4SLinus Torvalds 303793c534aeSAlasdair G Kergon out: 3038e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock); 3039042d2a9bSAlasdair G Kergon return map; 30401da177e4SLinus Torvalds } 30411da177e4SLinus Torvalds 30421da177e4SLinus Torvalds /* 30431da177e4SLinus Torvalds * Functions to lock and unlock any filesystem running on the 30441da177e4SLinus Torvalds * device. 
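 *
 * lock_fs() freezes md->bdev and sets DMF_FROZEN; unlock_fs() thaws it
 * again.  unlock_fs() is a no-op when DMF_FROZEN is not set, so it is
 * safe to call unconditionally on error paths.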
30451da177e4SLinus Torvalds */ 30462ca3310eSAlasdair G Kergon static int lock_fs(struct mapped_device *md) 30471da177e4SLinus Torvalds { 3048e39e2e95SAlasdair G Kergon int r; 30491da177e4SLinus Torvalds 30501da177e4SLinus Torvalds WARN_ON(md->frozen_sb); 3051dfbe03f6SAlasdair G Kergon 3052db8fef4fSMikulas Patocka md->frozen_sb = freeze_bdev(md->bdev); 3053dfbe03f6SAlasdair G Kergon if (IS_ERR(md->frozen_sb)) { 3054cf222b37SAlasdair G Kergon r = PTR_ERR(md->frozen_sb); 3055e39e2e95SAlasdair G Kergon md->frozen_sb = NULL; 3056e39e2e95SAlasdair G Kergon return r; 3057dfbe03f6SAlasdair G Kergon } 3058dfbe03f6SAlasdair G Kergon 3059aa8d7c2fSAlasdair G Kergon set_bit(DMF_FROZEN, &md->flags); 3060aa8d7c2fSAlasdair G Kergon 30611da177e4SLinus Torvalds return 0; 30621da177e4SLinus Torvalds } 30631da177e4SLinus Torvalds 30642ca3310eSAlasdair G Kergon static void unlock_fs(struct mapped_device *md) 30651da177e4SLinus Torvalds { 3066aa8d7c2fSAlasdair G Kergon if (!test_bit(DMF_FROZEN, &md->flags)) 3067aa8d7c2fSAlasdair G Kergon return; 3068aa8d7c2fSAlasdair G Kergon 3069db8fef4fSMikulas Patocka thaw_bdev(md->bdev, md->frozen_sb); 30701da177e4SLinus Torvalds md->frozen_sb = NULL; 3071aa8d7c2fSAlasdair G Kergon clear_bit(DMF_FROZEN, &md->flags); 30721da177e4SLinus Torvalds } 30731da177e4SLinus Torvalds 30741da177e4SLinus Torvalds /* 3075ffcc3936SMike Snitzer * If __dm_suspend returns 0, the device is completely quiescent 3076ffcc3936SMike Snitzer * now. There is no request-processing activity. All new requests 3077ffcc3936SMike Snitzer * are being added to md->deferred list. 3078cec47e3dSKiyoshi Ueda * 3079ffcc3936SMike Snitzer * Caller must hold md->suspend_lock 3080cec47e3dSKiyoshi Ueda */ 3081ffcc3936SMike Snitzer static int __dm_suspend(struct mapped_device *md, struct dm_table *map, 3082ffcc3936SMike Snitzer unsigned suspend_flags, int interruptible) 30831da177e4SLinus Torvalds { 3084ffcc3936SMike Snitzer bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG; 3085ffcc3936SMike Snitzer bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG; 3086ffcc3936SMike Snitzer int r; 3087cf222b37SAlasdair G Kergon 30882e93ccc1SKiyoshi Ueda /* 30892e93ccc1SKiyoshi Ueda * DMF_NOFLUSH_SUSPENDING must be set before presuspend. 30902e93ccc1SKiyoshi Ueda * This flag is cleared before dm_suspend returns. 30912e93ccc1SKiyoshi Ueda */ 30922e93ccc1SKiyoshi Ueda if (noflush) 30932e93ccc1SKiyoshi Ueda set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 30942e93ccc1SKiyoshi Ueda 3095d67ee213SMike Snitzer /* 3096d67ee213SMike Snitzer * This gets reverted if there's an error later and the targets 3097d67ee213SMike Snitzer * provide the .presuspend_undo hook. 3098d67ee213SMike Snitzer */ 30991da177e4SLinus Torvalds dm_table_presuspend_targets(map); 31001da177e4SLinus Torvalds 31012e93ccc1SKiyoshi Ueda /* 31029f518b27SKiyoshi Ueda * Flush I/O to the device. 31039f518b27SKiyoshi Ueda * Any I/O submitted after lock_fs() may not be flushed. 31049f518b27SKiyoshi Ueda * noflush takes precedence over do_lockfs. 31059f518b27SKiyoshi Ueda * (lock_fs() flushes I/Os and waits for them to complete.) 
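 *
 * Summarizing the condition below:
 *   noflush suspend                        -> lock_fs() is never called;
 *   suspend without DM_SUSPEND_LOCKFS_FLAG -> lock_fs() is skipped;
 *   suspend with DM_SUSPEND_LOCKFS_FLAG    -> the filesystem is frozen first.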
31062e93ccc1SKiyoshi Ueda */ 310732a926daSMikulas Patocka if (!noflush && do_lockfs) { 31082ca3310eSAlasdair G Kergon r = lock_fs(md); 3109d67ee213SMike Snitzer if (r) { 3110d67ee213SMike Snitzer dm_table_presuspend_undo_targets(map); 3111ffcc3936SMike Snitzer return r; 3112aa8d7c2fSAlasdair G Kergon } 3113d67ee213SMike Snitzer } 31141da177e4SLinus Torvalds 31151da177e4SLinus Torvalds /* 31163b00b203SMikulas Patocka * Here we must make sure that no processes are submitting requests 31173b00b203SMikulas Patocka * to target drivers i.e. no one may be executing 31183b00b203SMikulas Patocka * __split_and_process_bio. This is called from dm_request and 31193b00b203SMikulas Patocka * dm_wq_work. 31203b00b203SMikulas Patocka * 31213b00b203SMikulas Patocka * To get all processes out of __split_and_process_bio in dm_request, 31223b00b203SMikulas Patocka * we take the write lock. To prevent any process from reentering 31236a8736d1STejun Heo * __split_and_process_bio from dm_request and quiesce the thread 31246a8736d1STejun Heo * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call 31256a8736d1STejun Heo * flush_workqueue(md->wq). 31261da177e4SLinus Torvalds */ 31271eb787ecSAlasdair G Kergon set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 312841abc4e1SHannes Reinecke if (map) 312983d5e5b0SMikulas Patocka synchronize_srcu(&md->io_barrier); 31301da177e4SLinus Torvalds 3131d0bcb878SKiyoshi Ueda /* 313229e4013dSTejun Heo * Stop md->queue before flushing md->wq in case request-based 313329e4013dSTejun Heo * dm defers requests to md->wq from md->queue. 3134d0bcb878SKiyoshi Ueda */ 31352eb6e1e3SKeith Busch if (dm_request_based(md)) { 31369f518b27SKiyoshi Ueda stop_queue(md->queue); 313702233342SMike Snitzer if (md->kworker_task) 31382eb6e1e3SKeith Busch flush_kthread_worker(&md->kworker); 31392eb6e1e3SKeith Busch } 3140cec47e3dSKiyoshi Ueda 3141d0bcb878SKiyoshi Ueda flush_workqueue(md->wq); 3142d0bcb878SKiyoshi Ueda 31431da177e4SLinus Torvalds /* 31443b00b203SMikulas Patocka * At this point no more requests are entering target request routines. 31453b00b203SMikulas Patocka * We call dm_wait_for_completion to wait for all existing requests 31463b00b203SMikulas Patocka * to finish. 31471da177e4SLinus Torvalds */ 3148ffcc3936SMike Snitzer r = dm_wait_for_completion(md, interruptible); 31491da177e4SLinus Torvalds 31506d6f10dfSMilan Broz if (noflush) 3151022c2611SMikulas Patocka clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 315241abc4e1SHannes Reinecke if (map) 315383d5e5b0SMikulas Patocka synchronize_srcu(&md->io_barrier); 31542e93ccc1SKiyoshi Ueda 31551da177e4SLinus Torvalds /* were we interrupted? */ 315646125c1cSMilan Broz if (r < 0) { 31579a1fb464SMikulas Patocka dm_queue_flush(md); 315873d410c0SMilan Broz 3159cec47e3dSKiyoshi Ueda if (dm_request_based(md)) 31609f518b27SKiyoshi Ueda start_queue(md->queue); 3161cec47e3dSKiyoshi Ueda 31622ca3310eSAlasdair G Kergon unlock_fs(md); 3163d67ee213SMike Snitzer dm_table_presuspend_undo_targets(map); 3164ffcc3936SMike Snitzer /* pushback list is already flushed, so skip flush */ 3165ffcc3936SMike Snitzer } 3166ffcc3936SMike Snitzer 3167ffcc3936SMike Snitzer return r; 31682ca3310eSAlasdair G Kergon } 31692ca3310eSAlasdair G Kergon 31703b00b203SMikulas Patocka /* 3171ffcc3936SMike Snitzer * We need to be able to change a mapping table under a mounted 3172ffcc3936SMike Snitzer * filesystem. For example we might want to move some data in 3173ffcc3936SMike Snitzer * the background.
Before the table can be swapped with 3174ffcc3936SMike Snitzer * dm_bind_table, dm_suspend must be called to flush any in 3175ffcc3936SMike Snitzer * flight bios and ensure that any further io gets deferred. 31763b00b203SMikulas Patocka */ 3177ffcc3936SMike Snitzer /* 3178ffcc3936SMike Snitzer * Suspend mechanism in request-based dm. 3179ffcc3936SMike Snitzer * 3180ffcc3936SMike Snitzer * 1. Flush all I/Os by lock_fs() if needed. 3181ffcc3936SMike Snitzer * 2. Stop dispatching any I/O by stopping the request_queue. 3182ffcc3936SMike Snitzer * 3. Wait for all in-flight I/Os to be completed or requeued. 3183ffcc3936SMike Snitzer * 3184ffcc3936SMike Snitzer * To abort suspend, start the request_queue. 3185ffcc3936SMike Snitzer */ 3186ffcc3936SMike Snitzer int dm_suspend(struct mapped_device *md, unsigned suspend_flags) 3187ffcc3936SMike Snitzer { 3188ffcc3936SMike Snitzer struct dm_table *map = NULL; 3189ffcc3936SMike Snitzer int r = 0; 3190ffcc3936SMike Snitzer 3191ffcc3936SMike Snitzer retry: 3192ffcc3936SMike Snitzer mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); 3193ffcc3936SMike Snitzer 3194ffcc3936SMike Snitzer if (dm_suspended_md(md)) { 3195ffcc3936SMike Snitzer r = -EINVAL; 3196ffcc3936SMike Snitzer goto out_unlock; 3197ffcc3936SMike Snitzer } 3198ffcc3936SMike Snitzer 3199ffcc3936SMike Snitzer if (dm_suspended_internally_md(md)) { 3200ffcc3936SMike Snitzer /* already internally suspended, wait for internal resume */ 3201ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 3202ffcc3936SMike Snitzer r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); 3203ffcc3936SMike Snitzer if (r) 3204ffcc3936SMike Snitzer return r; 3205ffcc3936SMike Snitzer goto retry; 3206ffcc3936SMike Snitzer } 3207ffcc3936SMike Snitzer 3208a12f5d48SEric Dumazet map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 3209ffcc3936SMike Snitzer 3210ffcc3936SMike Snitzer r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE); 3211ffcc3936SMike Snitzer if (r) 3212ffcc3936SMike Snitzer goto out_unlock; 32133b00b203SMikulas Patocka 32141da177e4SLinus Torvalds set_bit(DMF_SUSPENDED, &md->flags); 32151da177e4SLinus Torvalds 32164d4471cbSKiyoshi Ueda dm_table_postsuspend_targets(map); 32174d4471cbSKiyoshi Ueda 3218d287483dSAlasdair G Kergon out_unlock: 3219e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock); 3220cf222b37SAlasdair G Kergon return r; 32211da177e4SLinus Torvalds } 32221da177e4SLinus Torvalds 3223ffcc3936SMike Snitzer static int __dm_resume(struct mapped_device *md, struct dm_table *map) 32241da177e4SLinus Torvalds { 3225ffcc3936SMike Snitzer if (map) { 3226ffcc3936SMike Snitzer int r = dm_table_resume_targets(map); 32278757b776SMilan Broz if (r) 3228ffcc3936SMike Snitzer return r; 3229ffcc3936SMike Snitzer } 32302ca3310eSAlasdair G Kergon 32319a1fb464SMikulas Patocka dm_queue_flush(md); 32322ca3310eSAlasdair G Kergon 3233cec47e3dSKiyoshi Ueda /* 3234cec47e3dSKiyoshi Ueda * Flushing deferred I/Os must be done after targets are resumed 3235cec47e3dSKiyoshi Ueda * so that mapping of targets can work correctly. 3236cec47e3dSKiyoshi Ueda * Request-based dm is queueing the deferred I/Os in its request_queue. 
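 * Restarting md->queue below is what gets those deferred requests
 * dispatched again.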
3237cec47e3dSKiyoshi Ueda */ 3238cec47e3dSKiyoshi Ueda if (dm_request_based(md)) 3239cec47e3dSKiyoshi Ueda start_queue(md->queue); 3240cec47e3dSKiyoshi Ueda 32412ca3310eSAlasdair G Kergon unlock_fs(md); 32422ca3310eSAlasdair G Kergon 3243ffcc3936SMike Snitzer return 0; 3244ffcc3936SMike Snitzer } 3245ffcc3936SMike Snitzer 3246ffcc3936SMike Snitzer int dm_resume(struct mapped_device *md) 3247ffcc3936SMike Snitzer { 3248ffcc3936SMike Snitzer int r = -EINVAL; 3249ffcc3936SMike Snitzer struct dm_table *map = NULL; 3250ffcc3936SMike Snitzer 3251ffcc3936SMike Snitzer retry: 3252ffcc3936SMike Snitzer mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); 3253ffcc3936SMike Snitzer 3254ffcc3936SMike Snitzer if (!dm_suspended_md(md)) 3255ffcc3936SMike Snitzer goto out; 3256ffcc3936SMike Snitzer 3257ffcc3936SMike Snitzer if (dm_suspended_internally_md(md)) { 3258ffcc3936SMike Snitzer /* already internally suspended, wait for internal resume */ 3259ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 3260ffcc3936SMike Snitzer r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); 3261ffcc3936SMike Snitzer if (r) 3262ffcc3936SMike Snitzer return r; 3263ffcc3936SMike Snitzer goto retry; 3264ffcc3936SMike Snitzer } 3265ffcc3936SMike Snitzer 3266a12f5d48SEric Dumazet map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 3267ffcc3936SMike Snitzer if (!map || !dm_table_get_size(map)) 3268ffcc3936SMike Snitzer goto out; 3269ffcc3936SMike Snitzer 3270ffcc3936SMike Snitzer r = __dm_resume(md, map); 3271ffcc3936SMike Snitzer if (r) 3272ffcc3936SMike Snitzer goto out; 3273ffcc3936SMike Snitzer 32742ca3310eSAlasdair G Kergon clear_bit(DMF_SUSPENDED, &md->flags); 32752ca3310eSAlasdair G Kergon 3276cf222b37SAlasdair G Kergon r = 0; 3277cf222b37SAlasdair G Kergon out: 3278e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock); 32792ca3310eSAlasdair G Kergon 3280cf222b37SAlasdair G Kergon return r; 32811da177e4SLinus Torvalds } 32821da177e4SLinus Torvalds 3283fd2ed4d2SMikulas Patocka /* 3284fd2ed4d2SMikulas Patocka * Internal suspend/resume works like userspace-driven suspend. It waits 3285fd2ed4d2SMikulas Patocka * until all bios finish and prevents issuing new bios to the target drivers. 3286fd2ed4d2SMikulas Patocka * It may be used only from the kernel. 3287fd2ed4d2SMikulas Patocka */ 3288fd2ed4d2SMikulas Patocka 3289ffcc3936SMike Snitzer static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags) 3290ffcc3936SMike Snitzer { 3291ffcc3936SMike Snitzer struct dm_table *map = NULL; 3292ffcc3936SMike Snitzer 329396b26c8cSMikulas Patocka if (md->internal_suspend_count++) 3294ffcc3936SMike Snitzer return; /* nested internal suspend */ 3295ffcc3936SMike Snitzer 3296ffcc3936SMike Snitzer if (dm_suspended_md(md)) { 3297ffcc3936SMike Snitzer set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 3298ffcc3936SMike Snitzer return; /* nest suspend */ 3299ffcc3936SMike Snitzer } 3300ffcc3936SMike Snitzer 3301a12f5d48SEric Dumazet map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 3302ffcc3936SMike Snitzer 3303ffcc3936SMike Snitzer /* 3304ffcc3936SMike Snitzer * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is 3305ffcc3936SMike Snitzer * supported. Properly supporting a TASK_INTERRUPTIBLE internal suspend 3306ffcc3936SMike Snitzer * would require changing .presuspend to return an error -- avoid this 3307ffcc3936SMike Snitzer * until there is a need for more elaborate variants of internal suspend. 
3308ffcc3936SMike Snitzer */ 3309ffcc3936SMike Snitzer (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE); 3310ffcc3936SMike Snitzer 3311ffcc3936SMike Snitzer set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 3312ffcc3936SMike Snitzer 3313ffcc3936SMike Snitzer dm_table_postsuspend_targets(map); 3314ffcc3936SMike Snitzer } 3315ffcc3936SMike Snitzer 3316ffcc3936SMike Snitzer static void __dm_internal_resume(struct mapped_device *md) 3317ffcc3936SMike Snitzer { 331896b26c8cSMikulas Patocka BUG_ON(!md->internal_suspend_count); 331996b26c8cSMikulas Patocka 332096b26c8cSMikulas Patocka if (--md->internal_suspend_count) 3321ffcc3936SMike Snitzer return; /* resume from nested internal suspend */ 3322ffcc3936SMike Snitzer 3323ffcc3936SMike Snitzer if (dm_suspended_md(md)) 3324ffcc3936SMike Snitzer goto done; /* resume from nested suspend */ 3325ffcc3936SMike Snitzer 3326ffcc3936SMike Snitzer /* 3327ffcc3936SMike Snitzer * NOTE: existing callers don't need to call dm_table_resume_targets 3328ffcc3936SMike Snitzer * (which may fail -- so best to avoid it for now by passing NULL map) 3329ffcc3936SMike Snitzer */ 3330ffcc3936SMike Snitzer (void) __dm_resume(md, NULL); 3331ffcc3936SMike Snitzer 3332ffcc3936SMike Snitzer done: 3333ffcc3936SMike Snitzer clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 3334ffcc3936SMike Snitzer smp_mb__after_atomic(); 3335ffcc3936SMike Snitzer wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY); 3336ffcc3936SMike Snitzer } 3337ffcc3936SMike Snitzer 3338ffcc3936SMike Snitzer void dm_internal_suspend_noflush(struct mapped_device *md) 3339fd2ed4d2SMikulas Patocka { 3340fd2ed4d2SMikulas Patocka mutex_lock(&md->suspend_lock); 3341ffcc3936SMike Snitzer __dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG); 3342ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 3343ffcc3936SMike Snitzer } 3344ffcc3936SMike Snitzer EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush); 3345ffcc3936SMike Snitzer 3346ffcc3936SMike Snitzer void dm_internal_resume(struct mapped_device *md) 3347ffcc3936SMike Snitzer { 3348ffcc3936SMike Snitzer mutex_lock(&md->suspend_lock); 3349ffcc3936SMike Snitzer __dm_internal_resume(md); 3350ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 3351ffcc3936SMike Snitzer } 3352ffcc3936SMike Snitzer EXPORT_SYMBOL_GPL(dm_internal_resume); 3353ffcc3936SMike Snitzer 3354ffcc3936SMike Snitzer /* 3355ffcc3936SMike Snitzer * Fast variants of internal suspend/resume hold md->suspend_lock, 3356ffcc3936SMike Snitzer * which prevents interaction with userspace-driven suspend. 
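 *
 * Note the asymmetry: dm_internal_suspend_fast() returns with
 * md->suspend_lock held and dm_internal_resume_fast() releases it, so
 * the two must always be used as a pair, e.g. (illustrative):
 *
 *	dm_internal_suspend_fast(md);
 *	...operate on the quiesced device...
 *	dm_internal_resume_fast(md);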
3357ffcc3936SMike Snitzer */ 3358ffcc3936SMike Snitzer 3359ffcc3936SMike Snitzer void dm_internal_suspend_fast(struct mapped_device *md) 3360ffcc3936SMike Snitzer { 3361ffcc3936SMike Snitzer mutex_lock(&md->suspend_lock); 3362ffcc3936SMike Snitzer if (dm_suspended_md(md) || dm_suspended_internally_md(md)) 3363fd2ed4d2SMikulas Patocka return; 3364fd2ed4d2SMikulas Patocka 3365fd2ed4d2SMikulas Patocka set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 3366fd2ed4d2SMikulas Patocka synchronize_srcu(&md->io_barrier); 3367fd2ed4d2SMikulas Patocka flush_workqueue(md->wq); 3368fd2ed4d2SMikulas Patocka dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); 3369fd2ed4d2SMikulas Patocka } 3370b735fedeSMikulas Patocka EXPORT_SYMBOL_GPL(dm_internal_suspend_fast); 3371fd2ed4d2SMikulas Patocka 3372ffcc3936SMike Snitzer void dm_internal_resume_fast(struct mapped_device *md) 3373fd2ed4d2SMikulas Patocka { 3374ffcc3936SMike Snitzer if (dm_suspended_md(md) || dm_suspended_internally_md(md)) 3375fd2ed4d2SMikulas Patocka goto done; 3376fd2ed4d2SMikulas Patocka 3377fd2ed4d2SMikulas Patocka dm_queue_flush(md); 3378fd2ed4d2SMikulas Patocka 3379fd2ed4d2SMikulas Patocka done: 3380fd2ed4d2SMikulas Patocka mutex_unlock(&md->suspend_lock); 3381fd2ed4d2SMikulas Patocka } 3382b735fedeSMikulas Patocka EXPORT_SYMBOL_GPL(dm_internal_resume_fast); 3383fd2ed4d2SMikulas Patocka 33841da177e4SLinus Torvalds /*----------------------------------------------------------------- 33851da177e4SLinus Torvalds * Event notification. 33861da177e4SLinus Torvalds *---------------------------------------------------------------*/ 33873abf85b5SPeter Rajnoha int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, 338860935eb2SMilan Broz unsigned cookie) 338969267a30SAlasdair G Kergon { 339060935eb2SMilan Broz char udev_cookie[DM_COOKIE_LENGTH]; 339160935eb2SMilan Broz char *envp[] = { udev_cookie, NULL }; 339260935eb2SMilan Broz 339360935eb2SMilan Broz if (!cookie) 33943abf85b5SPeter Rajnoha return kobject_uevent(&disk_to_dev(md->disk)->kobj, action); 339560935eb2SMilan Broz else { 339660935eb2SMilan Broz snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u", 339760935eb2SMilan Broz DM_COOKIE_ENV_VAR_NAME, cookie); 33983abf85b5SPeter Rajnoha return kobject_uevent_env(&disk_to_dev(md->disk)->kobj, 33993abf85b5SPeter Rajnoha action, envp); 340060935eb2SMilan Broz } 340169267a30SAlasdair G Kergon } 340269267a30SAlasdair G Kergon 34037a8c3d3bSMike Anderson uint32_t dm_next_uevent_seq(struct mapped_device *md) 34047a8c3d3bSMike Anderson { 34057a8c3d3bSMike Anderson return atomic_add_return(1, &md->uevent_seq); 34067a8c3d3bSMike Anderson } 34077a8c3d3bSMike Anderson 34081da177e4SLinus Torvalds uint32_t dm_get_event_nr(struct mapped_device *md) 34091da177e4SLinus Torvalds { 34101da177e4SLinus Torvalds return atomic_read(&md->event_nr); 34111da177e4SLinus Torvalds } 34121da177e4SLinus Torvalds 34131da177e4SLinus Torvalds int dm_wait_event(struct mapped_device *md, int event_nr) 34141da177e4SLinus Torvalds { 34151da177e4SLinus Torvalds return wait_event_interruptible(md->eventq, 34161da177e4SLinus Torvalds (event_nr != atomic_read(&md->event_nr))); 34171da177e4SLinus Torvalds } 34181da177e4SLinus Torvalds 34197a8c3d3bSMike Anderson void dm_uevent_add(struct mapped_device *md, struct list_head *elist) 34207a8c3d3bSMike Anderson { 34217a8c3d3bSMike Anderson unsigned long flags; 34227a8c3d3bSMike Anderson 34237a8c3d3bSMike Anderson spin_lock_irqsave(&md->uevent_lock, flags); 34247a8c3d3bSMike Anderson list_add(elist, &md->uevent_list); 
34257a8c3d3bSMike Anderson spin_unlock_irqrestore(&md->uevent_lock, flags); 34267a8c3d3bSMike Anderson } 34277a8c3d3bSMike Anderson 34281da177e4SLinus Torvalds /* 34291da177e4SLinus Torvalds * The gendisk is only valid as long as you have a reference 34301da177e4SLinus Torvalds * count on 'md'. 34311da177e4SLinus Torvalds */ 34321da177e4SLinus Torvalds struct gendisk *dm_disk(struct mapped_device *md) 34331da177e4SLinus Torvalds { 34341da177e4SLinus Torvalds return md->disk; 34351da177e4SLinus Torvalds } 343665ff5b7dSSami Tolvanen EXPORT_SYMBOL_GPL(dm_disk); 34371da177e4SLinus Torvalds 3438784aae73SMilan Broz struct kobject *dm_kobject(struct mapped_device *md) 3439784aae73SMilan Broz { 34402995fa78SMikulas Patocka return &md->kobj_holder.kobj; 3441784aae73SMilan Broz } 3442784aae73SMilan Broz 3443784aae73SMilan Broz struct mapped_device *dm_get_from_kobject(struct kobject *kobj) 3444784aae73SMilan Broz { 3445784aae73SMilan Broz struct mapped_device *md; 3446784aae73SMilan Broz 34472995fa78SMikulas Patocka md = container_of(kobj, struct mapped_device, kobj_holder.kobj); 3448784aae73SMilan Broz 34494d89b7b4SMilan Broz if (test_bit(DMF_FREEING, &md->flags) || 3450432a212cSMike Anderson dm_deleting_md(md)) 34514d89b7b4SMilan Broz return NULL; 34524d89b7b4SMilan Broz 3453784aae73SMilan Broz dm_get(md); 3454784aae73SMilan Broz return md; 3455784aae73SMilan Broz } 3456784aae73SMilan Broz 34574f186f8bSKiyoshi Ueda int dm_suspended_md(struct mapped_device *md) 34581da177e4SLinus Torvalds { 34591da177e4SLinus Torvalds return test_bit(DMF_SUSPENDED, &md->flags); 34601da177e4SLinus Torvalds } 34611da177e4SLinus Torvalds 3462ffcc3936SMike Snitzer int dm_suspended_internally_md(struct mapped_device *md) 3463ffcc3936SMike Snitzer { 3464ffcc3936SMike Snitzer return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 3465ffcc3936SMike Snitzer } 3466ffcc3936SMike Snitzer 34672c140a24SMikulas Patocka int dm_test_deferred_remove_flag(struct mapped_device *md) 34682c140a24SMikulas Patocka { 34692c140a24SMikulas Patocka return test_bit(DMF_DEFERRED_REMOVE, &md->flags); 34702c140a24SMikulas Patocka } 34712c140a24SMikulas Patocka 347264dbce58SKiyoshi Ueda int dm_suspended(struct dm_target *ti) 347364dbce58SKiyoshi Ueda { 3474ecdb2e25SKiyoshi Ueda return dm_suspended_md(dm_table_get_md(ti->table)); 347564dbce58SKiyoshi Ueda } 347664dbce58SKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_suspended); 347764dbce58SKiyoshi Ueda 34782e93ccc1SKiyoshi Ueda int dm_noflush_suspending(struct dm_target *ti) 34792e93ccc1SKiyoshi Ueda { 3480ecdb2e25SKiyoshi Ueda return __noflush_suspending(dm_table_get_md(ti->table)); 34812e93ccc1SKiyoshi Ueda } 34822e93ccc1SKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_noflush_suspending); 34832e93ccc1SKiyoshi Ueda 348478d8e58aSMike Snitzer struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned type, 348578d8e58aSMike Snitzer unsigned integrity, unsigned per_bio_data_size) 3486e6ee8c0bSKiyoshi Ueda { 348778d8e58aSMike Snitzer struct dm_md_mempools *pools = kzalloc(sizeof(*pools), GFP_KERNEL); 348878d8e58aSMike Snitzer struct kmem_cache *cachep = NULL; 348978d8e58aSMike Snitzer unsigned int pool_size = 0; 34905f015204SJun'ichi Nomura unsigned int front_pad; 3491e6ee8c0bSKiyoshi Ueda 3492e6ee8c0bSKiyoshi Ueda if (!pools) 34934e6e36c3SMike Snitzer return NULL; 3494e6ee8c0bSKiyoshi Ueda 349578d8e58aSMike Snitzer type = filter_md_type(type, md); 349617e149b8SMike Snitzer 349778d8e58aSMike Snitzer switch (type) { 349878d8e58aSMike Snitzer case DM_TYPE_BIO_BASED: 349978d8e58aSMike Snitzer cachep = 
_io_cache; 350078d8e58aSMike Snitzer pool_size = dm_get_reserved_bio_based_ios(); 350178d8e58aSMike Snitzer front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone); 350278d8e58aSMike Snitzer break; 350378d8e58aSMike Snitzer case DM_TYPE_REQUEST_BASED: 350478d8e58aSMike Snitzer cachep = _rq_tio_cache; 350578d8e58aSMike Snitzer pool_size = dm_get_reserved_rq_based_ios(); 350678d8e58aSMike Snitzer pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache); 350778d8e58aSMike Snitzer if (!pools->rq_pool) 350878d8e58aSMike Snitzer goto out; 350978d8e58aSMike Snitzer /* fall through to setup remaining rq-based pools */ 351078d8e58aSMike Snitzer case DM_TYPE_MQ_REQUEST_BASED: 351178d8e58aSMike Snitzer if (!pool_size) 351278d8e58aSMike Snitzer pool_size = dm_get_reserved_rq_based_ios(); 351378d8e58aSMike Snitzer front_pad = offsetof(struct dm_rq_clone_bio_info, clone); 351478d8e58aSMike Snitzer /* per_bio_data_size is not used. See __bind_mempools(). */ 351578d8e58aSMike Snitzer WARN_ON(per_bio_data_size != 0); 351678d8e58aSMike Snitzer break; 351778d8e58aSMike Snitzer default: 351878d8e58aSMike Snitzer BUG(); 351978d8e58aSMike Snitzer } 352078d8e58aSMike Snitzer 352178d8e58aSMike Snitzer if (cachep) { 352278d8e58aSMike Snitzer pools->io_pool = mempool_create_slab_pool(pool_size, cachep); 3523e6ee8c0bSKiyoshi Ueda if (!pools->io_pool) 35245f015204SJun'ichi Nomura goto out; 352578d8e58aSMike Snitzer } 3526e6ee8c0bSKiyoshi Ueda 35273d8aab2dSJunichi Nomura pools->bs = bioset_create_nobvec(pool_size, front_pad); 3528e6ee8c0bSKiyoshi Ueda if (!pools->bs) 35295f015204SJun'ichi Nomura goto out; 3530e6ee8c0bSKiyoshi Ueda 3531a91a2785SMartin K. Petersen if (integrity && bioset_integrity_create(pools->bs, pool_size)) 35325f015204SJun'ichi Nomura goto out; 3533a91a2785SMartin K. 
3542e6ee8c0bSKiyoshi Ueda void dm_free_md_mempools(struct dm_md_mempools *pools)
3543e6ee8c0bSKiyoshi Ueda {
3544e6ee8c0bSKiyoshi Ueda 	if (!pools)
3545e6ee8c0bSKiyoshi Ueda 		return;
3546e6ee8c0bSKiyoshi Ueda 
3547e6ee8c0bSKiyoshi Ueda 	mempool_destroy(pools->io_pool);
35481ae49ea2SMike Snitzer 	mempool_destroy(pools->rq_pool);
35491ae49ea2SMike Snitzer 
3550e6ee8c0bSKiyoshi Ueda 	if (pools->bs)
3551e6ee8c0bSKiyoshi Ueda 		bioset_free(pools->bs);
3552e6ee8c0bSKiyoshi Ueda 
3553e6ee8c0bSKiyoshi Ueda 	kfree(pools);
3554e6ee8c0bSKiyoshi Ueda }
3555e6ee8c0bSKiyoshi Ueda 
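/*
 * Editor's note (descriptive comment, added for clarity): the five dm_pr_*
 * handlers below share one pattern -- pin the live table with
 * dm_get_live_table_for_ioctl(), which also resolves the single underlying
 * block device, forward the persistent-reservation call to that device's
 * pr_ops method when one is provided, fall back to -EOPNOTSUPP otherwise,
 * and drop the SRCU reference with dm_put_live_table() before returning.
 */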
355671cdb697SChristoph Hellwig static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
355771cdb697SChristoph Hellwig 			  u32 flags)
355871cdb697SChristoph Hellwig {
355971cdb697SChristoph Hellwig 	struct mapped_device *md = bdev->bd_disk->private_data;
356071cdb697SChristoph Hellwig 	const struct pr_ops *ops;
356171cdb697SChristoph Hellwig 	struct dm_target *tgt;
356271cdb697SChristoph Hellwig 	fmode_t mode;
356371cdb697SChristoph Hellwig 	int srcu_idx, r;
356471cdb697SChristoph Hellwig 
356571cdb697SChristoph Hellwig 	r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx);
356671cdb697SChristoph Hellwig 	if (r < 0)
356771cdb697SChristoph Hellwig 		return r;
356871cdb697SChristoph Hellwig 
356971cdb697SChristoph Hellwig 	ops = bdev->bd_disk->fops->pr_ops;
357071cdb697SChristoph Hellwig 	if (ops && ops->pr_register)
357171cdb697SChristoph Hellwig 		r = ops->pr_register(bdev, old_key, new_key, flags);
357271cdb697SChristoph Hellwig 	else
357371cdb697SChristoph Hellwig 		r = -EOPNOTSUPP;
357471cdb697SChristoph Hellwig 
357571cdb697SChristoph Hellwig 	dm_put_live_table(md, srcu_idx);
357671cdb697SChristoph Hellwig 	return r;
357771cdb697SChristoph Hellwig }
357871cdb697SChristoph Hellwig 
357971cdb697SChristoph Hellwig static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
358071cdb697SChristoph Hellwig 			 u32 flags)
358171cdb697SChristoph Hellwig {
358271cdb697SChristoph Hellwig 	struct mapped_device *md = bdev->bd_disk->private_data;
358371cdb697SChristoph Hellwig 	const struct pr_ops *ops;
358471cdb697SChristoph Hellwig 	struct dm_target *tgt;
358571cdb697SChristoph Hellwig 	fmode_t mode;
358671cdb697SChristoph Hellwig 	int srcu_idx, r;
358771cdb697SChristoph Hellwig 
358871cdb697SChristoph Hellwig 	r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx);
358971cdb697SChristoph Hellwig 	if (r < 0)
359071cdb697SChristoph Hellwig 		return r;
359171cdb697SChristoph Hellwig 
359271cdb697SChristoph Hellwig 	ops = bdev->bd_disk->fops->pr_ops;
359371cdb697SChristoph Hellwig 	if (ops && ops->pr_reserve)
359471cdb697SChristoph Hellwig 		r = ops->pr_reserve(bdev, key, type, flags);
359571cdb697SChristoph Hellwig 	else
359671cdb697SChristoph Hellwig 		r = -EOPNOTSUPP;
359771cdb697SChristoph Hellwig 
359871cdb697SChristoph Hellwig 	dm_put_live_table(md, srcu_idx);
359971cdb697SChristoph Hellwig 	return r;
360071cdb697SChristoph Hellwig }
360171cdb697SChristoph Hellwig 
360271cdb697SChristoph Hellwig static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
360371cdb697SChristoph Hellwig {
360471cdb697SChristoph Hellwig 	struct mapped_device *md = bdev->bd_disk->private_data;
360571cdb697SChristoph Hellwig 	const struct pr_ops *ops;
360671cdb697SChristoph Hellwig 	struct dm_target *tgt;
360771cdb697SChristoph Hellwig 	fmode_t mode;
360871cdb697SChristoph Hellwig 	int srcu_idx, r;
360971cdb697SChristoph Hellwig 
361071cdb697SChristoph Hellwig 	r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx);
361171cdb697SChristoph Hellwig 	if (r < 0)
361271cdb697SChristoph Hellwig 		return r;
361371cdb697SChristoph Hellwig 
361471cdb697SChristoph Hellwig 	ops = bdev->bd_disk->fops->pr_ops;
361571cdb697SChristoph Hellwig 	if (ops && ops->pr_release)
361671cdb697SChristoph Hellwig 		r = ops->pr_release(bdev, key, type);
361771cdb697SChristoph Hellwig 	else
361871cdb697SChristoph Hellwig 		r = -EOPNOTSUPP;
361971cdb697SChristoph Hellwig 
362071cdb697SChristoph Hellwig 	dm_put_live_table(md, srcu_idx);
362171cdb697SChristoph Hellwig 	return r;
362271cdb697SChristoph Hellwig }
362371cdb697SChristoph Hellwig 
362471cdb697SChristoph Hellwig static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
362571cdb697SChristoph Hellwig 			 enum pr_type type, bool abort)
362671cdb697SChristoph Hellwig {
362771cdb697SChristoph Hellwig 	struct mapped_device *md = bdev->bd_disk->private_data;
362871cdb697SChristoph Hellwig 	const struct pr_ops *ops;
362971cdb697SChristoph Hellwig 	struct dm_target *tgt;
363071cdb697SChristoph Hellwig 	fmode_t mode;
363171cdb697SChristoph Hellwig 	int srcu_idx, r;
363271cdb697SChristoph Hellwig 
363371cdb697SChristoph Hellwig 	r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx);
363471cdb697SChristoph Hellwig 	if (r < 0)
363571cdb697SChristoph Hellwig 		return r;
363671cdb697SChristoph Hellwig 
363771cdb697SChristoph Hellwig 	ops = bdev->bd_disk->fops->pr_ops;
363871cdb697SChristoph Hellwig 	if (ops && ops->pr_preempt)
363971cdb697SChristoph Hellwig 		r = ops->pr_preempt(bdev, old_key, new_key, type, abort);
364071cdb697SChristoph Hellwig 	else
364171cdb697SChristoph Hellwig 		r = -EOPNOTSUPP;
364271cdb697SChristoph Hellwig 
364371cdb697SChristoph Hellwig 	dm_put_live_table(md, srcu_idx);
364471cdb697SChristoph Hellwig 	return r;
364571cdb697SChristoph Hellwig }
364671cdb697SChristoph Hellwig 
364771cdb697SChristoph Hellwig static int dm_pr_clear(struct block_device *bdev, u64 key)
364871cdb697SChristoph Hellwig {
364971cdb697SChristoph Hellwig 	struct mapped_device *md = bdev->bd_disk->private_data;
365071cdb697SChristoph Hellwig 	const struct pr_ops *ops;
365171cdb697SChristoph Hellwig 	struct dm_target *tgt;
365271cdb697SChristoph Hellwig 	fmode_t mode;
365371cdb697SChristoph Hellwig 	int srcu_idx, r;
365471cdb697SChristoph Hellwig 
365571cdb697SChristoph Hellwig 	r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx);
365671cdb697SChristoph Hellwig 	if (r < 0)
365771cdb697SChristoph Hellwig 		return r;
365871cdb697SChristoph Hellwig 
365971cdb697SChristoph Hellwig 	ops = bdev->bd_disk->fops->pr_ops;
366071cdb697SChristoph Hellwig 	if (ops && ops->pr_clear)
366171cdb697SChristoph Hellwig 		r = ops->pr_clear(bdev, key);
366271cdb697SChristoph Hellwig 	else
366371cdb697SChristoph Hellwig 		r = -EOPNOTSUPP;
366471cdb697SChristoph Hellwig 
366571cdb697SChristoph Hellwig 	dm_put_live_table(md, srcu_idx);
366671cdb697SChristoph Hellwig 	return r;
366771cdb697SChristoph Hellwig }
366871cdb697SChristoph Hellwig 
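/*
 * Editor's note -- a hedged userspace sketch, not part of this file. Once
 * dm_pr_ops is wired into dm_blk_dops below, the generic PR ioctls from the
 * 4.4-era <linux/pr.h> uapi reach the handlers above on any dm block
 * device; the device path and key value here are illustrative only:
 *
 *	#include <linux/pr.h>
 *	#include <sys/ioctl.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		struct pr_registration reg = { .old_key = 0, .new_key = 0xabc };
 *		int fd = open("/dev/dm-0", O_RDWR);
 *
 *		if (fd < 0 || ioctl(fd, IOC_PR_REGISTER, &reg))
 *			perror("IOC_PR_REGISTER");
 *		return 0;
 *	}
 */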
366971cdb697SChristoph Hellwig static const struct pr_ops dm_pr_ops = {
367071cdb697SChristoph Hellwig 	.pr_register = dm_pr_register,
367171cdb697SChristoph Hellwig 	.pr_reserve = dm_pr_reserve,
367271cdb697SChristoph Hellwig 	.pr_release = dm_pr_release,
367371cdb697SChristoph Hellwig 	.pr_preempt = dm_pr_preempt,
367471cdb697SChristoph Hellwig 	.pr_clear = dm_pr_clear,
367571cdb697SChristoph Hellwig };
367671cdb697SChristoph Hellwig 
367783d5cde4SAlexey Dobriyan static const struct block_device_operations dm_blk_dops = {
36781da177e4SLinus Torvalds 	.open = dm_blk_open,
36791da177e4SLinus Torvalds 	.release = dm_blk_close,
3680aa129a22SMilan Broz 	.ioctl = dm_blk_ioctl,
36813ac51e74SDarrick J. Wong 	.getgeo = dm_blk_getgeo,
368271cdb697SChristoph Hellwig 	.pr_ops = &dm_pr_ops,
36831da177e4SLinus Torvalds 	.owner = THIS_MODULE
36841da177e4SLinus Torvalds };
36851da177e4SLinus Torvalds 
36861da177e4SLinus Torvalds /*
36871da177e4SLinus Torvalds  * module hooks
36881da177e4SLinus Torvalds  */
36891da177e4SLinus Torvalds module_init(dm_init);
36901da177e4SLinus Torvalds module_exit(dm_exit);
36911da177e4SLinus Torvalds 
36921da177e4SLinus Torvalds module_param(major, uint, 0);
36931da177e4SLinus Torvalds MODULE_PARM_DESC(major, "The major number of the device mapper");
3694f4790826SMike Snitzer 
3695e8603136SMike Snitzer module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
3696e8603136SMike Snitzer MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
3697e8603136SMike Snitzer 
3698f4790826SMike Snitzer module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
3699f4790826SMike Snitzer MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");
3700f4790826SMike Snitzer 
370117e149b8SMike Snitzer module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
370217e149b8SMike Snitzer MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");
370317e149b8SMike Snitzer 
37041da177e4SLinus Torvalds MODULE_DESCRIPTION(DM_NAME " driver");
37051da177e4SLinus Torvalds MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
37061da177e4SLinus Torvalds MODULE_LICENSE("GPL");
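/*
 * Editor's note: the three module_param() knobs above are declared with
 * S_IRUGO | S_IWUSR, so besides load-time options they are runtime-tunable
 * through sysfs, e.g. /sys/module/dm_mod/parameters/reserved_bio_based_ios
 * (assuming the usual dm_mod module name).
 */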