/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/kthread.h>
#include <linux/ktime.h>
#include <linux/elevator.h> /* for rq_end_sector() */
#include <linux/blk-mq.h>

#include <trace/events/block.h>

#define DM_MSG_PREFIX "core"

#ifdef CONFIG_PRINTK
/*
 * ratelimit state to be used in DMXXX_LIMIT().
 */
DEFINE_RATELIMIT_STATE(dm_ratelimit_state,
		       DEFAULT_RATELIMIT_INTERVAL,
		       DEFAULT_RATELIMIT_BURST);
EXPORT_SYMBOL(dm_ratelimit_state);
#endif

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_IDR(_minor_idr);

static DEFINE_SPINLOCK(_minor_lock);

static void do_deferred_remove(struct work_struct *w);

static DECLARE_WORK(deferred_remove_work, do_deferred_remove);

static struct workqueue_struct *deferred_remove_workqueue;

/*
 * For bio-based dm.
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	atomic_t io_count;
	struct bio *bio;
	unsigned long start_time;
	spinlock_t endio_lock;
	struct dm_stats_aux stats_aux;
};

/*
 * For request-based dm.
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
	struct mapped_device *md;
	struct dm_target *ti;
	struct request *orig, *clone;
	struct kthread_work work;
	int error;
	union map_info info;
};

/*
 * For request-based dm - the bio clones we allocate are embedded in these
 * structs.
 *
 * We allocate these with bio_alloc_bioset, using the front_pad parameter when
 * the bioset is created - this means the bio has to come at the end of the
 * struct.
 */
struct dm_rq_clone_bio_info {
	struct bio *orig;
	struct dm_rq_target_io *tio;
	struct bio clone;
};

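/*
 * Illustrative sketch (not used by the driver): because the bio is the last
 * member of struct dm_rq_clone_bio_info and the bioset supplies front_pad
 * space ahead of each bio, a clone handed out by that bioset sits at the tail
 * of such a struct, so its container can be recovered with container_of().
 * The example_info_from_clone() helper below is hypothetical and only
 * demonstrates the pointer arithmetic that end_clone_bio() relies on later.
 */
static inline struct dm_rq_clone_bio_info *example_info_from_clone(struct bio *clone)
{
	return container_of(clone, struct dm_rq_clone_bio_info, clone);
}
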
union map_info *dm_get_rq_mapinfo(struct request *rq)
{
	if (rq && rq->end_io_data)
		return &((struct dm_rq_target_io *)rq->end_io_data)->info;
	return NULL;
}
EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);

#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_MERGE_IS_OPTIONAL 6
#define DMF_DEFERRED_REMOVE 7
#define DMF_SUSPENDED_INTERNALLY 8

/*
 * A dummy definition to make RCU happy.
 * struct dm_table should never be dereferenced in this file.
 */
struct dm_table {
	int undefined__;
};

/*
 * Work processed by per-device workqueue.
 */
struct mapped_device {
	struct srcu_struct io_barrier;
	struct mutex suspend_lock;
	atomic_t holders;
	atomic_t open_count;

	/*
	 * The current mapping.
	 * Use dm_get_live_table{_fast} or take suspend_lock for
	 * dereference.
	 */
	struct dm_table __rcu *map;

	struct list_head table_devices;
	struct mutex table_devices_lock;

	unsigned long flags;

	struct request_queue *queue;
	unsigned type;
	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;

	struct target_type *immutable_target_type;

	struct gendisk *disk;
	char name[16];

	void *interface_ptr;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	atomic_t pending[2];
	wait_queue_head_t wait;
	struct work_struct work;
	struct bio_list deferred;
	spinlock_t deferred_lock;

	/*
	 * Processing queue (flush)
	 */
	struct workqueue_struct *wq;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;
	mempool_t *rq_pool;

	struct bio_set *bs;

	/*
	 * Event handling.
	 */
	atomic_t event_nr;
	wait_queue_head_t eventq;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/*
	 * freeze/thaw support requires holding onto a super block
	 */
	struct super_block *frozen_sb;
	struct block_device *bdev;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/* kobject and completion */
	struct dm_kobject_holder kobj_holder;

	/* zero-length flush that will be cloned and submitted to targets */
	struct bio flush_bio;

	/* the number of internal suspends */
	unsigned internal_suspend_count;

	struct dm_stats stats;

	struct kthread_worker kworker;
	struct task_struct *kworker_task;

	/* for request-based merge heuristic in dm_request_fn() */
	unsigned seq_rq_merge_deadline_usecs;
	int last_rq_rw;
	sector_t last_rq_pos;
	ktime_t last_rq_start_time;

	/* for blk-mq request-based DM support */
	struct blk_mq_tag_set tag_set;
	bool use_blk_mq;
};

#ifdef CONFIG_DM_MQ_DEFAULT
static bool use_blk_mq = true;
#else
static bool use_blk_mq = false;
#endif

bool dm_use_blk_mq(struct mapped_device *md)
{
	return md->use_blk_mq;
}

/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
	mempool_t *io_pool;
	mempool_t *rq_pool;
	struct bio_set *bs;
};

struct table_device {
	struct list_head list;
	atomic_t count;
	struct dm_dev dm_dev;
};

#define RESERVED_BIO_BASED_IOS		16
#define RESERVED_REQUEST_BASED_IOS	256
#define RESERVED_MAX_IOS		1024
static struct kmem_cache *_io_cache;
static struct kmem_cache *_rq_tio_cache;
static struct kmem_cache *_rq_cache;

/*
 * Bio-based DM's mempools' reserved IOs set by the user.
 */
static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;

/*
 * Request-based DM's mempools' reserved IOs set by the user.
 */
static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;

static unsigned __dm_get_module_param(unsigned *module_param,
				      unsigned def, unsigned max)
{
	unsigned param = ACCESS_ONCE(*module_param);
	unsigned modified_param = 0;

	if (!param)
		modified_param = def;
	else if (param > max)
		modified_param = max;

	if (modified_param) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned dm_get_reserved_bio_based_ios(void)
{
	return __dm_get_module_param(&reserved_bio_based_ios,
				     RESERVED_BIO_BASED_IOS, RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);

unsigned dm_get_reserved_rq_based_ios(void)
{
	return __dm_get_module_param(&reserved_rq_based_ios,
				     RESERVED_REQUEST_BASED_IOS, RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);

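/*
 * Illustrative sketch (assumption: the real declarations live elsewhere in
 * this file, outside this excerpt): the "set by the user" knobs above are the
 * kind of values typically exposed as writable module parameters, while the
 * dm_get_reserved_*_ios() readers map a written 0 back to the default and cap
 * anything above RESERVED_MAX_IOS.
 */
#if 0	/* example only, not part of this excerpt */
module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
#endif
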
static int __init local_init(void)
{
	int r = -ENOMEM;

	/* allocate a slab for the dm_ios */
	_io_cache = KMEM_CACHE(dm_io, 0);
	if (!_io_cache)
		return r;

	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
	if (!_rq_tio_cache)
		goto out_free_io_cache;

	_rq_cache = kmem_cache_create("dm_clone_request", sizeof(struct request),
				      __alignof__(struct request), 0, NULL);
	if (!_rq_cache)
		goto out_free_rq_tio_cache;

	r = dm_uevent_init();
	if (r)
		goto out_free_rq_cache;

	deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
	if (!deferred_remove_workqueue) {
		r = -ENOMEM;
		goto out_uevent_exit;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_free_workqueue;

	if (!_major)
		_major = r;

	return 0;

out_free_workqueue:
	destroy_workqueue(deferred_remove_workqueue);
out_uevent_exit:
	dm_uevent_exit();
out_free_rq_cache:
	kmem_cache_destroy(_rq_cache);
out_free_rq_tio_cache:
	kmem_cache_destroy(_rq_tio_cache);
out_free_io_cache:
	kmem_cache_destroy(_io_cache);

	return r;
}

static void local_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(deferred_remove_workqueue);

	kmem_cache_destroy(_rq_cache);
	kmem_cache_destroy(_rq_tio_cache);
	kmem_cache_destroy(_io_cache);
	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_io_init,
	dm_kcopyd_init,
	dm_interface_init,
	dm_statistics_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_io_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
	dm_statistics_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();

	/*
	 * Should be empty by this point.
	 */
	idr_destroy(&_minor_idr);
}

/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{
	return test_bit(DMF_DELETING, &md->flags);
}

static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);
out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}

static void dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = disk->private_data;
	if (WARN_ON(!md))
		goto out;

	if (atomic_dec_and_test(&md->open_count) &&
	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
		queue_work(deferred_remove_workqueue, &deferred_remove_work);

	dm_put(md);
out:
	spin_unlock(&_minor_lock);
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md)) {
		r = -EBUSY;
		if (mark_deferred)
			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
		r = -EEXIST;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

int dm_cancel_deferred_remove(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (test_bit(DMF_DELETING, &md->flags))
		r = -EBUSY;
	else
		clear_bit(DMF_DEFERRED_REMOVE, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static void do_deferred_remove(struct work_struct *w)
{
	dm_deferred_remove();
}

sector_t dm_get_size(struct mapped_device *md)
{
	return get_capacity(md->disk);
}

struct request_queue *dm_get_md_queue(struct mapped_device *md)
{
	return md->queue;
}

struct dm_stats *dm_get_stats(struct mapped_device *md)
{
	return &md->stats;
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

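/*
 * Illustrative sketch (hypothetical caller, not part of this file): a removal
 * path is expected to use dm_lock_for_deletion() above and treat -EBUSY as
 * "device still open"; with defer_if_open set, the open device is flagged
 * DMF_DEFERRED_REMOVE so that dm_blk_close() queues the real removal later.
 */
static inline int example_try_remove(struct mapped_device *md, bool defer_if_open)
{
	/* -EBUSY: still open (and possibly marked for deferred removal) */
	return dm_lock_for_deletion(md, defer_if_open, false);
}
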
static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	int srcu_idx;
	struct dm_table *map;
	struct dm_target *tgt;
	int r = -ENOTTY;

retry:
	map = dm_get_live_table(md, &srcu_idx);

	if (!map || !dm_table_get_size(map))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		goto out;

	tgt = dm_table_get_target(map, 0);
	if (!tgt->type->ioctl)
		goto out;

	if (dm_suspended_md(md)) {
		r = -EAGAIN;
		goto out;
	}

	r = tgt->type->ioctl(tgt, cmd, arg);

out:
	dm_put_live_table(md, srcu_idx);

	if (r == -ENOTCONN) {
		msleep(10);
		goto retry;
	}

	return r;
}

static struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
	bio_put(&tio->clone);
}

static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
					    gfp_t gfp_mask)
{
	return mempool_alloc(md->io_pool, gfp_mask);
}

static void free_rq_tio(struct dm_rq_target_io *tio)
{
	mempool_free(tio, tio->md->io_pool);
}

static struct request *alloc_clone_request(struct mapped_device *md,
					   gfp_t gfp_mask)
{
	return mempool_alloc(md->rq_pool, gfp_mask);
}

static void free_clone_request(struct mapped_device *md, struct request *rq)
{
	mempool_free(rq, md->rq_pool);
}

static int md_in_flight(struct mapped_device *md)
{
	return atomic_read(&md->pending[READ]) +
	       atomic_read(&md->pending[WRITE]);
}

static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	int cpu;
	int rw = bio_data_dir(bio);

	io->start_time = jiffies;

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_unlock();
	atomic_set(&dm_disk(md)->part0.in_flight[rw],
		   atomic_inc_return(&md->pending[rw]));

	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
				    bio_sectors(bio), false, 0, &io->stats_aux);
}

static void end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	unsigned long duration = jiffies - io->start_time;
	int pending;
	int rw = bio_data_dir(bio);

	generic_end_io_acct(rw, &dm_disk(md)->part0, io->start_time);

	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
				    bio_sectors(bio), true, duration, &io->stats_aux);

	/*
	 * After this is decremented the bio must not be touched if it is
	 * a flush.
	 */
	pending = atomic_dec_return(&md->pending[rw]);
	atomic_set(&dm_disk(md)->part0.in_flight[rw], pending);
	pending += atomic_read(&md->pending[rw^0x1]);

	/* nudge anyone waiting on suspend queue */
	if (!pending)
		wake_up(&md->wait);
}

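/*
 * Illustrative sketch (simplified, not the driver's actual wait loop): the
 * accounting above wakes md->wait whenever the in-flight count drops, so a
 * suspend-style waiter can sleep until md_in_flight() reaches zero.  The
 * example_wait_for_quiesce() helper below is hypothetical.
 */
static inline void example_wait_for_quiesce(struct mapped_device *md)
{
	wait_event(md->wait, !md_in_flight(md));
}
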
/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&md->deferred_lock, flags);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irqrestore(&md->deferred_lock, flags);
	queue_work(md->wq, &md->work);
}

/*
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_put_live_table() when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
{
	*srcu_idx = srcu_read_lock(&md->io_barrier);

	return srcu_dereference(md->map, &md->io_barrier);
}

void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
{
	srcu_read_unlock(&md->io_barrier, srcu_idx);
}

void dm_sync_table(struct mapped_device *md)
{
	synchronize_srcu(&md->io_barrier);
	synchronize_rcu_expedited();
}

/*
 * A fast alternative to dm_get_live_table/dm_put_live_table.
 * The caller must not block between these two functions.
 */
static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
{
	rcu_read_lock();
	return rcu_dereference(md->map);
}

static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
{
	rcu_read_unlock();
}

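/*
 * Illustrative sketch (not used by the driver): a typical reader pairs
 * dm_get_live_table() with dm_put_live_table() around any use of the live
 * table, as dm_blk_ioctl() does above.  The example_count_targets() helper
 * below is hypothetical and only shows the expected locking pattern.
 */
static inline unsigned example_count_targets(struct mapped_device *md)
{
	int srcu_idx;
	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
	unsigned n = map ? dm_table_get_num_targets(map) : 0;

	dm_put_live_table(md, srcu_idx);
	return n;
}
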
/*
 * Open a table device so we can use it as a map destination.
 */
static int open_table_device(struct table_device *td, dev_t dev,
			     struct mapped_device *md)
{
	static char *_claim_ptr = "I belong to device-mapper";
	struct block_device *bdev;

	int r;

	BUG_ON(td->dm_dev.bdev);

	bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _claim_ptr);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	r = bd_link_disk_holder(bdev, dm_disk(md));
	if (r) {
		blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
		return r;
	}

	td->dm_dev.bdev = bdev;
	return 0;
}

/*
 * Close a table device that we've been using.
 */
static void close_table_device(struct table_device *td, struct mapped_device *md)
{
	if (!td->dm_dev.bdev)
		return;

	bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
	blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
	td->dm_dev.bdev = NULL;
}

static struct table_device *find_table_device(struct list_head *l, dev_t dev,
					      fmode_t mode) {
	struct table_device *td;

	list_for_each_entry(td, l, list)
		if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
			return td;

	return NULL;
}

int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
			struct dm_dev **result) {
	int r;
	struct table_device *td;

	mutex_lock(&md->table_devices_lock);
	td = find_table_device(&md->table_devices, dev, mode);
	if (!td) {
		td = kmalloc(sizeof(*td), GFP_KERNEL);
		if (!td) {
			mutex_unlock(&md->table_devices_lock);
			return -ENOMEM;
		}

		td->dm_dev.mode = mode;
		td->dm_dev.bdev = NULL;

		if ((r = open_table_device(td, dev, md))) {
			mutex_unlock(&md->table_devices_lock);
			kfree(td);
			return r;
		}

		format_dev_t(td->dm_dev.name, dev);

		atomic_set(&td->count, 0);
		list_add(&td->list, &md->table_devices);
	}
	atomic_inc(&td->count);
	mutex_unlock(&md->table_devices_lock);

	*result = &td->dm_dev;
	return 0;
}
EXPORT_SYMBOL_GPL(dm_get_table_device);

void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
{
	struct table_device *td = container_of(d, struct table_device, dm_dev);

	mutex_lock(&md->table_devices_lock);
	if (atomic_dec_and_test(&td->count)) {
		close_table_device(td, md);
		list_del(&td->list);
		kfree(td);
	}
	mutex_unlock(&md->table_devices_lock);
}
EXPORT_SYMBOL(dm_put_table_device);

static void free_table_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct table_device *td = list_entry(tmp, struct table_device, list);

		DMWARN("dm_destroy: %s still exists with %d references",
		       td->dm_dev.name, atomic_read(&td->count));
		kfree(td);
	}
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

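/*
 * Illustrative sketch (hypothetical caller, not part of this file): users of
 * dm_get_table_device() above pair it with dm_put_table_device(); repeated
 * gets for the same dev_t and mode share one refcounted entry on
 * md->table_devices.  FMODE_READ here is only an example mode.
 */
static inline int example_open_then_close(struct mapped_device *md, dev_t dev)
{
	struct dm_dev *d;
	int r = dm_get_table_device(md, dev, FMODE_READ, &d);

	if (!r)
		dm_put_table_device(md, d);
	return r;
}
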
/*-----------------------------------------------------------------
 * CRUD START:
 * A more elegant solution is in the works that uses the queue
 * merge fn, unfortunately there are a couple of changes to
 * the block layer that I want to make for this. So in the
 * interests of getting something for people to use I give
 * you this clearly demarcated crap.
 *---------------------------------------------------------------*/

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, int error)
{
	unsigned long flags;
	int io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;

	/* Push-back supersedes any I/O errors */
	if (unlikely(error)) {
		spin_lock_irqsave(&io->endio_lock, flags);
		if (!(io->error > 0 && __noflush_suspending(md)))
			io->error = error;
		spin_unlock_irqrestore(&io->endio_lock, flags);
	}

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->error == DM_ENDIO_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md))
				bio_list_add_head(&md->deferred, io->bio);
			else
				/* noflush suspend was interrupted. */
				io->error = -EIO;
			spin_unlock_irqrestore(&md->deferred_lock, flags);
		}

		io_error = io->error;
		bio = io->bio;
		end_io_acct(io);
		free_io(md, io);

		if (io_error == DM_ENDIO_REQUEUE)
			return;

		if ((bio->bi_rw & REQ_FLUSH) && bio->bi_iter.bi_size) {
			/*
			 * Preflush done for flush with data, reissue
			 * without REQ_FLUSH.
			 */
			bio->bi_rw &= ~REQ_FLUSH;
			queue_io(md, bio);
		} else {
			/* done with normal IO or empty flush */
			trace_block_bio_complete(md->queue, bio, io_error);
			bio_endio(bio, io_error);
		}
	}
}

static void disable_write_same(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE SAME, disable it */
	limits->max_write_same_sectors = 0;
}

static void clone_endio(struct bio *bio, int error)
{
	int r = error;
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		error = -EIO;

	if (endio) {
		r = endio(tio->ti, bio, error);
		if (r < 0 || r == DM_ENDIO_REQUEUE)
			/*
			 * error and requeue request are handled
			 * in dec_pending().
			 */
			error = r;
		else if (r == DM_ENDIO_INCOMPLETE)
			/* The target will handle the io */
			return;
		else if (r) {
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	if (unlikely(r == -EREMOTEIO && (bio->bi_rw & REQ_WRITE_SAME) &&
		     !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors))
		disable_write_same(md);

	free_tio(md, tio);
	dec_pending(io, error);
}

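/*
 * Illustrative sketch (hypothetical target code, not part of this file): the
 * return value of a bio-based target's end_io hook is interpreted by
 * clone_endio() above - a negative errno or DM_ENDIO_REQUEUE is handed on to
 * dec_pending(), DM_ENDIO_INCOMPLETE means the target still owns the bio, and
 * 0 leaves the original completion status unchanged.
 */
static inline int example_end_io(struct dm_target *ti, struct bio *bio, int error)
{
	if (error == -EWOULDBLOCK)
		return DM_ENDIO_REQUEUE;	/* ask the core to push back the I/O */
	return error;				/* complete with this status */
}
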
/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone, int error)
{
	struct dm_rq_clone_bio_info *info =
		container_of(clone, struct dm_rq_clone_bio_info, clone);
	struct dm_rq_target_io *tio = info->tio;
	struct bio *bio = info->orig;
	unsigned int nr_bytes = info->orig->bi_iter.bi_size;

	bio_put(clone);

	if (tio->error)
		/*
		 * An error has already been detected on the request.
		 * Once an error has occurred, just let clone->end_io() handle
		 * the remainder.
		 */
		return;
	else if (error) {
		/*
		 * Don't report the error to the upper layer yet.
		 * The error handling decision is made by the target driver,
		 * when the request is completed.
		 */
		tio->error = error;
		return;
	}

	/*
	 * I/O for the bio successfully completed.
	 * Report the data completion to the upper layer.
	 */

	/*
	 * bios are processed from the head of the list.
	 * So the completing bio should always be rq->bio.
	 * If it's not, something is wrong.
	 */
	if (tio->orig->bio != bio)
		DMERR("bio completion is going in the middle of the request");

	/*
	 * Update the original request.
	 * Do not use blk_end_request() here, because it may complete
	 * the original request before the clone, and break the ordering.
	 */
	blk_update_request(tio->orig, 0, nr_bytes);
}

static struct dm_rq_target_io *tio_from_request(struct request *rq)
{
	return (rq->q->mq_ops ? blk_mq_rq_to_pdu(rq) : rq->special);
}

/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Or do dm_get() before calling this function and dm_put() later.
 */
static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
{
	int nr_requests_pending;

	atomic_dec(&md->pending[rw]);

	/* nudge anyone waiting on suspend queue */
	nr_requests_pending = md_in_flight(md);
	if (!nr_requests_pending)
		wake_up(&md->wait);

	/*
	 * Run this off this callpath, as drivers could invoke end_io while
	 * inside their request_fn (and holding the queue lock). Calling
	 * back into ->request_fn() could deadlock attempting to grab the
	 * queue lock again.
	 */
	if (run_queue) {
		if (md->queue->mq_ops)
			blk_mq_run_hw_queues(md->queue, true);
		else if (!nr_requests_pending ||
			 (nr_requests_pending >= md->queue->nr_congestion_on))
			blk_run_queue_async(md->queue);
	}

	/*
	 * dm_put() must be at the end of this function. See the comment above
	 */
	dm_put(md);
}

static void free_rq_clone(struct request *clone)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;

	blk_rq_unprep_clone(clone);

	if (md->type == DM_TYPE_MQ_REQUEST_BASED)
		/* stacked on blk-mq queue(s) */
		tio->ti->type->release_clone_rq(clone);
	else if (!md->queue->mq_ops)
		/* request_fn queue stacked on request_fn queue(s) */
		free_clone_request(md, clone);
	/*
	 * NOTE: for the blk-mq queue stacked on request_fn queue(s) case:
	 * no need to call free_clone_request() because we leverage blk-mq by
	 * allocating the clone at the end of the blk-mq pdu (see: clone_rq)
	 */

	if (!md->queue->mq_ops)
		free_rq_tio(tio);
}

/*
 * Complete the clone and the original request.
 * Must be called without clone's queue lock held,
 * see end_clone_request() for more details.
 */
static void dm_end_request(struct request *clone, int error)
{
	int rw = rq_data_dir(clone);
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;

	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
		rq->errors = clone->errors;
		rq->resid_len = clone->resid_len;

		if (rq->sense)
			/*
			 * We are using the sense buffer of the original
			 * request.
			 * So setting the length of the sense data is enough.
			 */
			rq->sense_len = clone->sense_len;
	}

	free_rq_clone(clone);
	if (!rq->q->mq_ops)
		blk_end_request_all(rq, error);
	else
		blk_mq_end_request(rq, error);
	rq_completed(md, rw, true);
}

static void dm_unprep_request(struct request *rq)
{
	struct dm_rq_target_io *tio = tio_from_request(rq);
	struct request *clone = tio->clone;

	if (!rq->q->mq_ops) {
		rq->special = NULL;
		rq->cmd_flags &= ~REQ_DONTPREP;
	}

	if (clone)
		free_rq_clone(clone);
}

/*
 * Requeue the original request of a clone.
 */
static void old_requeue_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, rq);
	blk_run_queue_async(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static void dm_requeue_unmapped_original_request(struct mapped_device *md,
						 struct request *rq)
{
	int rw = rq_data_dir(rq);

	dm_unprep_request(rq);

	if (!rq->q->mq_ops)
		old_requeue_request(rq);
	else {
		blk_mq_requeue_request(rq);
		blk_mq_kick_requeue_list(rq->q);
	}

	rq_completed(md, rw, false);
}

static void dm_requeue_unmapped_request(struct request *clone)
{
	struct dm_rq_target_io *tio = clone->end_io_data;

	dm_requeue_unmapped_original_request(tio->md, tio->orig);
}

static void old_stop_queue(struct request_queue *q)
{
	unsigned long flags;

	if (blk_queue_stopped(q))
		return;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_stop_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static void stop_queue(struct request_queue *q)
{
	if (!q->mq_ops)
old_stop_queue(q); 1209bfebd1cdSMike Snitzer else 1210bfebd1cdSMike Snitzer blk_mq_stop_hw_queues(q); 1211bfebd1cdSMike Snitzer } 1212bfebd1cdSMike Snitzer 1213bfebd1cdSMike Snitzer static void old_start_queue(struct request_queue *q) 1214bfebd1cdSMike Snitzer { 1215cec47e3dSKiyoshi Ueda unsigned long flags; 1216cec47e3dSKiyoshi Ueda 1217cec47e3dSKiyoshi Ueda spin_lock_irqsave(q->queue_lock, flags); 1218cec47e3dSKiyoshi Ueda if (blk_queue_stopped(q)) 1219cec47e3dSKiyoshi Ueda blk_start_queue(q); 1220bfebd1cdSMike Snitzer spin_unlock_irqrestore(q->queue_lock, flags); 1221cec47e3dSKiyoshi Ueda } 1222cec47e3dSKiyoshi Ueda 1223cec47e3dSKiyoshi Ueda static void start_queue(struct request_queue *q) 1224cec47e3dSKiyoshi Ueda { 1225bfebd1cdSMike Snitzer if (!q->mq_ops) 1226bfebd1cdSMike Snitzer old_start_queue(q); 1227bfebd1cdSMike Snitzer else 1228bfebd1cdSMike Snitzer blk_mq_start_stopped_hw_queues(q, true); 1229cec47e3dSKiyoshi Ueda } 1230cec47e3dSKiyoshi Ueda 123111a68244SKiyoshi Ueda static void dm_done(struct request *clone, int error, bool mapped) 123211a68244SKiyoshi Ueda { 123311a68244SKiyoshi Ueda int r = error; 123411a68244SKiyoshi Ueda struct dm_rq_target_io *tio = clone->end_io_data; 1235ba1cbad9SMike Snitzer dm_request_endio_fn rq_end_io = NULL; 1236ba1cbad9SMike Snitzer 1237ba1cbad9SMike Snitzer if (tio->ti) { 1238ba1cbad9SMike Snitzer rq_end_io = tio->ti->type->rq_end_io; 123911a68244SKiyoshi Ueda 124011a68244SKiyoshi Ueda if (mapped && rq_end_io) 124111a68244SKiyoshi Ueda r = rq_end_io(tio->ti, clone, error, &tio->info); 1242ba1cbad9SMike Snitzer } 124311a68244SKiyoshi Ueda 12447eee4ae2SMike Snitzer if (unlikely(r == -EREMOTEIO && (clone->cmd_flags & REQ_WRITE_SAME) && 12457eee4ae2SMike Snitzer !clone->q->limits.max_write_same_sectors)) 12467eee4ae2SMike Snitzer disable_write_same(tio->md); 12477eee4ae2SMike Snitzer 124811a68244SKiyoshi Ueda if (r <= 0) 124911a68244SKiyoshi Ueda /* The target wants to complete the I/O */ 125011a68244SKiyoshi Ueda dm_end_request(clone, r); 125111a68244SKiyoshi Ueda else if (r == DM_ENDIO_INCOMPLETE) 125211a68244SKiyoshi Ueda /* The target will handle the I/O */ 125311a68244SKiyoshi Ueda return; 125411a68244SKiyoshi Ueda else if (r == DM_ENDIO_REQUEUE) 125511a68244SKiyoshi Ueda /* The target wants to requeue the I/O */ 125611a68244SKiyoshi Ueda dm_requeue_unmapped_request(clone); 125711a68244SKiyoshi Ueda else { 125811a68244SKiyoshi Ueda DMWARN("unimplemented target endio return value: %d", r); 125911a68244SKiyoshi Ueda BUG(); 126011a68244SKiyoshi Ueda } 126111a68244SKiyoshi Ueda } 126211a68244SKiyoshi Ueda 1263cec47e3dSKiyoshi Ueda /* 1264cec47e3dSKiyoshi Ueda * Request completion handler for request-based dm 1265cec47e3dSKiyoshi Ueda */ 1266cec47e3dSKiyoshi Ueda static void dm_softirq_done(struct request *rq) 1267cec47e3dSKiyoshi Ueda { 126811a68244SKiyoshi Ueda bool mapped = true; 1269bfebd1cdSMike Snitzer struct dm_rq_target_io *tio = tio_from_request(rq); 1270466d89a6SKeith Busch struct request *clone = tio->clone; 1271bfebd1cdSMike Snitzer int rw; 1272cec47e3dSKiyoshi Ueda 1273e5863d9aSMike Snitzer if (!clone) { 1274bfebd1cdSMike Snitzer rw = rq_data_dir(rq); 1275bfebd1cdSMike Snitzer if (!rq->q->mq_ops) { 1276e5863d9aSMike Snitzer blk_end_request_all(rq, tio->error); 1277bfebd1cdSMike Snitzer rq_completed(tio->md, rw, false); 1278e5863d9aSMike Snitzer free_rq_tio(tio); 1279bfebd1cdSMike Snitzer } else { 1280bfebd1cdSMike Snitzer blk_mq_end_request(rq, tio->error); 1281bfebd1cdSMike Snitzer rq_completed(tio->md, rw, false); 1282bfebd1cdSMike 
Snitzer } 1283e5863d9aSMike Snitzer return; 1284e5863d9aSMike Snitzer } 1285cec47e3dSKiyoshi Ueda 128611a68244SKiyoshi Ueda if (rq->cmd_flags & REQ_FAILED) 128711a68244SKiyoshi Ueda mapped = false; 1288cec47e3dSKiyoshi Ueda 128911a68244SKiyoshi Ueda dm_done(clone, tio->error, mapped); 1290cec47e3dSKiyoshi Ueda } 1291cec47e3dSKiyoshi Ueda 1292cec47e3dSKiyoshi Ueda /* 1293cec47e3dSKiyoshi Ueda * Complete the clone and the original request with the error status 1294cec47e3dSKiyoshi Ueda * through softirq context. 1295cec47e3dSKiyoshi Ueda */ 1296466d89a6SKeith Busch static void dm_complete_request(struct request *rq, int error) 1297cec47e3dSKiyoshi Ueda { 1298bfebd1cdSMike Snitzer struct dm_rq_target_io *tio = tio_from_request(rq); 1299cec47e3dSKiyoshi Ueda 1300cec47e3dSKiyoshi Ueda tio->error = error; 1301cec47e3dSKiyoshi Ueda blk_complete_request(rq); 1302cec47e3dSKiyoshi Ueda } 1303cec47e3dSKiyoshi Ueda 1304cec47e3dSKiyoshi Ueda /* 1305cec47e3dSKiyoshi Ueda * Complete the not-mapped clone and the original request with the error status 1306cec47e3dSKiyoshi Ueda * through softirq context. 1307cec47e3dSKiyoshi Ueda * Target's rq_end_io() function isn't called. 1308e5863d9aSMike Snitzer * This may be used when the target's map_rq() or clone_and_map_rq() functions fail. 1309cec47e3dSKiyoshi Ueda */ 1310466d89a6SKeith Busch static void dm_kill_unmapped_request(struct request *rq, int error) 1311cec47e3dSKiyoshi Ueda { 1312cec47e3dSKiyoshi Ueda rq->cmd_flags |= REQ_FAILED; 1313466d89a6SKeith Busch dm_complete_request(rq, error); 1314cec47e3dSKiyoshi Ueda } 1315cec47e3dSKiyoshi Ueda 1316cec47e3dSKiyoshi Ueda /* 1317bfebd1cdSMike Snitzer * Called with the clone's queue lock held (for non-blk-mq) 1318cec47e3dSKiyoshi Ueda */ 1319cec47e3dSKiyoshi Ueda static void end_clone_request(struct request *clone, int error) 1320cec47e3dSKiyoshi Ueda { 1321466d89a6SKeith Busch struct dm_rq_target_io *tio = clone->end_io_data; 1322466d89a6SKeith Busch 1323e5863d9aSMike Snitzer if (!clone->q->mq_ops) { 1324cec47e3dSKiyoshi Ueda /* 1325cec47e3dSKiyoshi Ueda * For just cleaning up the information of the queue in which 1326cec47e3dSKiyoshi Ueda * the clone was dispatched. 1327e5863d9aSMike Snitzer * The clone is *NOT* freed actually here because it is alloced 1328e5863d9aSMike Snitzer * from dm own mempool (REQ_ALLOCED isn't set). 1329cec47e3dSKiyoshi Ueda */ 1330cec47e3dSKiyoshi Ueda __blk_put_request(clone->q, clone); 1331e5863d9aSMike Snitzer } 1332cec47e3dSKiyoshi Ueda 1333cec47e3dSKiyoshi Ueda /* 1334cec47e3dSKiyoshi Ueda * Actual request completion is done in a softirq context which doesn't 1335466d89a6SKeith Busch * hold the clone's queue lock. Otherwise, deadlock could occur because: 1336cec47e3dSKiyoshi Ueda * - another request may be submitted by the upper level driver 1337cec47e3dSKiyoshi Ueda * of the stacking during the completion 1338cec47e3dSKiyoshi Ueda * - the submission which requires queue lock may be done 1339466d89a6SKeith Busch * against this clone's queue 1340cec47e3dSKiyoshi Ueda */ 1341466d89a6SKeith Busch dm_complete_request(tio->orig, error); 1342cec47e3dSKiyoshi Ueda } 1343cec47e3dSKiyoshi Ueda 134456a67df7SMike Snitzer /* 134556a67df7SMike Snitzer * Return maximum size of I/O possible at the supplied sector up to the current 134656a67df7SMike Snitzer * target boundary. 
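 * Worked example (editorial; hypothetical numbers, and assuming dm_target_offset()
 * from dm.h evaluates to sector - ti->begin): with ti->begin = 1000, ti->len = 500
 * and sector = 1200, the boundary below allows 500 - 200 = 300 more sectors.
 * max_io_len() additionally honours ti->max_io_len; if that is 128, the next
 * internal boundary after target offset 200 sits at 256, so the result is
 * min(300, 128 - (200 & 127)) = 56 sectors.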
134756a67df7SMike Snitzer */ 134856a67df7SMike Snitzer static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti) 13491da177e4SLinus Torvalds { 135056a67df7SMike Snitzer sector_t target_offset = dm_target_offset(ti, sector); 135156a67df7SMike Snitzer 135256a67df7SMike Snitzer return ti->len - target_offset; 135356a67df7SMike Snitzer } 135456a67df7SMike Snitzer 135556a67df7SMike Snitzer static sector_t max_io_len(sector_t sector, struct dm_target *ti) 135656a67df7SMike Snitzer { 135756a67df7SMike Snitzer sector_t len = max_io_len_target_boundary(sector, ti); 1358542f9038SMike Snitzer sector_t offset, max_len; 13591da177e4SLinus Torvalds 13601da177e4SLinus Torvalds /* 13611da177e4SLinus Torvalds * Does the target need to split even further? 13621da177e4SLinus Torvalds */ 1363542f9038SMike Snitzer if (ti->max_io_len) { 1364542f9038SMike Snitzer offset = dm_target_offset(ti, sector); 1365542f9038SMike Snitzer if (unlikely(ti->max_io_len & (ti->max_io_len - 1))) 1366542f9038SMike Snitzer max_len = sector_div(offset, ti->max_io_len); 1367542f9038SMike Snitzer else 1368542f9038SMike Snitzer max_len = offset & (ti->max_io_len - 1); 1369542f9038SMike Snitzer max_len = ti->max_io_len - max_len; 1370542f9038SMike Snitzer 1371542f9038SMike Snitzer if (len > max_len) 1372542f9038SMike Snitzer len = max_len; 13731da177e4SLinus Torvalds } 13741da177e4SLinus Torvalds 13751da177e4SLinus Torvalds return len; 13761da177e4SLinus Torvalds } 13771da177e4SLinus Torvalds 1378542f9038SMike Snitzer int dm_set_target_max_io_len(struct dm_target *ti, sector_t len) 1379542f9038SMike Snitzer { 1380542f9038SMike Snitzer if (len > UINT_MAX) { 1381542f9038SMike Snitzer DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)", 1382542f9038SMike Snitzer (unsigned long long)len, UINT_MAX); 1383542f9038SMike Snitzer ti->error = "Maximum size of target IO is too large"; 1384542f9038SMike Snitzer return -EINVAL; 1385542f9038SMike Snitzer } 1386542f9038SMike Snitzer 1387542f9038SMike Snitzer ti->max_io_len = (uint32_t) len; 1388542f9038SMike Snitzer 1389542f9038SMike Snitzer return 0; 1390542f9038SMike Snitzer } 1391542f9038SMike Snitzer EXPORT_SYMBOL_GPL(dm_set_target_max_io_len); 1392542f9038SMike Snitzer 13931dd40c3eSMikulas Patocka /* 13941dd40c3eSMikulas Patocka * A target may call dm_accept_partial_bio only from the map routine. It is 13951dd40c3eSMikulas Patocka * allowed for all bio types except REQ_FLUSH. 13961dd40c3eSMikulas Patocka * 13971dd40c3eSMikulas Patocka * dm_accept_partial_bio informs the dm that the target only wants to process 13981dd40c3eSMikulas Patocka * additional n_sectors sectors of the bio and the rest of the data should be 13991dd40c3eSMikulas Patocka * sent in a next bio. 14001dd40c3eSMikulas Patocka * 14011dd40c3eSMikulas Patocka * A diagram that explains the arithmetics: 14021dd40c3eSMikulas Patocka * +--------------------+---------------+-------+ 14031dd40c3eSMikulas Patocka * | 1 | 2 | 3 | 14041dd40c3eSMikulas Patocka * +--------------------+---------------+-------+ 14051dd40c3eSMikulas Patocka * 14061dd40c3eSMikulas Patocka * <-------------- *tio->len_ptr ---------------> 14071dd40c3eSMikulas Patocka * <------- bi_size -------> 14081dd40c3eSMikulas Patocka * <-- n_sectors --> 14091dd40c3eSMikulas Patocka * 14101dd40c3eSMikulas Patocka * Region 1 was already iterated over with bio_advance or similar function. 
14111dd40c3eSMikulas Patocka * (it may be empty if the target doesn't use bio_advance) 14121dd40c3eSMikulas Patocka * Region 2 is the remaining bio size that the target wants to process. 14131dd40c3eSMikulas Patocka * (it may be empty if region 1 is non-empty, although there is no reason 14141dd40c3eSMikulas Patocka * to make it empty) 14151dd40c3eSMikulas Patocka * The target requires that region 3 is to be sent in the next bio. 14161dd40c3eSMikulas Patocka * 14171dd40c3eSMikulas Patocka * If the target wants to receive multiple copies of the bio (via num_*bios, etc), 14181dd40c3eSMikulas Patocka * the partially processed part (the sum of regions 1+2) must be the same for all 14191dd40c3eSMikulas Patocka * copies of the bio. 14201dd40c3eSMikulas Patocka */ 14211dd40c3eSMikulas Patocka void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors) 14221dd40c3eSMikulas Patocka { 14231dd40c3eSMikulas Patocka struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); 14241dd40c3eSMikulas Patocka unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT; 14251dd40c3eSMikulas Patocka BUG_ON(bio->bi_rw & REQ_FLUSH); 14261dd40c3eSMikulas Patocka BUG_ON(bi_size > *tio->len_ptr); 14271dd40c3eSMikulas Patocka BUG_ON(n_sectors > bi_size); 14281dd40c3eSMikulas Patocka *tio->len_ptr -= bi_size - n_sectors; 14291dd40c3eSMikulas Patocka bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT; 14301dd40c3eSMikulas Patocka } 14311dd40c3eSMikulas Patocka EXPORT_SYMBOL_GPL(dm_accept_partial_bio); 14321dd40c3eSMikulas Patocka 1433bd2a49b8SAlasdair G Kergon static void __map_bio(struct dm_target_io *tio) 14341da177e4SLinus Torvalds { 14351da177e4SLinus Torvalds int r; 14362056a782SJens Axboe sector_t sector; 14379faf400fSStefan Bader struct mapped_device *md; 1438dba14160SMikulas Patocka struct bio *clone = &tio->clone; 1439bd2a49b8SAlasdair G Kergon struct dm_target *ti = tio->ti; 14401da177e4SLinus Torvalds 14411da177e4SLinus Torvalds clone->bi_end_io = clone_endio; 14421da177e4SLinus Torvalds 14431da177e4SLinus Torvalds /* 14441da177e4SLinus Torvalds * Map the clone. If r == 0 we don't need to do 14451da177e4SLinus Torvalds * anything, the target has assumed ownership of 14461da177e4SLinus Torvalds * this io. 14471da177e4SLinus Torvalds */ 14481da177e4SLinus Torvalds atomic_inc(&tio->io->io_count); 14494f024f37SKent Overstreet sector = clone->bi_iter.bi_sector; 14507de3ee57SMikulas Patocka r = ti->type->map(ti, clone); 145145cbcd79SKiyoshi Ueda if (r == DM_MAPIO_REMAPPED) { 14521da177e4SLinus Torvalds /* the bio has been remapped so dispatch it */ 14532056a782SJens Axboe 1454d07335e5SMike Snitzer trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone, 145522a7c31aSAlan D. 
Brunelle tio->io->bio->bi_bdev->bd_dev, sector); 14562056a782SJens Axboe 14571da177e4SLinus Torvalds generic_make_request(clone); 14582e93ccc1SKiyoshi Ueda } else if (r < 0 || r == DM_MAPIO_REQUEUE) { 14592e93ccc1SKiyoshi Ueda /* error the io and bail out, or requeue it if needed */ 14609faf400fSStefan Bader md = tio->io->md; 14619faf400fSStefan Bader dec_pending(tio->io, r); 14629faf400fSStefan Bader free_tio(md, tio); 146345cbcd79SKiyoshi Ueda } else if (r) { 146445cbcd79SKiyoshi Ueda DMWARN("unimplemented target map return value: %d", r); 146545cbcd79SKiyoshi Ueda BUG(); 14661da177e4SLinus Torvalds } 14671da177e4SLinus Torvalds } 14681da177e4SLinus Torvalds 14691da177e4SLinus Torvalds struct clone_info { 14701da177e4SLinus Torvalds struct mapped_device *md; 14711da177e4SLinus Torvalds struct dm_table *map; 14721da177e4SLinus Torvalds struct bio *bio; 14731da177e4SLinus Torvalds struct dm_io *io; 14741da177e4SLinus Torvalds sector_t sector; 1475e0d6609aSMikulas Patocka unsigned sector_count; 14761da177e4SLinus Torvalds }; 14771da177e4SLinus Torvalds 1478e0d6609aSMikulas Patocka static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len) 1479bd2a49b8SAlasdair G Kergon { 14804f024f37SKent Overstreet bio->bi_iter.bi_sector = sector; 14814f024f37SKent Overstreet bio->bi_iter.bi_size = to_bytes(len); 14821da177e4SLinus Torvalds } 14831da177e4SLinus Torvalds 14841da177e4SLinus Torvalds /* 14851da177e4SLinus Torvalds * Creates a bio that consists of range of complete bvecs. 14861da177e4SLinus Torvalds */ 1487dba14160SMikulas Patocka static void clone_bio(struct dm_target_io *tio, struct bio *bio, 14881c3b13e6SKent Overstreet sector_t sector, unsigned len) 14891da177e4SLinus Torvalds { 1490dba14160SMikulas Patocka struct bio *clone = &tio->clone; 14911da177e4SLinus Torvalds 14921c3b13e6SKent Overstreet __bio_clone_fast(clone, bio); 14939c47008dSMartin K. 
Petersen 14941c3b13e6SKent Overstreet if (bio_integrity(bio)) 14951c3b13e6SKent Overstreet bio_integrity_clone(clone, bio, GFP_NOIO); 14961c3b13e6SKent Overstreet 14971c3b13e6SKent Overstreet bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector)); 14981c3b13e6SKent Overstreet clone->bi_iter.bi_size = to_bytes(len); 14991c3b13e6SKent Overstreet 15001c3b13e6SKent Overstreet if (bio_integrity(bio)) 15011c3b13e6SKent Overstreet bio_integrity_trim(clone, 0, len); 15021da177e4SLinus Torvalds } 15031da177e4SLinus Torvalds 15049015df24SAlasdair G Kergon static struct dm_target_io *alloc_tio(struct clone_info *ci, 150599778273SJunichi Nomura struct dm_target *ti, 150655a62eefSAlasdair G Kergon unsigned target_bio_nr) 1507f9ab94ceSMikulas Patocka { 1508dba14160SMikulas Patocka struct dm_target_io *tio; 1509dba14160SMikulas Patocka struct bio *clone; 1510dba14160SMikulas Patocka 151199778273SJunichi Nomura clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs); 1512dba14160SMikulas Patocka tio = container_of(clone, struct dm_target_io, clone); 1513f9ab94ceSMikulas Patocka 1514f9ab94ceSMikulas Patocka tio->io = ci->io; 1515f9ab94ceSMikulas Patocka tio->ti = ti; 151655a62eefSAlasdair G Kergon tio->target_bio_nr = target_bio_nr; 15179015df24SAlasdair G Kergon 15189015df24SAlasdair G Kergon return tio; 15199015df24SAlasdair G Kergon } 15209015df24SAlasdair G Kergon 152114fe594dSAlasdair G Kergon static void __clone_and_map_simple_bio(struct clone_info *ci, 152214fe594dSAlasdair G Kergon struct dm_target *ti, 15231dd40c3eSMikulas Patocka unsigned target_bio_nr, unsigned *len) 15249015df24SAlasdair G Kergon { 152599778273SJunichi Nomura struct dm_target_io *tio = alloc_tio(ci, ti, target_bio_nr); 1526dba14160SMikulas Patocka struct bio *clone = &tio->clone; 15279015df24SAlasdair G Kergon 15281dd40c3eSMikulas Patocka tio->len_ptr = len; 15291dd40c3eSMikulas Patocka 15301c3b13e6SKent Overstreet __bio_clone_fast(clone, ci->bio); 1531bd2a49b8SAlasdair G Kergon if (len) 15321dd40c3eSMikulas Patocka bio_setup_sector(clone, ci->sector, *len); 1533f9ab94ceSMikulas Patocka 1534bd2a49b8SAlasdair G Kergon __map_bio(tio); 1535f9ab94ceSMikulas Patocka } 1536f9ab94ceSMikulas Patocka 153714fe594dSAlasdair G Kergon static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti, 15381dd40c3eSMikulas Patocka unsigned num_bios, unsigned *len) 153906a426ceSMike Snitzer { 154055a62eefSAlasdair G Kergon unsigned target_bio_nr; 154106a426ceSMike Snitzer 154255a62eefSAlasdair G Kergon for (target_bio_nr = 0; target_bio_nr < num_bios; target_bio_nr++) 154314fe594dSAlasdair G Kergon __clone_and_map_simple_bio(ci, ti, target_bio_nr, len); 154406a426ceSMike Snitzer } 154506a426ceSMike Snitzer 154614fe594dSAlasdair G Kergon static int __send_empty_flush(struct clone_info *ci) 1547f9ab94ceSMikulas Patocka { 154806a426ceSMike Snitzer unsigned target_nr = 0; 1549f9ab94ceSMikulas Patocka struct dm_target *ti; 1550f9ab94ceSMikulas Patocka 1551b372d360SMike Snitzer BUG_ON(bio_has_data(ci->bio)); 1552f9ab94ceSMikulas Patocka while ((ti = dm_table_get_target(ci->map, target_nr++))) 15531dd40c3eSMikulas Patocka __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL); 1554f9ab94ceSMikulas Patocka 1555f9ab94ceSMikulas Patocka return 0; 1556f9ab94ceSMikulas Patocka } 1557f9ab94ceSMikulas Patocka 1558e4c93811SAlasdair G Kergon static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti, 15591dd40c3eSMikulas Patocka sector_t sector, unsigned *len) 15605ae89a87SMike Snitzer { 1561dba14160SMikulas Patocka 
struct bio *bio = ci->bio; 15625ae89a87SMike Snitzer struct dm_target_io *tio; 1563b0d8ed4dSAlasdair G Kergon unsigned target_bio_nr; 1564b0d8ed4dSAlasdair G Kergon unsigned num_target_bios = 1; 15655ae89a87SMike Snitzer 1566b0d8ed4dSAlasdair G Kergon /* 1567b0d8ed4dSAlasdair G Kergon * Does the target want to receive duplicate copies of the bio? 1568b0d8ed4dSAlasdair G Kergon */ 1569b0d8ed4dSAlasdair G Kergon if (bio_data_dir(bio) == WRITE && ti->num_write_bios) 1570b0d8ed4dSAlasdair G Kergon num_target_bios = ti->num_write_bios(ti, bio); 1571e4c93811SAlasdair G Kergon 1572b0d8ed4dSAlasdair G Kergon for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) { 157399778273SJunichi Nomura tio = alloc_tio(ci, ti, target_bio_nr); 15741dd40c3eSMikulas Patocka tio->len_ptr = len; 15751dd40c3eSMikulas Patocka clone_bio(tio, bio, sector, *len); 1576bd2a49b8SAlasdair G Kergon __map_bio(tio); 15775ae89a87SMike Snitzer } 1578b0d8ed4dSAlasdair G Kergon } 15795ae89a87SMike Snitzer 158055a62eefSAlasdair G Kergon typedef unsigned (*get_num_bios_fn)(struct dm_target *ti); 158123508a96SMike Snitzer 158255a62eefSAlasdair G Kergon static unsigned get_num_discard_bios(struct dm_target *ti) 158323508a96SMike Snitzer { 158455a62eefSAlasdair G Kergon return ti->num_discard_bios; 158523508a96SMike Snitzer } 158623508a96SMike Snitzer 158755a62eefSAlasdair G Kergon static unsigned get_num_write_same_bios(struct dm_target *ti) 158823508a96SMike Snitzer { 158955a62eefSAlasdair G Kergon return ti->num_write_same_bios; 159023508a96SMike Snitzer } 159123508a96SMike Snitzer 159223508a96SMike Snitzer typedef bool (*is_split_required_fn)(struct dm_target *ti); 159323508a96SMike Snitzer 159423508a96SMike Snitzer static bool is_split_required_for_discard(struct dm_target *ti) 159523508a96SMike Snitzer { 159655a62eefSAlasdair G Kergon return ti->split_discard_bios; 159723508a96SMike Snitzer } 159823508a96SMike Snitzer 159914fe594dSAlasdair G Kergon static int __send_changing_extent_only(struct clone_info *ci, 160055a62eefSAlasdair G Kergon get_num_bios_fn get_num_bios, 160123508a96SMike Snitzer is_split_required_fn is_split_required) 16025ae89a87SMike Snitzer { 16035ae89a87SMike Snitzer struct dm_target *ti; 1604e0d6609aSMikulas Patocka unsigned len; 160555a62eefSAlasdair G Kergon unsigned num_bios; 16065ae89a87SMike Snitzer 1607a79245b3SMike Snitzer do { 16085ae89a87SMike Snitzer ti = dm_table_find_target(ci->map, ci->sector); 16095ae89a87SMike Snitzer if (!dm_target_is_valid(ti)) 16105ae89a87SMike Snitzer return -EIO; 16115ae89a87SMike Snitzer 16125ae89a87SMike Snitzer /* 161323508a96SMike Snitzer * Even though the device advertised support for this type of 161423508a96SMike Snitzer * request, that does not mean every target supports it, and 1615936688d7SMike Snitzer * reconfiguration might also have changed that since the 16165ae89a87SMike Snitzer * check was performed. 16175ae89a87SMike Snitzer */ 161855a62eefSAlasdair G Kergon num_bios = get_num_bios ? 
get_num_bios(ti) : 0; 161955a62eefSAlasdair G Kergon if (!num_bios) 16205ae89a87SMike Snitzer return -EOPNOTSUPP; 16215ae89a87SMike Snitzer 162223508a96SMike Snitzer if (is_split_required && !is_split_required(ti)) 1623e0d6609aSMikulas Patocka len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti)); 16247acf0277SMikulas Patocka else 1625e0d6609aSMikulas Patocka len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti)); 16265ae89a87SMike Snitzer 16271dd40c3eSMikulas Patocka __send_duplicate_bios(ci, ti, num_bios, &len); 16285ae89a87SMike Snitzer 1629a79245b3SMike Snitzer ci->sector += len; 1630a79245b3SMike Snitzer } while (ci->sector_count -= len); 16315ae89a87SMike Snitzer 16325ae89a87SMike Snitzer return 0; 16335ae89a87SMike Snitzer } 16345ae89a87SMike Snitzer 163514fe594dSAlasdair G Kergon static int __send_discard(struct clone_info *ci) 163623508a96SMike Snitzer { 163714fe594dSAlasdair G Kergon return __send_changing_extent_only(ci, get_num_discard_bios, 163823508a96SMike Snitzer is_split_required_for_discard); 163923508a96SMike Snitzer } 164023508a96SMike Snitzer 164114fe594dSAlasdair G Kergon static int __send_write_same(struct clone_info *ci) 164223508a96SMike Snitzer { 164314fe594dSAlasdair G Kergon return __send_changing_extent_only(ci, get_num_write_same_bios, NULL); 164423508a96SMike Snitzer } 164523508a96SMike Snitzer 1646e4c93811SAlasdair G Kergon /* 1647e4c93811SAlasdair G Kergon * Select the correct strategy for processing a non-flush bio. 1648e4c93811SAlasdair G Kergon */ 1649e4c93811SAlasdair G Kergon static int __split_and_process_non_flush(struct clone_info *ci) 1650e4c93811SAlasdair G Kergon { 1651e4c93811SAlasdair G Kergon struct bio *bio = ci->bio; 1652e4c93811SAlasdair G Kergon struct dm_target *ti; 16531c3b13e6SKent Overstreet unsigned len; 1654e4c93811SAlasdair G Kergon 1655e4c93811SAlasdair G Kergon if (unlikely(bio->bi_rw & REQ_DISCARD)) 1656e4c93811SAlasdair G Kergon return __send_discard(ci); 1657e4c93811SAlasdair G Kergon else if (unlikely(bio->bi_rw & REQ_WRITE_SAME)) 1658e4c93811SAlasdair G Kergon return __send_write_same(ci); 1659e4c93811SAlasdair G Kergon 1660e4c93811SAlasdair G Kergon ti = dm_table_find_target(ci->map, ci->sector); 1661e4c93811SAlasdair G Kergon if (!dm_target_is_valid(ti)) 1662e4c93811SAlasdair G Kergon return -EIO; 1663e4c93811SAlasdair G Kergon 16641c3b13e6SKent Overstreet len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count); 1665e4c93811SAlasdair G Kergon 16661dd40c3eSMikulas Patocka __clone_and_map_data_bio(ci, ti, ci->sector, &len); 1667e4c93811SAlasdair G Kergon 1668e4c93811SAlasdair G Kergon ci->sector += len; 1669e4c93811SAlasdair G Kergon ci->sector_count -= len; 1670e4c93811SAlasdair G Kergon 1671e4c93811SAlasdair G Kergon return 0; 1672e4c93811SAlasdair G Kergon } 1673e4c93811SAlasdair G Kergon 1674e4c93811SAlasdair G Kergon /* 167514fe594dSAlasdair G Kergon * Entry point to split a bio into clones and submit them to the targets. 
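 * Illustrative flow (editorial, hypothetical numbers): a 2048-sector write to a
 * target for which max_io_len() allows 1024 sectors at each step is processed by
 * two passes of __split_and_process_non_flush() below, i.e. two clone bios of
 * 1024 sectors each, with ci.sector advancing and ci.sector_count draining to 0.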
16761da177e4SLinus Torvalds */ 167783d5e5b0SMikulas Patocka static void __split_and_process_bio(struct mapped_device *md, 167883d5e5b0SMikulas Patocka struct dm_table *map, struct bio *bio) 16791da177e4SLinus Torvalds { 16801da177e4SLinus Torvalds struct clone_info ci; 1681512875bdSJun'ichi Nomura int error = 0; 16821da177e4SLinus Torvalds 168383d5e5b0SMikulas Patocka if (unlikely(!map)) { 1684f0b9a450SMikulas Patocka bio_io_error(bio); 1685f0b9a450SMikulas Patocka return; 1686f0b9a450SMikulas Patocka } 1687692d0eb9SMikulas Patocka 168883d5e5b0SMikulas Patocka ci.map = map; 16891da177e4SLinus Torvalds ci.md = md; 16901da177e4SLinus Torvalds ci.io = alloc_io(md); 16911da177e4SLinus Torvalds ci.io->error = 0; 16921da177e4SLinus Torvalds atomic_set(&ci.io->io_count, 1); 16931da177e4SLinus Torvalds ci.io->bio = bio; 16941da177e4SLinus Torvalds ci.io->md = md; 1695f88fb981SKiyoshi Ueda spin_lock_init(&ci.io->endio_lock); 16964f024f37SKent Overstreet ci.sector = bio->bi_iter.bi_sector; 16971da177e4SLinus Torvalds 16983eaf840eSJun'ichi "Nick" Nomura start_io_acct(ci.io); 1699bd2a49b8SAlasdair G Kergon 1700b372d360SMike Snitzer if (bio->bi_rw & REQ_FLUSH) { 1701b372d360SMike Snitzer ci.bio = &ci.md->flush_bio; 1702b372d360SMike Snitzer ci.sector_count = 0; 170314fe594dSAlasdair G Kergon error = __send_empty_flush(&ci); 1704b372d360SMike Snitzer /* dec_pending submits any data associated with flush */ 1705b372d360SMike Snitzer } else { 17066a8736d1STejun Heo ci.bio = bio; 1707f6fccb12SMilan Broz ci.sector_count = bio_sectors(bio); 1708512875bdSJun'ichi Nomura while (ci.sector_count && !error) 170914fe594dSAlasdair G Kergon error = __split_and_process_non_flush(&ci); 1710d87f4c14STejun Heo } 17111da177e4SLinus Torvalds 17121da177e4SLinus Torvalds /* drop the extra reference count */ 1713512875bdSJun'ichi Nomura dec_pending(ci.io, error); 17149e4e5f87SMilan Broz } 17159e4e5f87SMilan Broz /*----------------------------------------------------------------- 17161da177e4SLinus Torvalds * CRUD END 17171da177e4SLinus Torvalds *---------------------------------------------------------------*/ 17181da177e4SLinus Torvalds 17191da177e4SLinus Torvalds static int dm_merge_bvec(struct request_queue *q, 17201da177e4SLinus Torvalds struct bvec_merge_data *bvm, 1721f6fccb12SMilan Broz struct bio_vec *biovec) 1722f6fccb12SMilan Broz { 1723f6fccb12SMilan Broz struct mapped_device *md = q->queuedata; 172483d5e5b0SMikulas Patocka struct dm_table *map = dm_get_live_table_fast(md); 1725f6fccb12SMilan Broz struct dm_target *ti; 1726*1c220c69SJoe Thornber sector_t max_sectors, max_size = 0; 1727f6fccb12SMilan Broz 1728f6fccb12SMilan Broz if (unlikely(!map)) 1729f6fccb12SMilan Broz goto out; 1730f6fccb12SMilan Broz 1731f6fccb12SMilan Broz ti = dm_table_find_target(map, bvm->bi_sector); 1732f6fccb12SMilan Broz if (!dm_target_is_valid(ti)) 173383d5e5b0SMikulas Patocka goto out; 1734f6fccb12SMilan Broz 1735f6fccb12SMilan Broz /* 1736f6fccb12SMilan Broz * Find maximum amount of I/O that won't need splitting 1737f6fccb12SMilan Broz */ 173856a67df7SMike Snitzer max_sectors = min(max_io_len(bvm->bi_sector, ti), 1739148e51baSMike Snitzer (sector_t) queue_max_sectors(q)); 1740f6fccb12SMilan Broz max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size; 1741*1c220c69SJoe Thornber 1742*1c220c69SJoe Thornber /* 1743*1c220c69SJoe Thornber * FIXME: this stop-gap fix _must_ be cleaned up (by passing a sector_t 1744*1c220c69SJoe Thornber * to the targets' merge function since it holds sectors not bytes). 
1745*1c220c69SJoe Thornber * Just doing this as an interim fix for stable@ because the more 1746*1c220c69SJoe Thornber * comprehensive cleanup of switching to sector_t will impact every 1747*1c220c69SJoe Thornber * DM target that implements a ->merge hook. 1748*1c220c69SJoe Thornber */ 1749*1c220c69SJoe Thornber if (max_size > INT_MAX) 1750*1c220c69SJoe Thornber max_size = INT_MAX; 1751f6fccb12SMilan Broz 1752f6fccb12SMilan Broz /* 1753f6fccb12SMilan Broz * merge_bvec_fn() returns number of bytes 1754f6fccb12SMilan Broz * it can accept at this offset 1755f6fccb12SMilan Broz * max is precomputed maximal io size 1756f6fccb12SMilan Broz */ 1757f6fccb12SMilan Broz if (max_size && ti->type->merge) 1758*1c220c69SJoe Thornber max_size = ti->type->merge(ti, bvm, biovec, (int) max_size); 17598cbeb67aSMikulas Patocka /* 17608cbeb67aSMikulas Patocka * If the target doesn't support merge method and some of the devices 1761148e51baSMike Snitzer * provided their merge_bvec method (we know this by looking for the 1762148e51baSMike Snitzer * max_hw_sectors that dm_set_device_limits may set), then we can't 1763148e51baSMike Snitzer * allow bios with multiple vector entries. So always set max_size 1764148e51baSMike Snitzer * to 0, and the code below allows just one page. 17658cbeb67aSMikulas Patocka */ 17668cbeb67aSMikulas Patocka else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9) 17678cbeb67aSMikulas Patocka max_size = 0; 1768f6fccb12SMilan Broz 17695037108aSMikulas Patocka out: 177083d5e5b0SMikulas Patocka dm_put_live_table_fast(md); 1771f6fccb12SMilan Broz /* 1772f6fccb12SMilan Broz * Always allow an entire first page 1773f6fccb12SMilan Broz */ 1774f6fccb12SMilan Broz if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT)) 1775f6fccb12SMilan Broz max_size = biovec->bv_len; 1776f6fccb12SMilan Broz 1777f6fccb12SMilan Broz return max_size; 1778f6fccb12SMilan Broz } 1779f6fccb12SMilan Broz 17801da177e4SLinus Torvalds /* 17811da177e4SLinus Torvalds * The request function that just remaps the bio built up by 17821da177e4SLinus Torvalds * dm_merge_bvec. 
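 * (Editorial worked example, hypothetical numbers: if max_io_len() permits 256
 * sectors, queue_max_sectors() is 1024 and bvm->bi_size is 4096 bytes, then
 * dm_merge_bvec() above reports min(256, 1024) * 512 - 4096 = 126976 bytes
 * available at that offset, less if the target's ->merge hook shrinks it, and
 * the bios built against that limit are what arrive here.)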
17831da177e4SLinus Torvalds */ 1784ff36ab34SMike Snitzer static void dm_make_request(struct request_queue *q, struct bio *bio) 17851da177e4SLinus Torvalds { 178612f03a49SKevin Corry int rw = bio_data_dir(bio); 17871da177e4SLinus Torvalds struct mapped_device *md = q->queuedata; 178883d5e5b0SMikulas Patocka int srcu_idx; 178983d5e5b0SMikulas Patocka struct dm_table *map; 17901da177e4SLinus Torvalds 179183d5e5b0SMikulas Patocka map = dm_get_live_table(md, &srcu_idx); 17921da177e4SLinus Torvalds 179318c0b223SGu Zheng generic_start_io_acct(rw, bio_sectors(bio), &dm_disk(md)->part0); 179412f03a49SKevin Corry 17956a8736d1STejun Heo /* if we're suspended, we have to queue this io for later */ 17966a8736d1STejun Heo if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { 179783d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 17981da177e4SLinus Torvalds 17996a8736d1STejun Heo if (bio_rw(bio) != READA) 180092c63902SMikulas Patocka queue_io(md, bio); 18016a8736d1STejun Heo else 18026a8736d1STejun Heo bio_io_error(bio); 18035a7bbad2SChristoph Hellwig return; 18041da177e4SLinus Torvalds } 18051da177e4SLinus Torvalds 180683d5e5b0SMikulas Patocka __split_and_process_bio(md, map, bio); 180783d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 18085a7bbad2SChristoph Hellwig return; 1809cec47e3dSKiyoshi Ueda } 1810cec47e3dSKiyoshi Ueda 1811fd2ed4d2SMikulas Patocka int dm_request_based(struct mapped_device *md) 1812cec47e3dSKiyoshi Ueda { 1813cec47e3dSKiyoshi Ueda return blk_queue_stackable(md->queue); 1814cec47e3dSKiyoshi Ueda } 1815cec47e3dSKiyoshi Ueda 1816466d89a6SKeith Busch static void dm_dispatch_clone_request(struct request *clone, struct request *rq) 1817cec47e3dSKiyoshi Ueda { 1818cec47e3dSKiyoshi Ueda int r; 1819cec47e3dSKiyoshi Ueda 1820466d89a6SKeith Busch if (blk_queue_io_stat(clone->q)) 1821466d89a6SKeith Busch clone->cmd_flags |= REQ_IO_STAT; 1822cec47e3dSKiyoshi Ueda 1823466d89a6SKeith Busch clone->start_time = jiffies; 1824466d89a6SKeith Busch r = blk_insert_cloned_request(clone->q, clone); 1825cec47e3dSKiyoshi Ueda if (r) 1826466d89a6SKeith Busch /* must complete clone in terms of original request */ 1827cec47e3dSKiyoshi Ueda dm_complete_request(rq, r); 1828cec47e3dSKiyoshi Ueda } 1829cec47e3dSKiyoshi Ueda 1830cec47e3dSKiyoshi Ueda static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig, 1831cec47e3dSKiyoshi Ueda void *data) 1832cec47e3dSKiyoshi Ueda { 1833cec47e3dSKiyoshi Ueda struct dm_rq_target_io *tio = data; 183494818742SKent Overstreet struct dm_rq_clone_bio_info *info = 183594818742SKent Overstreet container_of(bio, struct dm_rq_clone_bio_info, clone); 1836cec47e3dSKiyoshi Ueda 1837cec47e3dSKiyoshi Ueda info->orig = bio_orig; 1838cec47e3dSKiyoshi Ueda info->tio = tio; 1839cec47e3dSKiyoshi Ueda bio->bi_end_io = end_clone_bio; 1840cec47e3dSKiyoshi Ueda 1841cec47e3dSKiyoshi Ueda return 0; 1842cec47e3dSKiyoshi Ueda } 1843cec47e3dSKiyoshi Ueda 1844cec47e3dSKiyoshi Ueda static int setup_clone(struct request *clone, struct request *rq, 18451ae49ea2SMike Snitzer struct dm_rq_target_io *tio, gfp_t gfp_mask) 1846cec47e3dSKiyoshi Ueda { 1847d0bcb878SKiyoshi Ueda int r; 1848cec47e3dSKiyoshi Ueda 18491ae49ea2SMike Snitzer r = blk_rq_prep_clone(clone, rq, tio->md->bs, gfp_mask, 1850d0bcb878SKiyoshi Ueda dm_rq_bio_constructor, tio); 1851cec47e3dSKiyoshi Ueda if (r) 1852cec47e3dSKiyoshi Ueda return r; 1853cec47e3dSKiyoshi Ueda 1854cec47e3dSKiyoshi Ueda clone->cmd = rq->cmd; 1855cec47e3dSKiyoshi Ueda clone->cmd_len = rq->cmd_len; 1856cec47e3dSKiyoshi Ueda 
clone->sense = rq->sense; 1857cec47e3dSKiyoshi Ueda clone->end_io = end_clone_request; 1858cec47e3dSKiyoshi Ueda clone->end_io_data = tio; 1859cec47e3dSKiyoshi Ueda 18601ae49ea2SMike Snitzer tio->clone = clone; 18611ae49ea2SMike Snitzer 1862cec47e3dSKiyoshi Ueda return 0; 1863cec47e3dSKiyoshi Ueda } 1864cec47e3dSKiyoshi Ueda 18656facdaffSKiyoshi Ueda static struct request *clone_rq(struct request *rq, struct mapped_device *md, 18661ae49ea2SMike Snitzer struct dm_rq_target_io *tio, gfp_t gfp_mask) 18676facdaffSKiyoshi Ueda { 186802233342SMike Snitzer /* 186902233342SMike Snitzer * Do not allocate a clone if tio->clone was already set 187002233342SMike Snitzer * (see: dm_mq_queue_rq). 187102233342SMike Snitzer */ 187202233342SMike Snitzer bool alloc_clone = !tio->clone; 187302233342SMike Snitzer struct request *clone; 18741ae49ea2SMike Snitzer 187502233342SMike Snitzer if (alloc_clone) { 187602233342SMike Snitzer clone = alloc_clone_request(md, gfp_mask); 18771ae49ea2SMike Snitzer if (!clone) 18781ae49ea2SMike Snitzer return NULL; 187902233342SMike Snitzer } else 188002233342SMike Snitzer clone = tio->clone; 18811ae49ea2SMike Snitzer 18821ae49ea2SMike Snitzer blk_rq_init(NULL, clone); 18831ae49ea2SMike Snitzer if (setup_clone(clone, rq, tio, gfp_mask)) { 18841ae49ea2SMike Snitzer /* -ENOMEM */ 188502233342SMike Snitzer if (alloc_clone) 18861ae49ea2SMike Snitzer free_clone_request(md, clone); 18871ae49ea2SMike Snitzer return NULL; 18881ae49ea2SMike Snitzer } 18891ae49ea2SMike Snitzer 18901ae49ea2SMike Snitzer return clone; 18911ae49ea2SMike Snitzer } 18921ae49ea2SMike Snitzer 18932eb6e1e3SKeith Busch static void map_tio_request(struct kthread_work *work); 18942eb6e1e3SKeith Busch 1895bfebd1cdSMike Snitzer static void init_tio(struct dm_rq_target_io *tio, struct request *rq, 1896bfebd1cdSMike Snitzer struct mapped_device *md) 1897bfebd1cdSMike Snitzer { 1898bfebd1cdSMike Snitzer tio->md = md; 1899bfebd1cdSMike Snitzer tio->ti = NULL; 1900bfebd1cdSMike Snitzer tio->clone = NULL; 1901bfebd1cdSMike Snitzer tio->orig = rq; 1902bfebd1cdSMike Snitzer tio->error = 0; 1903bfebd1cdSMike Snitzer memset(&tio->info, 0, sizeof(tio->info)); 190402233342SMike Snitzer if (md->kworker_task) 1905bfebd1cdSMike Snitzer init_kthread_work(&tio->work, map_tio_request); 1906bfebd1cdSMike Snitzer } 1907bfebd1cdSMike Snitzer 1908466d89a6SKeith Busch static struct dm_rq_target_io *prep_tio(struct request *rq, 1909466d89a6SKeith Busch struct mapped_device *md, gfp_t gfp_mask) 19106facdaffSKiyoshi Ueda { 19116facdaffSKiyoshi Ueda struct dm_rq_target_io *tio; 1912e5863d9aSMike Snitzer int srcu_idx; 1913e5863d9aSMike Snitzer struct dm_table *table; 19146facdaffSKiyoshi Ueda 19156facdaffSKiyoshi Ueda tio = alloc_rq_tio(md, gfp_mask); 19166facdaffSKiyoshi Ueda if (!tio) 19176facdaffSKiyoshi Ueda return NULL; 19186facdaffSKiyoshi Ueda 1919bfebd1cdSMike Snitzer init_tio(tio, rq, md); 19206facdaffSKiyoshi Ueda 1921e5863d9aSMike Snitzer table = dm_get_live_table(md, &srcu_idx); 1922e5863d9aSMike Snitzer if (!dm_table_mq_request_based(table)) { 1923466d89a6SKeith Busch if (!clone_rq(rq, md, tio, gfp_mask)) { 1924e5863d9aSMike Snitzer dm_put_live_table(md, srcu_idx); 19256facdaffSKiyoshi Ueda free_rq_tio(tio); 19266facdaffSKiyoshi Ueda return NULL; 19276facdaffSKiyoshi Ueda } 1928e5863d9aSMike Snitzer } 1929e5863d9aSMike Snitzer dm_put_live_table(md, srcu_idx); 19306facdaffSKiyoshi Ueda 1931466d89a6SKeith Busch return tio; 19326facdaffSKiyoshi Ueda } 19336facdaffSKiyoshi Ueda 1934cec47e3dSKiyoshi Ueda /* 1935cec47e3dSKiyoshi Ueda * 
Called with the queue lock held. 1936cec47e3dSKiyoshi Ueda */ 1937cec47e3dSKiyoshi Ueda static int dm_prep_fn(struct request_queue *q, struct request *rq) 1938cec47e3dSKiyoshi Ueda { 1939cec47e3dSKiyoshi Ueda struct mapped_device *md = q->queuedata; 1940466d89a6SKeith Busch struct dm_rq_target_io *tio; 1941cec47e3dSKiyoshi Ueda 1942cec47e3dSKiyoshi Ueda if (unlikely(rq->special)) { 1943cec47e3dSKiyoshi Ueda DMWARN("Already has something in rq->special."); 1944cec47e3dSKiyoshi Ueda return BLKPREP_KILL; 1945cec47e3dSKiyoshi Ueda } 1946cec47e3dSKiyoshi Ueda 1947466d89a6SKeith Busch tio = prep_tio(rq, md, GFP_ATOMIC); 1948466d89a6SKeith Busch if (!tio) 1949cec47e3dSKiyoshi Ueda return BLKPREP_DEFER; 1950cec47e3dSKiyoshi Ueda 1951466d89a6SKeith Busch rq->special = tio; 1952cec47e3dSKiyoshi Ueda rq->cmd_flags |= REQ_DONTPREP; 1953cec47e3dSKiyoshi Ueda 1954cec47e3dSKiyoshi Ueda return BLKPREP_OK; 1955cec47e3dSKiyoshi Ueda } 1956cec47e3dSKiyoshi Ueda 19579eef87daSKiyoshi Ueda /* 19589eef87daSKiyoshi Ueda * Returns: 1959e5863d9aSMike Snitzer * 0 : the request has been processed 1960e5863d9aSMike Snitzer * DM_MAPIO_REQUEUE : the original request needs to be requeued 1961e5863d9aSMike Snitzer * < 0 : the request was completed due to failure 19629eef87daSKiyoshi Ueda */ 1963bfebd1cdSMike Snitzer static int map_request(struct dm_rq_target_io *tio, struct request *rq, 1964cec47e3dSKiyoshi Ueda struct mapped_device *md) 1965cec47e3dSKiyoshi Ueda { 1966e5863d9aSMike Snitzer int r; 1967bfebd1cdSMike Snitzer struct dm_target *ti = tio->ti; 1968e5863d9aSMike Snitzer struct request *clone = NULL; 1969cec47e3dSKiyoshi Ueda 1970e5863d9aSMike Snitzer if (tio->clone) { 1971e5863d9aSMike Snitzer clone = tio->clone; 1972cec47e3dSKiyoshi Ueda r = ti->type->map_rq(ti, clone, &tio->info); 1973e5863d9aSMike Snitzer } else { 1974e5863d9aSMike Snitzer r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone); 1975e5863d9aSMike Snitzer if (r < 0) { 1976e5863d9aSMike Snitzer /* The target wants to complete the I/O */ 1977e5863d9aSMike Snitzer dm_kill_unmapped_request(rq, r); 1978e5863d9aSMike Snitzer return r; 1979e5863d9aSMike Snitzer } 19803a140755SJunichi Nomura if (r != DM_MAPIO_REMAPPED) 19813a140755SJunichi Nomura return r; 198202233342SMike Snitzer if (setup_clone(clone, rq, tio, GFP_ATOMIC)) { 1983e5863d9aSMike Snitzer /* -ENOMEM */ 1984e5863d9aSMike Snitzer ti->type->release_clone_rq(clone); 1985e5863d9aSMike Snitzer return DM_MAPIO_REQUEUE; 1986e5863d9aSMike Snitzer } 1987e5863d9aSMike Snitzer } 1988e5863d9aSMike Snitzer 1989cec47e3dSKiyoshi Ueda switch (r) { 1990cec47e3dSKiyoshi Ueda case DM_MAPIO_SUBMITTED: 1991cec47e3dSKiyoshi Ueda /* The target has taken the I/O to submit by itself later */ 1992cec47e3dSKiyoshi Ueda break; 1993cec47e3dSKiyoshi Ueda case DM_MAPIO_REMAPPED: 1994cec47e3dSKiyoshi Ueda /* The target has remapped the I/O so dispatch it */ 19956db4ccd6SJun'ichi Nomura trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)), 1996466d89a6SKeith Busch blk_rq_pos(rq)); 1997466d89a6SKeith Busch dm_dispatch_clone_request(clone, rq); 1998cec47e3dSKiyoshi Ueda break; 1999cec47e3dSKiyoshi Ueda case DM_MAPIO_REQUEUE: 2000cec47e3dSKiyoshi Ueda /* The target wants to requeue the I/O */ 2001cec47e3dSKiyoshi Ueda dm_requeue_unmapped_request(clone); 2002cec47e3dSKiyoshi Ueda break; 2003cec47e3dSKiyoshi Ueda default: 2004cec47e3dSKiyoshi Ueda if (r > 0) { 2005cec47e3dSKiyoshi Ueda DMWARN("unimplemented target map return value: %d", r); 2006cec47e3dSKiyoshi Ueda BUG(); 2007cec47e3dSKiyoshi Ueda } 
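/* Editorial note: reaching this point means the map callout returned a negative errno rather than a DM_MAPIO_* code. */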
2008cec47e3dSKiyoshi Ueda 2009cec47e3dSKiyoshi Ueda /* The target wants to complete the I/O */ 2010466d89a6SKeith Busch dm_kill_unmapped_request(rq, r); 2011e5863d9aSMike Snitzer return r; 2012cec47e3dSKiyoshi Ueda } 20139eef87daSKiyoshi Ueda 2014e5863d9aSMike Snitzer return 0; 2015cec47e3dSKiyoshi Ueda } 2016cec47e3dSKiyoshi Ueda 20172eb6e1e3SKeith Busch static void map_tio_request(struct kthread_work *work) 2018ba1cbad9SMike Snitzer { 20192eb6e1e3SKeith Busch struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work); 2020e5863d9aSMike Snitzer struct request *rq = tio->orig; 2021e5863d9aSMike Snitzer struct mapped_device *md = tio->md; 2022ba1cbad9SMike Snitzer 2023bfebd1cdSMike Snitzer if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) 2024e5863d9aSMike Snitzer dm_requeue_unmapped_original_request(md, rq); 20252eb6e1e3SKeith Busch } 20262eb6e1e3SKeith Busch 2027466d89a6SKeith Busch static void dm_start_request(struct mapped_device *md, struct request *orig) 2028ba1cbad9SMike Snitzer { 2029bfebd1cdSMike Snitzer if (!orig->q->mq_ops) 2030ba1cbad9SMike Snitzer blk_start_request(orig); 2031bfebd1cdSMike Snitzer else 2032bfebd1cdSMike Snitzer blk_mq_start_request(orig); 2033466d89a6SKeith Busch atomic_inc(&md->pending[rq_data_dir(orig)]); 2034ba1cbad9SMike Snitzer 20350ce65797SMike Snitzer if (md->seq_rq_merge_deadline_usecs) { 2036de3ec86dSMike Snitzer md->last_rq_pos = rq_end_sector(orig); 2037de3ec86dSMike Snitzer md->last_rq_rw = rq_data_dir(orig); 20380ce65797SMike Snitzer md->last_rq_start_time = ktime_get(); 20390ce65797SMike Snitzer } 2040de3ec86dSMike Snitzer 2041ba1cbad9SMike Snitzer /* 2042ba1cbad9SMike Snitzer * Hold the md reference here for the in-flight I/O. 2043ba1cbad9SMike Snitzer * We can't rely on the reference count by device opener, 2044ba1cbad9SMike Snitzer * because the device may be closed during the request completion 2045ba1cbad9SMike Snitzer * when all bios are completed. 2046ba1cbad9SMike Snitzer * See the comment in rq_completed() too. 
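 * (The matching dm_put() is done in rq_completed() once the request either
 * completes or is requeued.)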
2047ba1cbad9SMike Snitzer */ 2048ba1cbad9SMike Snitzer dm_get(md); 2049ba1cbad9SMike Snitzer } 2050ba1cbad9SMike Snitzer 20510ce65797SMike Snitzer #define MAX_SEQ_RQ_MERGE_DEADLINE_USECS 100000 20520ce65797SMike Snitzer 20530ce65797SMike Snitzer ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf) 20540ce65797SMike Snitzer { 20550ce65797SMike Snitzer return sprintf(buf, "%u\n", md->seq_rq_merge_deadline_usecs); 20560ce65797SMike Snitzer } 20570ce65797SMike Snitzer 20580ce65797SMike Snitzer ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md, 20590ce65797SMike Snitzer const char *buf, size_t count) 20600ce65797SMike Snitzer { 20610ce65797SMike Snitzer unsigned deadline; 20620ce65797SMike Snitzer 206317e149b8SMike Snitzer if (!dm_request_based(md) || md->use_blk_mq) 20640ce65797SMike Snitzer return count; 20650ce65797SMike Snitzer 20660ce65797SMike Snitzer if (kstrtouint(buf, 10, &deadline)) 20670ce65797SMike Snitzer return -EINVAL; 20680ce65797SMike Snitzer 20690ce65797SMike Snitzer if (deadline > MAX_SEQ_RQ_MERGE_DEADLINE_USECS) 20700ce65797SMike Snitzer deadline = MAX_SEQ_RQ_MERGE_DEADLINE_USECS; 20710ce65797SMike Snitzer 20720ce65797SMike Snitzer md->seq_rq_merge_deadline_usecs = deadline; 20730ce65797SMike Snitzer 20740ce65797SMike Snitzer return count; 20750ce65797SMike Snitzer } 20760ce65797SMike Snitzer 20770ce65797SMike Snitzer static bool dm_request_peeked_before_merge_deadline(struct mapped_device *md) 20780ce65797SMike Snitzer { 20790ce65797SMike Snitzer ktime_t kt_deadline; 20800ce65797SMike Snitzer 20810ce65797SMike Snitzer if (!md->seq_rq_merge_deadline_usecs) 20820ce65797SMike Snitzer return false; 20830ce65797SMike Snitzer 20840ce65797SMike Snitzer kt_deadline = ns_to_ktime((u64)md->seq_rq_merge_deadline_usecs * NSEC_PER_USEC); 20850ce65797SMike Snitzer kt_deadline = ktime_add_safe(md->last_rq_start_time, kt_deadline); 20860ce65797SMike Snitzer 20870ce65797SMike Snitzer return !ktime_after(ktime_get(), kt_deadline); 20880ce65797SMike Snitzer } 20890ce65797SMike Snitzer 2090cec47e3dSKiyoshi Ueda /* 2091cec47e3dSKiyoshi Ueda * q->request_fn for request-based dm. 2092cec47e3dSKiyoshi Ueda * Called with the queue lock held. 2093cec47e3dSKiyoshi Ueda */ 2094cec47e3dSKiyoshi Ueda static void dm_request_fn(struct request_queue *q) 2095cec47e3dSKiyoshi Ueda { 2096cec47e3dSKiyoshi Ueda struct mapped_device *md = q->queuedata; 209783d5e5b0SMikulas Patocka int srcu_idx; 209883d5e5b0SMikulas Patocka struct dm_table *map = dm_get_live_table(md, &srcu_idx); 2099cec47e3dSKiyoshi Ueda struct dm_target *ti; 2100466d89a6SKeith Busch struct request *rq; 21012eb6e1e3SKeith Busch struct dm_rq_target_io *tio; 210229e4013dSTejun Heo sector_t pos; 2103cec47e3dSKiyoshi Ueda 2104cec47e3dSKiyoshi Ueda /* 2105b4324feeSKiyoshi Ueda * For suspend, check blk_queue_stopped() and increment 2106b4324feeSKiyoshi Ueda * ->pending within a single queue_lock not to increment the 2107b4324feeSKiyoshi Ueda * number of in-flight I/Os after the queue is stopped in 2108b4324feeSKiyoshi Ueda * dm_suspend(). 
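 * Merge-deadline illustration (editorial; hypothetical value, sysfs path assumed
 * from the dm_attr_* helpers above): after
 *   echo 100 > /sys/block/dm-0/dm/rq_based_seq_io_merge_deadline
 * a freshly peeked single-bvec request that continues md->last_rq_pos in the
 * same direction, while other I/O is in flight, is pushed back with
 * blk_delay_queue(q, HZ / 100) below until 100 microseconds have passed since
 * md->last_rq_start_time, giving the elevator a chance to merge sequential I/O.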
2109cec47e3dSKiyoshi Ueda */ 21107eaceaccSJens Axboe while (!blk_queue_stopped(q)) { 2111cec47e3dSKiyoshi Ueda rq = blk_peek_request(q); 2112cec47e3dSKiyoshi Ueda if (!rq) 21139d1deb83SMike Snitzer goto out; 2114cec47e3dSKiyoshi Ueda 211529e4013dSTejun Heo /* always use block 0 to find the target for flushes for now */ 211629e4013dSTejun Heo pos = 0; 211729e4013dSTejun Heo if (!(rq->cmd_flags & REQ_FLUSH)) 211829e4013dSTejun Heo pos = blk_rq_pos(rq); 2119d0bcb878SKiyoshi Ueda 212029e4013dSTejun Heo ti = dm_table_find_target(map, pos); 2121ba1cbad9SMike Snitzer if (!dm_target_is_valid(ti)) { 2122ba1cbad9SMike Snitzer /* 2123466d89a6SKeith Busch * Must perform setup, that rq_completed() requires, 2124ba1cbad9SMike Snitzer * before calling dm_kill_unmapped_request 2125ba1cbad9SMike Snitzer */ 2126ba1cbad9SMike Snitzer DMERR_LIMIT("request attempted access beyond the end of device"); 2127466d89a6SKeith Busch dm_start_request(md, rq); 2128466d89a6SKeith Busch dm_kill_unmapped_request(rq, -EIO); 2129ba1cbad9SMike Snitzer continue; 2130ba1cbad9SMike Snitzer } 213129e4013dSTejun Heo 21320ce65797SMike Snitzer if (dm_request_peeked_before_merge_deadline(md) && 21330ce65797SMike Snitzer md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 && 2134de3ec86dSMike Snitzer md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq)) 2135de3ec86dSMike Snitzer goto delay_and_out; 2136de3ec86dSMike Snitzer 2137cec47e3dSKiyoshi Ueda if (ti->type->busy && ti->type->busy(ti)) 21387eaceaccSJens Axboe goto delay_and_out; 2139cec47e3dSKiyoshi Ueda 2140466d89a6SKeith Busch dm_start_request(md, rq); 2141b4324feeSKiyoshi Ueda 2142bfebd1cdSMike Snitzer tio = tio_from_request(rq); 21432eb6e1e3SKeith Busch /* Establish tio->ti before queuing work (map_tio_request) */ 21442eb6e1e3SKeith Busch tio->ti = ti; 21452eb6e1e3SKeith Busch queue_kthread_work(&md->kworker, &tio->work); 2146052189a2SKiyoshi Ueda BUG_ON(!irqs_disabled()); 2147cec47e3dSKiyoshi Ueda } 2148cec47e3dSKiyoshi Ueda 2149cec47e3dSKiyoshi Ueda goto out; 2150cec47e3dSKiyoshi Ueda 21517eaceaccSJens Axboe delay_and_out: 2152d548b34bSMike Snitzer blk_delay_queue(q, HZ / 100); 2153cec47e3dSKiyoshi Ueda out: 215483d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 2155cec47e3dSKiyoshi Ueda } 2156cec47e3dSKiyoshi Ueda 21571da177e4SLinus Torvalds static int dm_any_congested(void *congested_data, int bdi_bits) 21581da177e4SLinus Torvalds { 21598a57dfc6SChandra Seetharaman int r = bdi_bits; 21608a57dfc6SChandra Seetharaman struct mapped_device *md = congested_data; 21618a57dfc6SChandra Seetharaman struct dm_table *map; 21621da177e4SLinus Torvalds 21631eb787ecSAlasdair G Kergon if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 216483d5e5b0SMikulas Patocka map = dm_get_live_table_fast(md); 21658a57dfc6SChandra Seetharaman if (map) { 2166cec47e3dSKiyoshi Ueda /* 2167cec47e3dSKiyoshi Ueda * Request-based dm cares about only own queue for 2168cec47e3dSKiyoshi Ueda * the query about congestion status of request_queue 2169cec47e3dSKiyoshi Ueda */ 2170cec47e3dSKiyoshi Ueda if (dm_request_based(md)) 2171cec47e3dSKiyoshi Ueda r = md->queue->backing_dev_info.state & 2172cec47e3dSKiyoshi Ueda bdi_bits; 2173cec47e3dSKiyoshi Ueda else 21741da177e4SLinus Torvalds r = dm_table_any_congested(map, bdi_bits); 21758a57dfc6SChandra Seetharaman } 217683d5e5b0SMikulas Patocka dm_put_live_table_fast(md); 21778a57dfc6SChandra Seetharaman } 21788a57dfc6SChandra Seetharaman 21791da177e4SLinus Torvalds return r; 21801da177e4SLinus Torvalds } 21811da177e4SLinus Torvalds 21821da177e4SLinus 
Torvalds /*----------------------------------------------------------------- 21831da177e4SLinus Torvalds * An IDR is used to keep track of allocated minor numbers. 21841da177e4SLinus Torvalds *---------------------------------------------------------------*/ 21852b06cfffSAlasdair G Kergon static void free_minor(int minor) 21861da177e4SLinus Torvalds { 2187f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 21881da177e4SLinus Torvalds idr_remove(&_minor_idr, minor); 2189f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 21901da177e4SLinus Torvalds } 21911da177e4SLinus Torvalds 21921da177e4SLinus Torvalds /* 21931da177e4SLinus Torvalds * See if the device with a specific minor # is free. 21941da177e4SLinus Torvalds */ 2195cf13ab8eSFrederik Deweerdt static int specific_minor(int minor) 21961da177e4SLinus Torvalds { 2197c9d76be6STejun Heo int r; 21981da177e4SLinus Torvalds 21991da177e4SLinus Torvalds if (minor >= (1 << MINORBITS)) 22001da177e4SLinus Torvalds return -EINVAL; 22011da177e4SLinus Torvalds 2202c9d76be6STejun Heo idr_preload(GFP_KERNEL); 2203f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 22041da177e4SLinus Torvalds 2205c9d76be6STejun Heo r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT); 22061da177e4SLinus Torvalds 2207f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 2208c9d76be6STejun Heo idr_preload_end(); 2209c9d76be6STejun Heo if (r < 0) 2210c9d76be6STejun Heo return r == -ENOSPC ? -EBUSY : r; 2211c9d76be6STejun Heo return 0; 22121da177e4SLinus Torvalds } 22131da177e4SLinus Torvalds 2214cf13ab8eSFrederik Deweerdt static int next_free_minor(int *minor) 22151da177e4SLinus Torvalds { 2216c9d76be6STejun Heo int r; 22171da177e4SLinus Torvalds 2218c9d76be6STejun Heo idr_preload(GFP_KERNEL); 2219f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 22201da177e4SLinus Torvalds 2221c9d76be6STejun Heo r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT); 22221da177e4SLinus Torvalds 2223f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 2224c9d76be6STejun Heo idr_preload_end(); 2225c9d76be6STejun Heo if (r < 0) 22261da177e4SLinus Torvalds return r; 2227c9d76be6STejun Heo *minor = r; 2228c9d76be6STejun Heo return 0; 22291da177e4SLinus Torvalds } 22301da177e4SLinus Torvalds 223183d5cde4SAlexey Dobriyan static const struct block_device_operations dm_blk_dops; 22321da177e4SLinus Torvalds 223353d5914fSMikulas Patocka static void dm_wq_work(struct work_struct *work); 223453d5914fSMikulas Patocka 22354a0b4ddfSMike Snitzer static void dm_init_md_queue(struct mapped_device *md) 22364a0b4ddfSMike Snitzer { 22374a0b4ddfSMike Snitzer /* 22384a0b4ddfSMike Snitzer * Request-based dm devices cannot be stacked on top of bio-based dm 2239bfebd1cdSMike Snitzer * devices. The type of this dm device may not have been decided yet. 22404a0b4ddfSMike Snitzer * The type is decided at the first table loading time. 22414a0b4ddfSMike Snitzer * To prevent problematic device stacking, clear the queue flag 22424a0b4ddfSMike Snitzer * for request stacking support until then. 22434a0b4ddfSMike Snitzer * 22444a0b4ddfSMike Snitzer * This queue is new, so no concurrency on the queue_flags. 
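 * (dm_init_old_md_queue() below then restores the pieces that only the legacy
 * request_fn path needs: queuedata, the congested_fn callbacks and the bounce
 * limit.)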
22454a0b4ddfSMike Snitzer */ 22464a0b4ddfSMike Snitzer queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue); 2247bfebd1cdSMike Snitzer } 22484a0b4ddfSMike Snitzer 2249bfebd1cdSMike Snitzer static void dm_init_old_md_queue(struct mapped_device *md) 2250bfebd1cdSMike Snitzer { 225117e149b8SMike Snitzer md->use_blk_mq = false; 2252bfebd1cdSMike Snitzer dm_init_md_queue(md); 2253bfebd1cdSMike Snitzer 2254bfebd1cdSMike Snitzer /* 2255bfebd1cdSMike Snitzer * Initialize aspects of queue that aren't relevant for blk-mq 2256bfebd1cdSMike Snitzer */ 22574a0b4ddfSMike Snitzer md->queue->queuedata = md; 22584a0b4ddfSMike Snitzer md->queue->backing_dev_info.congested_fn = dm_any_congested; 22594a0b4ddfSMike Snitzer md->queue->backing_dev_info.congested_data = md; 2260ff36ab34SMike Snitzer 22614a0b4ddfSMike Snitzer blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY); 22624a0b4ddfSMike Snitzer } 22634a0b4ddfSMike Snitzer 22641da177e4SLinus Torvalds /* 22651da177e4SLinus Torvalds * Allocate and initialise a blank device with a given minor. 22661da177e4SLinus Torvalds */ 22672b06cfffSAlasdair G Kergon static struct mapped_device *alloc_dev(int minor) 22681da177e4SLinus Torvalds { 22691da177e4SLinus Torvalds int r; 2270cf13ab8eSFrederik Deweerdt struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL); 2271ba61fdd1SJeff Mahoney void *old_md; 22721da177e4SLinus Torvalds 22731da177e4SLinus Torvalds if (!md) { 22741da177e4SLinus Torvalds DMWARN("unable to allocate device, out of memory."); 22751da177e4SLinus Torvalds return NULL; 22761da177e4SLinus Torvalds } 22771da177e4SLinus Torvalds 227810da4f79SJeff Mahoney if (!try_module_get(THIS_MODULE)) 22796ed7ade8SMilan Broz goto bad_module_get; 228010da4f79SJeff Mahoney 22811da177e4SLinus Torvalds /* get a minor number for the dev */ 22822b06cfffSAlasdair G Kergon if (minor == DM_ANY_MINOR) 2283cf13ab8eSFrederik Deweerdt r = next_free_minor(&minor); 22842b06cfffSAlasdair G Kergon else 2285cf13ab8eSFrederik Deweerdt r = specific_minor(minor); 22861da177e4SLinus Torvalds if (r < 0) 22876ed7ade8SMilan Broz goto bad_minor; 22881da177e4SLinus Torvalds 228983d5e5b0SMikulas Patocka r = init_srcu_struct(&md->io_barrier); 229083d5e5b0SMikulas Patocka if (r < 0) 229183d5e5b0SMikulas Patocka goto bad_io_barrier; 229283d5e5b0SMikulas Patocka 229317e149b8SMike Snitzer md->use_blk_mq = use_blk_mq; 2294a5664dadSMike Snitzer md->type = DM_TYPE_NONE; 2295e61290a4SDaniel Walker mutex_init(&md->suspend_lock); 2296a5664dadSMike Snitzer mutex_init(&md->type_lock); 229786f1152bSBenjamin Marzinski mutex_init(&md->table_devices_lock); 2298022c2611SMikulas Patocka spin_lock_init(&md->deferred_lock); 22991da177e4SLinus Torvalds atomic_set(&md->holders, 1); 23005c6bd75dSAlasdair G Kergon atomic_set(&md->open_count, 0); 23011da177e4SLinus Torvalds atomic_set(&md->event_nr, 0); 23027a8c3d3bSMike Anderson atomic_set(&md->uevent_seq, 0); 23037a8c3d3bSMike Anderson INIT_LIST_HEAD(&md->uevent_list); 230486f1152bSBenjamin Marzinski INIT_LIST_HEAD(&md->table_devices); 23057a8c3d3bSMike Anderson spin_lock_init(&md->uevent_lock); 23061da177e4SLinus Torvalds 23074a0b4ddfSMike Snitzer md->queue = blk_alloc_queue(GFP_KERNEL); 23081da177e4SLinus Torvalds if (!md->queue) 23096ed7ade8SMilan Broz goto bad_queue; 23101da177e4SLinus Torvalds 23114a0b4ddfSMike Snitzer dm_init_md_queue(md); 23129faf400fSStefan Bader 23131da177e4SLinus Torvalds md->disk = alloc_disk(1); 23141da177e4SLinus Torvalds if (!md->disk) 23156ed7ade8SMilan Broz goto bad_disk; 23161da177e4SLinus Torvalds 2317316d315bSNikanth 
Karthikesan atomic_set(&md->pending[0], 0); 2318316d315bSNikanth Karthikesan atomic_set(&md->pending[1], 0); 2319f0b04115SJeff Mahoney init_waitqueue_head(&md->wait); 232053d5914fSMikulas Patocka INIT_WORK(&md->work, dm_wq_work); 2321f0b04115SJeff Mahoney init_waitqueue_head(&md->eventq); 23222995fa78SMikulas Patocka init_completion(&md->kobj_holder.completion); 23232eb6e1e3SKeith Busch md->kworker_task = NULL; 2324f0b04115SJeff Mahoney 23251da177e4SLinus Torvalds md->disk->major = _major; 23261da177e4SLinus Torvalds md->disk->first_minor = minor; 23271da177e4SLinus Torvalds md->disk->fops = &dm_blk_dops; 23281da177e4SLinus Torvalds md->disk->queue = md->queue; 23291da177e4SLinus Torvalds md->disk->private_data = md; 23301da177e4SLinus Torvalds sprintf(md->disk->disk_name, "dm-%d", minor); 23311da177e4SLinus Torvalds add_disk(md->disk); 23327e51f257SMike Anderson format_dev_t(md->name, MKDEV(_major, minor)); 23331da177e4SLinus Torvalds 2334670368a8STejun Heo md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0); 2335304f3f6aSMilan Broz if (!md->wq) 2336304f3f6aSMilan Broz goto bad_thread; 2337304f3f6aSMilan Broz 233832a926daSMikulas Patocka md->bdev = bdget_disk(md->disk, 0); 233932a926daSMikulas Patocka if (!md->bdev) 234032a926daSMikulas Patocka goto bad_bdev; 234132a926daSMikulas Patocka 23426a8736d1STejun Heo bio_init(&md->flush_bio); 23436a8736d1STejun Heo md->flush_bio.bi_bdev = md->bdev; 23446a8736d1STejun Heo md->flush_bio.bi_rw = WRITE_FLUSH; 23456a8736d1STejun Heo 2346fd2ed4d2SMikulas Patocka dm_stats_init(&md->stats); 2347fd2ed4d2SMikulas Patocka 2348ba61fdd1SJeff Mahoney /* Populate the mapping, nobody knows we exist yet */ 2349f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 2350ba61fdd1SJeff Mahoney old_md = idr_replace(&_minor_idr, md, minor); 2351f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 2352ba61fdd1SJeff Mahoney 2353ba61fdd1SJeff Mahoney BUG_ON(old_md != MINOR_ALLOCED); 2354ba61fdd1SJeff Mahoney 23551da177e4SLinus Torvalds return md; 23561da177e4SLinus Torvalds 235732a926daSMikulas Patocka bad_bdev: 235832a926daSMikulas Patocka destroy_workqueue(md->wq); 2359304f3f6aSMilan Broz bad_thread: 236003022c54SZdenek Kabelac del_gendisk(md->disk); 2361304f3f6aSMilan Broz put_disk(md->disk); 23626ed7ade8SMilan Broz bad_disk: 23631312f40eSAl Viro blk_cleanup_queue(md->queue); 23646ed7ade8SMilan Broz bad_queue: 236583d5e5b0SMikulas Patocka cleanup_srcu_struct(&md->io_barrier); 236683d5e5b0SMikulas Patocka bad_io_barrier: 23671da177e4SLinus Torvalds free_minor(minor); 23686ed7ade8SMilan Broz bad_minor: 236910da4f79SJeff Mahoney module_put(THIS_MODULE); 23706ed7ade8SMilan Broz bad_module_get: 23711da177e4SLinus Torvalds kfree(md); 23721da177e4SLinus Torvalds return NULL; 23731da177e4SLinus Torvalds } 23741da177e4SLinus Torvalds 2375ae9da83fSJun'ichi Nomura static void unlock_fs(struct mapped_device *md); 2376ae9da83fSJun'ichi Nomura 23771da177e4SLinus Torvalds static void free_dev(struct mapped_device *md) 23781da177e4SLinus Torvalds { 2379f331c029STejun Heo int minor = MINOR(disk_devt(md->disk)); 238063d94e48SJun'ichi Nomura 2381ae9da83fSJun'ichi Nomura unlock_fs(md); 2382304f3f6aSMilan Broz destroy_workqueue(md->wq); 23832eb6e1e3SKeith Busch 23842eb6e1e3SKeith Busch if (md->kworker_task) 23852eb6e1e3SKeith Busch kthread_stop(md->kworker_task); 2386e6ee8c0bSKiyoshi Ueda if (md->io_pool) 23871da177e4SLinus Torvalds mempool_destroy(md->io_pool); 23881ae49ea2SMike Snitzer if (md->rq_pool) 23891ae49ea2SMike Snitzer mempool_destroy(md->rq_pool); 2390e6ee8c0bSKiyoshi Ueda if (md->bs) 
23919faf400fSStefan Bader bioset_free(md->bs); 239263a4f065SMike Snitzer 239383d5e5b0SMikulas Patocka cleanup_srcu_struct(&md->io_barrier); 239486f1152bSBenjamin Marzinski free_table_devices(&md->table_devices); 239563a4f065SMike Snitzer dm_stats_cleanup(&md->stats); 2396fba9f90eSJeff Mahoney 2397fba9f90eSJeff Mahoney spin_lock(&_minor_lock); 2398fba9f90eSJeff Mahoney md->disk->private_data = NULL; 2399fba9f90eSJeff Mahoney spin_unlock(&_minor_lock); 240063a4f065SMike Snitzer if (blk_get_integrity(md->disk)) 240163a4f065SMike Snitzer blk_integrity_unregister(md->disk); 240263a4f065SMike Snitzer del_gendisk(md->disk); 24031da177e4SLinus Torvalds put_disk(md->disk); 24041312f40eSAl Viro blk_cleanup_queue(md->queue); 240517e149b8SMike Snitzer if (md->use_blk_mq) 2406bfebd1cdSMike Snitzer blk_mq_free_tag_set(&md->tag_set); 240763a4f065SMike Snitzer bdput(md->bdev); 240863a4f065SMike Snitzer free_minor(minor); 240963a4f065SMike Snitzer 241010da4f79SJeff Mahoney module_put(THIS_MODULE); 24111da177e4SLinus Torvalds kfree(md); 24121da177e4SLinus Torvalds } 24131da177e4SLinus Torvalds 2414e6ee8c0bSKiyoshi Ueda static void __bind_mempools(struct mapped_device *md, struct dm_table *t) 2415e6ee8c0bSKiyoshi Ueda { 2416c0820cf5SMikulas Patocka struct dm_md_mempools *p = dm_table_get_md_mempools(t); 2417e6ee8c0bSKiyoshi Ueda 241817e149b8SMike Snitzer if (md->bs) { 241916245bdcSJun'ichi Nomura /* The md already has necessary mempools. */ 242016245bdcSJun'ichi Nomura if (dm_table_get_type(t) == DM_TYPE_BIO_BASED) { 2421c0820cf5SMikulas Patocka /* 242216245bdcSJun'ichi Nomura * Reload bioset because front_pad may have changed 242316245bdcSJun'ichi Nomura * because a different table was loaded. 2424c0820cf5SMikulas Patocka */ 2425c0820cf5SMikulas Patocka bioset_free(md->bs); 2426c0820cf5SMikulas Patocka md->bs = p->bs; 2427c0820cf5SMikulas Patocka p->bs = NULL; 2428466d89a6SKeith Busch } 242916245bdcSJun'ichi Nomura /* 243016245bdcSJun'ichi Nomura * There's no need to reload with request-based dm 243116245bdcSJun'ichi Nomura * because the size of front_pad doesn't change. 243216245bdcSJun'ichi Nomura * Note for future: If you are to reload bioset, 243316245bdcSJun'ichi Nomura * prep-ed requests in the queue may refer 243416245bdcSJun'ichi Nomura * to bio from the old bioset, so you must walk 243516245bdcSJun'ichi Nomura * through the queue to unprep. 243616245bdcSJun'ichi Nomura */ 2437e6ee8c0bSKiyoshi Ueda goto out; 2438c0820cf5SMikulas Patocka } 2439e6ee8c0bSKiyoshi Ueda 24401ae49ea2SMike Snitzer BUG_ON(!p || md->io_pool || md->rq_pool || md->bs); 2441e6ee8c0bSKiyoshi Ueda 2442e6ee8c0bSKiyoshi Ueda md->io_pool = p->io_pool; 2443e6ee8c0bSKiyoshi Ueda p->io_pool = NULL; 24441ae49ea2SMike Snitzer md->rq_pool = p->rq_pool; 24451ae49ea2SMike Snitzer p->rq_pool = NULL; 2446e6ee8c0bSKiyoshi Ueda md->bs = p->bs; 2447e6ee8c0bSKiyoshi Ueda p->bs = NULL; 2448e6ee8c0bSKiyoshi Ueda 2449e6ee8c0bSKiyoshi Ueda out: 245002233342SMike Snitzer /* mempool bind completed, no longer need any mempools in the table */ 2451e6ee8c0bSKiyoshi Ueda dm_table_free_md_mempools(t); 2452e6ee8c0bSKiyoshi Ueda } 2453e6ee8c0bSKiyoshi Ueda 24541da177e4SLinus Torvalds /* 24551da177e4SLinus Torvalds * Bind a table to the device. 
24561da177e4SLinus Torvalds */ 24571da177e4SLinus Torvalds static void event_callback(void *context) 24581da177e4SLinus Torvalds { 24597a8c3d3bSMike Anderson unsigned long flags; 24607a8c3d3bSMike Anderson LIST_HEAD(uevents); 24611da177e4SLinus Torvalds struct mapped_device *md = (struct mapped_device *) context; 24621da177e4SLinus Torvalds 24637a8c3d3bSMike Anderson spin_lock_irqsave(&md->uevent_lock, flags); 24647a8c3d3bSMike Anderson list_splice_init(&md->uevent_list, &uevents); 24657a8c3d3bSMike Anderson spin_unlock_irqrestore(&md->uevent_lock, flags); 24667a8c3d3bSMike Anderson 2467ed9e1982STejun Heo dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); 24687a8c3d3bSMike Anderson 24691da177e4SLinus Torvalds atomic_inc(&md->event_nr); 24701da177e4SLinus Torvalds wake_up(&md->eventq); 24711da177e4SLinus Torvalds } 24721da177e4SLinus Torvalds 2473c217649bSMike Snitzer /* 2474c217649bSMike Snitzer * Protected by md->suspend_lock obtained by dm_swap_table(). 2475c217649bSMike Snitzer */ 24764e90188bSAlasdair G Kergon static void __set_size(struct mapped_device *md, sector_t size) 24771da177e4SLinus Torvalds { 24784e90188bSAlasdair G Kergon set_capacity(md->disk, size); 24791da177e4SLinus Torvalds 2480db8fef4fSMikulas Patocka i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT); 24811da177e4SLinus Torvalds } 24821da177e4SLinus Torvalds 2483042d2a9bSAlasdair G Kergon /* 2484d5b9dd04SMikulas Patocka * Return 1 if the queue has a compulsory merge_bvec_fn function. 2485d5b9dd04SMikulas Patocka * 2486d5b9dd04SMikulas Patocka * If this function returns 0, then the device is either a non-dm 2487d5b9dd04SMikulas Patocka * device without a merge_bvec_fn, or it is a dm device that is 2488d5b9dd04SMikulas Patocka * able to split any bios it receives that are too big. 2489d5b9dd04SMikulas Patocka */ 2490d5b9dd04SMikulas Patocka int dm_queue_merge_is_compulsory(struct request_queue *q) 2491d5b9dd04SMikulas Patocka { 2492d5b9dd04SMikulas Patocka struct mapped_device *dev_md; 2493d5b9dd04SMikulas Patocka 2494d5b9dd04SMikulas Patocka if (!q->merge_bvec_fn) 2495d5b9dd04SMikulas Patocka return 0; 2496d5b9dd04SMikulas Patocka 2497ff36ab34SMike Snitzer if (q->make_request_fn == dm_make_request) { 2498d5b9dd04SMikulas Patocka dev_md = q->queuedata; 2499d5b9dd04SMikulas Patocka if (test_bit(DMF_MERGE_IS_OPTIONAL, &dev_md->flags)) 2500d5b9dd04SMikulas Patocka return 0; 2501d5b9dd04SMikulas Patocka } 2502d5b9dd04SMikulas Patocka 2503d5b9dd04SMikulas Patocka return 1; 2504d5b9dd04SMikulas Patocka } 2505d5b9dd04SMikulas Patocka 2506d5b9dd04SMikulas Patocka static int dm_device_merge_is_compulsory(struct dm_target *ti, 2507d5b9dd04SMikulas Patocka struct dm_dev *dev, sector_t start, 2508d5b9dd04SMikulas Patocka sector_t len, void *data) 2509d5b9dd04SMikulas Patocka { 2510d5b9dd04SMikulas Patocka struct block_device *bdev = dev->bdev; 2511d5b9dd04SMikulas Patocka struct request_queue *q = bdev_get_queue(bdev); 2512d5b9dd04SMikulas Patocka 2513d5b9dd04SMikulas Patocka return dm_queue_merge_is_compulsory(q); 2514d5b9dd04SMikulas Patocka } 2515d5b9dd04SMikulas Patocka 2516d5b9dd04SMikulas Patocka /* 2517d5b9dd04SMikulas Patocka * Return 1 if it is acceptable to ignore merge_bvec_fn based 2518d5b9dd04SMikulas Patocka * on the properties of the underlying devices. 
2519d5b9dd04SMikulas Patocka */ 2520d5b9dd04SMikulas Patocka static int dm_table_merge_is_optional(struct dm_table *table) 2521d5b9dd04SMikulas Patocka { 2522d5b9dd04SMikulas Patocka unsigned i = 0; 2523d5b9dd04SMikulas Patocka struct dm_target *ti; 2524d5b9dd04SMikulas Patocka 2525d5b9dd04SMikulas Patocka while (i < dm_table_get_num_targets(table)) { 2526d5b9dd04SMikulas Patocka ti = dm_table_get_target(table, i++); 2527d5b9dd04SMikulas Patocka 2528d5b9dd04SMikulas Patocka if (ti->type->iterate_devices && 2529d5b9dd04SMikulas Patocka ti->type->iterate_devices(ti, dm_device_merge_is_compulsory, NULL)) 2530d5b9dd04SMikulas Patocka return 0; 2531d5b9dd04SMikulas Patocka } 2532d5b9dd04SMikulas Patocka 2533d5b9dd04SMikulas Patocka return 1; 2534d5b9dd04SMikulas Patocka } 2535d5b9dd04SMikulas Patocka 2536d5b9dd04SMikulas Patocka /* 2537042d2a9bSAlasdair G Kergon * Returns old map, which caller must destroy. 2538042d2a9bSAlasdair G Kergon */ 2539042d2a9bSAlasdair G Kergon static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, 2540754c5fc7SMike Snitzer struct queue_limits *limits) 25411da177e4SLinus Torvalds { 2542042d2a9bSAlasdair G Kergon struct dm_table *old_map; 2543165125e1SJens Axboe struct request_queue *q = md->queue; 25441da177e4SLinus Torvalds sector_t size; 2545d5b9dd04SMikulas Patocka int merge_is_optional; 25461da177e4SLinus Torvalds 25471da177e4SLinus Torvalds size = dm_table_get_size(t); 25483ac51e74SDarrick J. Wong 25493ac51e74SDarrick J. Wong /* 25503ac51e74SDarrick J. Wong * Wipe any geometry if the size of the table changed. 25513ac51e74SDarrick J. Wong */ 2552fd2ed4d2SMikulas Patocka if (size != dm_get_size(md)) 25533ac51e74SDarrick J. Wong memset(&md->geometry, 0, sizeof(md->geometry)); 25543ac51e74SDarrick J. Wong 25554e90188bSAlasdair G Kergon __set_size(md, size); 25561da177e4SLinus Torvalds 2557cf222b37SAlasdair G Kergon dm_table_event_callback(t, event_callback, md); 25582ca3310eSAlasdair G Kergon 2559e6ee8c0bSKiyoshi Ueda /* 2560e6ee8c0bSKiyoshi Ueda * The queue hasn't been stopped yet, if the old table type wasn't 2561e6ee8c0bSKiyoshi Ueda * for request-based during suspension. So stop it to prevent 2562e6ee8c0bSKiyoshi Ueda * I/O mapping before resume. 2563e6ee8c0bSKiyoshi Ueda * This must be done before setting the queue restrictions, 2564e6ee8c0bSKiyoshi Ueda * because request-based dm may be run just after the setting. 
2565e6ee8c0bSKiyoshi Ueda */ 2566bfebd1cdSMike Snitzer if (dm_table_request_based(t)) 2567e6ee8c0bSKiyoshi Ueda stop_queue(q); 2568e6ee8c0bSKiyoshi Ueda 2569e6ee8c0bSKiyoshi Ueda __bind_mempools(md, t); 2570e6ee8c0bSKiyoshi Ueda 2571d5b9dd04SMikulas Patocka merge_is_optional = dm_table_merge_is_optional(t); 2572d5b9dd04SMikulas Patocka 2573a12f5d48SEric Dumazet old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 257483d5e5b0SMikulas Patocka rcu_assign_pointer(md->map, t); 257536a0456fSAlasdair G Kergon md->immutable_target_type = dm_table_get_immutable_target_type(t); 257636a0456fSAlasdair G Kergon 2577754c5fc7SMike Snitzer dm_table_set_restrictions(t, q, limits); 2578d5b9dd04SMikulas Patocka if (merge_is_optional) 2579d5b9dd04SMikulas Patocka set_bit(DMF_MERGE_IS_OPTIONAL, &md->flags); 2580d5b9dd04SMikulas Patocka else 2581d5b9dd04SMikulas Patocka clear_bit(DMF_MERGE_IS_OPTIONAL, &md->flags); 258241abc4e1SHannes Reinecke if (old_map) 258383d5e5b0SMikulas Patocka dm_sync_table(md); 25842ca3310eSAlasdair G Kergon 2585042d2a9bSAlasdair G Kergon return old_map; 25861da177e4SLinus Torvalds } 25871da177e4SLinus Torvalds 2588a7940155SAlasdair G Kergon /* 2589a7940155SAlasdair G Kergon * Returns unbound table for the caller to free. 2590a7940155SAlasdair G Kergon */ 2591a7940155SAlasdair G Kergon static struct dm_table *__unbind(struct mapped_device *md) 25921da177e4SLinus Torvalds { 2593a12f5d48SEric Dumazet struct dm_table *map = rcu_dereference_protected(md->map, 1); 25941da177e4SLinus Torvalds 25951da177e4SLinus Torvalds if (!map) 2596a7940155SAlasdair G Kergon return NULL; 25971da177e4SLinus Torvalds 25981da177e4SLinus Torvalds dm_table_event_callback(map, NULL, NULL); 25999cdb8520SMonam Agarwal RCU_INIT_POINTER(md->map, NULL); 260083d5e5b0SMikulas Patocka dm_sync_table(md); 2601a7940155SAlasdair G Kergon 2602a7940155SAlasdair G Kergon return map; 26031da177e4SLinus Torvalds } 26041da177e4SLinus Torvalds 26051da177e4SLinus Torvalds /* 26061da177e4SLinus Torvalds * Constructor for a new device. 26071da177e4SLinus Torvalds */ 26082b06cfffSAlasdair G Kergon int dm_create(int minor, struct mapped_device **result) 26091da177e4SLinus Torvalds { 26101da177e4SLinus Torvalds struct mapped_device *md; 26111da177e4SLinus Torvalds 26122b06cfffSAlasdair G Kergon md = alloc_dev(minor); 26131da177e4SLinus Torvalds if (!md) 26141da177e4SLinus Torvalds return -ENXIO; 26151da177e4SLinus Torvalds 2616784aae73SMilan Broz dm_sysfs_init(md); 2617784aae73SMilan Broz 26181da177e4SLinus Torvalds *result = md; 26191da177e4SLinus Torvalds return 0; 26201da177e4SLinus Torvalds } 26211da177e4SLinus Torvalds 2622a5664dadSMike Snitzer /* 2623a5664dadSMike Snitzer * Functions to manage md->type. 2624a5664dadSMike Snitzer * All are required to hold md->type_lock. 
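 * (dm_setup_md_queue() below reads the type with dm_get_md_type(), which
 * BUG()s if this lock is not held.)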
2625a5664dadSMike Snitzer */ 2626a5664dadSMike Snitzer void dm_lock_md_type(struct mapped_device *md) 2627a5664dadSMike Snitzer { 2628a5664dadSMike Snitzer mutex_lock(&md->type_lock); 2629a5664dadSMike Snitzer } 2630a5664dadSMike Snitzer 2631a5664dadSMike Snitzer void dm_unlock_md_type(struct mapped_device *md) 2632a5664dadSMike Snitzer { 2633a5664dadSMike Snitzer mutex_unlock(&md->type_lock); 2634a5664dadSMike Snitzer } 2635a5664dadSMike Snitzer 2636a5664dadSMike Snitzer void dm_set_md_type(struct mapped_device *md, unsigned type) 2637a5664dadSMike Snitzer { 263800c4fc3bSMike Snitzer BUG_ON(!mutex_is_locked(&md->type_lock)); 2639a5664dadSMike Snitzer md->type = type; 2640a5664dadSMike Snitzer } 2641a5664dadSMike Snitzer 2642a5664dadSMike Snitzer unsigned dm_get_md_type(struct mapped_device *md) 2643a5664dadSMike Snitzer { 264400c4fc3bSMike Snitzer BUG_ON(!mutex_is_locked(&md->type_lock)); 2645a5664dadSMike Snitzer return md->type; 2646a5664dadSMike Snitzer } 2647a5664dadSMike Snitzer 264836a0456fSAlasdair G Kergon struct target_type *dm_get_immutable_target_type(struct mapped_device *md) 264936a0456fSAlasdair G Kergon { 265036a0456fSAlasdair G Kergon return md->immutable_target_type; 265136a0456fSAlasdair G Kergon } 265236a0456fSAlasdair G Kergon 26534a0b4ddfSMike Snitzer /* 2654f84cb8a4SMike Snitzer * The queue_limits are only valid as long as you have a reference 2655f84cb8a4SMike Snitzer * count on 'md'. 2656f84cb8a4SMike Snitzer */ 2657f84cb8a4SMike Snitzer struct queue_limits *dm_get_queue_limits(struct mapped_device *md) 2658f84cb8a4SMike Snitzer { 2659f84cb8a4SMike Snitzer BUG_ON(!atomic_read(&md->holders)); 2660f84cb8a4SMike Snitzer return &md->queue->limits; 2661f84cb8a4SMike Snitzer } 2662f84cb8a4SMike Snitzer EXPORT_SYMBOL_GPL(dm_get_queue_limits); 2663f84cb8a4SMike Snitzer 2664bfebd1cdSMike Snitzer static void init_rq_based_worker_thread(struct mapped_device *md) 2665bfebd1cdSMike Snitzer { 2666bfebd1cdSMike Snitzer /* Initialize the request-based DM worker thread */ 2667bfebd1cdSMike Snitzer init_kthread_worker(&md->kworker); 2668bfebd1cdSMike Snitzer md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker, 2669bfebd1cdSMike Snitzer "kdmwork-%s", dm_device_name(md)); 2670bfebd1cdSMike Snitzer } 2671bfebd1cdSMike Snitzer 2672f84cb8a4SMike Snitzer /* 26734a0b4ddfSMike Snitzer * Fully initialize a request-based queue (->elevator, ->request_fn, etc). 
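 * This covers the legacy ->request_fn path; blk-mq capable devices are
 * instead set up by dm_init_request_based_blk_mq_queue() below.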
26744a0b4ddfSMike Snitzer */ 26754a0b4ddfSMike Snitzer static int dm_init_request_based_queue(struct mapped_device *md) 26764a0b4ddfSMike Snitzer { 26774a0b4ddfSMike Snitzer struct request_queue *q = NULL; 26784a0b4ddfSMike Snitzer 26794a0b4ddfSMike Snitzer /* Fully initialize the queue */ 26804a0b4ddfSMike Snitzer q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL); 26814a0b4ddfSMike Snitzer if (!q) 2682bfebd1cdSMike Snitzer return -EINVAL; 26834a0b4ddfSMike Snitzer 26840ce65797SMike Snitzer /* disable dm_request_fn's merge heuristic by default */ 26850ce65797SMike Snitzer md->seq_rq_merge_deadline_usecs = 0; 26860ce65797SMike Snitzer 26874a0b4ddfSMike Snitzer md->queue = q; 2688bfebd1cdSMike Snitzer dm_init_old_md_queue(md); 26894a0b4ddfSMike Snitzer blk_queue_softirq_done(md->queue, dm_softirq_done); 26904a0b4ddfSMike Snitzer blk_queue_prep_rq(md->queue, dm_prep_fn); 26914a0b4ddfSMike Snitzer 2692bfebd1cdSMike Snitzer init_rq_based_worker_thread(md); 26932eb6e1e3SKeith Busch 26944a0b4ddfSMike Snitzer elv_register_queue(md->queue); 26954a0b4ddfSMike Snitzer 2696bfebd1cdSMike Snitzer return 0; 2697bfebd1cdSMike Snitzer } 2698bfebd1cdSMike Snitzer 2699bfebd1cdSMike Snitzer static int dm_mq_init_request(void *data, struct request *rq, 2700bfebd1cdSMike Snitzer unsigned int hctx_idx, unsigned int request_idx, 2701bfebd1cdSMike Snitzer unsigned int numa_node) 2702bfebd1cdSMike Snitzer { 2703bfebd1cdSMike Snitzer struct mapped_device *md = data; 2704bfebd1cdSMike Snitzer struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq); 2705bfebd1cdSMike Snitzer 2706bfebd1cdSMike Snitzer /* 2707bfebd1cdSMike Snitzer * Must initialize md member of tio, otherwise it won't 2708bfebd1cdSMike Snitzer * be available in dm_mq_queue_rq. 2709bfebd1cdSMike Snitzer */ 2710bfebd1cdSMike Snitzer tio->md = md; 2711bfebd1cdSMike Snitzer 2712bfebd1cdSMike Snitzer return 0; 2713bfebd1cdSMike Snitzer } 2714bfebd1cdSMike Snitzer 2715bfebd1cdSMike Snitzer static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx, 2716bfebd1cdSMike Snitzer const struct blk_mq_queue_data *bd) 2717bfebd1cdSMike Snitzer { 2718bfebd1cdSMike Snitzer struct request *rq = bd->rq; 2719bfebd1cdSMike Snitzer struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq); 2720bfebd1cdSMike Snitzer struct mapped_device *md = tio->md; 2721bfebd1cdSMike Snitzer int srcu_idx; 2722bfebd1cdSMike Snitzer struct dm_table *map = dm_get_live_table(md, &srcu_idx); 2723bfebd1cdSMike Snitzer struct dm_target *ti; 2724bfebd1cdSMike Snitzer sector_t pos; 2725bfebd1cdSMike Snitzer 2726bfebd1cdSMike Snitzer /* always use block 0 to find the target for flushes for now */ 2727bfebd1cdSMike Snitzer pos = 0; 2728bfebd1cdSMike Snitzer if (!(rq->cmd_flags & REQ_FLUSH)) 2729bfebd1cdSMike Snitzer pos = blk_rq_pos(rq); 2730bfebd1cdSMike Snitzer 2731bfebd1cdSMike Snitzer ti = dm_table_find_target(map, pos); 2732bfebd1cdSMike Snitzer if (!dm_target_is_valid(ti)) { 2733bfebd1cdSMike Snitzer dm_put_live_table(md, srcu_idx); 2734bfebd1cdSMike Snitzer DMERR_LIMIT("request attempted access beyond the end of device"); 2735bfebd1cdSMike Snitzer /* 2736bfebd1cdSMike Snitzer * Must perform setup, that rq_completed() requires, 2737bfebd1cdSMike Snitzer * before returning BLK_MQ_RQ_QUEUE_ERROR 2738bfebd1cdSMike Snitzer */ 2739bfebd1cdSMike Snitzer dm_start_request(md, rq); 2740bfebd1cdSMike Snitzer return BLK_MQ_RQ_QUEUE_ERROR; 2741bfebd1cdSMike Snitzer } 2742bfebd1cdSMike Snitzer dm_put_live_table(md, srcu_idx); 2743bfebd1cdSMike Snitzer 2744bfebd1cdSMike Snitzer if (ti->type->busy && 
ti->type->busy(ti)) 2745bfebd1cdSMike Snitzer return BLK_MQ_RQ_QUEUE_BUSY; 2746bfebd1cdSMike Snitzer 2747bfebd1cdSMike Snitzer dm_start_request(md, rq); 2748bfebd1cdSMike Snitzer 2749bfebd1cdSMike Snitzer /* Init tio using md established in .init_request */ 2750bfebd1cdSMike Snitzer init_tio(tio, rq, md); 2751bfebd1cdSMike Snitzer 275202233342SMike Snitzer /* 275302233342SMike Snitzer * Establish tio->ti before queuing work (map_tio_request) 275402233342SMike Snitzer * or making direct call to map_request(). 275502233342SMike Snitzer */ 2756bfebd1cdSMike Snitzer tio->ti = ti; 275702233342SMike Snitzer 275802233342SMike Snitzer /* Clone the request if underlying devices aren't blk-mq */ 275902233342SMike Snitzer if (dm_table_get_type(map) == DM_TYPE_REQUEST_BASED) { 276002233342SMike Snitzer /* clone request is allocated at the end of the pdu */ 276102233342SMike Snitzer tio->clone = (void *)blk_mq_rq_to_pdu(rq) + sizeof(struct dm_rq_target_io); 276245714fbeSMike Snitzer (void) clone_rq(rq, md, tio, GFP_ATOMIC); 2763bfebd1cdSMike Snitzer queue_kthread_work(&md->kworker, &tio->work); 276402233342SMike Snitzer } else { 276502233342SMike Snitzer /* Direct call is fine since .queue_rq allows allocations */ 276645714fbeSMike Snitzer if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) { 276745714fbeSMike Snitzer /* Undo dm_start_request() before requeuing */ 276845714fbeSMike Snitzer rq_completed(md, rq_data_dir(rq), false); 276945714fbeSMike Snitzer return BLK_MQ_RQ_QUEUE_BUSY; 277045714fbeSMike Snitzer } 277102233342SMike Snitzer } 2772bfebd1cdSMike Snitzer 2773bfebd1cdSMike Snitzer return BLK_MQ_RQ_QUEUE_OK; 2774bfebd1cdSMike Snitzer } 2775bfebd1cdSMike Snitzer 2776bfebd1cdSMike Snitzer static struct blk_mq_ops dm_mq_ops = { 2777bfebd1cdSMike Snitzer .queue_rq = dm_mq_queue_rq, 2778bfebd1cdSMike Snitzer .map_queue = blk_mq_map_queue, 2779bfebd1cdSMike Snitzer .complete = dm_softirq_done, 2780bfebd1cdSMike Snitzer .init_request = dm_mq_init_request, 2781bfebd1cdSMike Snitzer }; 2782bfebd1cdSMike Snitzer 2783bfebd1cdSMike Snitzer static int dm_init_request_based_blk_mq_queue(struct mapped_device *md) 2784bfebd1cdSMike Snitzer { 278502233342SMike Snitzer unsigned md_type = dm_get_md_type(md); 2786bfebd1cdSMike Snitzer struct request_queue *q; 2787bfebd1cdSMike Snitzer int err; 2788bfebd1cdSMike Snitzer 2789bfebd1cdSMike Snitzer memset(&md->tag_set, 0, sizeof(md->tag_set)); 2790bfebd1cdSMike Snitzer md->tag_set.ops = &dm_mq_ops; 2791bfebd1cdSMike Snitzer md->tag_set.queue_depth = BLKDEV_MAX_RQ; 2792bfebd1cdSMike Snitzer md->tag_set.numa_node = NUMA_NO_NODE; 2793bfebd1cdSMike Snitzer md->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE; 2794bfebd1cdSMike Snitzer md->tag_set.nr_hw_queues = 1; 279502233342SMike Snitzer if (md_type == DM_TYPE_REQUEST_BASED) { 279602233342SMike Snitzer /* make the memory for non-blk-mq clone part of the pdu */ 279702233342SMike Snitzer md->tag_set.cmd_size = sizeof(struct dm_rq_target_io) + sizeof(struct request); 279802233342SMike Snitzer } else 2799bfebd1cdSMike Snitzer md->tag_set.cmd_size = sizeof(struct dm_rq_target_io); 2800bfebd1cdSMike Snitzer md->tag_set.driver_data = md; 2801bfebd1cdSMike Snitzer 2802bfebd1cdSMike Snitzer err = blk_mq_alloc_tag_set(&md->tag_set); 2803bfebd1cdSMike Snitzer if (err) 2804bfebd1cdSMike Snitzer return err; 2805bfebd1cdSMike Snitzer 2806bfebd1cdSMike Snitzer q = blk_mq_init_allocated_queue(&md->tag_set, md->queue); 2807bfebd1cdSMike Snitzer if (IS_ERR(q)) { 2808bfebd1cdSMike Snitzer err = PTR_ERR(q); 2809bfebd1cdSMike 
Snitzer goto out_tag_set; 2810bfebd1cdSMike Snitzer } 2811bfebd1cdSMike Snitzer md->queue = q; 2812bfebd1cdSMike Snitzer dm_init_md_queue(md); 2813bfebd1cdSMike Snitzer 2814bfebd1cdSMike Snitzer /* backfill 'mq' sysfs registration normally done in blk_register_queue */ 2815bfebd1cdSMike Snitzer blk_mq_register_disk(md->disk); 2816bfebd1cdSMike Snitzer 281702233342SMike Snitzer if (md_type == DM_TYPE_REQUEST_BASED) 2818bfebd1cdSMike Snitzer init_rq_based_worker_thread(md); 2819bfebd1cdSMike Snitzer 2820bfebd1cdSMike Snitzer return 0; 2821bfebd1cdSMike Snitzer 2822bfebd1cdSMike Snitzer out_tag_set: 2823bfebd1cdSMike Snitzer blk_mq_free_tag_set(&md->tag_set); 2824bfebd1cdSMike Snitzer return err; 28254a0b4ddfSMike Snitzer } 28264a0b4ddfSMike Snitzer 282717e149b8SMike Snitzer static unsigned filter_md_type(unsigned type, struct mapped_device *md) 282817e149b8SMike Snitzer { 282917e149b8SMike Snitzer if (type == DM_TYPE_BIO_BASED) 283017e149b8SMike Snitzer return type; 283117e149b8SMike Snitzer 283217e149b8SMike Snitzer return !md->use_blk_mq ? DM_TYPE_REQUEST_BASED : DM_TYPE_MQ_REQUEST_BASED; 283317e149b8SMike Snitzer } 283417e149b8SMike Snitzer 28354a0b4ddfSMike Snitzer /* 28364a0b4ddfSMike Snitzer * Setup the DM device's queue based on md's type 28374a0b4ddfSMike Snitzer */ 28384a0b4ddfSMike Snitzer int dm_setup_md_queue(struct mapped_device *md) 28394a0b4ddfSMike Snitzer { 2840bfebd1cdSMike Snitzer int r; 284117e149b8SMike Snitzer unsigned md_type = filter_md_type(dm_get_md_type(md), md); 2842bfebd1cdSMike Snitzer 2843bfebd1cdSMike Snitzer switch (md_type) { 2844bfebd1cdSMike Snitzer case DM_TYPE_REQUEST_BASED: 2845bfebd1cdSMike Snitzer r = dm_init_request_based_queue(md); 2846bfebd1cdSMike Snitzer if (r) { 28474a0b4ddfSMike Snitzer DMWARN("Cannot initialize queue for request-based mapped device"); 2848bfebd1cdSMike Snitzer return r; 28494a0b4ddfSMike Snitzer } 2850bfebd1cdSMike Snitzer break; 2851bfebd1cdSMike Snitzer case DM_TYPE_MQ_REQUEST_BASED: 2852bfebd1cdSMike Snitzer r = dm_init_request_based_blk_mq_queue(md); 2853bfebd1cdSMike Snitzer if (r) { 2854bfebd1cdSMike Snitzer DMWARN("Cannot initialize queue for request-based blk-mq mapped device"); 2855bfebd1cdSMike Snitzer return r; 2856bfebd1cdSMike Snitzer } 2857bfebd1cdSMike Snitzer break; 2858bfebd1cdSMike Snitzer case DM_TYPE_BIO_BASED: 2859bfebd1cdSMike Snitzer dm_init_old_md_queue(md); 2860ff36ab34SMike Snitzer blk_queue_make_request(md->queue, dm_make_request); 2861ff36ab34SMike Snitzer blk_queue_merge_bvec(md->queue, dm_merge_bvec); 2862bfebd1cdSMike Snitzer break; 2863ff36ab34SMike Snitzer } 28644a0b4ddfSMike Snitzer 28654a0b4ddfSMike Snitzer return 0; 28664a0b4ddfSMike Snitzer } 28674a0b4ddfSMike Snitzer 28682bec1f4aSMikulas Patocka struct mapped_device *dm_get_md(dev_t dev) 28691da177e4SLinus Torvalds { 28701da177e4SLinus Torvalds struct mapped_device *md; 28711da177e4SLinus Torvalds unsigned minor = MINOR(dev); 28721da177e4SLinus Torvalds 28731da177e4SLinus Torvalds if (MAJOR(dev) != _major || minor >= (1 << MINORBITS)) 28741da177e4SLinus Torvalds return NULL; 28751da177e4SLinus Torvalds 2876f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 28771da177e4SLinus Torvalds 28781da177e4SLinus Torvalds md = idr_find(&_minor_idr, minor); 28792bec1f4aSMikulas Patocka if (md) { 28802bec1f4aSMikulas Patocka if ((md == MINOR_ALLOCED || 2881f331c029STejun Heo (MINOR(disk_devt(dm_disk(md))) != minor) || 2882abdc568bSKiyoshi Ueda dm_deleting_md(md) || 2883fba9f90eSJeff Mahoney test_bit(DMF_FREEING, &md->flags))) { 2884637842cfSDavid Teigland 
md = NULL; 2885fba9f90eSJeff Mahoney goto out; 2886fba9f90eSJeff Mahoney } 28872bec1f4aSMikulas Patocka dm_get(md); 28882bec1f4aSMikulas Patocka } 28891da177e4SLinus Torvalds 2890fba9f90eSJeff Mahoney out: 2891f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 28921da177e4SLinus Torvalds 2893637842cfSDavid Teigland return md; 2894637842cfSDavid Teigland } 28953cf2e4baSAlasdair G Kergon EXPORT_SYMBOL_GPL(dm_get_md); 2896d229a958SDavid Teigland 28979ade92a9SAlasdair G Kergon void *dm_get_mdptr(struct mapped_device *md) 2898637842cfSDavid Teigland { 28999ade92a9SAlasdair G Kergon return md->interface_ptr; 29001da177e4SLinus Torvalds } 29011da177e4SLinus Torvalds 29021da177e4SLinus Torvalds void dm_set_mdptr(struct mapped_device *md, void *ptr) 29031da177e4SLinus Torvalds { 29041da177e4SLinus Torvalds md->interface_ptr = ptr; 29051da177e4SLinus Torvalds } 29061da177e4SLinus Torvalds 29071da177e4SLinus Torvalds void dm_get(struct mapped_device *md) 29081da177e4SLinus Torvalds { 29091da177e4SLinus Torvalds atomic_inc(&md->holders); 29103f77316dSKiyoshi Ueda BUG_ON(test_bit(DMF_FREEING, &md->flags)); 29111da177e4SLinus Torvalds } 29121da177e4SLinus Torvalds 291309ee96b2SMikulas Patocka int dm_hold(struct mapped_device *md) 291409ee96b2SMikulas Patocka { 291509ee96b2SMikulas Patocka spin_lock(&_minor_lock); 291609ee96b2SMikulas Patocka if (test_bit(DMF_FREEING, &md->flags)) { 291709ee96b2SMikulas Patocka spin_unlock(&_minor_lock); 291809ee96b2SMikulas Patocka return -EBUSY; 291909ee96b2SMikulas Patocka } 292009ee96b2SMikulas Patocka dm_get(md); 292109ee96b2SMikulas Patocka spin_unlock(&_minor_lock); 292209ee96b2SMikulas Patocka return 0; 292309ee96b2SMikulas Patocka } 292409ee96b2SMikulas Patocka EXPORT_SYMBOL_GPL(dm_hold); 292509ee96b2SMikulas Patocka 292672d94861SAlasdair G Kergon const char *dm_device_name(struct mapped_device *md) 292772d94861SAlasdair G Kergon { 292872d94861SAlasdair G Kergon return md->name; 292972d94861SAlasdair G Kergon } 293072d94861SAlasdair G Kergon EXPORT_SYMBOL_GPL(dm_device_name); 293172d94861SAlasdair G Kergon 29323f77316dSKiyoshi Ueda static void __dm_destroy(struct mapped_device *md, bool wait) 29331da177e4SLinus Torvalds { 29341134e5aeSMike Anderson struct dm_table *map; 293583d5e5b0SMikulas Patocka int srcu_idx; 29361da177e4SLinus Torvalds 29373f77316dSKiyoshi Ueda might_sleep(); 2938fba9f90eSJeff Mahoney 293983d5e5b0SMikulas Patocka map = dm_get_live_table(md, &srcu_idx); 294063a4f065SMike Snitzer 294163a4f065SMike Snitzer spin_lock(&_minor_lock); 29423f77316dSKiyoshi Ueda idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); 2943fba9f90eSJeff Mahoney set_bit(DMF_FREEING, &md->flags); 2944f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 29453f77316dSKiyoshi Ueda 294602233342SMike Snitzer if (dm_request_based(md) && md->kworker_task) 29472eb6e1e3SKeith Busch flush_kthread_worker(&md->kworker); 29482eb6e1e3SKeith Busch 2949ab7c7bb6SMikulas Patocka /* 2950ab7c7bb6SMikulas Patocka * Take suspend_lock so that presuspend and postsuspend methods 2951ab7c7bb6SMikulas Patocka * do not race with internal suspend. 
2952ab7c7bb6SMikulas Patocka */ 2953ab7c7bb6SMikulas Patocka mutex_lock(&md->suspend_lock); 29544f186f8bSKiyoshi Ueda if (!dm_suspended_md(md)) { 29551da177e4SLinus Torvalds dm_table_presuspend_targets(map); 29561da177e4SLinus Torvalds dm_table_postsuspend_targets(map); 29571da177e4SLinus Torvalds } 2958ab7c7bb6SMikulas Patocka mutex_unlock(&md->suspend_lock); 29593f77316dSKiyoshi Ueda 296083d5e5b0SMikulas Patocka /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ 296183d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 296283d5e5b0SMikulas Patocka 29633f77316dSKiyoshi Ueda /* 29643f77316dSKiyoshi Ueda * Rare, but there may be I/O requests still going to complete, 29653f77316dSKiyoshi Ueda * for example. Wait for all references to disappear. 29663f77316dSKiyoshi Ueda * No one should increment the reference count of the mapped_device, 29673f77316dSKiyoshi Ueda * after the mapped_device state becomes DMF_FREEING. 29683f77316dSKiyoshi Ueda */ 29693f77316dSKiyoshi Ueda if (wait) 29703f77316dSKiyoshi Ueda while (atomic_read(&md->holders)) 29713f77316dSKiyoshi Ueda msleep(1); 29723f77316dSKiyoshi Ueda else if (atomic_read(&md->holders)) 29733f77316dSKiyoshi Ueda DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)", 29743f77316dSKiyoshi Ueda dm_device_name(md), atomic_read(&md->holders)); 29753f77316dSKiyoshi Ueda 2976784aae73SMilan Broz dm_sysfs_exit(md); 2977a7940155SAlasdair G Kergon dm_table_destroy(__unbind(md)); 29781da177e4SLinus Torvalds free_dev(md); 29791da177e4SLinus Torvalds } 29803f77316dSKiyoshi Ueda 29813f77316dSKiyoshi Ueda void dm_destroy(struct mapped_device *md) 29823f77316dSKiyoshi Ueda { 29833f77316dSKiyoshi Ueda __dm_destroy(md, true); 29843f77316dSKiyoshi Ueda } 29853f77316dSKiyoshi Ueda 29863f77316dSKiyoshi Ueda void dm_destroy_immediate(struct mapped_device *md) 29873f77316dSKiyoshi Ueda { 29883f77316dSKiyoshi Ueda __dm_destroy(md, false); 29893f77316dSKiyoshi Ueda } 29903f77316dSKiyoshi Ueda 29913f77316dSKiyoshi Ueda void dm_put(struct mapped_device *md) 29923f77316dSKiyoshi Ueda { 29933f77316dSKiyoshi Ueda atomic_dec(&md->holders); 29941da177e4SLinus Torvalds } 299579eb885cSEdward Goggin EXPORT_SYMBOL_GPL(dm_put); 29961da177e4SLinus Torvalds 2997401600dfSMikulas Patocka static int dm_wait_for_completion(struct mapped_device *md, int interruptible) 299846125c1cSMilan Broz { 299946125c1cSMilan Broz int r = 0; 3000b44ebeb0SMikulas Patocka DECLARE_WAITQUEUE(wait, current); 3001b44ebeb0SMikulas Patocka 3002b44ebeb0SMikulas Patocka add_wait_queue(&md->wait, &wait); 300346125c1cSMilan Broz 300446125c1cSMilan Broz while (1) { 3005401600dfSMikulas Patocka set_current_state(interruptible); 300646125c1cSMilan Broz 3007b4324feeSKiyoshi Ueda if (!md_in_flight(md)) 300846125c1cSMilan Broz break; 300946125c1cSMilan Broz 3010401600dfSMikulas Patocka if (interruptible == TASK_INTERRUPTIBLE && 3011401600dfSMikulas Patocka signal_pending(current)) { 301246125c1cSMilan Broz r = -EINTR; 301346125c1cSMilan Broz break; 301446125c1cSMilan Broz } 301546125c1cSMilan Broz 301646125c1cSMilan Broz io_schedule(); 301746125c1cSMilan Broz } 301846125c1cSMilan Broz set_current_state(TASK_RUNNING); 301946125c1cSMilan Broz 3020b44ebeb0SMikulas Patocka remove_wait_queue(&md->wait, &wait); 3021b44ebeb0SMikulas Patocka 302246125c1cSMilan Broz return r; 302346125c1cSMilan Broz } 302446125c1cSMilan Broz 30251da177e4SLinus Torvalds /* 30261da177e4SLinus Torvalds * Process the deferred bios 30271da177e4SLinus Torvalds */ 3028ef208587SMikulas Patocka static void 
dm_wq_work(struct work_struct *work) 30291da177e4SLinus Torvalds { 3030ef208587SMikulas Patocka struct mapped_device *md = container_of(work, struct mapped_device, 3031ef208587SMikulas Patocka work); 30326d6f10dfSMilan Broz struct bio *c; 303383d5e5b0SMikulas Patocka int srcu_idx; 303483d5e5b0SMikulas Patocka struct dm_table *map; 30351da177e4SLinus Torvalds 303683d5e5b0SMikulas Patocka map = dm_get_live_table(md, &srcu_idx); 3037ef208587SMikulas Patocka 30383b00b203SMikulas Patocka while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 3039022c2611SMikulas Patocka spin_lock_irq(&md->deferred_lock); 3040022c2611SMikulas Patocka c = bio_list_pop(&md->deferred); 3041022c2611SMikulas Patocka spin_unlock_irq(&md->deferred_lock); 3042022c2611SMikulas Patocka 30436a8736d1STejun Heo if (!c) 3044df12ee99SAlasdair G Kergon break; 304573d410c0SMilan Broz 3046e6ee8c0bSKiyoshi Ueda if (dm_request_based(md)) 3047e6ee8c0bSKiyoshi Ueda generic_make_request(c); 3048af7e466aSMikulas Patocka else 304983d5e5b0SMikulas Patocka __split_and_process_bio(md, map, c); 3050e6ee8c0bSKiyoshi Ueda } 30513b00b203SMikulas Patocka 305283d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 30531da177e4SLinus Torvalds } 30541da177e4SLinus Torvalds 30559a1fb464SMikulas Patocka static void dm_queue_flush(struct mapped_device *md) 3056304f3f6aSMilan Broz { 30573b00b203SMikulas Patocka clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 30584e857c58SPeter Zijlstra smp_mb__after_atomic(); 305953d5914fSMikulas Patocka queue_work(md->wq, &md->work); 3060304f3f6aSMilan Broz } 3061304f3f6aSMilan Broz 30621da177e4SLinus Torvalds /* 3063042d2a9bSAlasdair G Kergon * Swap in a new table, returning the old one for the caller to destroy. 30641da177e4SLinus Torvalds */ 3065042d2a9bSAlasdair G Kergon struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) 30661da177e4SLinus Torvalds { 306787eb5b21SMike Christie struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL); 3068754c5fc7SMike Snitzer struct queue_limits limits; 3069042d2a9bSAlasdair G Kergon int r; 30701da177e4SLinus Torvalds 3071e61290a4SDaniel Walker mutex_lock(&md->suspend_lock); 30721da177e4SLinus Torvalds 30731da177e4SLinus Torvalds /* device must be suspended */ 30744f186f8bSKiyoshi Ueda if (!dm_suspended_md(md)) 307593c534aeSAlasdair G Kergon goto out; 30761da177e4SLinus Torvalds 30773ae70656SMike Snitzer /* 30783ae70656SMike Snitzer * If the new table has no data devices, retain the existing limits. 30793ae70656SMike Snitzer * This helps multipath with queue_if_no_path if all paths disappear, 30803ae70656SMike Snitzer * then new I/O is queued based on these limits, and then some paths 30813ae70656SMike Snitzer * reappear. 
30823ae70656SMike Snitzer */ 30833ae70656SMike Snitzer if (dm_table_has_no_data_devices(table)) { 308483d5e5b0SMikulas Patocka live_map = dm_get_live_table_fast(md); 30853ae70656SMike Snitzer if (live_map) 30863ae70656SMike Snitzer limits = md->queue->limits; 308783d5e5b0SMikulas Patocka dm_put_live_table_fast(md); 30883ae70656SMike Snitzer } 30893ae70656SMike Snitzer 309087eb5b21SMike Christie if (!live_map) { 3091754c5fc7SMike Snitzer r = dm_calculate_queue_limits(table, &limits); 3092042d2a9bSAlasdair G Kergon if (r) { 3093042d2a9bSAlasdair G Kergon map = ERR_PTR(r); 3094754c5fc7SMike Snitzer goto out; 3095042d2a9bSAlasdair G Kergon } 309687eb5b21SMike Christie } 3097754c5fc7SMike Snitzer 3098042d2a9bSAlasdair G Kergon map = __bind(md, table, &limits); 30991da177e4SLinus Torvalds 310093c534aeSAlasdair G Kergon out: 3101e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock); 3102042d2a9bSAlasdair G Kergon return map; 31031da177e4SLinus Torvalds } 31041da177e4SLinus Torvalds 31051da177e4SLinus Torvalds /* 31061da177e4SLinus Torvalds * Functions to lock and unlock any filesystem running on the 31071da177e4SLinus Torvalds * device. 31081da177e4SLinus Torvalds */ 31092ca3310eSAlasdair G Kergon static int lock_fs(struct mapped_device *md) 31101da177e4SLinus Torvalds { 3111e39e2e95SAlasdair G Kergon int r; 31121da177e4SLinus Torvalds 31131da177e4SLinus Torvalds WARN_ON(md->frozen_sb); 3114dfbe03f6SAlasdair G Kergon 3115db8fef4fSMikulas Patocka md->frozen_sb = freeze_bdev(md->bdev); 3116dfbe03f6SAlasdair G Kergon if (IS_ERR(md->frozen_sb)) { 3117cf222b37SAlasdair G Kergon r = PTR_ERR(md->frozen_sb); 3118e39e2e95SAlasdair G Kergon md->frozen_sb = NULL; 3119e39e2e95SAlasdair G Kergon return r; 3120dfbe03f6SAlasdair G Kergon } 3121dfbe03f6SAlasdair G Kergon 3122aa8d7c2fSAlasdair G Kergon set_bit(DMF_FROZEN, &md->flags); 3123aa8d7c2fSAlasdair G Kergon 31241da177e4SLinus Torvalds return 0; 31251da177e4SLinus Torvalds } 31261da177e4SLinus Torvalds 31272ca3310eSAlasdair G Kergon static void unlock_fs(struct mapped_device *md) 31281da177e4SLinus Torvalds { 3129aa8d7c2fSAlasdair G Kergon if (!test_bit(DMF_FROZEN, &md->flags)) 3130aa8d7c2fSAlasdair G Kergon return; 3131aa8d7c2fSAlasdair G Kergon 3132db8fef4fSMikulas Patocka thaw_bdev(md->bdev, md->frozen_sb); 31331da177e4SLinus Torvalds md->frozen_sb = NULL; 3134aa8d7c2fSAlasdair G Kergon clear_bit(DMF_FROZEN, &md->flags); 31351da177e4SLinus Torvalds } 31361da177e4SLinus Torvalds 31371da177e4SLinus Torvalds /* 3138ffcc3936SMike Snitzer * If __dm_suspend returns 0, the device is completely quiescent 3139ffcc3936SMike Snitzer * now. There is no request-processing activity. All new requests 3140ffcc3936SMike Snitzer * are being added to md->deferred list. 3141cec47e3dSKiyoshi Ueda * 3142ffcc3936SMike Snitzer * Caller must hold md->suspend_lock 3143cec47e3dSKiyoshi Ueda */ 3144ffcc3936SMike Snitzer static int __dm_suspend(struct mapped_device *md, struct dm_table *map, 3145ffcc3936SMike Snitzer unsigned suspend_flags, int interruptible) 31461da177e4SLinus Torvalds { 3147ffcc3936SMike Snitzer bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG; 3148ffcc3936SMike Snitzer bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG; 3149ffcc3936SMike Snitzer int r; 3150cf222b37SAlasdair G Kergon 31512e93ccc1SKiyoshi Ueda /* 31522e93ccc1SKiyoshi Ueda * DMF_NOFLUSH_SUSPENDING must be set before presuspend. 31532e93ccc1SKiyoshi Ueda * This flag is cleared before dm_suspend returns. 
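 * Targets may query it with dm_noflush_suspending() while they are being
 * suspended.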
31542e93ccc1SKiyoshi Ueda */ 31552e93ccc1SKiyoshi Ueda if (noflush) 31562e93ccc1SKiyoshi Ueda set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 31572e93ccc1SKiyoshi Ueda 3158d67ee213SMike Snitzer /* 3159d67ee213SMike Snitzer * This gets reverted if there's an error later and the targets 3160d67ee213SMike Snitzer * provide the .presuspend_undo hook. 3161d67ee213SMike Snitzer */ 31621da177e4SLinus Torvalds dm_table_presuspend_targets(map); 31631da177e4SLinus Torvalds 31642e93ccc1SKiyoshi Ueda /* 31659f518b27SKiyoshi Ueda * Flush I/O to the device. 31669f518b27SKiyoshi Ueda * Any I/O submitted after lock_fs() may not be flushed. 31679f518b27SKiyoshi Ueda * noflush takes precedence over do_lockfs. 31689f518b27SKiyoshi Ueda * (lock_fs() flushes I/Os and waits for them to complete.) 31692e93ccc1SKiyoshi Ueda */ 317032a926daSMikulas Patocka if (!noflush && do_lockfs) { 31712ca3310eSAlasdair G Kergon r = lock_fs(md); 3172d67ee213SMike Snitzer if (r) { 3173d67ee213SMike Snitzer dm_table_presuspend_undo_targets(map); 3174ffcc3936SMike Snitzer return r; 3175aa8d7c2fSAlasdair G Kergon } 3176d67ee213SMike Snitzer } 31771da177e4SLinus Torvalds 31781da177e4SLinus Torvalds /* 31793b00b203SMikulas Patocka * Here we must make sure that no processes are submitting requests 31803b00b203SMikulas Patocka * to target drivers i.e. no one may be executing 31813b00b203SMikulas Patocka * __split_and_process_bio. This is called from dm_request and 31823b00b203SMikulas Patocka * dm_wq_work. 31833b00b203SMikulas Patocka * 31843b00b203SMikulas Patocka * To get all processes out of __split_and_process_bio in dm_request, 31853b00b203SMikulas Patocka * we take the write lock. To prevent any process from reentering 31866a8736d1STejun Heo * __split_and_process_bio from dm_request and quiesce the thread 31876a8736d1STejun Heo * (dm_wq_work), we set BMF_BLOCK_IO_FOR_SUSPEND and call 31886a8736d1STejun Heo * flush_workqueue(md->wq). 31891da177e4SLinus Torvalds */ 31901eb787ecSAlasdair G Kergon set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 319141abc4e1SHannes Reinecke if (map) 319283d5e5b0SMikulas Patocka synchronize_srcu(&md->io_barrier); 31931da177e4SLinus Torvalds 3194d0bcb878SKiyoshi Ueda /* 319529e4013dSTejun Heo * Stop md->queue before flushing md->wq in case request-based 319629e4013dSTejun Heo * dm defers requests to md->wq from md->queue. 3197d0bcb878SKiyoshi Ueda */ 31982eb6e1e3SKeith Busch if (dm_request_based(md)) { 31999f518b27SKiyoshi Ueda stop_queue(md->queue); 320002233342SMike Snitzer if (md->kworker_task) 32012eb6e1e3SKeith Busch flush_kthread_worker(&md->kworker); 32022eb6e1e3SKeith Busch } 3203cec47e3dSKiyoshi Ueda 3204d0bcb878SKiyoshi Ueda flush_workqueue(md->wq); 3205d0bcb878SKiyoshi Ueda 32061da177e4SLinus Torvalds /* 32073b00b203SMikulas Patocka * At this point no more requests are entering target request routines. 32083b00b203SMikulas Patocka * We call dm_wait_for_completion to wait for all existing requests 32093b00b203SMikulas Patocka * to finish. 32101da177e4SLinus Torvalds */ 3211ffcc3936SMike Snitzer r = dm_wait_for_completion(md, interruptible); 32121da177e4SLinus Torvalds 32136d6f10dfSMilan Broz if (noflush) 3214022c2611SMikulas Patocka clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 321541abc4e1SHannes Reinecke if (map) 321683d5e5b0SMikulas Patocka synchronize_srcu(&md->io_barrier); 32172e93ccc1SKiyoshi Ueda 32181da177e4SLinus Torvalds /* were we interrupted ? 
*/ 321946125c1cSMilan Broz if (r < 0) { 32209a1fb464SMikulas Patocka dm_queue_flush(md); 322173d410c0SMilan Broz 3222cec47e3dSKiyoshi Ueda if (dm_request_based(md)) 32239f518b27SKiyoshi Ueda start_queue(md->queue); 3224cec47e3dSKiyoshi Ueda 32252ca3310eSAlasdair G Kergon unlock_fs(md); 3226d67ee213SMike Snitzer dm_table_presuspend_undo_targets(map); 3227ffcc3936SMike Snitzer /* pushback list is already flushed, so skip flush */ 3228ffcc3936SMike Snitzer } 3229ffcc3936SMike Snitzer 3230ffcc3936SMike Snitzer return r; 32312ca3310eSAlasdair G Kergon } 32322ca3310eSAlasdair G Kergon 32333b00b203SMikulas Patocka /* 3234ffcc3936SMike Snitzer * We need to be able to change a mapping table under a mounted 3235ffcc3936SMike Snitzer * filesystem. For example we might want to move some data in 3236ffcc3936SMike Snitzer * the background. Before the table can be swapped with 3237ffcc3936SMike Snitzer * dm_bind_table, dm_suspend must be called to flush any in 3238ffcc3936SMike Snitzer * flight bios and ensure that any further io gets deferred. 32393b00b203SMikulas Patocka */ 3240ffcc3936SMike Snitzer /* 3241ffcc3936SMike Snitzer * Suspend mechanism in request-based dm. 3242ffcc3936SMike Snitzer * 3243ffcc3936SMike Snitzer * 1. Flush all I/Os by lock_fs() if needed. 3244ffcc3936SMike Snitzer * 2. Stop dispatching any I/O by stopping the request_queue. 3245ffcc3936SMike Snitzer * 3. Wait for all in-flight I/Os to be completed or requeued. 3246ffcc3936SMike Snitzer * 3247ffcc3936SMike Snitzer * To abort suspend, start the request_queue. 3248ffcc3936SMike Snitzer */ 3249ffcc3936SMike Snitzer int dm_suspend(struct mapped_device *md, unsigned suspend_flags) 3250ffcc3936SMike Snitzer { 3251ffcc3936SMike Snitzer struct dm_table *map = NULL; 3252ffcc3936SMike Snitzer int r = 0; 3253ffcc3936SMike Snitzer 3254ffcc3936SMike Snitzer retry: 3255ffcc3936SMike Snitzer mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); 3256ffcc3936SMike Snitzer 3257ffcc3936SMike Snitzer if (dm_suspended_md(md)) { 3258ffcc3936SMike Snitzer r = -EINVAL; 3259ffcc3936SMike Snitzer goto out_unlock; 3260ffcc3936SMike Snitzer } 3261ffcc3936SMike Snitzer 3262ffcc3936SMike Snitzer if (dm_suspended_internally_md(md)) { 3263ffcc3936SMike Snitzer /* already internally suspended, wait for internal resume */ 3264ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 3265ffcc3936SMike Snitzer r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); 3266ffcc3936SMike Snitzer if (r) 3267ffcc3936SMike Snitzer return r; 3268ffcc3936SMike Snitzer goto retry; 3269ffcc3936SMike Snitzer } 3270ffcc3936SMike Snitzer 3271a12f5d48SEric Dumazet map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 3272ffcc3936SMike Snitzer 3273ffcc3936SMike Snitzer r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE); 3274ffcc3936SMike Snitzer if (r) 3275ffcc3936SMike Snitzer goto out_unlock; 32763b00b203SMikulas Patocka 32771da177e4SLinus Torvalds set_bit(DMF_SUSPENDED, &md->flags); 32781da177e4SLinus Torvalds 32794d4471cbSKiyoshi Ueda dm_table_postsuspend_targets(map); 32804d4471cbSKiyoshi Ueda 3281d287483dSAlasdair G Kergon out_unlock: 3282e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock); 3283cf222b37SAlasdair G Kergon return r; 32841da177e4SLinus Torvalds } 32851da177e4SLinus Torvalds 3286ffcc3936SMike Snitzer static int __dm_resume(struct mapped_device *md, struct dm_table *map) 32871da177e4SLinus Torvalds { 3288ffcc3936SMike Snitzer if (map) { 3289ffcc3936SMike Snitzer int r = dm_table_resume_targets(map); 
32908757b776SMilan Broz if (r) 3291ffcc3936SMike Snitzer return r; 3292ffcc3936SMike Snitzer } 32932ca3310eSAlasdair G Kergon 32949a1fb464SMikulas Patocka dm_queue_flush(md); 32952ca3310eSAlasdair G Kergon 3296cec47e3dSKiyoshi Ueda /* 3297cec47e3dSKiyoshi Ueda * Flushing deferred I/Os must be done after targets are resumed 3298cec47e3dSKiyoshi Ueda * so that mapping of targets can work correctly. 3299cec47e3dSKiyoshi Ueda * Request-based dm is queueing the deferred I/Os in its request_queue. 3300cec47e3dSKiyoshi Ueda */ 3301cec47e3dSKiyoshi Ueda if (dm_request_based(md)) 3302cec47e3dSKiyoshi Ueda start_queue(md->queue); 3303cec47e3dSKiyoshi Ueda 33042ca3310eSAlasdair G Kergon unlock_fs(md); 33052ca3310eSAlasdair G Kergon 3306ffcc3936SMike Snitzer return 0; 3307ffcc3936SMike Snitzer } 3308ffcc3936SMike Snitzer 3309ffcc3936SMike Snitzer int dm_resume(struct mapped_device *md) 3310ffcc3936SMike Snitzer { 3311ffcc3936SMike Snitzer int r = -EINVAL; 3312ffcc3936SMike Snitzer struct dm_table *map = NULL; 3313ffcc3936SMike Snitzer 3314ffcc3936SMike Snitzer retry: 3315ffcc3936SMike Snitzer mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); 3316ffcc3936SMike Snitzer 3317ffcc3936SMike Snitzer if (!dm_suspended_md(md)) 3318ffcc3936SMike Snitzer goto out; 3319ffcc3936SMike Snitzer 3320ffcc3936SMike Snitzer if (dm_suspended_internally_md(md)) { 3321ffcc3936SMike Snitzer /* already internally suspended, wait for internal resume */ 3322ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 3323ffcc3936SMike Snitzer r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); 3324ffcc3936SMike Snitzer if (r) 3325ffcc3936SMike Snitzer return r; 3326ffcc3936SMike Snitzer goto retry; 3327ffcc3936SMike Snitzer } 3328ffcc3936SMike Snitzer 3329a12f5d48SEric Dumazet map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 3330ffcc3936SMike Snitzer if (!map || !dm_table_get_size(map)) 3331ffcc3936SMike Snitzer goto out; 3332ffcc3936SMike Snitzer 3333ffcc3936SMike Snitzer r = __dm_resume(md, map); 3334ffcc3936SMike Snitzer if (r) 3335ffcc3936SMike Snitzer goto out; 3336ffcc3936SMike Snitzer 33372ca3310eSAlasdair G Kergon clear_bit(DMF_SUSPENDED, &md->flags); 33382ca3310eSAlasdair G Kergon 3339cf222b37SAlasdair G Kergon r = 0; 3340cf222b37SAlasdair G Kergon out: 3341e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock); 33422ca3310eSAlasdair G Kergon 3343cf222b37SAlasdair G Kergon return r; 33441da177e4SLinus Torvalds } 33451da177e4SLinus Torvalds 3346fd2ed4d2SMikulas Patocka /* 3347fd2ed4d2SMikulas Patocka * Internal suspend/resume works like userspace-driven suspend. It waits 3348fd2ed4d2SMikulas Patocka * until all bios finish and prevents issuing new bios to the target drivers. 3349fd2ed4d2SMikulas Patocka * It may be used only from the kernel. 
3350fd2ed4d2SMikulas Patocka */ 3351fd2ed4d2SMikulas Patocka 3352ffcc3936SMike Snitzer static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags) 3353ffcc3936SMike Snitzer { 3354ffcc3936SMike Snitzer struct dm_table *map = NULL; 3355ffcc3936SMike Snitzer 335696b26c8cSMikulas Patocka if (md->internal_suspend_count++) 3357ffcc3936SMike Snitzer return; /* nested internal suspend */ 3358ffcc3936SMike Snitzer 3359ffcc3936SMike Snitzer if (dm_suspended_md(md)) { 3360ffcc3936SMike Snitzer set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 3361ffcc3936SMike Snitzer return; /* nest suspend */ 3362ffcc3936SMike Snitzer } 3363ffcc3936SMike Snitzer 3364a12f5d48SEric Dumazet map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 3365ffcc3936SMike Snitzer 3366ffcc3936SMike Snitzer /* 3367ffcc3936SMike Snitzer * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is 3368ffcc3936SMike Snitzer * supported. Properly supporting a TASK_INTERRUPTIBLE internal suspend 3369ffcc3936SMike Snitzer * would require changing .presuspend to return an error -- avoid this 3370ffcc3936SMike Snitzer * until there is a need for more elaborate variants of internal suspend. 3371ffcc3936SMike Snitzer */ 3372ffcc3936SMike Snitzer (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE); 3373ffcc3936SMike Snitzer 3374ffcc3936SMike Snitzer set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 3375ffcc3936SMike Snitzer 3376ffcc3936SMike Snitzer dm_table_postsuspend_targets(map); 3377ffcc3936SMike Snitzer } 3378ffcc3936SMike Snitzer 3379ffcc3936SMike Snitzer static void __dm_internal_resume(struct mapped_device *md) 3380ffcc3936SMike Snitzer { 338196b26c8cSMikulas Patocka BUG_ON(!md->internal_suspend_count); 338296b26c8cSMikulas Patocka 338396b26c8cSMikulas Patocka if (--md->internal_suspend_count) 3384ffcc3936SMike Snitzer return; /* resume from nested internal suspend */ 3385ffcc3936SMike Snitzer 3386ffcc3936SMike Snitzer if (dm_suspended_md(md)) 3387ffcc3936SMike Snitzer goto done; /* resume from nested suspend */ 3388ffcc3936SMike Snitzer 3389ffcc3936SMike Snitzer /* 3390ffcc3936SMike Snitzer * NOTE: existing callers don't need to call dm_table_resume_targets 3391ffcc3936SMike Snitzer * (which may fail -- so best to avoid it for now by passing NULL map) 3392ffcc3936SMike Snitzer */ 3393ffcc3936SMike Snitzer (void) __dm_resume(md, NULL); 3394ffcc3936SMike Snitzer 3395ffcc3936SMike Snitzer done: 3396ffcc3936SMike Snitzer clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 3397ffcc3936SMike Snitzer smp_mb__after_atomic(); 3398ffcc3936SMike Snitzer wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY); 3399ffcc3936SMike Snitzer } 3400ffcc3936SMike Snitzer 3401ffcc3936SMike Snitzer void dm_internal_suspend_noflush(struct mapped_device *md) 3402fd2ed4d2SMikulas Patocka { 3403fd2ed4d2SMikulas Patocka mutex_lock(&md->suspend_lock); 3404ffcc3936SMike Snitzer __dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG); 3405ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 3406ffcc3936SMike Snitzer } 3407ffcc3936SMike Snitzer EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush); 3408ffcc3936SMike Snitzer 3409ffcc3936SMike Snitzer void dm_internal_resume(struct mapped_device *md) 3410ffcc3936SMike Snitzer { 3411ffcc3936SMike Snitzer mutex_lock(&md->suspend_lock); 3412ffcc3936SMike Snitzer __dm_internal_resume(md); 3413ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 3414ffcc3936SMike Snitzer } 3415ffcc3936SMike Snitzer EXPORT_SYMBOL_GPL(dm_internal_resume); 3416ffcc3936SMike Snitzer 
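/*
 * Minimal usage sketch (not part of this file): a hypothetical in-kernel
 * caller pairing the internal suspend/resume interfaces exported above.
 * Only the dm_* calls are real; the function below is illustrative.
 */
static void example_quiesce_for_metadata_repair(struct dm_target *ti)
{
	struct mapped_device *md = dm_table_get_md(ti->table);

	/* Quiesce without flushing queued I/O; nests with userspace suspend. */
	dm_internal_suspend_noflush(md);

	/* ... repair or commit on-disk metadata here ... */

	/*
	 * Drop the internal suspend again; the device stays suspended if a
	 * user-driven suspend is still in effect.
	 */
	dm_internal_resume(md);
}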
3417ffcc3936SMike Snitzer /* 3418ffcc3936SMike Snitzer * Fast variants of internal suspend/resume hold md->suspend_lock, 3419ffcc3936SMike Snitzer * which prevents interaction with userspace-driven suspend. 3420ffcc3936SMike Snitzer */ 3421ffcc3936SMike Snitzer 3422ffcc3936SMike Snitzer void dm_internal_suspend_fast(struct mapped_device *md) 3423ffcc3936SMike Snitzer { 3424ffcc3936SMike Snitzer mutex_lock(&md->suspend_lock); 3425ffcc3936SMike Snitzer if (dm_suspended_md(md) || dm_suspended_internally_md(md)) 3426fd2ed4d2SMikulas Patocka return; 3427fd2ed4d2SMikulas Patocka 3428fd2ed4d2SMikulas Patocka set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 3429fd2ed4d2SMikulas Patocka synchronize_srcu(&md->io_barrier); 3430fd2ed4d2SMikulas Patocka flush_workqueue(md->wq); 3431fd2ed4d2SMikulas Patocka dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); 3432fd2ed4d2SMikulas Patocka } 3433b735fedeSMikulas Patocka EXPORT_SYMBOL_GPL(dm_internal_suspend_fast); 3434fd2ed4d2SMikulas Patocka 3435ffcc3936SMike Snitzer void dm_internal_resume_fast(struct mapped_device *md) 3436fd2ed4d2SMikulas Patocka { 3437ffcc3936SMike Snitzer if (dm_suspended_md(md) || dm_suspended_internally_md(md)) 3438fd2ed4d2SMikulas Patocka goto done; 3439fd2ed4d2SMikulas Patocka 3440fd2ed4d2SMikulas Patocka dm_queue_flush(md); 3441fd2ed4d2SMikulas Patocka 3442fd2ed4d2SMikulas Patocka done: 3443fd2ed4d2SMikulas Patocka mutex_unlock(&md->suspend_lock); 3444fd2ed4d2SMikulas Patocka } 3445b735fedeSMikulas Patocka EXPORT_SYMBOL_GPL(dm_internal_resume_fast); 3446fd2ed4d2SMikulas Patocka 34471da177e4SLinus Torvalds /*----------------------------------------------------------------- 34481da177e4SLinus Torvalds * Event notification. 34491da177e4SLinus Torvalds *---------------------------------------------------------------*/ 34503abf85b5SPeter Rajnoha int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, 345160935eb2SMilan Broz unsigned cookie) 345269267a30SAlasdair G Kergon { 345360935eb2SMilan Broz char udev_cookie[DM_COOKIE_LENGTH]; 345460935eb2SMilan Broz char *envp[] = { udev_cookie, NULL }; 345560935eb2SMilan Broz 345660935eb2SMilan Broz if (!cookie) 34573abf85b5SPeter Rajnoha return kobject_uevent(&disk_to_dev(md->disk)->kobj, action); 345860935eb2SMilan Broz else { 345960935eb2SMilan Broz snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u", 346060935eb2SMilan Broz DM_COOKIE_ENV_VAR_NAME, cookie); 34613abf85b5SPeter Rajnoha return kobject_uevent_env(&disk_to_dev(md->disk)->kobj, 34623abf85b5SPeter Rajnoha action, envp); 346360935eb2SMilan Broz } 346469267a30SAlasdair G Kergon } 346569267a30SAlasdair G Kergon 34667a8c3d3bSMike Anderson uint32_t dm_next_uevent_seq(struct mapped_device *md) 34677a8c3d3bSMike Anderson { 34687a8c3d3bSMike Anderson return atomic_add_return(1, &md->uevent_seq); 34697a8c3d3bSMike Anderson } 34707a8c3d3bSMike Anderson 34711da177e4SLinus Torvalds uint32_t dm_get_event_nr(struct mapped_device *md) 34721da177e4SLinus Torvalds { 34731da177e4SLinus Torvalds return atomic_read(&md->event_nr); 34741da177e4SLinus Torvalds } 34751da177e4SLinus Torvalds 34761da177e4SLinus Torvalds int dm_wait_event(struct mapped_device *md, int event_nr) 34771da177e4SLinus Torvalds { 34781da177e4SLinus Torvalds return wait_event_interruptible(md->eventq, 34791da177e4SLinus Torvalds (event_nr != atomic_read(&md->event_nr))); 34801da177e4SLinus Torvalds } 34811da177e4SLinus Torvalds 34827a8c3d3bSMike Anderson void dm_uevent_add(struct mapped_device *md, struct list_head *elist) 34837a8c3d3bSMike Anderson { 
34847a8c3d3bSMike Anderson unsigned long flags; 34857a8c3d3bSMike Anderson 34867a8c3d3bSMike Anderson spin_lock_irqsave(&md->uevent_lock, flags); 34877a8c3d3bSMike Anderson list_add(elist, &md->uevent_list); 34887a8c3d3bSMike Anderson spin_unlock_irqrestore(&md->uevent_lock, flags); 34897a8c3d3bSMike Anderson } 34907a8c3d3bSMike Anderson 34911da177e4SLinus Torvalds /* 34921da177e4SLinus Torvalds * The gendisk is only valid as long as you have a reference 34931da177e4SLinus Torvalds * count on 'md'. 34941da177e4SLinus Torvalds */ 34951da177e4SLinus Torvalds struct gendisk *dm_disk(struct mapped_device *md) 34961da177e4SLinus Torvalds { 34971da177e4SLinus Torvalds return md->disk; 34981da177e4SLinus Torvalds } 349965ff5b7dSSami Tolvanen EXPORT_SYMBOL_GPL(dm_disk); 35001da177e4SLinus Torvalds 3501784aae73SMilan Broz struct kobject *dm_kobject(struct mapped_device *md) 3502784aae73SMilan Broz { 35032995fa78SMikulas Patocka return &md->kobj_holder.kobj; 3504784aae73SMilan Broz } 3505784aae73SMilan Broz 3506784aae73SMilan Broz struct mapped_device *dm_get_from_kobject(struct kobject *kobj) 3507784aae73SMilan Broz { 3508784aae73SMilan Broz struct mapped_device *md; 3509784aae73SMilan Broz 35102995fa78SMikulas Patocka md = container_of(kobj, struct mapped_device, kobj_holder.kobj); 3511784aae73SMilan Broz 35124d89b7b4SMilan Broz if (test_bit(DMF_FREEING, &md->flags) || 3513432a212cSMike Anderson dm_deleting_md(md)) 35144d89b7b4SMilan Broz return NULL; 35154d89b7b4SMilan Broz 3516784aae73SMilan Broz dm_get(md); 3517784aae73SMilan Broz return md; 3518784aae73SMilan Broz } 3519784aae73SMilan Broz 35204f186f8bSKiyoshi Ueda int dm_suspended_md(struct mapped_device *md) 35211da177e4SLinus Torvalds { 35221da177e4SLinus Torvalds return test_bit(DMF_SUSPENDED, &md->flags); 35231da177e4SLinus Torvalds } 35241da177e4SLinus Torvalds 3525ffcc3936SMike Snitzer int dm_suspended_internally_md(struct mapped_device *md) 3526ffcc3936SMike Snitzer { 3527ffcc3936SMike Snitzer return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 3528ffcc3936SMike Snitzer } 3529ffcc3936SMike Snitzer 35302c140a24SMikulas Patocka int dm_test_deferred_remove_flag(struct mapped_device *md) 35312c140a24SMikulas Patocka { 35322c140a24SMikulas Patocka return test_bit(DMF_DEFERRED_REMOVE, &md->flags); 35332c140a24SMikulas Patocka } 35342c140a24SMikulas Patocka 353564dbce58SKiyoshi Ueda int dm_suspended(struct dm_target *ti) 353664dbce58SKiyoshi Ueda { 3537ecdb2e25SKiyoshi Ueda return dm_suspended_md(dm_table_get_md(ti->table)); 353864dbce58SKiyoshi Ueda } 353964dbce58SKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_suspended); 354064dbce58SKiyoshi Ueda 35412e93ccc1SKiyoshi Ueda int dm_noflush_suspending(struct dm_target *ti) 35422e93ccc1SKiyoshi Ueda { 3543ecdb2e25SKiyoshi Ueda return __noflush_suspending(dm_table_get_md(ti->table)); 35442e93ccc1SKiyoshi Ueda } 35452e93ccc1SKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_noflush_suspending); 35462e93ccc1SKiyoshi Ueda 354717e149b8SMike Snitzer struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned type, 354817e149b8SMike Snitzer unsigned integrity, unsigned per_bio_data_size) 3549e6ee8c0bSKiyoshi Ueda { 35505f015204SJun'ichi Nomura struct dm_md_mempools *pools = kzalloc(sizeof(*pools), GFP_KERNEL); 355117e149b8SMike Snitzer struct kmem_cache *cachep = NULL; 3552e5863d9aSMike Snitzer unsigned int pool_size = 0; 35535f015204SJun'ichi Nomura unsigned int front_pad; 3554e6ee8c0bSKiyoshi Ueda 3555e6ee8c0bSKiyoshi Ueda if (!pools) 3556e6ee8c0bSKiyoshi Ueda return NULL; 3557e6ee8c0bSKiyoshi Ueda 
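	/*
	 * filter_md_type() collapses request-based types: when md->use_blk_mq
	 * is set they become DM_TYPE_MQ_REQUEST_BASED, otherwise
	 * DM_TYPE_REQUEST_BASED; bio-based is passed through unchanged.
	 */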
355817e149b8SMike Snitzer type = filter_md_type(type, md);
355917e149b8SMike Snitzer 
3560e5863d9aSMike Snitzer switch (type) {
3561e5863d9aSMike Snitzer case DM_TYPE_BIO_BASED:
35625f015204SJun'ichi Nomura cachep = _io_cache;
3563e8603136SMike Snitzer pool_size = dm_get_reserved_bio_based_ios();
35645f015204SJun'ichi Nomura front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
3565e5863d9aSMike Snitzer break;
3566e5863d9aSMike Snitzer case DM_TYPE_REQUEST_BASED:
356717e149b8SMike Snitzer cachep = _rq_tio_cache;
3568f4790826SMike Snitzer pool_size = dm_get_reserved_rq_based_ios();
35691ae49ea2SMike Snitzer pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache);
35701ae49ea2SMike Snitzer if (!pools->rq_pool)
35711ae49ea2SMike Snitzer goto out;
3572e5863d9aSMike Snitzer /* fall through to setup remaining rq-based pools */
3573e5863d9aSMike Snitzer case DM_TYPE_MQ_REQUEST_BASED:
3574e5863d9aSMike Snitzer if (!pool_size)
35756cfa5857SMike Snitzer pool_size = dm_get_reserved_rq_based_ios();
35765f015204SJun'ichi Nomura front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
35775f015204SJun'ichi Nomura /* per_bio_data_size is not used. See __bind_mempools(). */
35785f015204SJun'ichi Nomura WARN_ON(per_bio_data_size != 0);
3579e5863d9aSMike Snitzer break;
3580e5863d9aSMike Snitzer default:
358117e149b8SMike Snitzer BUG();
3582e5863d9aSMike Snitzer }
35835f015204SJun'ichi Nomura 
358417e149b8SMike Snitzer if (cachep) {
35856cfa5857SMike Snitzer pools->io_pool = mempool_create_slab_pool(pool_size, cachep);
3586e6ee8c0bSKiyoshi Ueda if (!pools->io_pool)
35875f015204SJun'ichi Nomura goto out;
358817e149b8SMike Snitzer }
3589e6ee8c0bSKiyoshi Ueda 
35903d8aab2dSJunichi Nomura pools->bs = bioset_create_nobvec(pool_size, front_pad);
3591e6ee8c0bSKiyoshi Ueda if (!pools->bs)
35925f015204SJun'ichi Nomura goto out;
3593e6ee8c0bSKiyoshi Ueda 
3594a91a2785SMartin K. Petersen if (integrity && bioset_integrity_create(pools->bs, pool_size))
35955f015204SJun'ichi Nomura goto out;
3596a91a2785SMartin K. Petersen 
3597e6ee8c0bSKiyoshi Ueda return pools;
3598e6ee8c0bSKiyoshi Ueda 
35995f015204SJun'ichi Nomura out:
36005f015204SJun'ichi Nomura dm_free_md_mempools(pools);
3601e6ee8c0bSKiyoshi Ueda 
3602e6ee8c0bSKiyoshi Ueda return NULL;
3603e6ee8c0bSKiyoshi Ueda }
3604e6ee8c0bSKiyoshi Ueda 
3605e6ee8c0bSKiyoshi Ueda void dm_free_md_mempools(struct dm_md_mempools *pools)
3606e6ee8c0bSKiyoshi Ueda {
3607e6ee8c0bSKiyoshi Ueda if (!pools)
3608e6ee8c0bSKiyoshi Ueda return;
3609e6ee8c0bSKiyoshi Ueda 
3610e6ee8c0bSKiyoshi Ueda if (pools->io_pool)
3611e6ee8c0bSKiyoshi Ueda mempool_destroy(pools->io_pool);
3612e6ee8c0bSKiyoshi Ueda 
36131ae49ea2SMike Snitzer if (pools->rq_pool)
36141ae49ea2SMike Snitzer mempool_destroy(pools->rq_pool);
36151ae49ea2SMike Snitzer 
3616e6ee8c0bSKiyoshi Ueda if (pools->bs)
3617e6ee8c0bSKiyoshi Ueda bioset_free(pools->bs);
3618e6ee8c0bSKiyoshi Ueda 
3619e6ee8c0bSKiyoshi Ueda kfree(pools);
3620e6ee8c0bSKiyoshi Ueda }
3621e6ee8c0bSKiyoshi Ueda 
362283d5cde4SAlexey Dobriyan static const struct block_device_operations dm_blk_dops = {
36231da177e4SLinus Torvalds .open = dm_blk_open,
36241da177e4SLinus Torvalds .release = dm_blk_close,
3625aa129a22SMilan Broz .ioctl = dm_blk_ioctl,
36263ac51e74SDarrick J. Wong .getgeo = dm_blk_getgeo,
36271da177e4SLinus Torvalds .owner = THIS_MODULE
36281da177e4SLinus Torvalds };
36291da177e4SLinus Torvalds 
36301da177e4SLinus Torvalds /*
36311da177e4SLinus Torvalds * module hooks
36321da177e4SLinus Torvalds */
36331da177e4SLinus Torvalds module_init(dm_init);
36341da177e4SLinus Torvalds module_exit(dm_exit);
36351da177e4SLinus Torvalds 
36361da177e4SLinus Torvalds module_param(major, uint, 0);
36371da177e4SLinus Torvalds MODULE_PARM_DESC(major, "The major number of the device mapper");
3638f4790826SMike Snitzer 
3639e8603136SMike Snitzer module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
3640e8603136SMike Snitzer MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
3641e8603136SMike Snitzer 
3642f4790826SMike Snitzer module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
3643f4790826SMike Snitzer MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");
3644f4790826SMike Snitzer 
364517e149b8SMike Snitzer module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
364617e149b8SMike Snitzer MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");
364717e149b8SMike Snitzer 
36481da177e4SLinus Torvalds MODULE_DESCRIPTION(DM_NAME " driver");
36491da177e4SLinus Torvalds MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
36501da177e4SLinus Torvalds MODULE_LICENSE("GPL");
3651
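/*
 * Illustrative sketch (not part of dm.c): how a kernel-internal caller might
 * bracket work with the fast internal suspend/resume pair defined above.
 * The function name and the work done inside the critical section are
 * hypothetical; only dm_internal_suspend_fast() and dm_internal_resume_fast()
 * come from this file.
 */
static void example_update_while_quiesced(struct mapped_device *md)
{
	/* Takes md->suspend_lock, blocks new I/O and waits for in-flight bios. */
	dm_internal_suspend_fast(md);

	/* ... perform the update that requires a quiesced device here ... */

	/* Re-allows I/O (unless the device stays suspended) and drops the lock. */
	dm_internal_resume_fast(md);
}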
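/*
 * Illustrative sketch (not part of dm.c): pairing dm_get_event_nr() with
 * dm_wait_event() to sleep until the next device event. The wrapper name is
 * hypothetical; the two helpers are from the event notification section
 * above. dm_wait_event() returns 0 once the counter changes, or
 * -ERESTARTSYS if the sleep is interrupted by a signal.
 */
static int example_wait_for_next_event(struct mapped_device *md)
{
	/* Snapshot the counter first so an event raised meanwhile is not missed. */
	int event_nr = dm_get_event_nr(md);

	return dm_wait_event(md, event_nr);
}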