/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/kthread.h>
#include <linux/ktime.h>
#include <linux/elevator.h> /* for rq_end_sector() */
#include <linux/blk-mq.h>
#include <linux/pr.h>

#include <trace/events/block.h>

#define DM_MSG_PREFIX "core"

#ifdef CONFIG_PRINTK
/*
 * ratelimit state to be used in DMXXX_LIMIT().
 */
DEFINE_RATELIMIT_STATE(dm_ratelimit_state,
		       DEFAULT_RATELIMIT_INTERVAL,
		       DEFAULT_RATELIMIT_BURST);
EXPORT_SYMBOL(dm_ratelimit_state);
#endif

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_IDR(_minor_idr);

static DEFINE_SPINLOCK(_minor_lock);

static void do_deferred_remove(struct work_struct *w);

static DECLARE_WORK(deferred_remove_work, do_deferred_remove);

static struct workqueue_struct *deferred_remove_workqueue;

/*
 * For bio-based dm.
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	atomic_t io_count;
	struct bio *bio;
	unsigned long start_time;
	spinlock_t endio_lock;
	struct dm_stats_aux stats_aux;
};

/*
 * For request-based dm.
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
	struct mapped_device *md;
	struct dm_target *ti;
	struct request *orig, *clone;
	struct kthread_work work;
	int error;
	union map_info info;
	struct dm_stats_aux stats_aux;
	unsigned long duration_jiffies;
	unsigned n_sectors;
};

/*
 * For request-based dm - the bio clones we allocate are embedded in these
 * structs.
 *
 * We allocate these with bio_alloc_bioset, using the front_pad parameter when
 * the bioset is created - this means the bio has to come at the end of the
 * struct.
 */
struct dm_rq_clone_bio_info {
	struct bio *orig;
	struct dm_rq_target_io *tio;
	struct bio clone;
};
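/*
 * Because the embedded bio is the final member, a clone bio handed out by
 * the bioset can be mapped back to its enclosing dm_rq_clone_bio_info with
 * container_of() - this is what end_clone_bio() below relies on.  A minimal
 * sketch of the recovery step:
 *
 *	struct dm_rq_clone_bio_info *info =
 *		container_of(clone, struct dm_rq_clone_bio_info, clone);
 */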
#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7

/*
 * A dummy definition to make RCU happy.
 * struct dm_table should never be dereferenced in this file.
 */
struct dm_table {
	int undefined__;
};

/*
 * Work processed by per-device workqueue.
 */
struct mapped_device {
	struct srcu_struct io_barrier;
	struct mutex suspend_lock;
	atomic_t holders;
	atomic_t open_count;

	/*
	 * The current mapping.
	 * Use dm_get_live_table{_fast} or take suspend_lock for
	 * dereference.
	 */
	struct dm_table __rcu *map;

	struct list_head table_devices;
	struct mutex table_devices_lock;

	unsigned long flags;

	struct request_queue *queue;
	unsigned type;
	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;

	struct dm_target *immutable_target;
	struct target_type *immutable_target_type;

	struct gendisk *disk;
	char name[16];

	void *interface_ptr;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	atomic_t pending[2];
	wait_queue_head_t wait;
	struct work_struct work;
	struct bio_list deferred;
	spinlock_t deferred_lock;

	/*
	 * Processing queue (flush)
	 */
	struct workqueue_struct *wq;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;
	mempool_t *rq_pool;

	struct bio_set *bs;

	/*
	 * Event handling.
	 */
	atomic_t event_nr;
	wait_queue_head_t eventq;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/*
	 * freeze/thaw support requires holding onto a super block
	 */
	struct super_block *frozen_sb;
	struct block_device *bdev;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/* kobject and completion */
	struct dm_kobject_holder kobj_holder;

	/* zero-length flush that will be cloned and submitted to targets */
	struct bio flush_bio;

	/* the number of internal suspends */
	unsigned internal_suspend_count;

	struct dm_stats stats;

	struct kthread_worker kworker;
	struct task_struct *kworker_task;

	/* for request-based merge heuristic in dm_request_fn() */
	unsigned seq_rq_merge_deadline_usecs;
	int last_rq_rw;
	sector_t last_rq_pos;
	ktime_t last_rq_start_time;

	/* for blk-mq request-based DM support */
	struct blk_mq_tag_set tag_set;
	bool use_blk_mq;
};

#ifdef CONFIG_DM_MQ_DEFAULT
static bool use_blk_mq = true;
#else
static bool use_blk_mq = false;
#endif

bool dm_use_blk_mq(struct mapped_device *md)
{
	return md->use_blk_mq;
}

/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
	mempool_t *io_pool;
	mempool_t *rq_pool;
	struct bio_set *bs;
};

struct table_device {
	struct list_head list;
	atomic_t count;
	struct dm_dev dm_dev;
};

#define RESERVED_BIO_BASED_IOS		16
#define RESERVED_REQUEST_BASED_IOS	256
#define RESERVED_MAX_IOS		1024
static struct kmem_cache *_io_cache;
static struct kmem_cache *_rq_tio_cache;
static struct kmem_cache *_rq_cache;

/*
 * Bio-based DM's mempools' reserved IOs set by the user.
 */
static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;

/*
 * Request-based DM's mempools' reserved IOs set by the user.
 */
static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;

static unsigned __dm_get_module_param(unsigned *module_param,
				      unsigned def, unsigned max)
{
	unsigned param = ACCESS_ONCE(*module_param);
	unsigned modified_param = 0;

	if (!param)
		modified_param = def;
	else if (param > max)
		modified_param = max;

	if (modified_param) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned dm_get_reserved_bio_based_ios(void)
{
	return __dm_get_module_param(&reserved_bio_based_ios,
				     RESERVED_BIO_BASED_IOS, RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);

unsigned dm_get_reserved_rq_based_ios(void)
{
	return __dm_get_module_param(&reserved_rq_based_ios,
				     RESERVED_REQUEST_BASED_IOS, RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);
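/*
 * __dm_get_module_param() reads the module parameter once, substitutes the
 * default for 0 and the maximum for anything above it, and writes the
 * clamped value back with cmpxchg() so a concurrent writer is not clobbered
 * blindly.  Worked example, with reserved_bio_based_ios set to 0:
 *
 *	dm_get_reserved_bio_based_ios()
 *	-> __dm_get_module_param(&reserved_bio_based_ios, 16, 1024)
 *	-> returns 16 and resets the parameter to 16
 */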
static int __init local_init(void)
{
	int r = -ENOMEM;

	/* allocate a slab for the dm_ios */
	_io_cache = KMEM_CACHE(dm_io, 0);
	if (!_io_cache)
		return r;

	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
	if (!_rq_tio_cache)
		goto out_free_io_cache;

	_rq_cache = kmem_cache_create("dm_clone_request", sizeof(struct request),
				      __alignof__(struct request), 0, NULL);
	if (!_rq_cache)
		goto out_free_rq_tio_cache;

	r = dm_uevent_init();
	if (r)
		goto out_free_rq_cache;

	deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
	if (!deferred_remove_workqueue) {
		r = -ENOMEM;
		goto out_uevent_exit;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_free_workqueue;

	if (!_major)
		_major = r;

	return 0;

out_free_workqueue:
	destroy_workqueue(deferred_remove_workqueue);
out_uevent_exit:
	dm_uevent_exit();
out_free_rq_cache:
	kmem_cache_destroy(_rq_cache);
out_free_rq_tio_cache:
	kmem_cache_destroy(_rq_tio_cache);
out_free_io_cache:
	kmem_cache_destroy(_io_cache);

	return r;
}

static void local_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(deferred_remove_workqueue);

	kmem_cache_destroy(_rq_cache);
	kmem_cache_destroy(_rq_tio_cache);
	kmem_cache_destroy(_io_cache);
	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_io_init,
	dm_kcopyd_init,
	dm_interface_init,
	dm_statistics_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_io_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
	dm_statistics_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();

	/*
	 * Should be empty by this point.
	 */
	idr_destroy(&_minor_idr);
}

/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{
	return test_bit(DMF_DELETING, &md->flags);
}

static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);
out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}

static void dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = disk->private_data;
	if (WARN_ON(!md))
		goto out;

	if (atomic_dec_and_test(&md->open_count) &&
	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
		queue_work(deferred_remove_workqueue, &deferred_remove_work);

	dm_put(md);
out:
	spin_unlock(&_minor_lock);
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md)) {
		r = -EBUSY;
		if (mark_deferred)
			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
		r = -EEXIST;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

int dm_cancel_deferred_remove(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (test_bit(DMF_DELETING, &md->flags))
		r = -EBUSY;
	else
		clear_bit(DMF_DEFERRED_REMOVE, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static void do_deferred_remove(struct work_struct *w)
{
	dm_deferred_remove();
}

sector_t dm_get_size(struct mapped_device *md)
{
	return get_capacity(md->disk);
}

struct request_queue *dm_get_md_queue(struct mapped_device *md)
{
	return md->queue;
}

struct dm_stats *dm_get_stats(struct mapped_device *md)
{
	return &md->stats;
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}
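/*
 * Deferred-remove life cycle, for illustration only (the ioctl entry points
 * that drive this live outside this file; the sketch just shows the
 * semantics implemented above):
 *
 *	dm_lock_for_deletion(md, true, false)
 *	-> returns -EBUSY while openers exist, but sets DMF_DEFERRED_REMOVE,
 *	   so the final dm_blk_close() queues deferred_remove_work;
 *	dm_cancel_deferred_remove(md)
 *	-> clears the flag again, unless deletion is already in progress.
 */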
static int dm_grab_bdev_for_ioctl(struct mapped_device *md,
				  struct block_device **bdev,
				  fmode_t *mode)
{
	struct dm_target *tgt;
	struct dm_table *map;
	int srcu_idx, r;

retry:
	r = -ENOTTY;
	map = dm_get_live_table(md, &srcu_idx);
	if (!map || !dm_table_get_size(map))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		goto out;

	tgt = dm_table_get_target(map, 0);
	if (!tgt->type->prepare_ioctl)
		goto out;

	if (dm_suspended_md(md)) {
		r = -EAGAIN;
		goto out;
	}

	r = tgt->type->prepare_ioctl(tgt, bdev, mode);
	if (r < 0)
		goto out;

	bdgrab(*bdev);
	dm_put_live_table(md, srcu_idx);
	return r;

out:
	dm_put_live_table(md, srcu_idx);
	if (r == -ENOTCONN && !fatal_signal_pending(current)) {
		msleep(10);
		goto retry;
	}
	return r;
}
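/*
 * On success the bdev reference taken with bdgrab() must be dropped by the
 * caller with bdput(); dm_blk_ioctl() below is the canonical user:
 *
 *	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
 *	if (r < 0)
 *		return r;
 *	... issue the ioctl against bdev ...
 *	bdput(bdev);
 */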
static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	int r;

	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
	if (r < 0)
		return r;

	if (r > 0) {
		/*
		 * Target determined this ioctl is being issued against
		 * a logical partition of the parent bdev; so extra
		 * validation is needed.
		 */
		r = scsi_verify_blk_ioctl(NULL, cmd);
		if (r)
			goto out;
	}

	r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
out:
	bdput(bdev);
	return r;
}

static struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
	bio_put(&tio->clone);
}

static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
					    gfp_t gfp_mask)
{
	return mempool_alloc(md->io_pool, gfp_mask);
}

static void free_rq_tio(struct dm_rq_target_io *tio)
{
	mempool_free(tio, tio->md->io_pool);
}

static struct request *alloc_clone_request(struct mapped_device *md,
					   gfp_t gfp_mask)
{
	return mempool_alloc(md->rq_pool, gfp_mask);
}

static void free_clone_request(struct mapped_device *md, struct request *rq)
{
	mempool_free(rq, md->rq_pool);
}

static int md_in_flight(struct mapped_device *md)
{
	return atomic_read(&md->pending[READ]) +
	       atomic_read(&md->pending[WRITE]);
}

static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	int cpu;
	int rw = bio_data_dir(bio);

	io->start_time = jiffies;

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_unlock();
	atomic_set(&dm_disk(md)->part0.in_flight[rw],
		   atomic_inc_return(&md->pending[rw]));

	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
				    bio_sectors(bio), false, 0, &io->stats_aux);
}
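/*
 * Accounting sketch (illustrative): every bio-based dm_io is bracketed by
 * one start_io_acct()/end_io_acct() pair; the former increments
 * md->pending[rw] and the latter decrements it, and md_in_flight() sums
 * both directions so the suspend path (later in this file) can wait for
 * the count to drain:
 *
 *	start_io_acct(io);		md->pending[rw]++
 *	... clone and map the bio ...
 *	dec_pending(io, error);		calls end_io_acct(), md->pending[rw]--
 */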
static void end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	unsigned long duration = jiffies - io->start_time;
	int pending;
	int rw = bio_data_dir(bio);

	generic_end_io_acct(rw, &dm_disk(md)->part0, io->start_time);

	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
				    bio_sectors(bio), true, duration, &io->stats_aux);

	/*
	 * After this is decremented the bio must not be touched if it is
	 * a flush.
	 */
	pending = atomic_dec_return(&md->pending[rw]);
	atomic_set(&dm_disk(md)->part0.in_flight[rw], pending);
	pending += atomic_read(&md->pending[rw^0x1]);

	/* nudge anyone waiting on suspend queue */
	if (!pending)
		wake_up(&md->wait);
}

/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&md->deferred_lock, flags);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irqrestore(&md->deferred_lock, flags);
	queue_work(md->wq, &md->work);
}

/*
 * Everyone (including functions in this file) should use this function
 * to access the md->map field, and make sure they call
 * dm_put_live_table() when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
{
	*srcu_idx = srcu_read_lock(&md->io_barrier);

	return srcu_dereference(md->map, &md->io_barrier);
}

void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
{
	srcu_read_unlock(&md->io_barrier, srcu_idx);
}

void dm_sync_table(struct mapped_device *md)
{
	synchronize_srcu(&md->io_barrier);
	synchronize_rcu_expedited();
}

/*
 * A fast alternative to dm_get_live_table/dm_put_live_table.
 * The caller must not block between these two functions.
 */
static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
{
	rcu_read_lock();
	return rcu_dereference(md->map);
}

static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
{
	rcu_read_unlock();
}
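/*
 * Typical lookup pattern, as used by dm_grab_bdev_for_ioctl() above (a
 * sketch; the SRCU read side may sleep, the RCU _fast variant may not):
 *
 *	int srcu_idx;
 *	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 *
 *	if (map) {
 *		... look up targets, may block ...
 *	}
 *	dm_put_live_table(md, srcu_idx);
 *
 * dm_sync_table() is the writer-side barrier: once it returns, no reader
 * can still see the old md->map.
 */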
/*
 * Open a table device so we can use it as a map destination.
 */
static int open_table_device(struct table_device *td, dev_t dev,
			     struct mapped_device *md)
{
	static char *_claim_ptr = "I belong to device-mapper";
	struct block_device *bdev;

	int r;

	BUG_ON(td->dm_dev.bdev);

	bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _claim_ptr);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	r = bd_link_disk_holder(bdev, dm_disk(md));
	if (r) {
		blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
		return r;
	}

	td->dm_dev.bdev = bdev;
	return 0;
}

/*
 * Close a table device that we've been using.
 */
static void close_table_device(struct table_device *td, struct mapped_device *md)
{
	if (!td->dm_dev.bdev)
		return;

	bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
	blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
	td->dm_dev.bdev = NULL;
}

static struct table_device *find_table_device(struct list_head *l, dev_t dev,
					      fmode_t mode)
{
	struct table_device *td;

	list_for_each_entry(td, l, list)
		if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
			return td;

	return NULL;
}

int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
			struct dm_dev **result)
{
	int r;
	struct table_device *td;

	mutex_lock(&md->table_devices_lock);
	td = find_table_device(&md->table_devices, dev, mode);
	if (!td) {
		td = kmalloc(sizeof(*td), GFP_KERNEL);
		if (!td) {
			mutex_unlock(&md->table_devices_lock);
			return -ENOMEM;
		}

		td->dm_dev.mode = mode;
		td->dm_dev.bdev = NULL;

		if ((r = open_table_device(td, dev, md))) {
			mutex_unlock(&md->table_devices_lock);
			kfree(td);
			return r;
		}

		format_dev_t(td->dm_dev.name, dev);

		atomic_set(&td->count, 0);
		list_add(&td->list, &md->table_devices);
	}
	atomic_inc(&td->count);
	mutex_unlock(&md->table_devices_lock);

	*result = &td->dm_dev;
	return 0;
}
EXPORT_SYMBOL_GPL(dm_get_table_device);

void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
{
	struct table_device *td = container_of(d, struct table_device, dm_dev);

	mutex_lock(&md->table_devices_lock);
	if (atomic_dec_and_test(&td->count)) {
		close_table_device(td, md);
		list_del(&td->list);
		kfree(td);
	}
	mutex_unlock(&md->table_devices_lock);
}
EXPORT_SYMBOL(dm_put_table_device);
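/*
 * Reference counting sketch: dm_get_table_device() hands out one dm_dev per
 * (dev_t, mode) pair and bumps td->count; only the final
 * dm_put_table_device() closes and frees it.  A hypothetical caller that
 * needs a raw underlying device would pair the calls like this:
 *
 *	struct dm_dev *d;
 *
 *	r = dm_get_table_device(md, dev, FMODE_READ | FMODE_WRITE, &d);
 *	if (r)
 *		return r;
 *	... use d->bdev ...
 *	dm_put_table_device(md, d);
 */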
static void free_table_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct table_device *td = list_entry(tmp, struct table_device, list);

		DMWARN("dm_destroy: %s still exists with %d references",
		       td->dm_dev.name, atomic_read(&td->count));
		kfree(td);
	}
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}
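/*
 * Worked example of the validation above: for a geometry of 1024 cylinders,
 * 255 heads and 63 sectors, sz = 1024 * 255 * 63 = 16450560 sectors, so any
 * geo->start up to that value is accepted and anything larger is rejected
 * with -EINVAL.
 */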
/*-----------------------------------------------------------------
 * CRUD START:
 *   A more elegant soln is in the works that uses the queue
 *   merge fn, unfortunately there are a couple of changes to
 *   the block layer that I want to make for this.  So in the
 *   interests of getting something for people to use I give
 *   you this clearly demarcated crap.
 *---------------------------------------------------------------*/

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, int error)
{
	unsigned long flags;
	int io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;

	/* Push-back supersedes any I/O errors */
	if (unlikely(error)) {
		spin_lock_irqsave(&io->endio_lock, flags);
		if (!(io->error > 0 && __noflush_suspending(md)))
			io->error = error;
		spin_unlock_irqrestore(&io->endio_lock, flags);
	}

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->error == DM_ENDIO_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md))
				bio_list_add_head(&md->deferred, io->bio);
			else
				/* noflush suspend was interrupted. */
				io->error = -EIO;
			spin_unlock_irqrestore(&md->deferred_lock, flags);
		}

		io_error = io->error;
		bio = io->bio;
		end_io_acct(io);
		free_io(md, io);

		if (io_error == DM_ENDIO_REQUEUE)
			return;

		if ((bio->bi_rw & REQ_FLUSH) && bio->bi_iter.bi_size) {
			/*
			 * Preflush done for flush with data, reissue
			 * without REQ_FLUSH.
			 */
			bio->bi_rw &= ~REQ_FLUSH;
			queue_io(md, bio);
		} else {
			/* done with normal IO or empty flush */
			trace_block_bio_complete(md->queue, bio, io_error);
			bio->bi_error = io_error;
			bio_endio(bio);
		}
	}
}

static void disable_write_same(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE SAME, disable it */
	limits->max_write_same_sectors = 0;
}

static void clone_endio(struct bio *bio)
{
	int error = bio->bi_error;
	int r = error;
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (endio) {
		r = endio(tio->ti, bio, error);
		if (r < 0 || r == DM_ENDIO_REQUEUE)
			/*
			 * error and requeue request are handled
			 * in dec_pending().
			 */
			error = r;
		else if (r == DM_ENDIO_INCOMPLETE)
			/* The target will handle the io */
			return;
		else if (r) {
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	if (unlikely(r == -EREMOTEIO && (bio->bi_rw & REQ_WRITE_SAME) &&
		     !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors))
		disable_write_same(md);

	free_tio(md, tio);
	dec_pending(io, error);
}
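/*
 * Return-value contract for a target's ->end_io hook, as clone_endio()
 * interprets it (summarized from the branches above):
 *
 *	r < 0 or DM_ENDIO_REQUEUE - propagated to dec_pending(), which
 *		either fails the io or pushes back the original bio;
 *	DM_ENDIO_INCOMPLETE       - the target keeps ownership of the bio,
 *		so nothing more is done here;
 *	any other positive value  - a target bug, hence the BUG().
 */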
/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone)
{
	struct dm_rq_clone_bio_info *info =
		container_of(clone, struct dm_rq_clone_bio_info, clone);
	struct dm_rq_target_io *tio = info->tio;
	struct bio *bio = info->orig;
	unsigned int nr_bytes = info->orig->bi_iter.bi_size;
	int error = clone->bi_error;

	bio_put(clone);

	if (tio->error)
		/*
		 * An error has already been detected on the request.
		 * Once an error occurred, just let clone->end_io() handle
		 * the remainder.
		 */
		return;
	else if (error) {
		/*
		 * Don't report the error to the upper layer yet.
		 * The error handling decision is made by the target driver,
		 * when the request is completed.
		 */
		tio->error = error;
		return;
	}

	/*
	 * I/O for the bio successfully completed.
	 * Report the data completion to the upper layer.
	 */

	/*
	 * bios are processed from the head of the list.
	 * So the completing bio should always be rq->bio.
	 * If it's not, something wrong is happening.
	 */
	if (tio->orig->bio != bio)
		DMERR("bio completion is going in the middle of the request");

	/*
	 * Update the original request.
	 * Do not use blk_end_request() here, because it may complete
	 * the original request before the clone, and break the ordering.
	 */
	blk_update_request(tio->orig, 0, nr_bytes);
}

static struct dm_rq_target_io *tio_from_request(struct request *rq)
{
	return (rq->q->mq_ops ? blk_mq_rq_to_pdu(rq) : rq->special);
}

static void rq_end_stats(struct mapped_device *md, struct request *orig)
{
	if (unlikely(dm_stats_used(&md->stats))) {
		struct dm_rq_target_io *tio = tio_from_request(orig);
		tio->duration_jiffies = jiffies - tio->duration_jiffies;
		dm_stats_account_io(&md->stats, orig->cmd_flags, blk_rq_pos(orig),
				    tio->n_sectors, true, tio->duration_jiffies,
				    &tio->stats_aux);
	}
}
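/*
 * Where the dm_rq_target_io lives, depending on the queue type (this is why
 * tio_from_request() above has two branches):
 *
 *	blk-mq request:    the tio is the per-request PDU that blk-mq
 *			   allocates directly behind the request, reached
 *			   via blk_mq_rq_to_pdu(rq);
 *	old request_fn rq: the tio is allocated from md->io_pool and hung
 *			   off rq->special by the prep path (which
 *			   dm_unprep_request() below undoes).
 */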
/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Or do dm_get() before calling this function and dm_put() later.
 */
static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
{
	atomic_dec(&md->pending[rw]);

	/* nudge anyone waiting on suspend queue */
	if (!md_in_flight(md))
		wake_up(&md->wait);

	/*
	 * Run this off this callpath, as drivers could invoke end_io while
	 * inside their request_fn (and holding the queue lock). Calling
	 * back into ->request_fn() could deadlock attempting to grab the
	 * queue lock again.
	 */
	if (!md->queue->mq_ops && run_queue)
		blk_run_queue_async(md->queue);

	/*
	 * dm_put() must be at the end of this function. See the comment above
	 */
	dm_put(md);
}

static void free_rq_clone(struct request *clone)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;

	blk_rq_unprep_clone(clone);

	if (md->type == DM_TYPE_MQ_REQUEST_BASED)
		/* stacked on blk-mq queue(s) */
		tio->ti->type->release_clone_rq(clone);
	else if (!md->queue->mq_ops)
		/* request_fn queue stacked on request_fn queue(s) */
		free_clone_request(md, clone);
	/*
	 * NOTE: for the blk-mq queue stacked on request_fn queue(s) case:
	 * no need to call free_clone_request() because we leverage blk-mq by
	 * allocating the clone at the end of the blk-mq pdu (see: clone_rq)
	 */

	if (!md->queue->mq_ops)
		free_rq_tio(tio);
}

/*
 * Complete the clone and the original request.
 * Must be called without clone's queue lock held,
 * see end_clone_request() for more details.
 */
static void dm_end_request(struct request *clone, int error)
{
	int rw = rq_data_dir(clone);
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;

	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
		rq->errors = clone->errors;
		rq->resid_len = clone->resid_len;

		if (rq->sense)
			/*
			 * We are using the sense buffer of the original
			 * request.
			 * So setting the length of the sense data is enough.
			 */
			rq->sense_len = clone->sense_len;
	}

	free_rq_clone(clone);
	rq_end_stats(md, rq);
	if (!rq->q->mq_ops)
		blk_end_request_all(rq, error);
	else
		blk_mq_end_request(rq, error);
	rq_completed(md, rw, true);
}

static void dm_unprep_request(struct request *rq)
{
	struct dm_rq_target_io *tio = tio_from_request(rq);
	struct request *clone = tio->clone;

	if (!rq->q->mq_ops) {
		rq->special = NULL;
		rq->cmd_flags &= ~REQ_DONTPREP;
	}

	if (clone)
		free_rq_clone(clone);
	else if (!tio->md->queue->mq_ops)
		free_rq_tio(tio);
}

/*
 * Requeue the original request of a clone.
 */
static void old_requeue_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, rq);
	blk_run_queue_async(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static void dm_requeue_original_request(struct mapped_device *md,
					struct request *rq)
{
	int rw = rq_data_dir(rq);

	dm_unprep_request(rq);

	rq_end_stats(md, rq);
	if (!rq->q->mq_ops)
		old_requeue_request(rq);
	else {
		blk_mq_requeue_request(rq);
		blk_mq_kick_requeue_list(rq->q);
	}

	rq_completed(md, rw, false);
}
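/*
 * Requeue summary (illustrative): dm_requeue_original_request() unwinds the
 * prep state, accounts the attempt, and hands the original request back to
 * whichever queue model owns it - blk_requeue_request() plus an async queue
 * run for the old request_fn path, blk_mq_requeue_request() plus a kick of
 * the requeue list for blk-mq.  rq_completed() is then called with
 * run_queue=false, since each branch has already restarted its own queue.
 */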
blk_mq_stop_hw_queues(q); 1237bfebd1cdSMike Snitzer } 1238bfebd1cdSMike Snitzer 1239bfebd1cdSMike Snitzer static void old_start_queue(struct request_queue *q) 1240bfebd1cdSMike Snitzer { 1241cec47e3dSKiyoshi Ueda unsigned long flags; 1242cec47e3dSKiyoshi Ueda 1243cec47e3dSKiyoshi Ueda spin_lock_irqsave(q->queue_lock, flags); 1244cec47e3dSKiyoshi Ueda if (blk_queue_stopped(q)) 1245cec47e3dSKiyoshi Ueda blk_start_queue(q); 1246bfebd1cdSMike Snitzer spin_unlock_irqrestore(q->queue_lock, flags); 1247cec47e3dSKiyoshi Ueda } 1248cec47e3dSKiyoshi Ueda 1249cec47e3dSKiyoshi Ueda static void start_queue(struct request_queue *q) 1250cec47e3dSKiyoshi Ueda { 1251bfebd1cdSMike Snitzer if (!q->mq_ops) 1252bfebd1cdSMike Snitzer old_start_queue(q); 1253bfebd1cdSMike Snitzer else 1254bfebd1cdSMike Snitzer blk_mq_start_stopped_hw_queues(q, true); 1255cec47e3dSKiyoshi Ueda } 1256cec47e3dSKiyoshi Ueda 125711a68244SKiyoshi Ueda static void dm_done(struct request *clone, int error, bool mapped) 125811a68244SKiyoshi Ueda { 125911a68244SKiyoshi Ueda int r = error; 126011a68244SKiyoshi Ueda struct dm_rq_target_io *tio = clone->end_io_data; 1261ba1cbad9SMike Snitzer dm_request_endio_fn rq_end_io = NULL; 1262ba1cbad9SMike Snitzer 1263ba1cbad9SMike Snitzer if (tio->ti) { 1264ba1cbad9SMike Snitzer rq_end_io = tio->ti->type->rq_end_io; 126511a68244SKiyoshi Ueda 126611a68244SKiyoshi Ueda if (mapped && rq_end_io) 126711a68244SKiyoshi Ueda r = rq_end_io(tio->ti, clone, error, &tio->info); 1268ba1cbad9SMike Snitzer } 126911a68244SKiyoshi Ueda 12707eee4ae2SMike Snitzer if (unlikely(r == -EREMOTEIO && (clone->cmd_flags & REQ_WRITE_SAME) && 12717eee4ae2SMike Snitzer !clone->q->limits.max_write_same_sectors)) 12727eee4ae2SMike Snitzer disable_write_same(tio->md); 12737eee4ae2SMike Snitzer 127411a68244SKiyoshi Ueda if (r <= 0) 127511a68244SKiyoshi Ueda /* The target wants to complete the I/O */ 127611a68244SKiyoshi Ueda dm_end_request(clone, r); 127711a68244SKiyoshi Ueda else if (r == DM_ENDIO_INCOMPLETE) 127811a68244SKiyoshi Ueda /* The target will handle the I/O */ 127911a68244SKiyoshi Ueda return; 128011a68244SKiyoshi Ueda else if (r == DM_ENDIO_REQUEUE) 128111a68244SKiyoshi Ueda /* The target wants to requeue the I/O */ 12822d76fff1SMike Snitzer dm_requeue_original_request(tio->md, tio->orig); 128311a68244SKiyoshi Ueda else { 128411a68244SKiyoshi Ueda DMWARN("unimplemented target endio return value: %d", r); 128511a68244SKiyoshi Ueda BUG(); 128611a68244SKiyoshi Ueda } 128711a68244SKiyoshi Ueda } 128811a68244SKiyoshi Ueda 1289cec47e3dSKiyoshi Ueda /* 1290cec47e3dSKiyoshi Ueda * Request completion handler for request-based dm 1291cec47e3dSKiyoshi Ueda */ 1292cec47e3dSKiyoshi Ueda static void dm_softirq_done(struct request *rq) 1293cec47e3dSKiyoshi Ueda { 129411a68244SKiyoshi Ueda bool mapped = true; 1295bfebd1cdSMike Snitzer struct dm_rq_target_io *tio = tio_from_request(rq); 1296466d89a6SKeith Busch struct request *clone = tio->clone; 1297bfebd1cdSMike Snitzer int rw; 1298cec47e3dSKiyoshi Ueda 1299e5863d9aSMike Snitzer if (!clone) { 1300e262f347SMikulas Patocka rq_end_stats(tio->md, rq); 1301bfebd1cdSMike Snitzer rw = rq_data_dir(rq); 1302bfebd1cdSMike Snitzer if (!rq->q->mq_ops) { 1303e5863d9aSMike Snitzer blk_end_request_all(rq, tio->error); 1304bfebd1cdSMike Snitzer rq_completed(tio->md, rw, false); 1305e5863d9aSMike Snitzer free_rq_tio(tio); 1306bfebd1cdSMike Snitzer } else { 1307bfebd1cdSMike Snitzer blk_mq_end_request(rq, tio->error); 1308bfebd1cdSMike Snitzer rq_completed(tio->md, rw, false); 1309bfebd1cdSMike Snitzer 
}
1310e5863d9aSMike Snitzer 		return;
1311e5863d9aSMike Snitzer 	}
1312cec47e3dSKiyoshi Ueda
131311a68244SKiyoshi Ueda 	if (rq->cmd_flags & REQ_FAILED)
131411a68244SKiyoshi Ueda 		mapped = false;
1315cec47e3dSKiyoshi Ueda
131611a68244SKiyoshi Ueda 	dm_done(clone, tio->error, mapped);
1317cec47e3dSKiyoshi Ueda }
1318cec47e3dSKiyoshi Ueda
1319cec47e3dSKiyoshi Ueda /*
1320cec47e3dSKiyoshi Ueda  * Complete the clone and the original request with the error status
1321cec47e3dSKiyoshi Ueda  * through softirq context.
1322cec47e3dSKiyoshi Ueda  */
1323466d89a6SKeith Busch static void dm_complete_request(struct request *rq, int error)
1324cec47e3dSKiyoshi Ueda {
1325bfebd1cdSMike Snitzer 	struct dm_rq_target_io *tio = tio_from_request(rq);
1326cec47e3dSKiyoshi Ueda
1327cec47e3dSKiyoshi Ueda 	tio->error = error;
13286acfe68bSMike Snitzer 	if (!rq->q->mq_ops)
1329cec47e3dSKiyoshi Ueda 		blk_complete_request(rq);
13306acfe68bSMike Snitzer 	else
13316acfe68bSMike Snitzer 		blk_mq_complete_request(rq, error);
1332cec47e3dSKiyoshi Ueda }
1333cec47e3dSKiyoshi Ueda
1334cec47e3dSKiyoshi Ueda /*
1335cec47e3dSKiyoshi Ueda  * Complete the not-mapped clone and the original request with the error status
1336cec47e3dSKiyoshi Ueda  * through softirq context.
1337cec47e3dSKiyoshi Ueda  * The target's rq_end_io() function isn't called.
1338e5863d9aSMike Snitzer  * This may be used when the target's map_rq() or clone_and_map_rq() functions fail.
1339cec47e3dSKiyoshi Ueda  */
1340466d89a6SKeith Busch static void dm_kill_unmapped_request(struct request *rq, int error)
1341cec47e3dSKiyoshi Ueda {
1342cec47e3dSKiyoshi Ueda 	rq->cmd_flags |= REQ_FAILED;
1343466d89a6SKeith Busch 	dm_complete_request(rq, error);
1344cec47e3dSKiyoshi Ueda }
1345cec47e3dSKiyoshi Ueda
1346cec47e3dSKiyoshi Ueda /*
1347bfebd1cdSMike Snitzer  * Called with the clone's queue lock held (for non-blk-mq)
1348cec47e3dSKiyoshi Ueda  */
1349cec47e3dSKiyoshi Ueda static void end_clone_request(struct request *clone, int error)
1350cec47e3dSKiyoshi Ueda {
1351466d89a6SKeith Busch 	struct dm_rq_target_io *tio = clone->end_io_data;
1352466d89a6SKeith Busch
1353e5863d9aSMike Snitzer 	if (!clone->q->mq_ops) {
1354cec47e3dSKiyoshi Ueda 		/*
1355cec47e3dSKiyoshi Ueda 		 * This only cleans up the bookkeeping of the queue in which
1356cec47e3dSKiyoshi Ueda 		 * the clone was dispatched.
1357e5863d9aSMike Snitzer 		 * The clone is *not* actually freed here, because it was
1358e5863d9aSMike Snitzer 		 * allocated from dm's own mempool (REQ_ALLOCED isn't set).
1359cec47e3dSKiyoshi Ueda 		 */
1360cec47e3dSKiyoshi Ueda 		__blk_put_request(clone->q, clone);
1361e5863d9aSMike Snitzer 	}
1362cec47e3dSKiyoshi Ueda
1363cec47e3dSKiyoshi Ueda 	/*
1364cec47e3dSKiyoshi Ueda 	 * Actual request completion is done in a softirq context which doesn't
1365466d89a6SKeith Busch 	 * hold the clone's queue lock. Otherwise, a deadlock could occur because:
1366cec47e3dSKiyoshi Ueda 	 * - another request may be submitted by the upper level driver
1367cec47e3dSKiyoshi Ueda 	 *   of the stacking during the completion
1368cec47e3dSKiyoshi Ueda 	 * - the submission which requires the queue lock may be done
1369466d89a6SKeith Busch 	 *   against this clone's queue
1370cec47e3dSKiyoshi Ueda 	 */
1371466d89a6SKeith Busch 	dm_complete_request(tio->orig, error);
1372cec47e3dSKiyoshi Ueda }
1373cec47e3dSKiyoshi Ueda
137456a67df7SMike Snitzer /*
137556a67df7SMike Snitzer  * Return the maximum size of I/O possible at the supplied sector up to the
137656a67df7SMike Snitzer  * current target boundary.
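 *
 * As a worked illustration (numbers invented for this note, not taken
 * from the source): for a target mapped at ti->begin = 1000 with
 * ti->len = 500, an I/O arriving at sector 1200 has
 * dm_target_offset(ti, 1200) = 200, so at most 500 - 200 = 300 sectors
 * fit before the target boundary is crossed.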
137756a67df7SMike Snitzer  */
137856a67df7SMike Snitzer static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
13791da177e4SLinus Torvalds {
138056a67df7SMike Snitzer 	sector_t target_offset = dm_target_offset(ti, sector);
138156a67df7SMike Snitzer
138256a67df7SMike Snitzer 	return ti->len - target_offset;
138356a67df7SMike Snitzer }
138456a67df7SMike Snitzer
138556a67df7SMike Snitzer static sector_t max_io_len(sector_t sector, struct dm_target *ti)
138656a67df7SMike Snitzer {
138756a67df7SMike Snitzer 	sector_t len = max_io_len_target_boundary(sector, ti);
1388542f9038SMike Snitzer 	sector_t offset, max_len;
13891da177e4SLinus Torvalds
13901da177e4SLinus Torvalds 	/*
13911da177e4SLinus Torvalds 	 * Does the target need to split even further?
13921da177e4SLinus Torvalds 	 */
1393542f9038SMike Snitzer 	if (ti->max_io_len) {
1394542f9038SMike Snitzer 		offset = dm_target_offset(ti, sector);
1395542f9038SMike Snitzer 		if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
1396542f9038SMike Snitzer 			max_len = sector_div(offset, ti->max_io_len);
1397542f9038SMike Snitzer 		else
1398542f9038SMike Snitzer 			max_len = offset & (ti->max_io_len - 1);
1399542f9038SMike Snitzer 		max_len = ti->max_io_len - max_len;
1400542f9038SMike Snitzer
1401542f9038SMike Snitzer 		if (len > max_len)
1402542f9038SMike Snitzer 			len = max_len;
14031da177e4SLinus Torvalds 	}
14041da177e4SLinus Torvalds
14051da177e4SLinus Torvalds 	return len;
14061da177e4SLinus Torvalds }
14071da177e4SLinus Torvalds
1408542f9038SMike Snitzer int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
1409542f9038SMike Snitzer {
1410542f9038SMike Snitzer 	if (len > UINT_MAX) {
1411542f9038SMike Snitzer 		DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
1412542f9038SMike Snitzer 		      (unsigned long long)len, UINT_MAX);
1413542f9038SMike Snitzer 		ti->error = "Maximum size of target IO is too large";
1414542f9038SMike Snitzer 		return -EINVAL;
1415542f9038SMike Snitzer 	}
1416542f9038SMike Snitzer
1417542f9038SMike Snitzer 	ti->max_io_len = (uint32_t) len;
1418542f9038SMike Snitzer
1419542f9038SMike Snitzer 	return 0;
1420542f9038SMike Snitzer }
1421542f9038SMike Snitzer EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
1422542f9038SMike Snitzer
14231dd40c3eSMikulas Patocka /*
14241dd40c3eSMikulas Patocka  * A target may call dm_accept_partial_bio only from the map routine. It is
14251dd40c3eSMikulas Patocka  * allowed for all bio types except REQ_FLUSH.
14261dd40c3eSMikulas Patocka  *
14271dd40c3eSMikulas Patocka  * dm_accept_partial_bio informs device-mapper that the target only wants to
14281dd40c3eSMikulas Patocka  * process an additional n_sectors sectors of the bio, and that the rest of
14291dd40c3eSMikulas Patocka  * the data should be sent in the next bio.
14301dd40c3eSMikulas Patocka  *
14311dd40c3eSMikulas Patocka  * A diagram that explains the arithmetic:
14321dd40c3eSMikulas Patocka  * +--------------------+---------------+-------+
14331dd40c3eSMikulas Patocka  * |         1          |       2       |   3   |
14341dd40c3eSMikulas Patocka  * +--------------------+---------------+-------+
14351dd40c3eSMikulas Patocka  *
14361dd40c3eSMikulas Patocka  * <-------------- *tio->len_ptr --------------->
14371dd40c3eSMikulas Patocka  *                      <------- bi_size ------->
14381dd40c3eSMikulas Patocka  *                      <-- n_sectors -->
14391dd40c3eSMikulas Patocka  *
14401dd40c3eSMikulas Patocka  * Region 1 was already iterated over with bio_advance or similar function.
14411dd40c3eSMikulas Patocka * (it may be empty if the target doesn't use bio_advance) 14421dd40c3eSMikulas Patocka * Region 2 is the remaining bio size that the target wants to process. 14431dd40c3eSMikulas Patocka * (it may be empty if region 1 is non-empty, although there is no reason 14441dd40c3eSMikulas Patocka * to make it empty) 14451dd40c3eSMikulas Patocka * The target requires that region 3 is to be sent in the next bio. 14461dd40c3eSMikulas Patocka * 14471dd40c3eSMikulas Patocka * If the target wants to receive multiple copies of the bio (via num_*bios, etc), 14481dd40c3eSMikulas Patocka * the partially processed part (the sum of regions 1+2) must be the same for all 14491dd40c3eSMikulas Patocka * copies of the bio. 14501dd40c3eSMikulas Patocka */ 14511dd40c3eSMikulas Patocka void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors) 14521dd40c3eSMikulas Patocka { 14531dd40c3eSMikulas Patocka struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); 14541dd40c3eSMikulas Patocka unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT; 14551dd40c3eSMikulas Patocka BUG_ON(bio->bi_rw & REQ_FLUSH); 14561dd40c3eSMikulas Patocka BUG_ON(bi_size > *tio->len_ptr); 14571dd40c3eSMikulas Patocka BUG_ON(n_sectors > bi_size); 14581dd40c3eSMikulas Patocka *tio->len_ptr -= bi_size - n_sectors; 14591dd40c3eSMikulas Patocka bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT; 14601dd40c3eSMikulas Patocka } 14611dd40c3eSMikulas Patocka EXPORT_SYMBOL_GPL(dm_accept_partial_bio); 14621dd40c3eSMikulas Patocka 1463bd2a49b8SAlasdair G Kergon static void __map_bio(struct dm_target_io *tio) 14641da177e4SLinus Torvalds { 14651da177e4SLinus Torvalds int r; 14662056a782SJens Axboe sector_t sector; 14679faf400fSStefan Bader struct mapped_device *md; 1468dba14160SMikulas Patocka struct bio *clone = &tio->clone; 1469bd2a49b8SAlasdair G Kergon struct dm_target *ti = tio->ti; 14701da177e4SLinus Torvalds 14711da177e4SLinus Torvalds clone->bi_end_io = clone_endio; 14721da177e4SLinus Torvalds 14731da177e4SLinus Torvalds /* 14741da177e4SLinus Torvalds * Map the clone. If r == 0 we don't need to do 14751da177e4SLinus Torvalds * anything, the target has assumed ownership of 14761da177e4SLinus Torvalds * this io. 14771da177e4SLinus Torvalds */ 14781da177e4SLinus Torvalds atomic_inc(&tio->io->io_count); 14794f024f37SKent Overstreet sector = clone->bi_iter.bi_sector; 14807de3ee57SMikulas Patocka r = ti->type->map(ti, clone); 148145cbcd79SKiyoshi Ueda if (r == DM_MAPIO_REMAPPED) { 14821da177e4SLinus Torvalds /* the bio has been remapped so dispatch it */ 14832056a782SJens Axboe 1484d07335e5SMike Snitzer trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone, 148522a7c31aSAlan D. 
Brunelle 					  tio->io->bio->bi_bdev->bd_dev, sector);
14862056a782SJens Axboe
14871da177e4SLinus Torvalds 		generic_make_request(clone);
14882e93ccc1SKiyoshi Ueda 	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
14892e93ccc1SKiyoshi Ueda 		/* error the io and bail out, or requeue it if needed */
14909faf400fSStefan Bader 		md = tio->io->md;
14919faf400fSStefan Bader 		dec_pending(tio->io, r);
14929faf400fSStefan Bader 		free_tio(md, tio);
1493ab37844dSMikulas Patocka 	} else if (r != DM_MAPIO_SUBMITTED) {
149445cbcd79SKiyoshi Ueda 		DMWARN("unimplemented target map return value: %d", r);
149545cbcd79SKiyoshi Ueda 		BUG();
14961da177e4SLinus Torvalds 	}
14971da177e4SLinus Torvalds }
14981da177e4SLinus Torvalds
14991da177e4SLinus Torvalds struct clone_info {
15001da177e4SLinus Torvalds 	struct mapped_device *md;
15011da177e4SLinus Torvalds 	struct dm_table *map;
15021da177e4SLinus Torvalds 	struct bio *bio;
15031da177e4SLinus Torvalds 	struct dm_io *io;
15041da177e4SLinus Torvalds 	sector_t sector;
1505e0d6609aSMikulas Patocka 	unsigned sector_count;
15061da177e4SLinus Torvalds };
15071da177e4SLinus Torvalds
1508e0d6609aSMikulas Patocka static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
1509bd2a49b8SAlasdair G Kergon {
15104f024f37SKent Overstreet 	bio->bi_iter.bi_sector = sector;
15114f024f37SKent Overstreet 	bio->bi_iter.bi_size = to_bytes(len);
15121da177e4SLinus Torvalds }
15131da177e4SLinus Torvalds
15141da177e4SLinus Torvalds /*
15151da177e4SLinus Torvalds  * Creates a bio that consists of a range of complete bvecs.
15161da177e4SLinus Torvalds  */
1517dba14160SMikulas Patocka static void clone_bio(struct dm_target_io *tio, struct bio *bio,
15181c3b13e6SKent Overstreet 		      sector_t sector, unsigned len)
15191da177e4SLinus Torvalds {
1520dba14160SMikulas Patocka 	struct bio *clone = &tio->clone;
15211da177e4SLinus Torvalds
15221c3b13e6SKent Overstreet 	__bio_clone_fast(clone, bio);
15239c47008dSMartin K. Petersen
15241c3b13e6SKent Overstreet 	if (bio_integrity(bio))
15251c3b13e6SKent Overstreet 		bio_integrity_clone(clone, bio, GFP_NOIO);
15261c3b13e6SKent Overstreet
15271c3b13e6SKent Overstreet 	bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
15281c3b13e6SKent Overstreet 	clone->bi_iter.bi_size = to_bytes(len);
15291c3b13e6SKent Overstreet
15301c3b13e6SKent Overstreet 	if (bio_integrity(bio))
15311c3b13e6SKent Overstreet 		bio_integrity_trim(clone, 0, len);
15321da177e4SLinus Torvalds }
15331da177e4SLinus Torvalds
15349015df24SAlasdair G Kergon static struct dm_target_io *alloc_tio(struct clone_info *ci,
153599778273SJunichi Nomura 				      struct dm_target *ti,
153655a62eefSAlasdair G Kergon 				      unsigned target_bio_nr)
1537f9ab94ceSMikulas Patocka {
1538dba14160SMikulas Patocka 	struct dm_target_io *tio;
1539dba14160SMikulas Patocka 	struct bio *clone;
1540dba14160SMikulas Patocka
154199778273SJunichi Nomura 	clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs);
1542dba14160SMikulas Patocka 	tio = container_of(clone, struct dm_target_io, clone);
1543f9ab94ceSMikulas Patocka
1544f9ab94ceSMikulas Patocka 	tio->io = ci->io;
1545f9ab94ceSMikulas Patocka 	tio->ti = ti;
154655a62eefSAlasdair G Kergon 	tio->target_bio_nr = target_bio_nr;
15479015df24SAlasdair G Kergon
15489015df24SAlasdair G Kergon 	return tio;
15499015df24SAlasdair G Kergon }
15509015df24SAlasdair G Kergon
155114fe594dSAlasdair G Kergon static void __clone_and_map_simple_bio(struct clone_info *ci,
155214fe594dSAlasdair G Kergon 				       struct dm_target *ti,
15531dd40c3eSMikulas Patocka 				       unsigned target_bio_nr, unsigned *len)
15549015df24SAlasdair G Kergon {
155599778273SJunichi Nomura 	struct dm_target_io *tio = alloc_tio(ci, ti, target_bio_nr);
1556dba14160SMikulas Patocka 	struct bio *clone = &tio->clone;
15579015df24SAlasdair G Kergon
15581dd40c3eSMikulas Patocka 	tio->len_ptr = len;
15591dd40c3eSMikulas Patocka
15601c3b13e6SKent Overstreet 	__bio_clone_fast(clone, ci->bio);
1561bd2a49b8SAlasdair G Kergon 	if (len)
15621dd40c3eSMikulas Patocka 		bio_setup_sector(clone, ci->sector, *len);
1563f9ab94ceSMikulas Patocka
1564bd2a49b8SAlasdair G Kergon 	__map_bio(tio);
1565f9ab94ceSMikulas Patocka }
1566f9ab94ceSMikulas Patocka
156714fe594dSAlasdair G Kergon static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
15681dd40c3eSMikulas Patocka 				  unsigned num_bios, unsigned *len)
156906a426ceSMike Snitzer {
157055a62eefSAlasdair G Kergon 	unsigned target_bio_nr;
157106a426ceSMike Snitzer
157255a62eefSAlasdair G Kergon 	for (target_bio_nr = 0; target_bio_nr < num_bios; target_bio_nr++)
157314fe594dSAlasdair G Kergon 		__clone_and_map_simple_bio(ci, ti, target_bio_nr, len);
157406a426ceSMike Snitzer }
157506a426ceSMike Snitzer
157614fe594dSAlasdair G Kergon static int __send_empty_flush(struct clone_info *ci)
1577f9ab94ceSMikulas Patocka {
157806a426ceSMike Snitzer 	unsigned target_nr = 0;
1579f9ab94ceSMikulas Patocka 	struct dm_target *ti;
1580f9ab94ceSMikulas Patocka
1581b372d360SMike Snitzer 	BUG_ON(bio_has_data(ci->bio));
1582f9ab94ceSMikulas Patocka 	while ((ti = dm_table_get_target(ci->map, target_nr++)))
15831dd40c3eSMikulas Patocka 		__send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
1584f9ab94ceSMikulas Patocka
1585f9ab94ceSMikulas Patocka 	return 0;
1586f9ab94ceSMikulas Patocka }
1587f9ab94ceSMikulas Patocka
1588e4c93811SAlasdair G Kergon static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
15891dd40c3eSMikulas Patocka 				     sector_t sector, unsigned *len)
15905ae89a87SMike Snitzer {
1591dba14160SMikulas Patocka
struct bio *bio = ci->bio; 15925ae89a87SMike Snitzer struct dm_target_io *tio; 1593b0d8ed4dSAlasdair G Kergon unsigned target_bio_nr; 1594b0d8ed4dSAlasdair G Kergon unsigned num_target_bios = 1; 15955ae89a87SMike Snitzer 1596b0d8ed4dSAlasdair G Kergon /* 1597b0d8ed4dSAlasdair G Kergon * Does the target want to receive duplicate copies of the bio? 1598b0d8ed4dSAlasdair G Kergon */ 1599b0d8ed4dSAlasdair G Kergon if (bio_data_dir(bio) == WRITE && ti->num_write_bios) 1600b0d8ed4dSAlasdair G Kergon num_target_bios = ti->num_write_bios(ti, bio); 1601e4c93811SAlasdair G Kergon 1602b0d8ed4dSAlasdair G Kergon for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) { 160399778273SJunichi Nomura tio = alloc_tio(ci, ti, target_bio_nr); 16041dd40c3eSMikulas Patocka tio->len_ptr = len; 16051dd40c3eSMikulas Patocka clone_bio(tio, bio, sector, *len); 1606bd2a49b8SAlasdair G Kergon __map_bio(tio); 16075ae89a87SMike Snitzer } 1608b0d8ed4dSAlasdair G Kergon } 16095ae89a87SMike Snitzer 161055a62eefSAlasdair G Kergon typedef unsigned (*get_num_bios_fn)(struct dm_target *ti); 161123508a96SMike Snitzer 161255a62eefSAlasdair G Kergon static unsigned get_num_discard_bios(struct dm_target *ti) 161323508a96SMike Snitzer { 161455a62eefSAlasdair G Kergon return ti->num_discard_bios; 161523508a96SMike Snitzer } 161623508a96SMike Snitzer 161755a62eefSAlasdair G Kergon static unsigned get_num_write_same_bios(struct dm_target *ti) 161823508a96SMike Snitzer { 161955a62eefSAlasdair G Kergon return ti->num_write_same_bios; 162023508a96SMike Snitzer } 162123508a96SMike Snitzer 162223508a96SMike Snitzer typedef bool (*is_split_required_fn)(struct dm_target *ti); 162323508a96SMike Snitzer 162423508a96SMike Snitzer static bool is_split_required_for_discard(struct dm_target *ti) 162523508a96SMike Snitzer { 162655a62eefSAlasdair G Kergon return ti->split_discard_bios; 162723508a96SMike Snitzer } 162823508a96SMike Snitzer 162914fe594dSAlasdair G Kergon static int __send_changing_extent_only(struct clone_info *ci, 163055a62eefSAlasdair G Kergon get_num_bios_fn get_num_bios, 163123508a96SMike Snitzer is_split_required_fn is_split_required) 16325ae89a87SMike Snitzer { 16335ae89a87SMike Snitzer struct dm_target *ti; 1634e0d6609aSMikulas Patocka unsigned len; 163555a62eefSAlasdair G Kergon unsigned num_bios; 16365ae89a87SMike Snitzer 1637a79245b3SMike Snitzer do { 16385ae89a87SMike Snitzer ti = dm_table_find_target(ci->map, ci->sector); 16395ae89a87SMike Snitzer if (!dm_target_is_valid(ti)) 16405ae89a87SMike Snitzer return -EIO; 16415ae89a87SMike Snitzer 16425ae89a87SMike Snitzer /* 164323508a96SMike Snitzer * Even though the device advertised support for this type of 164423508a96SMike Snitzer * request, that does not mean every target supports it, and 1645936688d7SMike Snitzer * reconfiguration might also have changed that since the 16465ae89a87SMike Snitzer * check was performed. 16475ae89a87SMike Snitzer */ 164855a62eefSAlasdair G Kergon num_bios = get_num_bios ? 
get_num_bios(ti) : 0; 164955a62eefSAlasdair G Kergon if (!num_bios) 16505ae89a87SMike Snitzer return -EOPNOTSUPP; 16515ae89a87SMike Snitzer 165223508a96SMike Snitzer if (is_split_required && !is_split_required(ti)) 1653e0d6609aSMikulas Patocka len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti)); 16547acf0277SMikulas Patocka else 1655e0d6609aSMikulas Patocka len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti)); 16565ae89a87SMike Snitzer 16571dd40c3eSMikulas Patocka __send_duplicate_bios(ci, ti, num_bios, &len); 16585ae89a87SMike Snitzer 1659a79245b3SMike Snitzer ci->sector += len; 1660a79245b3SMike Snitzer } while (ci->sector_count -= len); 16615ae89a87SMike Snitzer 16625ae89a87SMike Snitzer return 0; 16635ae89a87SMike Snitzer } 16645ae89a87SMike Snitzer 166514fe594dSAlasdair G Kergon static int __send_discard(struct clone_info *ci) 166623508a96SMike Snitzer { 166714fe594dSAlasdair G Kergon return __send_changing_extent_only(ci, get_num_discard_bios, 166823508a96SMike Snitzer is_split_required_for_discard); 166923508a96SMike Snitzer } 167023508a96SMike Snitzer 167114fe594dSAlasdair G Kergon static int __send_write_same(struct clone_info *ci) 167223508a96SMike Snitzer { 167314fe594dSAlasdair G Kergon return __send_changing_extent_only(ci, get_num_write_same_bios, NULL); 167423508a96SMike Snitzer } 167523508a96SMike Snitzer 1676e4c93811SAlasdair G Kergon /* 1677e4c93811SAlasdair G Kergon * Select the correct strategy for processing a non-flush bio. 1678e4c93811SAlasdair G Kergon */ 1679e4c93811SAlasdair G Kergon static int __split_and_process_non_flush(struct clone_info *ci) 1680e4c93811SAlasdair G Kergon { 1681e4c93811SAlasdair G Kergon struct bio *bio = ci->bio; 1682e4c93811SAlasdair G Kergon struct dm_target *ti; 16831c3b13e6SKent Overstreet unsigned len; 1684e4c93811SAlasdair G Kergon 1685e4c93811SAlasdair G Kergon if (unlikely(bio->bi_rw & REQ_DISCARD)) 1686e4c93811SAlasdair G Kergon return __send_discard(ci); 1687e4c93811SAlasdair G Kergon else if (unlikely(bio->bi_rw & REQ_WRITE_SAME)) 1688e4c93811SAlasdair G Kergon return __send_write_same(ci); 1689e4c93811SAlasdair G Kergon 1690e4c93811SAlasdair G Kergon ti = dm_table_find_target(ci->map, ci->sector); 1691e4c93811SAlasdair G Kergon if (!dm_target_is_valid(ti)) 1692e4c93811SAlasdair G Kergon return -EIO; 1693e4c93811SAlasdair G Kergon 16941c3b13e6SKent Overstreet len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count); 1695e4c93811SAlasdair G Kergon 16961dd40c3eSMikulas Patocka __clone_and_map_data_bio(ci, ti, ci->sector, &len); 1697e4c93811SAlasdair G Kergon 1698e4c93811SAlasdair G Kergon ci->sector += len; 1699e4c93811SAlasdair G Kergon ci->sector_count -= len; 1700e4c93811SAlasdair G Kergon 1701e4c93811SAlasdair G Kergon return 0; 1702e4c93811SAlasdair G Kergon } 1703e4c93811SAlasdair G Kergon 1704e4c93811SAlasdair G Kergon /* 170514fe594dSAlasdair G Kergon * Entry point to split a bio into clones and submit them to the targets. 
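 *
 * Which of the paths above a bio takes is governed by knobs the target's
 * constructor sets. A hedged sketch of a hypothetical constructor (all
 * names invented, not part of this file):
 *
 *	static int sketch_ctr(struct dm_target *ti, unsigned argc, char **argv)
 *	{
 *		ti->num_flush_bios = 1;
 *		ti->num_discard_bios = 1;
 *		ti->split_discard_bios = true;
 *		return dm_set_target_max_io_len(ti, 128);
 *	}
 *
 * num_flush_bios feeds __send_empty_flush(), num_discard_bios routes
 * discards through __send_discard(), and split_discard_bios makes those
 * discards honour max_io_len() splitting.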
17061da177e4SLinus Torvalds */ 170783d5e5b0SMikulas Patocka static void __split_and_process_bio(struct mapped_device *md, 170883d5e5b0SMikulas Patocka struct dm_table *map, struct bio *bio) 17091da177e4SLinus Torvalds { 17101da177e4SLinus Torvalds struct clone_info ci; 1711512875bdSJun'ichi Nomura int error = 0; 17121da177e4SLinus Torvalds 171383d5e5b0SMikulas Patocka if (unlikely(!map)) { 1714f0b9a450SMikulas Patocka bio_io_error(bio); 1715f0b9a450SMikulas Patocka return; 1716f0b9a450SMikulas Patocka } 1717692d0eb9SMikulas Patocka 171883d5e5b0SMikulas Patocka ci.map = map; 17191da177e4SLinus Torvalds ci.md = md; 17201da177e4SLinus Torvalds ci.io = alloc_io(md); 17211da177e4SLinus Torvalds ci.io->error = 0; 17221da177e4SLinus Torvalds atomic_set(&ci.io->io_count, 1); 17231da177e4SLinus Torvalds ci.io->bio = bio; 17241da177e4SLinus Torvalds ci.io->md = md; 1725f88fb981SKiyoshi Ueda spin_lock_init(&ci.io->endio_lock); 17264f024f37SKent Overstreet ci.sector = bio->bi_iter.bi_sector; 17271da177e4SLinus Torvalds 17283eaf840eSJun'ichi "Nick" Nomura start_io_acct(ci.io); 1729bd2a49b8SAlasdair G Kergon 1730b372d360SMike Snitzer if (bio->bi_rw & REQ_FLUSH) { 1731b372d360SMike Snitzer ci.bio = &ci.md->flush_bio; 1732b372d360SMike Snitzer ci.sector_count = 0; 173314fe594dSAlasdair G Kergon error = __send_empty_flush(&ci); 1734b372d360SMike Snitzer /* dec_pending submits any data associated with flush */ 1735b372d360SMike Snitzer } else { 17366a8736d1STejun Heo ci.bio = bio; 1737f6fccb12SMilan Broz ci.sector_count = bio_sectors(bio); 1738512875bdSJun'ichi Nomura while (ci.sector_count && !error) 173914fe594dSAlasdair G Kergon error = __split_and_process_non_flush(&ci); 1740d87f4c14STejun Heo } 17411da177e4SLinus Torvalds 17421da177e4SLinus Torvalds /* drop the extra reference count */ 1743512875bdSJun'ichi Nomura dec_pending(ci.io, error); 17449e4e5f87SMilan Broz } 17459e4e5f87SMilan Broz /*----------------------------------------------------------------- 17461da177e4SLinus Torvalds * CRUD END 17471da177e4SLinus Torvalds *---------------------------------------------------------------*/ 17481da177e4SLinus Torvalds 17491da177e4SLinus Torvalds /* 17501da177e4SLinus Torvalds * The request function that just remaps the bio built up by 17511da177e4SLinus Torvalds * dm_merge_bvec. 
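 *
 * One hedged way a caller exercises the REQ_FLUSH branch handled by
 * __split_and_process_bio() above (illustrative only, not code from this
 * file; assumes bdev is an already-opened block device and sketch_end_io
 * is a completion callback defined elsewhere):
 *
 *	struct bio *b = bio_alloc(GFP_NOIO, 0);
 *
 *	b->bi_bdev = bdev;
 *	b->bi_end_io = sketch_end_io;
 *	submit_bio(WRITE_FLUSH, b);
 *
 * Such an empty flush reaches __send_empty_flush() with
 * ci.sector_count == 0 and is duplicated to every target in the table.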
17521da177e4SLinus Torvalds */ 1753dece1635SJens Axboe static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio) 17541da177e4SLinus Torvalds { 175512f03a49SKevin Corry int rw = bio_data_dir(bio); 17561da177e4SLinus Torvalds struct mapped_device *md = q->queuedata; 175783d5e5b0SMikulas Patocka int srcu_idx; 175883d5e5b0SMikulas Patocka struct dm_table *map; 17591da177e4SLinus Torvalds 176083d5e5b0SMikulas Patocka map = dm_get_live_table(md, &srcu_idx); 17611da177e4SLinus Torvalds 176218c0b223SGu Zheng generic_start_io_acct(rw, bio_sectors(bio), &dm_disk(md)->part0); 176312f03a49SKevin Corry 17646a8736d1STejun Heo /* if we're suspended, we have to queue this io for later */ 17656a8736d1STejun Heo if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { 176683d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 17671da177e4SLinus Torvalds 17686a8736d1STejun Heo if (bio_rw(bio) != READA) 176992c63902SMikulas Patocka queue_io(md, bio); 17706a8736d1STejun Heo else 17716a8736d1STejun Heo bio_io_error(bio); 1772dece1635SJens Axboe return BLK_QC_T_NONE; 17731da177e4SLinus Torvalds } 17741da177e4SLinus Torvalds 177583d5e5b0SMikulas Patocka __split_and_process_bio(md, map, bio); 177683d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 1777dece1635SJens Axboe return BLK_QC_T_NONE; 1778cec47e3dSKiyoshi Ueda } 1779cec47e3dSKiyoshi Ueda 1780fd2ed4d2SMikulas Patocka int dm_request_based(struct mapped_device *md) 1781cec47e3dSKiyoshi Ueda { 1782cec47e3dSKiyoshi Ueda return blk_queue_stackable(md->queue); 1783cec47e3dSKiyoshi Ueda } 1784cec47e3dSKiyoshi Ueda 1785466d89a6SKeith Busch static void dm_dispatch_clone_request(struct request *clone, struct request *rq) 1786cec47e3dSKiyoshi Ueda { 1787cec47e3dSKiyoshi Ueda int r; 1788cec47e3dSKiyoshi Ueda 1789466d89a6SKeith Busch if (blk_queue_io_stat(clone->q)) 1790466d89a6SKeith Busch clone->cmd_flags |= REQ_IO_STAT; 1791cec47e3dSKiyoshi Ueda 1792466d89a6SKeith Busch clone->start_time = jiffies; 1793466d89a6SKeith Busch r = blk_insert_cloned_request(clone->q, clone); 1794cec47e3dSKiyoshi Ueda if (r) 1795466d89a6SKeith Busch /* must complete clone in terms of original request */ 1796cec47e3dSKiyoshi Ueda dm_complete_request(rq, r); 1797cec47e3dSKiyoshi Ueda } 1798cec47e3dSKiyoshi Ueda 179978d8e58aSMike Snitzer static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig, 180078d8e58aSMike Snitzer void *data) 1801cec47e3dSKiyoshi Ueda { 180278d8e58aSMike Snitzer struct dm_rq_target_io *tio = data; 180378d8e58aSMike Snitzer struct dm_rq_clone_bio_info *info = 180478d8e58aSMike Snitzer container_of(bio, struct dm_rq_clone_bio_info, clone); 180578d8e58aSMike Snitzer 180678d8e58aSMike Snitzer info->orig = bio_orig; 180778d8e58aSMike Snitzer info->tio = tio; 180878d8e58aSMike Snitzer bio->bi_end_io = end_clone_bio; 180978d8e58aSMike Snitzer 181078d8e58aSMike Snitzer return 0; 181178d8e58aSMike Snitzer } 181278d8e58aSMike Snitzer 181378d8e58aSMike Snitzer static int setup_clone(struct request *clone, struct request *rq, 181478d8e58aSMike Snitzer struct dm_rq_target_io *tio, gfp_t gfp_mask) 181578d8e58aSMike Snitzer { 181678d8e58aSMike Snitzer int r; 181778d8e58aSMike Snitzer 181878d8e58aSMike Snitzer r = blk_rq_prep_clone(clone, rq, tio->md->bs, gfp_mask, 181978d8e58aSMike Snitzer dm_rq_bio_constructor, tio); 182078d8e58aSMike Snitzer if (r) 182178d8e58aSMike Snitzer return r; 182278d8e58aSMike Snitzer 182378d8e58aSMike Snitzer clone->cmd = rq->cmd; 182478d8e58aSMike Snitzer clone->cmd_len = rq->cmd_len; 182578d8e58aSMike Snitzer 
clone->sense = rq->sense; 1826cec47e3dSKiyoshi Ueda clone->end_io = end_clone_request; 1827cec47e3dSKiyoshi Ueda clone->end_io_data = tio; 182878d8e58aSMike Snitzer 18291ae49ea2SMike Snitzer tio->clone = clone; 183078d8e58aSMike Snitzer 183178d8e58aSMike Snitzer return 0; 1832cec47e3dSKiyoshi Ueda } 1833cec47e3dSKiyoshi Ueda 18346facdaffSKiyoshi Ueda static struct request *clone_rq(struct request *rq, struct mapped_device *md, 18351ae49ea2SMike Snitzer struct dm_rq_target_io *tio, gfp_t gfp_mask) 18366facdaffSKiyoshi Ueda { 183702233342SMike Snitzer /* 183802233342SMike Snitzer * Do not allocate a clone if tio->clone was already set 183902233342SMike Snitzer * (see: dm_mq_queue_rq). 184002233342SMike Snitzer */ 184102233342SMike Snitzer bool alloc_clone = !tio->clone; 184202233342SMike Snitzer struct request *clone; 18431ae49ea2SMike Snitzer 184402233342SMike Snitzer if (alloc_clone) { 184502233342SMike Snitzer clone = alloc_clone_request(md, gfp_mask); 18461ae49ea2SMike Snitzer if (!clone) 18471ae49ea2SMike Snitzer return NULL; 184802233342SMike Snitzer } else 184902233342SMike Snitzer clone = tio->clone; 18501ae49ea2SMike Snitzer 18511ae49ea2SMike Snitzer blk_rq_init(NULL, clone); 185278d8e58aSMike Snitzer if (setup_clone(clone, rq, tio, gfp_mask)) { 185378d8e58aSMike Snitzer /* -ENOMEM */ 185478d8e58aSMike Snitzer if (alloc_clone) 185578d8e58aSMike Snitzer free_clone_request(md, clone); 185678d8e58aSMike Snitzer return NULL; 185778d8e58aSMike Snitzer } 18581ae49ea2SMike Snitzer 18591ae49ea2SMike Snitzer return clone; 18601ae49ea2SMike Snitzer } 18611ae49ea2SMike Snitzer 18622eb6e1e3SKeith Busch static void map_tio_request(struct kthread_work *work); 18632eb6e1e3SKeith Busch 1864bfebd1cdSMike Snitzer static void init_tio(struct dm_rq_target_io *tio, struct request *rq, 1865bfebd1cdSMike Snitzer struct mapped_device *md) 1866bfebd1cdSMike Snitzer { 1867bfebd1cdSMike Snitzer tio->md = md; 1868bfebd1cdSMike Snitzer tio->ti = NULL; 1869bfebd1cdSMike Snitzer tio->clone = NULL; 1870bfebd1cdSMike Snitzer tio->orig = rq; 1871bfebd1cdSMike Snitzer tio->error = 0; 1872bfebd1cdSMike Snitzer memset(&tio->info, 0, sizeof(tio->info)); 187302233342SMike Snitzer if (md->kworker_task) 1874bfebd1cdSMike Snitzer init_kthread_work(&tio->work, map_tio_request); 1875bfebd1cdSMike Snitzer } 1876bfebd1cdSMike Snitzer 1877466d89a6SKeith Busch static struct dm_rq_target_io *prep_tio(struct request *rq, 1878466d89a6SKeith Busch struct mapped_device *md, gfp_t gfp_mask) 18796facdaffSKiyoshi Ueda { 18806facdaffSKiyoshi Ueda struct dm_rq_target_io *tio; 1881e5863d9aSMike Snitzer int srcu_idx; 1882e5863d9aSMike Snitzer struct dm_table *table; 18836facdaffSKiyoshi Ueda 18846facdaffSKiyoshi Ueda tio = alloc_rq_tio(md, gfp_mask); 18856facdaffSKiyoshi Ueda if (!tio) 18866facdaffSKiyoshi Ueda return NULL; 18876facdaffSKiyoshi Ueda 1888bfebd1cdSMike Snitzer init_tio(tio, rq, md); 18896facdaffSKiyoshi Ueda 1890e5863d9aSMike Snitzer table = dm_get_live_table(md, &srcu_idx); 1891e5863d9aSMike Snitzer if (!dm_table_mq_request_based(table)) { 1892466d89a6SKeith Busch if (!clone_rq(rq, md, tio, gfp_mask)) { 1893e5863d9aSMike Snitzer dm_put_live_table(md, srcu_idx); 18946facdaffSKiyoshi Ueda free_rq_tio(tio); 18956facdaffSKiyoshi Ueda return NULL; 18966facdaffSKiyoshi Ueda } 1897e5863d9aSMike Snitzer } 1898e5863d9aSMike Snitzer dm_put_live_table(md, srcu_idx); 18996facdaffSKiyoshi Ueda 1900466d89a6SKeith Busch return tio; 19016facdaffSKiyoshi Ueda } 19026facdaffSKiyoshi Ueda 1903cec47e3dSKiyoshi Ueda /* 1904cec47e3dSKiyoshi Ueda * 
Called with the queue lock held. 1905cec47e3dSKiyoshi Ueda */ 1906cec47e3dSKiyoshi Ueda static int dm_prep_fn(struct request_queue *q, struct request *rq) 1907cec47e3dSKiyoshi Ueda { 1908cec47e3dSKiyoshi Ueda struct mapped_device *md = q->queuedata; 1909466d89a6SKeith Busch struct dm_rq_target_io *tio; 1910cec47e3dSKiyoshi Ueda 1911cec47e3dSKiyoshi Ueda if (unlikely(rq->special)) { 1912cec47e3dSKiyoshi Ueda DMWARN("Already has something in rq->special."); 1913cec47e3dSKiyoshi Ueda return BLKPREP_KILL; 1914cec47e3dSKiyoshi Ueda } 1915cec47e3dSKiyoshi Ueda 1916466d89a6SKeith Busch tio = prep_tio(rq, md, GFP_ATOMIC); 1917466d89a6SKeith Busch if (!tio) 1918cec47e3dSKiyoshi Ueda return BLKPREP_DEFER; 1919cec47e3dSKiyoshi Ueda 1920466d89a6SKeith Busch rq->special = tio; 1921cec47e3dSKiyoshi Ueda rq->cmd_flags |= REQ_DONTPREP; 1922cec47e3dSKiyoshi Ueda 1923cec47e3dSKiyoshi Ueda return BLKPREP_OK; 1924cec47e3dSKiyoshi Ueda } 1925cec47e3dSKiyoshi Ueda 19269eef87daSKiyoshi Ueda /* 19279eef87daSKiyoshi Ueda * Returns: 1928e5863d9aSMike Snitzer * 0 : the request has been processed 1929e5863d9aSMike Snitzer * DM_MAPIO_REQUEUE : the original request needs to be requeued 1930e5863d9aSMike Snitzer * < 0 : the request was completed due to failure 19319eef87daSKiyoshi Ueda */ 1932bfebd1cdSMike Snitzer static int map_request(struct dm_rq_target_io *tio, struct request *rq, 1933cec47e3dSKiyoshi Ueda struct mapped_device *md) 1934cec47e3dSKiyoshi Ueda { 1935e5863d9aSMike Snitzer int r; 1936bfebd1cdSMike Snitzer struct dm_target *ti = tio->ti; 1937e5863d9aSMike Snitzer struct request *clone = NULL; 1938cec47e3dSKiyoshi Ueda 1939e5863d9aSMike Snitzer if (tio->clone) { 1940e5863d9aSMike Snitzer clone = tio->clone; 1941cec47e3dSKiyoshi Ueda r = ti->type->map_rq(ti, clone, &tio->info); 1942e5863d9aSMike Snitzer } else { 1943e5863d9aSMike Snitzer r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone); 1944e5863d9aSMike Snitzer if (r < 0) { 1945e5863d9aSMike Snitzer /* The target wants to complete the I/O */ 1946e5863d9aSMike Snitzer dm_kill_unmapped_request(rq, r); 1947e5863d9aSMike Snitzer return r; 1948e5863d9aSMike Snitzer } 19493a140755SJunichi Nomura if (r != DM_MAPIO_REMAPPED) 19503a140755SJunichi Nomura return r; 195178d8e58aSMike Snitzer if (setup_clone(clone, rq, tio, GFP_ATOMIC)) { 195278d8e58aSMike Snitzer /* -ENOMEM */ 195378d8e58aSMike Snitzer ti->type->release_clone_rq(clone); 195478d8e58aSMike Snitzer return DM_MAPIO_REQUEUE; 195578d8e58aSMike Snitzer } 1956e5863d9aSMike Snitzer } 1957e5863d9aSMike Snitzer 1958cec47e3dSKiyoshi Ueda switch (r) { 1959cec47e3dSKiyoshi Ueda case DM_MAPIO_SUBMITTED: 1960cec47e3dSKiyoshi Ueda /* The target has taken the I/O to submit by itself later */ 1961cec47e3dSKiyoshi Ueda break; 1962cec47e3dSKiyoshi Ueda case DM_MAPIO_REMAPPED: 1963cec47e3dSKiyoshi Ueda /* The target has remapped the I/O so dispatch it */ 19646db4ccd6SJun'ichi Nomura trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)), 1965466d89a6SKeith Busch blk_rq_pos(rq)); 1966466d89a6SKeith Busch dm_dispatch_clone_request(clone, rq); 1967cec47e3dSKiyoshi Ueda break; 1968cec47e3dSKiyoshi Ueda case DM_MAPIO_REQUEUE: 1969cec47e3dSKiyoshi Ueda /* The target wants to requeue the I/O */ 19702d76fff1SMike Snitzer dm_requeue_original_request(md, tio->orig); 1971cec47e3dSKiyoshi Ueda break; 1972cec47e3dSKiyoshi Ueda default: 1973cec47e3dSKiyoshi Ueda if (r > 0) { 1974cec47e3dSKiyoshi Ueda DMWARN("unimplemented target map return value: %d", r); 1975cec47e3dSKiyoshi Ueda BUG(); 1976cec47e3dSKiyoshi Ueda } 
1977cec47e3dSKiyoshi Ueda 1978cec47e3dSKiyoshi Ueda /* The target wants to complete the I/O */ 1979466d89a6SKeith Busch dm_kill_unmapped_request(rq, r); 1980e5863d9aSMike Snitzer return r; 1981cec47e3dSKiyoshi Ueda } 19829eef87daSKiyoshi Ueda 1983e5863d9aSMike Snitzer return 0; 1984cec47e3dSKiyoshi Ueda } 1985cec47e3dSKiyoshi Ueda 19862eb6e1e3SKeith Busch static void map_tio_request(struct kthread_work *work) 1987ba1cbad9SMike Snitzer { 19882eb6e1e3SKeith Busch struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work); 1989e5863d9aSMike Snitzer struct request *rq = tio->orig; 1990e5863d9aSMike Snitzer struct mapped_device *md = tio->md; 1991ba1cbad9SMike Snitzer 1992bfebd1cdSMike Snitzer if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) 19932d76fff1SMike Snitzer dm_requeue_original_request(md, rq); 19942eb6e1e3SKeith Busch } 19952eb6e1e3SKeith Busch 1996466d89a6SKeith Busch static void dm_start_request(struct mapped_device *md, struct request *orig) 1997ba1cbad9SMike Snitzer { 1998bfebd1cdSMike Snitzer if (!orig->q->mq_ops) 1999ba1cbad9SMike Snitzer blk_start_request(orig); 2000bfebd1cdSMike Snitzer else 2001bfebd1cdSMike Snitzer blk_mq_start_request(orig); 2002466d89a6SKeith Busch atomic_inc(&md->pending[rq_data_dir(orig)]); 2003ba1cbad9SMike Snitzer 20040ce65797SMike Snitzer if (md->seq_rq_merge_deadline_usecs) { 2005de3ec86dSMike Snitzer md->last_rq_pos = rq_end_sector(orig); 2006de3ec86dSMike Snitzer md->last_rq_rw = rq_data_dir(orig); 20070ce65797SMike Snitzer md->last_rq_start_time = ktime_get(); 20080ce65797SMike Snitzer } 2009de3ec86dSMike Snitzer 2010e262f347SMikulas Patocka if (unlikely(dm_stats_used(&md->stats))) { 2011e262f347SMikulas Patocka struct dm_rq_target_io *tio = tio_from_request(orig); 2012e262f347SMikulas Patocka tio->duration_jiffies = jiffies; 2013e262f347SMikulas Patocka tio->n_sectors = blk_rq_sectors(orig); 2014e262f347SMikulas Patocka dm_stats_account_io(&md->stats, orig->cmd_flags, blk_rq_pos(orig), 2015e262f347SMikulas Patocka tio->n_sectors, false, 0, &tio->stats_aux); 2016e262f347SMikulas Patocka } 2017e262f347SMikulas Patocka 2018ba1cbad9SMike Snitzer /* 2019ba1cbad9SMike Snitzer * Hold the md reference here for the in-flight I/O. 2020ba1cbad9SMike Snitzer * We can't rely on the reference count by device opener, 2021ba1cbad9SMike Snitzer * because the device may be closed during the request completion 2022ba1cbad9SMike Snitzer * when all bios are completed. 2023ba1cbad9SMike Snitzer * See the comment in rq_completed() too. 
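 *
 * Distilled, the lifetime pattern is (a paraphrase, not literal code):
 *
 *	dm_start_request():	atomic_inc(&md->pending[rw]); dm_get(md);
 *	rq_completed():		atomic_dec(&md->pending[rw]);
 *				wake_up(&md->wait); dm_put(md);
 *
 * so the mapped_device stays pinned while any request it dispatched is
 * still in flight, even if the device node is closed in the meantime.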
2024ba1cbad9SMike Snitzer */ 2025ba1cbad9SMike Snitzer dm_get(md); 2026ba1cbad9SMike Snitzer } 2027ba1cbad9SMike Snitzer 20280ce65797SMike Snitzer #define MAX_SEQ_RQ_MERGE_DEADLINE_USECS 100000 20290ce65797SMike Snitzer 20300ce65797SMike Snitzer ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf) 20310ce65797SMike Snitzer { 20320ce65797SMike Snitzer return sprintf(buf, "%u\n", md->seq_rq_merge_deadline_usecs); 20330ce65797SMike Snitzer } 20340ce65797SMike Snitzer 20350ce65797SMike Snitzer ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md, 20360ce65797SMike Snitzer const char *buf, size_t count) 20370ce65797SMike Snitzer { 20380ce65797SMike Snitzer unsigned deadline; 20390ce65797SMike Snitzer 204017e149b8SMike Snitzer if (!dm_request_based(md) || md->use_blk_mq) 20410ce65797SMike Snitzer return count; 20420ce65797SMike Snitzer 20430ce65797SMike Snitzer if (kstrtouint(buf, 10, &deadline)) 20440ce65797SMike Snitzer return -EINVAL; 20450ce65797SMike Snitzer 20460ce65797SMike Snitzer if (deadline > MAX_SEQ_RQ_MERGE_DEADLINE_USECS) 20470ce65797SMike Snitzer deadline = MAX_SEQ_RQ_MERGE_DEADLINE_USECS; 20480ce65797SMike Snitzer 20490ce65797SMike Snitzer md->seq_rq_merge_deadline_usecs = deadline; 20500ce65797SMike Snitzer 20510ce65797SMike Snitzer return count; 20520ce65797SMike Snitzer } 20530ce65797SMike Snitzer 20540ce65797SMike Snitzer static bool dm_request_peeked_before_merge_deadline(struct mapped_device *md) 20550ce65797SMike Snitzer { 20560ce65797SMike Snitzer ktime_t kt_deadline; 20570ce65797SMike Snitzer 20580ce65797SMike Snitzer if (!md->seq_rq_merge_deadline_usecs) 20590ce65797SMike Snitzer return false; 20600ce65797SMike Snitzer 20610ce65797SMike Snitzer kt_deadline = ns_to_ktime((u64)md->seq_rq_merge_deadline_usecs * NSEC_PER_USEC); 20620ce65797SMike Snitzer kt_deadline = ktime_add_safe(md->last_rq_start_time, kt_deadline); 20630ce65797SMike Snitzer 20640ce65797SMike Snitzer return !ktime_after(ktime_get(), kt_deadline); 20650ce65797SMike Snitzer } 20660ce65797SMike Snitzer 2067cec47e3dSKiyoshi Ueda /* 2068cec47e3dSKiyoshi Ueda * q->request_fn for request-based dm. 2069cec47e3dSKiyoshi Ueda * Called with the queue lock held. 2070cec47e3dSKiyoshi Ueda */ 2071cec47e3dSKiyoshi Ueda static void dm_request_fn(struct request_queue *q) 2072cec47e3dSKiyoshi Ueda { 2073cec47e3dSKiyoshi Ueda struct mapped_device *md = q->queuedata; 207483d5e5b0SMikulas Patocka int srcu_idx; 207583d5e5b0SMikulas Patocka struct dm_table *map = dm_get_live_table(md, &srcu_idx); 2076cec47e3dSKiyoshi Ueda struct dm_target *ti; 2077466d89a6SKeith Busch struct request *rq; 20782eb6e1e3SKeith Busch struct dm_rq_target_io *tio; 207929e4013dSTejun Heo sector_t pos; 2080cec47e3dSKiyoshi Ueda 2081cec47e3dSKiyoshi Ueda /* 2082b4324feeSKiyoshi Ueda * For suspend, check blk_queue_stopped() and increment 2083b4324feeSKiyoshi Ueda * ->pending within a single queue_lock not to increment the 2084b4324feeSKiyoshi Ueda * number of in-flight I/Os after the queue is stopped in 2085b4324feeSKiyoshi Ueda * dm_suspend(). 
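 *
 * A simplified sketch of the suspend-side counterpart (paraphrasing what
 * dm_suspend() amounts to, not quoting it):
 *
 *	stop_queue(md->queue);
 *	wait_event(md->wait, !md_in_flight(md));
 *
 * which is why the increment in dm_start_request() must happen under the
 * same queue_lock that blk_queue_stopped() is evaluated under here.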
2086cec47e3dSKiyoshi Ueda */ 20877eaceaccSJens Axboe while (!blk_queue_stopped(q)) { 2088cec47e3dSKiyoshi Ueda rq = blk_peek_request(q); 2089cec47e3dSKiyoshi Ueda if (!rq) 20909d1deb83SMike Snitzer goto out; 2091cec47e3dSKiyoshi Ueda 209229e4013dSTejun Heo /* always use block 0 to find the target for flushes for now */ 209329e4013dSTejun Heo pos = 0; 209429e4013dSTejun Heo if (!(rq->cmd_flags & REQ_FLUSH)) 209529e4013dSTejun Heo pos = blk_rq_pos(rq); 2096d0bcb878SKiyoshi Ueda 209729e4013dSTejun Heo ti = dm_table_find_target(map, pos); 2098ba1cbad9SMike Snitzer if (!dm_target_is_valid(ti)) { 2099ba1cbad9SMike Snitzer /* 2100466d89a6SKeith Busch * Must perform setup, that rq_completed() requires, 2101ba1cbad9SMike Snitzer * before calling dm_kill_unmapped_request 2102ba1cbad9SMike Snitzer */ 2103ba1cbad9SMike Snitzer DMERR_LIMIT("request attempted access beyond the end of device"); 2104466d89a6SKeith Busch dm_start_request(md, rq); 2105466d89a6SKeith Busch dm_kill_unmapped_request(rq, -EIO); 2106ba1cbad9SMike Snitzer continue; 2107ba1cbad9SMike Snitzer } 210829e4013dSTejun Heo 21090ce65797SMike Snitzer if (dm_request_peeked_before_merge_deadline(md) && 21100ce65797SMike Snitzer md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 && 2111de3ec86dSMike Snitzer md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq)) 2112de3ec86dSMike Snitzer goto delay_and_out; 2113de3ec86dSMike Snitzer 2114cec47e3dSKiyoshi Ueda if (ti->type->busy && ti->type->busy(ti)) 21157eaceaccSJens Axboe goto delay_and_out; 2116cec47e3dSKiyoshi Ueda 2117466d89a6SKeith Busch dm_start_request(md, rq); 2118b4324feeSKiyoshi Ueda 2119bfebd1cdSMike Snitzer tio = tio_from_request(rq); 21202eb6e1e3SKeith Busch /* Establish tio->ti before queuing work (map_tio_request) */ 21212eb6e1e3SKeith Busch tio->ti = ti; 21222eb6e1e3SKeith Busch queue_kthread_work(&md->kworker, &tio->work); 2123052189a2SKiyoshi Ueda BUG_ON(!irqs_disabled()); 2124cec47e3dSKiyoshi Ueda } 2125cec47e3dSKiyoshi Ueda 2126cec47e3dSKiyoshi Ueda goto out; 2127cec47e3dSKiyoshi Ueda 21287eaceaccSJens Axboe delay_and_out: 2129d548b34bSMike Snitzer blk_delay_queue(q, HZ / 100); 2130cec47e3dSKiyoshi Ueda out: 213183d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 2132cec47e3dSKiyoshi Ueda } 2133cec47e3dSKiyoshi Ueda 21341da177e4SLinus Torvalds static int dm_any_congested(void *congested_data, int bdi_bits) 21351da177e4SLinus Torvalds { 21368a57dfc6SChandra Seetharaman int r = bdi_bits; 21378a57dfc6SChandra Seetharaman struct mapped_device *md = congested_data; 21388a57dfc6SChandra Seetharaman struct dm_table *map; 21391da177e4SLinus Torvalds 21401eb787ecSAlasdair G Kergon if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 2141e522c039SMike Snitzer if (dm_request_based(md)) { 2142cec47e3dSKiyoshi Ueda /* 2143e522c039SMike Snitzer * With request-based DM we only need to check the 2144e522c039SMike Snitzer * top-level queue for congestion. 
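 *
 * For orientation, a hedged sketch of how callers land here: writeback
 * probes the bdi that dm_init_md_queue() below wires to this callback
 * via congested_fn/congested_data, along the lines of
 *
 *	struct backing_dev_info *bdi = &md->queue->backing_dev_info;
 *	int congested = bdi_write_congested(bdi);
 *
 * with bdi_write_congested() passing the async-congestion bit down to
 * this function.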
2145cec47e3dSKiyoshi Ueda */ 2146e522c039SMike Snitzer r = md->queue->backing_dev_info.wb.state & bdi_bits; 2147e522c039SMike Snitzer } else { 2148e522c039SMike Snitzer map = dm_get_live_table_fast(md); 2149e522c039SMike Snitzer if (map) 21501da177e4SLinus Torvalds r = dm_table_any_congested(map, bdi_bits); 215183d5e5b0SMikulas Patocka dm_put_live_table_fast(md); 21528a57dfc6SChandra Seetharaman } 2153e522c039SMike Snitzer } 21548a57dfc6SChandra Seetharaman 21551da177e4SLinus Torvalds return r; 21561da177e4SLinus Torvalds } 21571da177e4SLinus Torvalds 21581da177e4SLinus Torvalds /*----------------------------------------------------------------- 21591da177e4SLinus Torvalds * An IDR is used to keep track of allocated minor numbers. 21601da177e4SLinus Torvalds *---------------------------------------------------------------*/ 21612b06cfffSAlasdair G Kergon static void free_minor(int minor) 21621da177e4SLinus Torvalds { 2163f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 21641da177e4SLinus Torvalds idr_remove(&_minor_idr, minor); 2165f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 21661da177e4SLinus Torvalds } 21671da177e4SLinus Torvalds 21681da177e4SLinus Torvalds /* 21691da177e4SLinus Torvalds * See if the device with a specific minor # is free. 21701da177e4SLinus Torvalds */ 2171cf13ab8eSFrederik Deweerdt static int specific_minor(int minor) 21721da177e4SLinus Torvalds { 2173c9d76be6STejun Heo int r; 21741da177e4SLinus Torvalds 21751da177e4SLinus Torvalds if (minor >= (1 << MINORBITS)) 21761da177e4SLinus Torvalds return -EINVAL; 21771da177e4SLinus Torvalds 2178c9d76be6STejun Heo idr_preload(GFP_KERNEL); 2179f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 21801da177e4SLinus Torvalds 2181c9d76be6STejun Heo r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT); 21821da177e4SLinus Torvalds 2183f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 2184c9d76be6STejun Heo idr_preload_end(); 2185c9d76be6STejun Heo if (r < 0) 2186c9d76be6STejun Heo return r == -ENOSPC ? -EBUSY : r; 2187c9d76be6STejun Heo return 0; 21881da177e4SLinus Torvalds } 21891da177e4SLinus Torvalds 2190cf13ab8eSFrederik Deweerdt static int next_free_minor(int *minor) 21911da177e4SLinus Torvalds { 2192c9d76be6STejun Heo int r; 21931da177e4SLinus Torvalds 2194c9d76be6STejun Heo idr_preload(GFP_KERNEL); 2195f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 21961da177e4SLinus Torvalds 2197c9d76be6STejun Heo r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT); 21981da177e4SLinus Torvalds 2199f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 2200c9d76be6STejun Heo idr_preload_end(); 2201c9d76be6STejun Heo if (r < 0) 22021da177e4SLinus Torvalds return r; 2203c9d76be6STejun Heo *minor = r; 2204c9d76be6STejun Heo return 0; 22051da177e4SLinus Torvalds } 22061da177e4SLinus Torvalds 220783d5cde4SAlexey Dobriyan static const struct block_device_operations dm_blk_dops; 22081da177e4SLinus Torvalds 220953d5914fSMikulas Patocka static void dm_wq_work(struct work_struct *work); 221053d5914fSMikulas Patocka 22114a0b4ddfSMike Snitzer static void dm_init_md_queue(struct mapped_device *md) 22124a0b4ddfSMike Snitzer { 22134a0b4ddfSMike Snitzer /* 22144a0b4ddfSMike Snitzer * Request-based dm devices cannot be stacked on top of bio-based dm 2215bfebd1cdSMike Snitzer * devices. The type of this dm device may not have been decided yet. 22164a0b4ddfSMike Snitzer * The type is decided at the first table loading time. 
22174a0b4ddfSMike Snitzer * To prevent problematic device stacking, clear the queue flag 22184a0b4ddfSMike Snitzer * for request stacking support until then. 22194a0b4ddfSMike Snitzer * 22204a0b4ddfSMike Snitzer * This queue is new, so no concurrency on the queue_flags. 22214a0b4ddfSMike Snitzer */ 22224a0b4ddfSMike Snitzer queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue); 2223ad5f498fSMikulas Patocka 2224ad5f498fSMikulas Patocka /* 2225ad5f498fSMikulas Patocka * Initialize data that will only be used by a non-blk-mq DM queue 2226ad5f498fSMikulas Patocka * - must do so here (in alloc_dev callchain) before queue is used 2227ad5f498fSMikulas Patocka */ 2228ad5f498fSMikulas Patocka md->queue->queuedata = md; 2229ad5f498fSMikulas Patocka md->queue->backing_dev_info.congested_data = md; 2230bfebd1cdSMike Snitzer } 22314a0b4ddfSMike Snitzer 2232bfebd1cdSMike Snitzer static void dm_init_old_md_queue(struct mapped_device *md) 2233bfebd1cdSMike Snitzer { 223417e149b8SMike Snitzer md->use_blk_mq = false; 2235bfebd1cdSMike Snitzer dm_init_md_queue(md); 2236bfebd1cdSMike Snitzer 2237bfebd1cdSMike Snitzer /* 2238bfebd1cdSMike Snitzer * Initialize aspects of queue that aren't relevant for blk-mq 2239bfebd1cdSMike Snitzer */ 22404a0b4ddfSMike Snitzer md->queue->backing_dev_info.congested_fn = dm_any_congested; 22414a0b4ddfSMike Snitzer blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY); 22424a0b4ddfSMike Snitzer } 22434a0b4ddfSMike Snitzer 22440f20972fSMike Snitzer static void cleanup_mapped_device(struct mapped_device *md) 22450f20972fSMike Snitzer { 22460f20972fSMike Snitzer if (md->wq) 22470f20972fSMike Snitzer destroy_workqueue(md->wq); 22480f20972fSMike Snitzer if (md->kworker_task) 22490f20972fSMike Snitzer kthread_stop(md->kworker_task); 22500f20972fSMike Snitzer mempool_destroy(md->io_pool); 22510f20972fSMike Snitzer mempool_destroy(md->rq_pool); 22520f20972fSMike Snitzer if (md->bs) 22530f20972fSMike Snitzer bioset_free(md->bs); 22540f20972fSMike Snitzer 2255b06075a9SMikulas Patocka cleanup_srcu_struct(&md->io_barrier); 2256b06075a9SMikulas Patocka 22570f20972fSMike Snitzer if (md->disk) { 22580f20972fSMike Snitzer spin_lock(&_minor_lock); 22590f20972fSMike Snitzer md->disk->private_data = NULL; 22600f20972fSMike Snitzer spin_unlock(&_minor_lock); 22610f20972fSMike Snitzer del_gendisk(md->disk); 22620f20972fSMike Snitzer put_disk(md->disk); 22630f20972fSMike Snitzer } 22640f20972fSMike Snitzer 22650f20972fSMike Snitzer if (md->queue) 22660f20972fSMike Snitzer blk_cleanup_queue(md->queue); 22670f20972fSMike Snitzer 22680f20972fSMike Snitzer if (md->bdev) { 22690f20972fSMike Snitzer bdput(md->bdev); 22700f20972fSMike Snitzer md->bdev = NULL; 22710f20972fSMike Snitzer } 22720f20972fSMike Snitzer } 22730f20972fSMike Snitzer 22741da177e4SLinus Torvalds /* 22751da177e4SLinus Torvalds * Allocate and initialise a blank device with a given minor. 
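 *
 * The function below pairs with cleanup_mapped_device() above using the
 * usual kernel unwind idiom: every allocation failure jumps to a label,
 * and the cleanup helper tolerates half-initialised state. In outline
 * (a paraphrase, not the literal code):
 *
 *	md->queue = blk_alloc_queue(GFP_KERNEL);
 *	if (!md->queue)
 *		goto bad;
 *	...
 * bad:
 *	cleanup_mapped_device(md);
 *	return NULL;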
22761da177e4SLinus Torvalds */ 22772b06cfffSAlasdair G Kergon static struct mapped_device *alloc_dev(int minor) 22781da177e4SLinus Torvalds { 22791da177e4SLinus Torvalds int r; 2280cf13ab8eSFrederik Deweerdt struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL); 2281ba61fdd1SJeff Mahoney void *old_md; 22821da177e4SLinus Torvalds 22831da177e4SLinus Torvalds if (!md) { 22841da177e4SLinus Torvalds DMWARN("unable to allocate device, out of memory."); 22851da177e4SLinus Torvalds return NULL; 22861da177e4SLinus Torvalds } 22871da177e4SLinus Torvalds 228810da4f79SJeff Mahoney if (!try_module_get(THIS_MODULE)) 22896ed7ade8SMilan Broz goto bad_module_get; 229010da4f79SJeff Mahoney 22911da177e4SLinus Torvalds /* get a minor number for the dev */ 22922b06cfffSAlasdair G Kergon if (minor == DM_ANY_MINOR) 2293cf13ab8eSFrederik Deweerdt r = next_free_minor(&minor); 22942b06cfffSAlasdair G Kergon else 2295cf13ab8eSFrederik Deweerdt r = specific_minor(minor); 22961da177e4SLinus Torvalds if (r < 0) 22976ed7ade8SMilan Broz goto bad_minor; 22981da177e4SLinus Torvalds 229983d5e5b0SMikulas Patocka r = init_srcu_struct(&md->io_barrier); 230083d5e5b0SMikulas Patocka if (r < 0) 230183d5e5b0SMikulas Patocka goto bad_io_barrier; 230283d5e5b0SMikulas Patocka 230317e149b8SMike Snitzer md->use_blk_mq = use_blk_mq; 2304a5664dadSMike Snitzer md->type = DM_TYPE_NONE; 2305e61290a4SDaniel Walker mutex_init(&md->suspend_lock); 2306a5664dadSMike Snitzer mutex_init(&md->type_lock); 230786f1152bSBenjamin Marzinski mutex_init(&md->table_devices_lock); 2308022c2611SMikulas Patocka spin_lock_init(&md->deferred_lock); 23091da177e4SLinus Torvalds atomic_set(&md->holders, 1); 23105c6bd75dSAlasdair G Kergon atomic_set(&md->open_count, 0); 23111da177e4SLinus Torvalds atomic_set(&md->event_nr, 0); 23127a8c3d3bSMike Anderson atomic_set(&md->uevent_seq, 0); 23137a8c3d3bSMike Anderson INIT_LIST_HEAD(&md->uevent_list); 231486f1152bSBenjamin Marzinski INIT_LIST_HEAD(&md->table_devices); 23157a8c3d3bSMike Anderson spin_lock_init(&md->uevent_lock); 23161da177e4SLinus Torvalds 23174a0b4ddfSMike Snitzer md->queue = blk_alloc_queue(GFP_KERNEL); 23181da177e4SLinus Torvalds if (!md->queue) 23190f20972fSMike Snitzer goto bad; 23201da177e4SLinus Torvalds 23214a0b4ddfSMike Snitzer dm_init_md_queue(md); 23229faf400fSStefan Bader 23231da177e4SLinus Torvalds md->disk = alloc_disk(1); 23241da177e4SLinus Torvalds if (!md->disk) 23250f20972fSMike Snitzer goto bad; 23261da177e4SLinus Torvalds 2327316d315bSNikanth Karthikesan atomic_set(&md->pending[0], 0); 2328316d315bSNikanth Karthikesan atomic_set(&md->pending[1], 0); 2329f0b04115SJeff Mahoney init_waitqueue_head(&md->wait); 233053d5914fSMikulas Patocka INIT_WORK(&md->work, dm_wq_work); 2331f0b04115SJeff Mahoney init_waitqueue_head(&md->eventq); 23322995fa78SMikulas Patocka init_completion(&md->kobj_holder.completion); 23332eb6e1e3SKeith Busch md->kworker_task = NULL; 2334f0b04115SJeff Mahoney 23351da177e4SLinus Torvalds md->disk->major = _major; 23361da177e4SLinus Torvalds md->disk->first_minor = minor; 23371da177e4SLinus Torvalds md->disk->fops = &dm_blk_dops; 23381da177e4SLinus Torvalds md->disk->queue = md->queue; 23391da177e4SLinus Torvalds md->disk->private_data = md; 23401da177e4SLinus Torvalds sprintf(md->disk->disk_name, "dm-%d", minor); 23411da177e4SLinus Torvalds add_disk(md->disk); 23427e51f257SMike Anderson format_dev_t(md->name, MKDEV(_major, minor)); 23431da177e4SLinus Torvalds 2344670368a8STejun Heo md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0); 2345304f3f6aSMilan Broz if 
(!md->wq) 23460f20972fSMike Snitzer goto bad; 2347304f3f6aSMilan Broz 234832a926daSMikulas Patocka md->bdev = bdget_disk(md->disk, 0); 234932a926daSMikulas Patocka if (!md->bdev) 23500f20972fSMike Snitzer goto bad; 235132a926daSMikulas Patocka 23526a8736d1STejun Heo bio_init(&md->flush_bio); 23536a8736d1STejun Heo md->flush_bio.bi_bdev = md->bdev; 23546a8736d1STejun Heo md->flush_bio.bi_rw = WRITE_FLUSH; 23556a8736d1STejun Heo 2356fd2ed4d2SMikulas Patocka dm_stats_init(&md->stats); 2357fd2ed4d2SMikulas Patocka 2358ba61fdd1SJeff Mahoney /* Populate the mapping, nobody knows we exist yet */ 2359f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 2360ba61fdd1SJeff Mahoney old_md = idr_replace(&_minor_idr, md, minor); 2361f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 2362ba61fdd1SJeff Mahoney 2363ba61fdd1SJeff Mahoney BUG_ON(old_md != MINOR_ALLOCED); 2364ba61fdd1SJeff Mahoney 23651da177e4SLinus Torvalds return md; 23661da177e4SLinus Torvalds 23670f20972fSMike Snitzer bad: 23680f20972fSMike Snitzer cleanup_mapped_device(md); 236983d5e5b0SMikulas Patocka bad_io_barrier: 23701da177e4SLinus Torvalds free_minor(minor); 23716ed7ade8SMilan Broz bad_minor: 237210da4f79SJeff Mahoney module_put(THIS_MODULE); 23736ed7ade8SMilan Broz bad_module_get: 23741da177e4SLinus Torvalds kfree(md); 23751da177e4SLinus Torvalds return NULL; 23761da177e4SLinus Torvalds } 23771da177e4SLinus Torvalds 2378ae9da83fSJun'ichi Nomura static void unlock_fs(struct mapped_device *md); 2379ae9da83fSJun'ichi Nomura 23801da177e4SLinus Torvalds static void free_dev(struct mapped_device *md) 23811da177e4SLinus Torvalds { 2382f331c029STejun Heo int minor = MINOR(disk_devt(md->disk)); 238363d94e48SJun'ichi Nomura 2384ae9da83fSJun'ichi Nomura unlock_fs(md); 23852eb6e1e3SKeith Busch 23860f20972fSMike Snitzer cleanup_mapped_device(md); 238717e149b8SMike Snitzer if (md->use_blk_mq) 2388bfebd1cdSMike Snitzer blk_mq_free_tag_set(&md->tag_set); 23890f20972fSMike Snitzer 23900f20972fSMike Snitzer free_table_devices(&md->table_devices); 23910f20972fSMike Snitzer dm_stats_cleanup(&md->stats); 239263a4f065SMike Snitzer free_minor(minor); 239363a4f065SMike Snitzer 239410da4f79SJeff Mahoney module_put(THIS_MODULE); 23951da177e4SLinus Torvalds kfree(md); 23961da177e4SLinus Torvalds } 23971da177e4SLinus Torvalds 2398e6ee8c0bSKiyoshi Ueda static void __bind_mempools(struct mapped_device *md, struct dm_table *t) 2399e6ee8c0bSKiyoshi Ueda { 2400c0820cf5SMikulas Patocka struct dm_md_mempools *p = dm_table_get_md_mempools(t); 2401e6ee8c0bSKiyoshi Ueda 24024e6e36c3SMike Snitzer if (md->bs) { 24034e6e36c3SMike Snitzer /* The md already has necessary mempools. */ 24044e6e36c3SMike Snitzer if (dm_table_get_type(t) == DM_TYPE_BIO_BASED) { 2405c0820cf5SMikulas Patocka /* 240616245bdcSJun'ichi Nomura * Reload bioset because front_pad may have changed 240716245bdcSJun'ichi Nomura * because a different table was loaded. 2408c0820cf5SMikulas Patocka */ 2409c0820cf5SMikulas Patocka bioset_free(md->bs); 2410c0820cf5SMikulas Patocka md->bs = p->bs; 2411c0820cf5SMikulas Patocka p->bs = NULL; 2412c0820cf5SMikulas Patocka } 2413cbc4e3c1SMike Snitzer /* 24144e6e36c3SMike Snitzer * There's no need to reload with request-based dm 24154e6e36c3SMike Snitzer * because the size of front_pad doesn't change. 24164e6e36c3SMike Snitzer * Note for future: If you are to reload bioset, 24174e6e36c3SMike Snitzer * prep-ed requests in the queue may refer 24184e6e36c3SMike Snitzer * to bio from the old bioset, so you must walk 24194e6e36c3SMike Snitzer * through the queue to unprep. 
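 *
 * Recap of why front_pad matters here (hedged sketch mirroring
 * alloc_tio() above; pool_size is a stand-in name):
 *
 *	bs = bioset_create(pool_size, offsetof(struct dm_target_io, clone));
 *	clone = bio_alloc_bioset(GFP_NOIO, 0, bs);
 *	tio = container_of(clone, struct dm_target_io, clone);
 *
 * The per-bio wrapper rides in the bioset's front padding, so a table
 * whose targets need a different wrapper size changes the required
 * front_pad and forces the bioset reload above.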
2420cbc4e3c1SMike Snitzer */ 2421cbc4e3c1SMike Snitzer goto out; 2422cbc4e3c1SMike Snitzer } 2423cbc4e3c1SMike Snitzer 2424cbc4e3c1SMike Snitzer BUG_ON(!p || md->io_pool || md->rq_pool || md->bs); 2425e6ee8c0bSKiyoshi Ueda 2426e6ee8c0bSKiyoshi Ueda md->io_pool = p->io_pool; 2427e6ee8c0bSKiyoshi Ueda p->io_pool = NULL; 24281ae49ea2SMike Snitzer md->rq_pool = p->rq_pool; 24291ae49ea2SMike Snitzer p->rq_pool = NULL; 2430e6ee8c0bSKiyoshi Ueda md->bs = p->bs; 2431e6ee8c0bSKiyoshi Ueda p->bs = NULL; 24324e6e36c3SMike Snitzer 2433e6ee8c0bSKiyoshi Ueda out: 243402233342SMike Snitzer /* mempool bind completed, no longer need any mempools in the table */ 2435e6ee8c0bSKiyoshi Ueda dm_table_free_md_mempools(t); 2436e6ee8c0bSKiyoshi Ueda } 2437e6ee8c0bSKiyoshi Ueda 24381da177e4SLinus Torvalds /* 24391da177e4SLinus Torvalds * Bind a table to the device. 24401da177e4SLinus Torvalds */ 24411da177e4SLinus Torvalds static void event_callback(void *context) 24421da177e4SLinus Torvalds { 24437a8c3d3bSMike Anderson unsigned long flags; 24447a8c3d3bSMike Anderson LIST_HEAD(uevents); 24451da177e4SLinus Torvalds struct mapped_device *md = (struct mapped_device *) context; 24461da177e4SLinus Torvalds 24477a8c3d3bSMike Anderson spin_lock_irqsave(&md->uevent_lock, flags); 24487a8c3d3bSMike Anderson list_splice_init(&md->uevent_list, &uevents); 24497a8c3d3bSMike Anderson spin_unlock_irqrestore(&md->uevent_lock, flags); 24507a8c3d3bSMike Anderson 2451ed9e1982STejun Heo dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); 24527a8c3d3bSMike Anderson 24531da177e4SLinus Torvalds atomic_inc(&md->event_nr); 24541da177e4SLinus Torvalds wake_up(&md->eventq); 24551da177e4SLinus Torvalds } 24561da177e4SLinus Torvalds 2457c217649bSMike Snitzer /* 2458c217649bSMike Snitzer * Protected by md->suspend_lock obtained by dm_swap_table(). 2459c217649bSMike Snitzer */ 24604e90188bSAlasdair G Kergon static void __set_size(struct mapped_device *md, sector_t size) 24611da177e4SLinus Torvalds { 24624e90188bSAlasdair G Kergon set_capacity(md->disk, size); 24631da177e4SLinus Torvalds 2464db8fef4fSMikulas Patocka i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT); 24651da177e4SLinus Torvalds } 24661da177e4SLinus Torvalds 2467042d2a9bSAlasdair G Kergon /* 2468042d2a9bSAlasdair G Kergon * Returns old map, which caller must destroy. 2469042d2a9bSAlasdair G Kergon */ 2470042d2a9bSAlasdair G Kergon static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, 2471754c5fc7SMike Snitzer struct queue_limits *limits) 24721da177e4SLinus Torvalds { 2473042d2a9bSAlasdair G Kergon struct dm_table *old_map; 2474165125e1SJens Axboe struct request_queue *q = md->queue; 24751da177e4SLinus Torvalds sector_t size; 24761da177e4SLinus Torvalds 24771da177e4SLinus Torvalds size = dm_table_get_size(t); 24783ac51e74SDarrick J. Wong 24793ac51e74SDarrick J. Wong /* 24803ac51e74SDarrick J. Wong * Wipe any geometry if the size of the table changed. 24813ac51e74SDarrick J. Wong */ 2482fd2ed4d2SMikulas Patocka if (size != dm_get_size(md)) 24833ac51e74SDarrick J. Wong memset(&md->geometry, 0, sizeof(md->geometry)); 24843ac51e74SDarrick J. Wong 24854e90188bSAlasdair G Kergon __set_size(md, size); 24861da177e4SLinus Torvalds 2487cf222b37SAlasdair G Kergon dm_table_event_callback(t, event_callback, md); 24882ca3310eSAlasdair G Kergon 2489e6ee8c0bSKiyoshi Ueda /* 2490e6ee8c0bSKiyoshi Ueda * The queue hasn't been stopped yet if the old table type wasn't 2491e6ee8c0bSKiyoshi Ueda * request-based during suspension. 
So stop it to prevent 2492e6ee8c0bSKiyoshi Ueda * I/O mapping before resume. 2493e6ee8c0bSKiyoshi Ueda * This must be done before setting the queue restrictions, 2494e6ee8c0bSKiyoshi Ueda * because request-based dm may be run just after the setting. 2495e6ee8c0bSKiyoshi Ueda */ 2496*16f12266SMike Snitzer if (dm_table_request_based(t)) { 2497e6ee8c0bSKiyoshi Ueda stop_queue(q); 2498*16f12266SMike Snitzer /* 2499*16f12266SMike Snitzer * Leverage the fact that request-based DM targets are 2500*16f12266SMike Snitzer * immutable singletons and establish md->immutable_target 2501*16f12266SMike Snitzer * - used to optimize both dm_request_fn and dm_mq_queue_rq 2502*16f12266SMike Snitzer */ 2503*16f12266SMike Snitzer md->immutable_target = dm_table_get_immutable_target(t); 2504*16f12266SMike Snitzer } 2505e6ee8c0bSKiyoshi Ueda 2506e6ee8c0bSKiyoshi Ueda __bind_mempools(md, t); 2507e6ee8c0bSKiyoshi Ueda 2508a12f5d48SEric Dumazet old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 250983d5e5b0SMikulas Patocka rcu_assign_pointer(md->map, t); 251036a0456fSAlasdair G Kergon md->immutable_target_type = dm_table_get_immutable_target_type(t); 251136a0456fSAlasdair G Kergon 2512754c5fc7SMike Snitzer dm_table_set_restrictions(t, q, limits); 251341abc4e1SHannes Reinecke if (old_map) 251483d5e5b0SMikulas Patocka dm_sync_table(md); 25152ca3310eSAlasdair G Kergon 2516042d2a9bSAlasdair G Kergon return old_map; 25171da177e4SLinus Torvalds } 25181da177e4SLinus Torvalds 2519a7940155SAlasdair G Kergon /* 2520a7940155SAlasdair G Kergon * Returns unbound table for the caller to free. 2521a7940155SAlasdair G Kergon */ 2522a7940155SAlasdair G Kergon static struct dm_table *__unbind(struct mapped_device *md) 25231da177e4SLinus Torvalds { 2524a12f5d48SEric Dumazet struct dm_table *map = rcu_dereference_protected(md->map, 1); 25251da177e4SLinus Torvalds 25261da177e4SLinus Torvalds if (!map) 2527a7940155SAlasdair G Kergon return NULL; 25281da177e4SLinus Torvalds 25291da177e4SLinus Torvalds dm_table_event_callback(map, NULL, NULL); 25309cdb8520SMonam Agarwal RCU_INIT_POINTER(md->map, NULL); 253183d5e5b0SMikulas Patocka dm_sync_table(md); 2532a7940155SAlasdair G Kergon 2533a7940155SAlasdair G Kergon return map; 25341da177e4SLinus Torvalds } 25351da177e4SLinus Torvalds 25361da177e4SLinus Torvalds /* 25371da177e4SLinus Torvalds * Constructor for a new device. 25381da177e4SLinus Torvalds */ 25392b06cfffSAlasdair G Kergon int dm_create(int minor, struct mapped_device **result) 25401da177e4SLinus Torvalds { 25411da177e4SLinus Torvalds struct mapped_device *md; 25421da177e4SLinus Torvalds 25432b06cfffSAlasdair G Kergon md = alloc_dev(minor); 25441da177e4SLinus Torvalds if (!md) 25451da177e4SLinus Torvalds return -ENXIO; 25461da177e4SLinus Torvalds 2547784aae73SMilan Broz dm_sysfs_init(md); 2548784aae73SMilan Broz 25491da177e4SLinus Torvalds *result = md; 25501da177e4SLinus Torvalds return 0; 25511da177e4SLinus Torvalds } 25521da177e4SLinus Torvalds 2553a5664dadSMike Snitzer /* 2554a5664dadSMike Snitzer * Functions to manage md->type. 2555a5664dadSMike Snitzer * All are required to hold md->type_lock. 
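 *
 * Hedged usage sketch (modelled on the table-load path; dm_table_get_type()
 * is real, the surrounding sequence is illustrative only):
 *
 *	dm_lock_md_type(md);
 *	if (dm_get_md_type(md) == DM_TYPE_NONE)
 *		dm_set_md_type(md, dm_table_get_type(t));
 *	dm_unlock_md_type(md);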
2556a5664dadSMike Snitzer */ 2557a5664dadSMike Snitzer void dm_lock_md_type(struct mapped_device *md) 2558a5664dadSMike Snitzer { 2559a5664dadSMike Snitzer mutex_lock(&md->type_lock); 2560a5664dadSMike Snitzer } 2561a5664dadSMike Snitzer 2562a5664dadSMike Snitzer void dm_unlock_md_type(struct mapped_device *md) 2563a5664dadSMike Snitzer { 2564a5664dadSMike Snitzer mutex_unlock(&md->type_lock); 2565a5664dadSMike Snitzer } 2566a5664dadSMike Snitzer 2567a5664dadSMike Snitzer void dm_set_md_type(struct mapped_device *md, unsigned type) 2568a5664dadSMike Snitzer { 256900c4fc3bSMike Snitzer BUG_ON(!mutex_is_locked(&md->type_lock)); 2570a5664dadSMike Snitzer md->type = type; 2571a5664dadSMike Snitzer } 2572a5664dadSMike Snitzer 2573a5664dadSMike Snitzer unsigned dm_get_md_type(struct mapped_device *md) 2574a5664dadSMike Snitzer { 2575a5664dadSMike Snitzer return md->type; 2576a5664dadSMike Snitzer } 2577a5664dadSMike Snitzer 257836a0456fSAlasdair G Kergon struct target_type *dm_get_immutable_target_type(struct mapped_device *md) 257936a0456fSAlasdair G Kergon { 258036a0456fSAlasdair G Kergon return md->immutable_target_type; 258136a0456fSAlasdair G Kergon } 258236a0456fSAlasdair G Kergon 25834a0b4ddfSMike Snitzer /* 2584f84cb8a4SMike Snitzer * The queue_limits are only valid as long as you have a reference 2585f84cb8a4SMike Snitzer * count on 'md'. 2586f84cb8a4SMike Snitzer */ 2587f84cb8a4SMike Snitzer struct queue_limits *dm_get_queue_limits(struct mapped_device *md) 2588f84cb8a4SMike Snitzer { 2589f84cb8a4SMike Snitzer BUG_ON(!atomic_read(&md->holders)); 2590f84cb8a4SMike Snitzer return &md->queue->limits; 2591f84cb8a4SMike Snitzer } 2592f84cb8a4SMike Snitzer EXPORT_SYMBOL_GPL(dm_get_queue_limits); 2593f84cb8a4SMike Snitzer 2594bfebd1cdSMike Snitzer static void init_rq_based_worker_thread(struct mapped_device *md) 2595bfebd1cdSMike Snitzer { 2596bfebd1cdSMike Snitzer /* Initialize the request-based DM worker thread */ 2597bfebd1cdSMike Snitzer init_kthread_worker(&md->kworker); 2598bfebd1cdSMike Snitzer md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker, 2599bfebd1cdSMike Snitzer "kdmwork-%s", dm_device_name(md)); 2600bfebd1cdSMike Snitzer } 2601bfebd1cdSMike Snitzer 2602f84cb8a4SMike Snitzer /* 26034a0b4ddfSMike Snitzer * Fully initialize a request-based queue (->elevator, ->request_fn, etc). 
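 *
 * "Fully initialize" amounts to the following shape (a sketch of the calls
 * made below, not a separate path):
 *
 *	q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL);
 *	blk_queue_softirq_done(q, dm_softirq_done);
 *	blk_queue_prep_rq(q, dm_prep_fn);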
26044a0b4ddfSMike Snitzer */ 26054a0b4ddfSMike Snitzer static int dm_init_request_based_queue(struct mapped_device *md) 26064a0b4ddfSMike Snitzer { 26074a0b4ddfSMike Snitzer struct request_queue *q = NULL; 26084a0b4ddfSMike Snitzer 26094a0b4ddfSMike Snitzer /* Fully initialize the queue */ 26104a0b4ddfSMike Snitzer q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL); 26114a0b4ddfSMike Snitzer if (!q) 2612bfebd1cdSMike Snitzer return -EINVAL; 26134a0b4ddfSMike Snitzer 26140ce65797SMike Snitzer /* disable dm_request_fn's merge heuristic by default */ 26150ce65797SMike Snitzer md->seq_rq_merge_deadline_usecs = 0; 26160ce65797SMike Snitzer 26174a0b4ddfSMike Snitzer md->queue = q; 2618bfebd1cdSMike Snitzer dm_init_old_md_queue(md); 26194a0b4ddfSMike Snitzer blk_queue_softirq_done(md->queue, dm_softirq_done); 26204a0b4ddfSMike Snitzer blk_queue_prep_rq(md->queue, dm_prep_fn); 26214a0b4ddfSMike Snitzer 2622bfebd1cdSMike Snitzer init_rq_based_worker_thread(md); 26232eb6e1e3SKeith Busch 26244a0b4ddfSMike Snitzer elv_register_queue(md->queue); 26254a0b4ddfSMike Snitzer 2626bfebd1cdSMike Snitzer return 0; 2627bfebd1cdSMike Snitzer } 2628bfebd1cdSMike Snitzer 2629bfebd1cdSMike Snitzer static int dm_mq_init_request(void *data, struct request *rq, 2630bfebd1cdSMike Snitzer unsigned int hctx_idx, unsigned int request_idx, 2631bfebd1cdSMike Snitzer unsigned int numa_node) 2632bfebd1cdSMike Snitzer { 2633bfebd1cdSMike Snitzer struct mapped_device *md = data; 2634bfebd1cdSMike Snitzer struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq); 2635bfebd1cdSMike Snitzer 2636bfebd1cdSMike Snitzer /* 2637bfebd1cdSMike Snitzer * Must initialize md member of tio, otherwise it won't 2638bfebd1cdSMike Snitzer * be available in dm_mq_queue_rq. 2639bfebd1cdSMike Snitzer */ 2640bfebd1cdSMike Snitzer tio->md = md; 2641bfebd1cdSMike Snitzer 2642bfebd1cdSMike Snitzer return 0; 2643bfebd1cdSMike Snitzer } 2644bfebd1cdSMike Snitzer 2645bfebd1cdSMike Snitzer static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx, 2646bfebd1cdSMike Snitzer const struct blk_mq_queue_data *bd) 2647bfebd1cdSMike Snitzer { 2648bfebd1cdSMike Snitzer struct request *rq = bd->rq; 2649bfebd1cdSMike Snitzer struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq); 2650bfebd1cdSMike Snitzer struct mapped_device *md = tio->md; 2651*16f12266SMike Snitzer struct dm_target *ti = md->immutable_target; 2652*16f12266SMike Snitzer 2653*16f12266SMike Snitzer if (unlikely(!ti)) { 2654bfebd1cdSMike Snitzer int srcu_idx; 2655bfebd1cdSMike Snitzer struct dm_table *map = dm_get_live_table(md, &srcu_idx); 2656bfebd1cdSMike Snitzer 2657*16f12266SMike Snitzer ti = dm_table_find_target(map, 0); 2658bfebd1cdSMike Snitzer dm_put_live_table(md, srcu_idx); 2659bfebd1cdSMike Snitzer } 2660bfebd1cdSMike Snitzer 2661bfebd1cdSMike Snitzer if (ti->type->busy && ti->type->busy(ti)) 2662bfebd1cdSMike Snitzer return BLK_MQ_RQ_QUEUE_BUSY; 2663bfebd1cdSMike Snitzer 2664bfebd1cdSMike Snitzer dm_start_request(md, rq); 2665bfebd1cdSMike Snitzer 2666bfebd1cdSMike Snitzer /* Init tio using md established in .init_request */ 2667bfebd1cdSMike Snitzer init_tio(tio, rq, md); 2668bfebd1cdSMike Snitzer 266902233342SMike Snitzer /* 267002233342SMike Snitzer * Establish tio->ti before queuing work (map_tio_request) 267102233342SMike Snitzer * or making direct call to map_request(). 
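 *
 * The two dispatch paths, in sketch form (names as used below):
 *
 *	if (dm_get_md_type(md) == DM_TYPE_REQUEST_BASED)
 *		queue_kthread_work(&md->kworker, &tio->work);  /* async  */
 *	else if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE)
 *		return BLK_MQ_RQ_QUEUE_BUSY;                   /* direct */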
267202233342SMike Snitzer */ 2673bfebd1cdSMike Snitzer tio->ti = ti; 267402233342SMike Snitzer 2675*16f12266SMike Snitzer /* 2676*16f12266SMike Snitzer * Both the table and md type cannot change after initial table load 2677*16f12266SMike Snitzer */ 2678*16f12266SMike Snitzer if (dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) { 267902233342SMike Snitzer /* clone request is allocated at the end of the pdu */ 268002233342SMike Snitzer tio->clone = (void *)blk_mq_rq_to_pdu(rq) + sizeof(struct dm_rq_target_io); 268145714fbeSMike Snitzer (void) clone_rq(rq, md, tio, GFP_ATOMIC); 2682bfebd1cdSMike Snitzer queue_kthread_work(&md->kworker, &tio->work); 268302233342SMike Snitzer } else { 268402233342SMike Snitzer /* Direct call is fine since .queue_rq allows allocations */ 268545714fbeSMike Snitzer if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) { 268645714fbeSMike Snitzer /* Undo dm_start_request() before requeuing */ 2687e262f347SMikulas Patocka rq_end_stats(md, rq); 268845714fbeSMike Snitzer rq_completed(md, rq_data_dir(rq), false); 268945714fbeSMike Snitzer return BLK_MQ_RQ_QUEUE_BUSY; 269045714fbeSMike Snitzer } 269102233342SMike Snitzer } 2692bfebd1cdSMike Snitzer 2693bfebd1cdSMike Snitzer return BLK_MQ_RQ_QUEUE_OK; 2694bfebd1cdSMike Snitzer } 2695bfebd1cdSMike Snitzer 2696bfebd1cdSMike Snitzer static struct blk_mq_ops dm_mq_ops = { 2697bfebd1cdSMike Snitzer .queue_rq = dm_mq_queue_rq, 2698bfebd1cdSMike Snitzer .map_queue = blk_mq_map_queue, 2699bfebd1cdSMike Snitzer .complete = dm_softirq_done, 2700bfebd1cdSMike Snitzer .init_request = dm_mq_init_request, 2701bfebd1cdSMike Snitzer }; 2702bfebd1cdSMike Snitzer 2703bfebd1cdSMike Snitzer static int dm_init_request_based_blk_mq_queue(struct mapped_device *md) 2704bfebd1cdSMike Snitzer { 270502233342SMike Snitzer unsigned md_type = dm_get_md_type(md); 2706bfebd1cdSMike Snitzer struct request_queue *q; 2707bfebd1cdSMike Snitzer int err; 2708bfebd1cdSMike Snitzer 2709bfebd1cdSMike Snitzer memset(&md->tag_set, 0, sizeof(md->tag_set)); 2710bfebd1cdSMike Snitzer md->tag_set.ops = &dm_mq_ops; 2711bfebd1cdSMike Snitzer md->tag_set.queue_depth = BLKDEV_MAX_RQ; 2712bfebd1cdSMike Snitzer md->tag_set.numa_node = NUMA_NO_NODE; 2713bfebd1cdSMike Snitzer md->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE; 2714bfebd1cdSMike Snitzer md->tag_set.nr_hw_queues = 1; 271502233342SMike Snitzer if (md_type == DM_TYPE_REQUEST_BASED) { 271602233342SMike Snitzer /* make the memory for non-blk-mq clone part of the pdu */ 271702233342SMike Snitzer md->tag_set.cmd_size = sizeof(struct dm_rq_target_io) + sizeof(struct request); 271802233342SMike Snitzer } else 2719bfebd1cdSMike Snitzer md->tag_set.cmd_size = sizeof(struct dm_rq_target_io); 2720bfebd1cdSMike Snitzer md->tag_set.driver_data = md; 2721bfebd1cdSMike Snitzer 2722bfebd1cdSMike Snitzer err = blk_mq_alloc_tag_set(&md->tag_set); 2723bfebd1cdSMike Snitzer if (err) 2724bfebd1cdSMike Snitzer return err; 2725bfebd1cdSMike Snitzer 2726bfebd1cdSMike Snitzer q = blk_mq_init_allocated_queue(&md->tag_set, md->queue); 2727bfebd1cdSMike Snitzer if (IS_ERR(q)) { 2728bfebd1cdSMike Snitzer err = PTR_ERR(q); 2729bfebd1cdSMike Snitzer goto out_tag_set; 2730bfebd1cdSMike Snitzer } 2731bfebd1cdSMike Snitzer md->queue = q; 2732bfebd1cdSMike Snitzer dm_init_md_queue(md); 2733bfebd1cdSMike Snitzer 2734bfebd1cdSMike Snitzer /* backfill 'mq' sysfs registration normally done in blk_register_queue */ 2735bfebd1cdSMike Snitzer blk_mq_register_disk(md->disk); 2736bfebd1cdSMike Snitzer 273702233342SMike Snitzer if (md_type == 
DM_TYPE_REQUEST_BASED) 2738bfebd1cdSMike Snitzer init_rq_based_worker_thread(md); 2739bfebd1cdSMike Snitzer 2740bfebd1cdSMike Snitzer return 0; 2741bfebd1cdSMike Snitzer 2742bfebd1cdSMike Snitzer out_tag_set: 2743bfebd1cdSMike Snitzer blk_mq_free_tag_set(&md->tag_set); 2744bfebd1cdSMike Snitzer return err; 27454a0b4ddfSMike Snitzer } 27464a0b4ddfSMike Snitzer 27474e6e36c3SMike Snitzer static unsigned filter_md_type(unsigned type, struct mapped_device *md) 27484e6e36c3SMike Snitzer { 27494e6e36c3SMike Snitzer if (type == DM_TYPE_BIO_BASED) 27504e6e36c3SMike Snitzer return type; 27514e6e36c3SMike Snitzer 27524e6e36c3SMike Snitzer return !md->use_blk_mq ? DM_TYPE_REQUEST_BASED : DM_TYPE_MQ_REQUEST_BASED; 27534e6e36c3SMike Snitzer } 27544e6e36c3SMike Snitzer 27554a0b4ddfSMike Snitzer /* 27564a0b4ddfSMike Snitzer * Setup the DM device's queue based on md's type 27574a0b4ddfSMike Snitzer */ 27584a0b4ddfSMike Snitzer int dm_setup_md_queue(struct mapped_device *md) 27594a0b4ddfSMike Snitzer { 2760bfebd1cdSMike Snitzer int r; 276117e149b8SMike Snitzer unsigned md_type = filter_md_type(dm_get_md_type(md), md); 2762bfebd1cdSMike Snitzer 2763bfebd1cdSMike Snitzer switch (md_type) { 2764bfebd1cdSMike Snitzer case DM_TYPE_REQUEST_BASED: 2765bfebd1cdSMike Snitzer r = dm_init_request_based_queue(md); 2766bfebd1cdSMike Snitzer if (r) { 27674a0b4ddfSMike Snitzer DMWARN("Cannot initialize queue for request-based mapped device"); 2768bfebd1cdSMike Snitzer return r; 27694a0b4ddfSMike Snitzer } 2770bfebd1cdSMike Snitzer break; 2771bfebd1cdSMike Snitzer case DM_TYPE_MQ_REQUEST_BASED: 2772bfebd1cdSMike Snitzer r = dm_init_request_based_blk_mq_queue(md); 2773bfebd1cdSMike Snitzer if (r) { 2774bfebd1cdSMike Snitzer DMWARN("Cannot initialize queue for request-based blk-mq mapped device"); 2775bfebd1cdSMike Snitzer return r; 2776bfebd1cdSMike Snitzer } 2777bfebd1cdSMike Snitzer break; 2778bfebd1cdSMike Snitzer case DM_TYPE_BIO_BASED: 2779bfebd1cdSMike Snitzer dm_init_old_md_queue(md); 2780ff36ab34SMike Snitzer blk_queue_make_request(md->queue, dm_make_request); 2781dbba42d8SMikulas Patocka /* 2782dbba42d8SMikulas Patocka * DM handles splitting bios as needed. Free the bio_split bioset 2783dbba42d8SMikulas Patocka * since it won't be used (saves 1 process per bio-based DM device). 
2784dbba42d8SMikulas Patocka */ 2785dbba42d8SMikulas Patocka bioset_free(md->queue->bio_split); 2786dbba42d8SMikulas Patocka md->queue->bio_split = NULL; 2787bfebd1cdSMike Snitzer break; 2788ff36ab34SMike Snitzer } 27894a0b4ddfSMike Snitzer 27904a0b4ddfSMike Snitzer return 0; 27914a0b4ddfSMike Snitzer } 27924a0b4ddfSMike Snitzer 27932bec1f4aSMikulas Patocka struct mapped_device *dm_get_md(dev_t dev) 27941da177e4SLinus Torvalds { 27951da177e4SLinus Torvalds struct mapped_device *md; 27961da177e4SLinus Torvalds unsigned minor = MINOR(dev); 27971da177e4SLinus Torvalds 27981da177e4SLinus Torvalds if (MAJOR(dev) != _major || minor >= (1 << MINORBITS)) 27991da177e4SLinus Torvalds return NULL; 28001da177e4SLinus Torvalds 2801f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 28021da177e4SLinus Torvalds 28031da177e4SLinus Torvalds md = idr_find(&_minor_idr, minor); 28042bec1f4aSMikulas Patocka if (md) { 28052bec1f4aSMikulas Patocka if ((md == MINOR_ALLOCED || 2806f331c029STejun Heo (MINOR(disk_devt(dm_disk(md))) != minor) || 2807abdc568bSKiyoshi Ueda dm_deleting_md(md) || 2808fba9f90eSJeff Mahoney test_bit(DMF_FREEING, &md->flags))) { 2809637842cfSDavid Teigland md = NULL; 2810fba9f90eSJeff Mahoney goto out; 2811fba9f90eSJeff Mahoney } 28122bec1f4aSMikulas Patocka dm_get(md); 28132bec1f4aSMikulas Patocka } 28141da177e4SLinus Torvalds 2815fba9f90eSJeff Mahoney out: 2816f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 28171da177e4SLinus Torvalds 2818637842cfSDavid Teigland return md; 2819637842cfSDavid Teigland } 28203cf2e4baSAlasdair G Kergon EXPORT_SYMBOL_GPL(dm_get_md); 2821d229a958SDavid Teigland 28229ade92a9SAlasdair G Kergon void *dm_get_mdptr(struct mapped_device *md) 2823637842cfSDavid Teigland { 28249ade92a9SAlasdair G Kergon return md->interface_ptr; 28251da177e4SLinus Torvalds } 28261da177e4SLinus Torvalds 28271da177e4SLinus Torvalds void dm_set_mdptr(struct mapped_device *md, void *ptr) 28281da177e4SLinus Torvalds { 28291da177e4SLinus Torvalds md->interface_ptr = ptr; 28301da177e4SLinus Torvalds } 28311da177e4SLinus Torvalds 28321da177e4SLinus Torvalds void dm_get(struct mapped_device *md) 28331da177e4SLinus Torvalds { 28341da177e4SLinus Torvalds atomic_inc(&md->holders); 28353f77316dSKiyoshi Ueda BUG_ON(test_bit(DMF_FREEING, &md->flags)); 28361da177e4SLinus Torvalds } 28371da177e4SLinus Torvalds 283809ee96b2SMikulas Patocka int dm_hold(struct mapped_device *md) 283909ee96b2SMikulas Patocka { 284009ee96b2SMikulas Patocka spin_lock(&_minor_lock); 284109ee96b2SMikulas Patocka if (test_bit(DMF_FREEING, &md->flags)) { 284209ee96b2SMikulas Patocka spin_unlock(&_minor_lock); 284309ee96b2SMikulas Patocka return -EBUSY; 284409ee96b2SMikulas Patocka } 284509ee96b2SMikulas Patocka dm_get(md); 284609ee96b2SMikulas Patocka spin_unlock(&_minor_lock); 284709ee96b2SMikulas Patocka return 0; 284809ee96b2SMikulas Patocka } 284909ee96b2SMikulas Patocka EXPORT_SYMBOL_GPL(dm_hold); 285009ee96b2SMikulas Patocka 285172d94861SAlasdair G Kergon const char *dm_device_name(struct mapped_device *md) 285272d94861SAlasdair G Kergon { 285372d94861SAlasdair G Kergon return md->name; 285472d94861SAlasdair G Kergon } 285572d94861SAlasdair G Kergon EXPORT_SYMBOL_GPL(dm_device_name); 285672d94861SAlasdair G Kergon 28573f77316dSKiyoshi Ueda static void __dm_destroy(struct mapped_device *md, bool wait) 28581da177e4SLinus Torvalds { 28591134e5aeSMike Anderson struct dm_table *map; 286083d5e5b0SMikulas Patocka int srcu_idx; 28611da177e4SLinus Torvalds 28623f77316dSKiyoshi Ueda might_sleep(); 2863fba9f90eSJeff Mahoney 
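	/*
	 * Editorial summary of the teardown sequence below (no new
	 * behaviour): unpublish the minor and set DMF_FREEING under
	 * _minor_lock, quiesce the request-based kworker,
	 * presuspend/postsuspend the live table under suspend_lock,
	 * then wait for holders to drop.
	 */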
286463a4f065SMike Snitzer spin_lock(&_minor_lock); 28653f77316dSKiyoshi Ueda idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); 2866fba9f90eSJeff Mahoney set_bit(DMF_FREEING, &md->flags); 2867f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 28683f77316dSKiyoshi Ueda 286902233342SMike Snitzer if (dm_request_based(md) && md->kworker_task) 28702eb6e1e3SKeith Busch flush_kthread_worker(&md->kworker); 28712eb6e1e3SKeith Busch 2872ab7c7bb6SMikulas Patocka /* 2873ab7c7bb6SMikulas Patocka * Take suspend_lock so that presuspend and postsuspend methods 2874ab7c7bb6SMikulas Patocka * do not race with internal suspend. 2875ab7c7bb6SMikulas Patocka */ 2876ab7c7bb6SMikulas Patocka mutex_lock(&md->suspend_lock); 28772a708cffSJunichi Nomura map = dm_get_live_table(md, &srcu_idx); 28784f186f8bSKiyoshi Ueda if (!dm_suspended_md(md)) { 28791da177e4SLinus Torvalds dm_table_presuspend_targets(map); 28801da177e4SLinus Torvalds dm_table_postsuspend_targets(map); 28811da177e4SLinus Torvalds } 288283d5e5b0SMikulas Patocka /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ 288383d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 28842a708cffSJunichi Nomura mutex_unlock(&md->suspend_lock); 288583d5e5b0SMikulas Patocka 28863f77316dSKiyoshi Ueda /* 28873f77316dSKiyoshi Ueda * Rare, but there may be I/O requests still in flight that have 28883f77316dSKiyoshi Ueda * yet to complete. Wait for all references to disappear. 28893f77316dSKiyoshi Ueda * No one should increment the reference count of the mapped_device 28903f77316dSKiyoshi Ueda * after the mapped_device state becomes DMF_FREEING. 28913f77316dSKiyoshi Ueda */ 28923f77316dSKiyoshi Ueda if (wait) 28933f77316dSKiyoshi Ueda while (atomic_read(&md->holders)) 28943f77316dSKiyoshi Ueda msleep(1); 28953f77316dSKiyoshi Ueda else if (atomic_read(&md->holders)) 28963f77316dSKiyoshi Ueda DMWARN("%s: Forcibly removing mapped_device still in use! 
(%d users)", 28973f77316dSKiyoshi Ueda dm_device_name(md), atomic_read(&md->holders)); 28983f77316dSKiyoshi Ueda 2899784aae73SMilan Broz dm_sysfs_exit(md); 2900a7940155SAlasdair G Kergon dm_table_destroy(__unbind(md)); 29011da177e4SLinus Torvalds free_dev(md); 29021da177e4SLinus Torvalds } 29033f77316dSKiyoshi Ueda 29043f77316dSKiyoshi Ueda void dm_destroy(struct mapped_device *md) 29053f77316dSKiyoshi Ueda { 29063f77316dSKiyoshi Ueda __dm_destroy(md, true); 29073f77316dSKiyoshi Ueda } 29083f77316dSKiyoshi Ueda 29093f77316dSKiyoshi Ueda void dm_destroy_immediate(struct mapped_device *md) 29103f77316dSKiyoshi Ueda { 29113f77316dSKiyoshi Ueda __dm_destroy(md, false); 29123f77316dSKiyoshi Ueda } 29133f77316dSKiyoshi Ueda 29143f77316dSKiyoshi Ueda void dm_put(struct mapped_device *md) 29153f77316dSKiyoshi Ueda { 29163f77316dSKiyoshi Ueda atomic_dec(&md->holders); 29171da177e4SLinus Torvalds } 291879eb885cSEdward Goggin EXPORT_SYMBOL_GPL(dm_put); 29191da177e4SLinus Torvalds 2920401600dfSMikulas Patocka static int dm_wait_for_completion(struct mapped_device *md, int interruptible) 292146125c1cSMilan Broz { 292246125c1cSMilan Broz int r = 0; 2923b44ebeb0SMikulas Patocka DECLARE_WAITQUEUE(wait, current); 2924b44ebeb0SMikulas Patocka 2925b44ebeb0SMikulas Patocka add_wait_queue(&md->wait, &wait); 292646125c1cSMilan Broz 292746125c1cSMilan Broz while (1) { 2928401600dfSMikulas Patocka set_current_state(interruptible); 292946125c1cSMilan Broz 2930b4324feeSKiyoshi Ueda if (!md_in_flight(md)) 293146125c1cSMilan Broz break; 293246125c1cSMilan Broz 2933401600dfSMikulas Patocka if (interruptible == TASK_INTERRUPTIBLE && 2934401600dfSMikulas Patocka signal_pending(current)) { 293546125c1cSMilan Broz r = -EINTR; 293646125c1cSMilan Broz break; 293746125c1cSMilan Broz } 293846125c1cSMilan Broz 293946125c1cSMilan Broz io_schedule(); 294046125c1cSMilan Broz } 294146125c1cSMilan Broz set_current_state(TASK_RUNNING); 294246125c1cSMilan Broz 2943b44ebeb0SMikulas Patocka remove_wait_queue(&md->wait, &wait); 2944b44ebeb0SMikulas Patocka 294546125c1cSMilan Broz return r; 294646125c1cSMilan Broz } 294746125c1cSMilan Broz 29481da177e4SLinus Torvalds /* 29491da177e4SLinus Torvalds * Process the deferred bios 29501da177e4SLinus Torvalds */ 2951ef208587SMikulas Patocka static void dm_wq_work(struct work_struct *work) 29521da177e4SLinus Torvalds { 2953ef208587SMikulas Patocka struct mapped_device *md = container_of(work, struct mapped_device, 2954ef208587SMikulas Patocka work); 29556d6f10dfSMilan Broz struct bio *c; 295683d5e5b0SMikulas Patocka int srcu_idx; 295783d5e5b0SMikulas Patocka struct dm_table *map; 29581da177e4SLinus Torvalds 295983d5e5b0SMikulas Patocka map = dm_get_live_table(md, &srcu_idx); 2960ef208587SMikulas Patocka 29613b00b203SMikulas Patocka while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 2962022c2611SMikulas Patocka spin_lock_irq(&md->deferred_lock); 2963022c2611SMikulas Patocka c = bio_list_pop(&md->deferred); 2964022c2611SMikulas Patocka spin_unlock_irq(&md->deferred_lock); 2965022c2611SMikulas Patocka 29666a8736d1STejun Heo if (!c) 2967df12ee99SAlasdair G Kergon break; 296873d410c0SMilan Broz 2969e6ee8c0bSKiyoshi Ueda if (dm_request_based(md)) 2970e6ee8c0bSKiyoshi Ueda generic_make_request(c); 2971af7e466aSMikulas Patocka else 297283d5e5b0SMikulas Patocka __split_and_process_bio(md, map, c); 2973e6ee8c0bSKiyoshi Ueda } 29743b00b203SMikulas Patocka 297583d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 29761da177e4SLinus Torvalds } 29771da177e4SLinus Torvalds 29789a1fb464SMikulas 
Patocka static void dm_queue_flush(struct mapped_device *md) 2979304f3f6aSMilan Broz { 29803b00b203SMikulas Patocka clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 29814e857c58SPeter Zijlstra smp_mb__after_atomic(); 298253d5914fSMikulas Patocka queue_work(md->wq, &md->work); 2983304f3f6aSMilan Broz } 2984304f3f6aSMilan Broz 29851da177e4SLinus Torvalds /* 2986042d2a9bSAlasdair G Kergon * Swap in a new table, returning the old one for the caller to destroy. 29871da177e4SLinus Torvalds */ 2988042d2a9bSAlasdair G Kergon struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) 29891da177e4SLinus Torvalds { 299087eb5b21SMike Christie struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL); 2991754c5fc7SMike Snitzer struct queue_limits limits; 2992042d2a9bSAlasdair G Kergon int r; 29931da177e4SLinus Torvalds 2994e61290a4SDaniel Walker mutex_lock(&md->suspend_lock); 29951da177e4SLinus Torvalds 29961da177e4SLinus Torvalds /* device must be suspended */ 29974f186f8bSKiyoshi Ueda if (!dm_suspended_md(md)) 299893c534aeSAlasdair G Kergon goto out; 29991da177e4SLinus Torvalds 30003ae70656SMike Snitzer /* 30013ae70656SMike Snitzer * If the new table has no data devices, retain the existing limits. 30023ae70656SMike Snitzer * This helps multipath with queue_if_no_path if all paths disappear, 30033ae70656SMike Snitzer * then new I/O is queued based on these limits, and then some paths 30043ae70656SMike Snitzer * reappear. 30053ae70656SMike Snitzer */ 30063ae70656SMike Snitzer if (dm_table_has_no_data_devices(table)) { 300783d5e5b0SMikulas Patocka live_map = dm_get_live_table_fast(md); 30083ae70656SMike Snitzer if (live_map) 30093ae70656SMike Snitzer limits = md->queue->limits; 301083d5e5b0SMikulas Patocka dm_put_live_table_fast(md); 30113ae70656SMike Snitzer } 30123ae70656SMike Snitzer 301387eb5b21SMike Christie if (!live_map) { 3014754c5fc7SMike Snitzer r = dm_calculate_queue_limits(table, &limits); 3015042d2a9bSAlasdair G Kergon if (r) { 3016042d2a9bSAlasdair G Kergon map = ERR_PTR(r); 3017754c5fc7SMike Snitzer goto out; 3018042d2a9bSAlasdair G Kergon } 301987eb5b21SMike Christie } 3020754c5fc7SMike Snitzer 3021042d2a9bSAlasdair G Kergon map = __bind(md, table, &limits); 30221da177e4SLinus Torvalds 302393c534aeSAlasdair G Kergon out: 3024e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock); 3025042d2a9bSAlasdair G Kergon return map; 30261da177e4SLinus Torvalds } 30271da177e4SLinus Torvalds 30281da177e4SLinus Torvalds /* 30291da177e4SLinus Torvalds * Functions to lock and unlock any filesystem running on the 30301da177e4SLinus Torvalds * device. 
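 *
 * They wrap the stock freeze/thaw idiom (a sketch of the pattern used
 * below, not a third helper):
 *
 *	sb = freeze_bdev(bdev);
 *	if (IS_ERR(sb))
 *		return PTR_ERR(sb);
 *	...
 *	thaw_bdev(bdev, sb);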
30311da177e4SLinus Torvalds */ 30322ca3310eSAlasdair G Kergon static int lock_fs(struct mapped_device *md) 30331da177e4SLinus Torvalds { 3034e39e2e95SAlasdair G Kergon int r; 30351da177e4SLinus Torvalds 30361da177e4SLinus Torvalds WARN_ON(md->frozen_sb); 3037dfbe03f6SAlasdair G Kergon 3038db8fef4fSMikulas Patocka md->frozen_sb = freeze_bdev(md->bdev); 3039dfbe03f6SAlasdair G Kergon if (IS_ERR(md->frozen_sb)) { 3040cf222b37SAlasdair G Kergon r = PTR_ERR(md->frozen_sb); 3041e39e2e95SAlasdair G Kergon md->frozen_sb = NULL; 3042e39e2e95SAlasdair G Kergon return r; 3043dfbe03f6SAlasdair G Kergon } 3044dfbe03f6SAlasdair G Kergon 3045aa8d7c2fSAlasdair G Kergon set_bit(DMF_FROZEN, &md->flags); 3046aa8d7c2fSAlasdair G Kergon 30471da177e4SLinus Torvalds return 0; 30481da177e4SLinus Torvalds } 30491da177e4SLinus Torvalds 30502ca3310eSAlasdair G Kergon static void unlock_fs(struct mapped_device *md) 30511da177e4SLinus Torvalds { 3052aa8d7c2fSAlasdair G Kergon if (!test_bit(DMF_FROZEN, &md->flags)) 3053aa8d7c2fSAlasdair G Kergon return; 3054aa8d7c2fSAlasdair G Kergon 3055db8fef4fSMikulas Patocka thaw_bdev(md->bdev, md->frozen_sb); 30561da177e4SLinus Torvalds md->frozen_sb = NULL; 3057aa8d7c2fSAlasdair G Kergon clear_bit(DMF_FROZEN, &md->flags); 30581da177e4SLinus Torvalds } 30591da177e4SLinus Torvalds 30601da177e4SLinus Torvalds /* 3061ffcc3936SMike Snitzer * If __dm_suspend returns 0, the device is completely quiescent 3062ffcc3936SMike Snitzer * now. There is no request-processing activity. All new requests 3063ffcc3936SMike Snitzer * are being added to md->deferred list. 3064cec47e3dSKiyoshi Ueda * 3065ffcc3936SMike Snitzer * Caller must hold md->suspend_lock 3066cec47e3dSKiyoshi Ueda */ 3067ffcc3936SMike Snitzer static int __dm_suspend(struct mapped_device *md, struct dm_table *map, 3068ffcc3936SMike Snitzer unsigned suspend_flags, int interruptible) 30691da177e4SLinus Torvalds { 3070ffcc3936SMike Snitzer bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG; 3071ffcc3936SMike Snitzer bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG; 3072ffcc3936SMike Snitzer int r; 3073cf222b37SAlasdair G Kergon 30742e93ccc1SKiyoshi Ueda /* 30752e93ccc1SKiyoshi Ueda * DMF_NOFLUSH_SUSPENDING must be set before presuspend. 30762e93ccc1SKiyoshi Ueda * This flag is cleared before dm_suspend returns. 30772e93ccc1SKiyoshi Ueda */ 30782e93ccc1SKiyoshi Ueda if (noflush) 30792e93ccc1SKiyoshi Ueda set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 30802e93ccc1SKiyoshi Ueda 3081d67ee213SMike Snitzer /* 3082d67ee213SMike Snitzer * This gets reverted if there's an error later and the targets 3083d67ee213SMike Snitzer * provide the .presuspend_undo hook. 3084d67ee213SMike Snitzer */ 30851da177e4SLinus Torvalds dm_table_presuspend_targets(map); 30861da177e4SLinus Torvalds 30872e93ccc1SKiyoshi Ueda /* 30889f518b27SKiyoshi Ueda * Flush I/O to the device. 30899f518b27SKiyoshi Ueda * Any I/O submitted after lock_fs() may not be flushed. 30909f518b27SKiyoshi Ueda * noflush takes precedence over do_lockfs. 30919f518b27SKiyoshi Ueda * (lock_fs() flushes I/Os and waits for them to complete.) 
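 *
 * Caller-side sketch of the two flags tested here (illustrative only;
 * the flags are the ones consumed by __dm_suspend() below):
 *
 *	dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);  /* flush and freeze fs */
 *	dm_suspend(md, DM_SUSPEND_NOFLUSH_FLAG); /* defer, do not flush */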
30922e93ccc1SKiyoshi Ueda */ 309332a926daSMikulas Patocka if (!noflush && do_lockfs) { 30942ca3310eSAlasdair G Kergon r = lock_fs(md); 3095d67ee213SMike Snitzer if (r) { 3096d67ee213SMike Snitzer dm_table_presuspend_undo_targets(map); 3097ffcc3936SMike Snitzer return r; 3098aa8d7c2fSAlasdair G Kergon } 3099d67ee213SMike Snitzer } 31001da177e4SLinus Torvalds 31011da177e4SLinus Torvalds /* 31023b00b203SMikulas Patocka * Here we must make sure that no processes are submitting requests 31033b00b203SMikulas Patocka * to target drivers i.e. no one may be executing 31043b00b203SMikulas Patocka * __split_and_process_bio. This is called from dm_request and 31053b00b203SMikulas Patocka * dm_wq_work. 31063b00b203SMikulas Patocka * 31073b00b203SMikulas Patocka * To get all processes out of __split_and_process_bio in dm_request, 31083b00b203SMikulas Patocka * we take the write lock. To prevent any process from reentering 31096a8736d1STejun Heo * __split_and_process_bio from dm_request and quiesce the thread 31106a8736d1STejun Heo * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call 31116a8736d1STejun Heo * flush_workqueue(md->wq). 31121da177e4SLinus Torvalds */ 31131eb787ecSAlasdair G Kergon set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 311441abc4e1SHannes Reinecke if (map) 311583d5e5b0SMikulas Patocka synchronize_srcu(&md->io_barrier); 31161da177e4SLinus Torvalds 3117d0bcb878SKiyoshi Ueda /* 311829e4013dSTejun Heo * Stop md->queue before flushing md->wq in case request-based 311929e4013dSTejun Heo * dm defers requests to md->wq from md->queue. 3120d0bcb878SKiyoshi Ueda */ 31212eb6e1e3SKeith Busch if (dm_request_based(md)) { 31229f518b27SKiyoshi Ueda stop_queue(md->queue); 312302233342SMike Snitzer if (md->kworker_task) 31242eb6e1e3SKeith Busch flush_kthread_worker(&md->kworker); 31252eb6e1e3SKeith Busch } 3126cec47e3dSKiyoshi Ueda 3127d0bcb878SKiyoshi Ueda flush_workqueue(md->wq); 3128d0bcb878SKiyoshi Ueda 31291da177e4SLinus Torvalds /* 31303b00b203SMikulas Patocka * At this point no more requests are entering target request routines. 31313b00b203SMikulas Patocka * We call dm_wait_for_completion to wait for all existing requests 31323b00b203SMikulas Patocka * to finish. 31331da177e4SLinus Torvalds */ 3134ffcc3936SMike Snitzer r = dm_wait_for_completion(md, interruptible); 31351da177e4SLinus Torvalds 31366d6f10dfSMilan Broz if (noflush) 3137022c2611SMikulas Patocka clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 313841abc4e1SHannes Reinecke if (map) 313983d5e5b0SMikulas Patocka synchronize_srcu(&md->io_barrier); 31402e93ccc1SKiyoshi Ueda 31411da177e4SLinus Torvalds /* were we interrupted? */ 314246125c1cSMilan Broz if (r < 0) { 31439a1fb464SMikulas Patocka dm_queue_flush(md); 314473d410c0SMilan Broz 3145cec47e3dSKiyoshi Ueda if (dm_request_based(md)) 31469f518b27SKiyoshi Ueda start_queue(md->queue); 3147cec47e3dSKiyoshi Ueda 31482ca3310eSAlasdair G Kergon unlock_fs(md); 3149d67ee213SMike Snitzer dm_table_presuspend_undo_targets(map); 3150ffcc3936SMike Snitzer /* pushback list is already flushed, so skip flush */ 3151ffcc3936SMike Snitzer } 3152ffcc3936SMike Snitzer 3153ffcc3936SMike Snitzer return r; 31542ca3310eSAlasdair G Kergon } 31552ca3310eSAlasdair G Kergon 31563b00b203SMikulas Patocka /* 3157ffcc3936SMike Snitzer * We need to be able to change a mapping table under a mounted 3158ffcc3936SMike Snitzer * filesystem. For example we might want to move some data in 3159ffcc3936SMike Snitzer * the background. 
Before the table can be swapped with 3160ffcc3936SMike Snitzer * dm_bind_table, dm_suspend must be called to flush any in 3161ffcc3936SMike Snitzer * flight bios and ensure that any further io gets deferred. 31623b00b203SMikulas Patocka */ 3163ffcc3936SMike Snitzer /* 3164ffcc3936SMike Snitzer * Suspend mechanism in request-based dm. 3165ffcc3936SMike Snitzer * 3166ffcc3936SMike Snitzer * 1. Flush all I/Os by lock_fs() if needed. 3167ffcc3936SMike Snitzer * 2. Stop dispatching any I/O by stopping the request_queue. 3168ffcc3936SMike Snitzer * 3. Wait for all in-flight I/Os to be completed or requeued. 3169ffcc3936SMike Snitzer * 3170ffcc3936SMike Snitzer * To abort suspend, start the request_queue. 3171ffcc3936SMike Snitzer */ 3172ffcc3936SMike Snitzer int dm_suspend(struct mapped_device *md, unsigned suspend_flags) 3173ffcc3936SMike Snitzer { 3174ffcc3936SMike Snitzer struct dm_table *map = NULL; 3175ffcc3936SMike Snitzer int r = 0; 3176ffcc3936SMike Snitzer 3177ffcc3936SMike Snitzer retry: 3178ffcc3936SMike Snitzer mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); 3179ffcc3936SMike Snitzer 3180ffcc3936SMike Snitzer if (dm_suspended_md(md)) { 3181ffcc3936SMike Snitzer r = -EINVAL; 3182ffcc3936SMike Snitzer goto out_unlock; 3183ffcc3936SMike Snitzer } 3184ffcc3936SMike Snitzer 3185ffcc3936SMike Snitzer if (dm_suspended_internally_md(md)) { 3186ffcc3936SMike Snitzer /* already internally suspended, wait for internal resume */ 3187ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 3188ffcc3936SMike Snitzer r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); 3189ffcc3936SMike Snitzer if (r) 3190ffcc3936SMike Snitzer return r; 3191ffcc3936SMike Snitzer goto retry; 3192ffcc3936SMike Snitzer } 3193ffcc3936SMike Snitzer 3194a12f5d48SEric Dumazet map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 3195ffcc3936SMike Snitzer 3196ffcc3936SMike Snitzer r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE); 3197ffcc3936SMike Snitzer if (r) 3198ffcc3936SMike Snitzer goto out_unlock; 31993b00b203SMikulas Patocka 32001da177e4SLinus Torvalds set_bit(DMF_SUSPENDED, &md->flags); 32011da177e4SLinus Torvalds 32024d4471cbSKiyoshi Ueda dm_table_postsuspend_targets(map); 32034d4471cbSKiyoshi Ueda 3204d287483dSAlasdair G Kergon out_unlock: 3205e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock); 3206cf222b37SAlasdair G Kergon return r; 32071da177e4SLinus Torvalds } 32081da177e4SLinus Torvalds 3209ffcc3936SMike Snitzer static int __dm_resume(struct mapped_device *md, struct dm_table *map) 32101da177e4SLinus Torvalds { 3211ffcc3936SMike Snitzer if (map) { 3212ffcc3936SMike Snitzer int r = dm_table_resume_targets(map); 32138757b776SMilan Broz if (r) 3214ffcc3936SMike Snitzer return r; 3215ffcc3936SMike Snitzer } 32162ca3310eSAlasdair G Kergon 32179a1fb464SMikulas Patocka dm_queue_flush(md); 32182ca3310eSAlasdair G Kergon 3219cec47e3dSKiyoshi Ueda /* 3220cec47e3dSKiyoshi Ueda * Flushing deferred I/Os must be done after targets are resumed 3221cec47e3dSKiyoshi Ueda * so that mapping of targets can work correctly. 3222cec47e3dSKiyoshi Ueda * Request-based dm is queueing the deferred I/Os in its request_queue. 
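 *
 * So the overall resume order, in sketch form, is:
 *
 *	dm_table_resume_targets(map);	/* targets first           */
 *	dm_queue_flush(md);		/* then deferred bios      */
 *	start_queue(md->queue);		/* then the request_queue  */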
3223cec47e3dSKiyoshi Ueda */ 3224cec47e3dSKiyoshi Ueda if (dm_request_based(md)) 3225cec47e3dSKiyoshi Ueda start_queue(md->queue); 3226cec47e3dSKiyoshi Ueda 32272ca3310eSAlasdair G Kergon unlock_fs(md); 32282ca3310eSAlasdair G Kergon 3229ffcc3936SMike Snitzer return 0; 3230ffcc3936SMike Snitzer } 3231ffcc3936SMike Snitzer 3232ffcc3936SMike Snitzer int dm_resume(struct mapped_device *md) 3233ffcc3936SMike Snitzer { 3234ffcc3936SMike Snitzer int r = -EINVAL; 3235ffcc3936SMike Snitzer struct dm_table *map = NULL; 3236ffcc3936SMike Snitzer 3237ffcc3936SMike Snitzer retry: 3238ffcc3936SMike Snitzer mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); 3239ffcc3936SMike Snitzer 3240ffcc3936SMike Snitzer if (!dm_suspended_md(md)) 3241ffcc3936SMike Snitzer goto out; 3242ffcc3936SMike Snitzer 3243ffcc3936SMike Snitzer if (dm_suspended_internally_md(md)) { 3244ffcc3936SMike Snitzer /* already internally suspended, wait for internal resume */ 3245ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 3246ffcc3936SMike Snitzer r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); 3247ffcc3936SMike Snitzer if (r) 3248ffcc3936SMike Snitzer return r; 3249ffcc3936SMike Snitzer goto retry; 3250ffcc3936SMike Snitzer } 3251ffcc3936SMike Snitzer 3252a12f5d48SEric Dumazet map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 3253ffcc3936SMike Snitzer if (!map || !dm_table_get_size(map)) 3254ffcc3936SMike Snitzer goto out; 3255ffcc3936SMike Snitzer 3256ffcc3936SMike Snitzer r = __dm_resume(md, map); 3257ffcc3936SMike Snitzer if (r) 3258ffcc3936SMike Snitzer goto out; 3259ffcc3936SMike Snitzer 32602ca3310eSAlasdair G Kergon clear_bit(DMF_SUSPENDED, &md->flags); 32612ca3310eSAlasdair G Kergon 3262cf222b37SAlasdair G Kergon r = 0; 3263cf222b37SAlasdair G Kergon out: 3264e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock); 32652ca3310eSAlasdair G Kergon 3266cf222b37SAlasdair G Kergon return r; 32671da177e4SLinus Torvalds } 32681da177e4SLinus Torvalds 3269fd2ed4d2SMikulas Patocka /* 3270fd2ed4d2SMikulas Patocka * Internal suspend/resume works like userspace-driven suspend. It waits 3271fd2ed4d2SMikulas Patocka * until all bios finish and prevents issuing new bios to the target drivers. 3272fd2ed4d2SMikulas Patocka * It may be used only from the kernel. 3273fd2ed4d2SMikulas Patocka */ 3274fd2ed4d2SMikulas Patocka 3275ffcc3936SMike Snitzer static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags) 3276ffcc3936SMike Snitzer { 3277ffcc3936SMike Snitzer struct dm_table *map = NULL; 3278ffcc3936SMike Snitzer 327996b26c8cSMikulas Patocka if (md->internal_suspend_count++) 3280ffcc3936SMike Snitzer return; /* nested internal suspend */ 3281ffcc3936SMike Snitzer 3282ffcc3936SMike Snitzer if (dm_suspended_md(md)) { 3283ffcc3936SMike Snitzer set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 3284ffcc3936SMike Snitzer return; /* nest suspend */ 3285ffcc3936SMike Snitzer } 3286ffcc3936SMike Snitzer 3287a12f5d48SEric Dumazet map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 3288ffcc3936SMike Snitzer 3289ffcc3936SMike Snitzer /* 3290ffcc3936SMike Snitzer * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is 3291ffcc3936SMike Snitzer * supported. Properly supporting a TASK_INTERRUPTIBLE internal suspend 3292ffcc3936SMike Snitzer * would require changing .presuspend to return an error -- avoid this 3293ffcc3936SMike Snitzer * until there is a need for more elaborate variants of internal suspend. 
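 *
 * While here, the nesting contract of the internal suspend pair, in
 * sketch form (only the outermost pair does real work):
 *
 *	__dm_internal_suspend(md, flags);  /* count 0 -> 1: suspends */
 *	__dm_internal_suspend(md, flags);  /* count 1 -> 2: no-op    */
 *	__dm_internal_resume(md);          /* count 2 -> 1: no-op    */
 *	__dm_internal_resume(md);          /* count 1 -> 0: resumes  */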
3294ffcc3936SMike Snitzer */ 3295ffcc3936SMike Snitzer (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE); 3296ffcc3936SMike Snitzer 3297ffcc3936SMike Snitzer set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 3298ffcc3936SMike Snitzer 3299ffcc3936SMike Snitzer dm_table_postsuspend_targets(map); 3300ffcc3936SMike Snitzer } 3301ffcc3936SMike Snitzer 3302ffcc3936SMike Snitzer static void __dm_internal_resume(struct mapped_device *md) 3303ffcc3936SMike Snitzer { 330496b26c8cSMikulas Patocka BUG_ON(!md->internal_suspend_count); 330596b26c8cSMikulas Patocka 330696b26c8cSMikulas Patocka if (--md->internal_suspend_count) 3307ffcc3936SMike Snitzer return; /* resume from nested internal suspend */ 3308ffcc3936SMike Snitzer 3309ffcc3936SMike Snitzer if (dm_suspended_md(md)) 3310ffcc3936SMike Snitzer goto done; /* resume from nested suspend */ 3311ffcc3936SMike Snitzer 3312ffcc3936SMike Snitzer /* 3313ffcc3936SMike Snitzer * NOTE: existing callers don't need to call dm_table_resume_targets 3314ffcc3936SMike Snitzer * (which may fail -- so best to avoid it for now by passing NULL map) 3315ffcc3936SMike Snitzer */ 3316ffcc3936SMike Snitzer (void) __dm_resume(md, NULL); 3317ffcc3936SMike Snitzer 3318ffcc3936SMike Snitzer done: 3319ffcc3936SMike Snitzer clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 3320ffcc3936SMike Snitzer smp_mb__after_atomic(); 3321ffcc3936SMike Snitzer wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY); 3322ffcc3936SMike Snitzer } 3323ffcc3936SMike Snitzer 3324ffcc3936SMike Snitzer void dm_internal_suspend_noflush(struct mapped_device *md) 3325fd2ed4d2SMikulas Patocka { 3326fd2ed4d2SMikulas Patocka mutex_lock(&md->suspend_lock); 3327ffcc3936SMike Snitzer __dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG); 3328ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 3329ffcc3936SMike Snitzer } 3330ffcc3936SMike Snitzer EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush); 3331ffcc3936SMike Snitzer 3332ffcc3936SMike Snitzer void dm_internal_resume(struct mapped_device *md) 3333ffcc3936SMike Snitzer { 3334ffcc3936SMike Snitzer mutex_lock(&md->suspend_lock); 3335ffcc3936SMike Snitzer __dm_internal_resume(md); 3336ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 3337ffcc3936SMike Snitzer } 3338ffcc3936SMike Snitzer EXPORT_SYMBOL_GPL(dm_internal_resume); 3339ffcc3936SMike Snitzer 3340ffcc3936SMike Snitzer /* 3341ffcc3936SMike Snitzer * Fast variants of internal suspend/resume hold md->suspend_lock, 3342ffcc3936SMike Snitzer * which prevents interaction with userspace-driven suspend. 
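 *
 * Usage sketch: the pair brackets a critical section much like a lock,
 * since the fast suspend takes md->suspend_lock and the fast resume
 * drops it:
 *
 *	dm_internal_suspend_fast(md);
 *	/* ... operate on the quiesced device ... */
 *	dm_internal_resume_fast(md);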
3343ffcc3936SMike Snitzer */ 3344ffcc3936SMike Snitzer 3345ffcc3936SMike Snitzer void dm_internal_suspend_fast(struct mapped_device *md) 3346ffcc3936SMike Snitzer { 3347ffcc3936SMike Snitzer mutex_lock(&md->suspend_lock); 3348ffcc3936SMike Snitzer if (dm_suspended_md(md) || dm_suspended_internally_md(md)) 3349fd2ed4d2SMikulas Patocka return; 3350fd2ed4d2SMikulas Patocka 3351fd2ed4d2SMikulas Patocka set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 3352fd2ed4d2SMikulas Patocka synchronize_srcu(&md->io_barrier); 3353fd2ed4d2SMikulas Patocka flush_workqueue(md->wq); 3354fd2ed4d2SMikulas Patocka dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); 3355fd2ed4d2SMikulas Patocka } 3356b735fedeSMikulas Patocka EXPORT_SYMBOL_GPL(dm_internal_suspend_fast); 3357fd2ed4d2SMikulas Patocka 3358ffcc3936SMike Snitzer void dm_internal_resume_fast(struct mapped_device *md) 3359fd2ed4d2SMikulas Patocka { 3360ffcc3936SMike Snitzer if (dm_suspended_md(md) || dm_suspended_internally_md(md)) 3361fd2ed4d2SMikulas Patocka goto done; 3362fd2ed4d2SMikulas Patocka 3363fd2ed4d2SMikulas Patocka dm_queue_flush(md); 3364fd2ed4d2SMikulas Patocka 3365fd2ed4d2SMikulas Patocka done: 3366fd2ed4d2SMikulas Patocka mutex_unlock(&md->suspend_lock); 3367fd2ed4d2SMikulas Patocka } 3368b735fedeSMikulas Patocka EXPORT_SYMBOL_GPL(dm_internal_resume_fast); 3369fd2ed4d2SMikulas Patocka 33701da177e4SLinus Torvalds /*----------------------------------------------------------------- 33711da177e4SLinus Torvalds * Event notification. 33721da177e4SLinus Torvalds *---------------------------------------------------------------*/ 33733abf85b5SPeter Rajnoha int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, 337460935eb2SMilan Broz unsigned cookie) 337569267a30SAlasdair G Kergon { 337660935eb2SMilan Broz char udev_cookie[DM_COOKIE_LENGTH]; 337760935eb2SMilan Broz char *envp[] = { udev_cookie, NULL }; 337860935eb2SMilan Broz 337960935eb2SMilan Broz if (!cookie) 33803abf85b5SPeter Rajnoha return kobject_uevent(&disk_to_dev(md->disk)->kobj, action); 338160935eb2SMilan Broz else { 338260935eb2SMilan Broz snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u", 338360935eb2SMilan Broz DM_COOKIE_ENV_VAR_NAME, cookie); 33843abf85b5SPeter Rajnoha return kobject_uevent_env(&disk_to_dev(md->disk)->kobj, 33853abf85b5SPeter Rajnoha action, envp); 338660935eb2SMilan Broz } 338769267a30SAlasdair G Kergon } 338869267a30SAlasdair G Kergon 33897a8c3d3bSMike Anderson uint32_t dm_next_uevent_seq(struct mapped_device *md) 33907a8c3d3bSMike Anderson { 33917a8c3d3bSMike Anderson return atomic_add_return(1, &md->uevent_seq); 33927a8c3d3bSMike Anderson } 33937a8c3d3bSMike Anderson 33941da177e4SLinus Torvalds uint32_t dm_get_event_nr(struct mapped_device *md) 33951da177e4SLinus Torvalds { 33961da177e4SLinus Torvalds return atomic_read(&md->event_nr); 33971da177e4SLinus Torvalds } 33981da177e4SLinus Torvalds 33991da177e4SLinus Torvalds int dm_wait_event(struct mapped_device *md, int event_nr) 34001da177e4SLinus Torvalds { 34011da177e4SLinus Torvalds return wait_event_interruptible(md->eventq, 34021da177e4SLinus Torvalds (event_nr != atomic_read(&md->event_nr))); 34031da177e4SLinus Torvalds } 34041da177e4SLinus Torvalds 34057a8c3d3bSMike Anderson void dm_uevent_add(struct mapped_device *md, struct list_head *elist) 34067a8c3d3bSMike Anderson { 34077a8c3d3bSMike Anderson unsigned long flags; 34087a8c3d3bSMike Anderson 34097a8c3d3bSMike Anderson spin_lock_irqsave(&md->uevent_lock, flags); 34107a8c3d3bSMike Anderson list_add(elist, &md->uevent_list); 
34117a8c3d3bSMike Anderson spin_unlock_irqrestore(&md->uevent_lock, flags); 34127a8c3d3bSMike Anderson } 34137a8c3d3bSMike Anderson 34141da177e4SLinus Torvalds /* 34151da177e4SLinus Torvalds * The gendisk is only valid as long as you have a reference 34161da177e4SLinus Torvalds * count on 'md'. 34171da177e4SLinus Torvalds */ 34181da177e4SLinus Torvalds struct gendisk *dm_disk(struct mapped_device *md) 34191da177e4SLinus Torvalds { 34201da177e4SLinus Torvalds return md->disk; 34211da177e4SLinus Torvalds } 342265ff5b7dSSami Tolvanen EXPORT_SYMBOL_GPL(dm_disk); 34231da177e4SLinus Torvalds 3424784aae73SMilan Broz struct kobject *dm_kobject(struct mapped_device *md) 3425784aae73SMilan Broz { 34262995fa78SMikulas Patocka return &md->kobj_holder.kobj; 3427784aae73SMilan Broz } 3428784aae73SMilan Broz 3429784aae73SMilan Broz struct mapped_device *dm_get_from_kobject(struct kobject *kobj) 3430784aae73SMilan Broz { 3431784aae73SMilan Broz struct mapped_device *md; 3432784aae73SMilan Broz 34332995fa78SMikulas Patocka md = container_of(kobj, struct mapped_device, kobj_holder.kobj); 3434784aae73SMilan Broz 34354d89b7b4SMilan Broz if (test_bit(DMF_FREEING, &md->flags) || 3436432a212cSMike Anderson dm_deleting_md(md)) 34374d89b7b4SMilan Broz return NULL; 34384d89b7b4SMilan Broz 3439784aae73SMilan Broz dm_get(md); 3440784aae73SMilan Broz return md; 3441784aae73SMilan Broz } 3442784aae73SMilan Broz 34434f186f8bSKiyoshi Ueda int dm_suspended_md(struct mapped_device *md) 34441da177e4SLinus Torvalds { 34451da177e4SLinus Torvalds return test_bit(DMF_SUSPENDED, &md->flags); 34461da177e4SLinus Torvalds } 34471da177e4SLinus Torvalds 3448ffcc3936SMike Snitzer int dm_suspended_internally_md(struct mapped_device *md) 3449ffcc3936SMike Snitzer { 3450ffcc3936SMike Snitzer return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 3451ffcc3936SMike Snitzer } 3452ffcc3936SMike Snitzer 34532c140a24SMikulas Patocka int dm_test_deferred_remove_flag(struct mapped_device *md) 34542c140a24SMikulas Patocka { 34552c140a24SMikulas Patocka return test_bit(DMF_DEFERRED_REMOVE, &md->flags); 34562c140a24SMikulas Patocka } 34572c140a24SMikulas Patocka 345864dbce58SKiyoshi Ueda int dm_suspended(struct dm_target *ti) 345964dbce58SKiyoshi Ueda { 3460ecdb2e25SKiyoshi Ueda return dm_suspended_md(dm_table_get_md(ti->table)); 346164dbce58SKiyoshi Ueda } 346264dbce58SKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_suspended); 346364dbce58SKiyoshi Ueda 34642e93ccc1SKiyoshi Ueda int dm_noflush_suspending(struct dm_target *ti) 34652e93ccc1SKiyoshi Ueda { 3466ecdb2e25SKiyoshi Ueda return __noflush_suspending(dm_table_get_md(ti->table)); 34672e93ccc1SKiyoshi Ueda } 34682e93ccc1SKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_noflush_suspending); 34692e93ccc1SKiyoshi Ueda 347078d8e58aSMike Snitzer struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned type, 347178d8e58aSMike Snitzer unsigned integrity, unsigned per_bio_data_size) 3472e6ee8c0bSKiyoshi Ueda { 347378d8e58aSMike Snitzer struct dm_md_mempools *pools = kzalloc(sizeof(*pools), GFP_KERNEL); 347478d8e58aSMike Snitzer struct kmem_cache *cachep = NULL; 347578d8e58aSMike Snitzer unsigned int pool_size = 0; 34765f015204SJun'ichi Nomura unsigned int front_pad; 3477e6ee8c0bSKiyoshi Ueda 3478e6ee8c0bSKiyoshi Ueda if (!pools) 34794e6e36c3SMike Snitzer return NULL; 3480e6ee8c0bSKiyoshi Ueda 348178d8e58aSMike Snitzer type = filter_md_type(type, md); 348217e149b8SMike Snitzer 348378d8e58aSMike Snitzer switch (type) { 348478d8e58aSMike Snitzer case DM_TYPE_BIO_BASED: 348578d8e58aSMike Snitzer cachep = 
_io_cache; 348678d8e58aSMike Snitzer pool_size = dm_get_reserved_bio_based_ios(); 348778d8e58aSMike Snitzer front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone); 348878d8e58aSMike Snitzer break; 348978d8e58aSMike Snitzer case DM_TYPE_REQUEST_BASED: 349078d8e58aSMike Snitzer cachep = _rq_tio_cache; 349178d8e58aSMike Snitzer pool_size = dm_get_reserved_rq_based_ios(); 349278d8e58aSMike Snitzer pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache); 349378d8e58aSMike Snitzer if (!pools->rq_pool) 349478d8e58aSMike Snitzer goto out; 349578d8e58aSMike Snitzer /* fall through to setup remaining rq-based pools */ 349678d8e58aSMike Snitzer case DM_TYPE_MQ_REQUEST_BASED: 349778d8e58aSMike Snitzer if (!pool_size) 349878d8e58aSMike Snitzer pool_size = dm_get_reserved_rq_based_ios(); 349978d8e58aSMike Snitzer front_pad = offsetof(struct dm_rq_clone_bio_info, clone); 350078d8e58aSMike Snitzer /* per_bio_data_size is not used. See __bind_mempools(). */ 350178d8e58aSMike Snitzer WARN_ON(per_bio_data_size != 0); 350278d8e58aSMike Snitzer break; 350378d8e58aSMike Snitzer default: 350478d8e58aSMike Snitzer BUG(); 350578d8e58aSMike Snitzer } 350678d8e58aSMike Snitzer 350778d8e58aSMike Snitzer if (cachep) { 350878d8e58aSMike Snitzer pools->io_pool = mempool_create_slab_pool(pool_size, cachep); 3509e6ee8c0bSKiyoshi Ueda if (!pools->io_pool) 35105f015204SJun'ichi Nomura goto out; 351178d8e58aSMike Snitzer } 3512e6ee8c0bSKiyoshi Ueda 35133d8aab2dSJunichi Nomura pools->bs = bioset_create_nobvec(pool_size, front_pad); 3514e6ee8c0bSKiyoshi Ueda if (!pools->bs) 35155f015204SJun'ichi Nomura goto out; 3516e6ee8c0bSKiyoshi Ueda 3517a91a2785SMartin K. Petersen if (integrity && bioset_integrity_create(pools->bs, pool_size)) 35185f015204SJun'ichi Nomura goto out; 3519a91a2785SMartin K. 
Petersen 3520e6ee8c0bSKiyoshi Ueda return pools; 352178d8e58aSMike Snitzer 35225f015204SJun'ichi Nomura out: 35235f015204SJun'ichi Nomura dm_free_md_mempools(pools); 3524e6ee8c0bSKiyoshi Ueda 35254e6e36c3SMike Snitzer return NULL; 3526e6ee8c0bSKiyoshi Ueda } 3527e6ee8c0bSKiyoshi Ueda 3528e6ee8c0bSKiyoshi Ueda void dm_free_md_mempools(struct dm_md_mempools *pools) 3529e6ee8c0bSKiyoshi Ueda { 3530e6ee8c0bSKiyoshi Ueda if (!pools) 3531e6ee8c0bSKiyoshi Ueda return; 3532e6ee8c0bSKiyoshi Ueda 3533e6ee8c0bSKiyoshi Ueda mempool_destroy(pools->io_pool); 35341ae49ea2SMike Snitzer mempool_destroy(pools->rq_pool); 35351ae49ea2SMike Snitzer 3536e6ee8c0bSKiyoshi Ueda if (pools->bs) 3537e6ee8c0bSKiyoshi Ueda bioset_free(pools->bs); 3538e6ee8c0bSKiyoshi Ueda 3539e6ee8c0bSKiyoshi Ueda kfree(pools); 3540e6ee8c0bSKiyoshi Ueda } 3541e6ee8c0bSKiyoshi Ueda 354271cdb697SChristoph Hellwig static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key, 354371cdb697SChristoph Hellwig u32 flags) 354471cdb697SChristoph Hellwig { 354571cdb697SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data; 354671cdb697SChristoph Hellwig const struct pr_ops *ops; 354771cdb697SChristoph Hellwig fmode_t mode; 3548956a4025SMike Snitzer int r; 354971cdb697SChristoph Hellwig 3550956a4025SMike Snitzer r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); 355171cdb697SChristoph Hellwig if (r < 0) 355271cdb697SChristoph Hellwig return r; 355371cdb697SChristoph Hellwig 355471cdb697SChristoph Hellwig ops = bdev->bd_disk->fops->pr_ops; 355571cdb697SChristoph Hellwig if (ops && ops->pr_register) 355671cdb697SChristoph Hellwig r = ops->pr_register(bdev, old_key, new_key, flags); 355771cdb697SChristoph Hellwig else 355871cdb697SChristoph Hellwig r = -EOPNOTSUPP; 355971cdb697SChristoph Hellwig 3560956a4025SMike Snitzer bdput(bdev); 356171cdb697SChristoph Hellwig return r; 356271cdb697SChristoph Hellwig } 356371cdb697SChristoph Hellwig 356471cdb697SChristoph Hellwig static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type, 356571cdb697SChristoph Hellwig u32 flags) 356671cdb697SChristoph Hellwig { 356771cdb697SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data; 356871cdb697SChristoph Hellwig const struct pr_ops *ops; 356971cdb697SChristoph Hellwig fmode_t mode; 3570956a4025SMike Snitzer int r; 357171cdb697SChristoph Hellwig 3572956a4025SMike Snitzer r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); 357371cdb697SChristoph Hellwig if (r < 0) 357471cdb697SChristoph Hellwig return r; 357571cdb697SChristoph Hellwig 357671cdb697SChristoph Hellwig ops = bdev->bd_disk->fops->pr_ops; 357771cdb697SChristoph Hellwig if (ops && ops->pr_reserve) 357871cdb697SChristoph Hellwig r = ops->pr_reserve(bdev, key, type, flags); 357971cdb697SChristoph Hellwig else 358071cdb697SChristoph Hellwig r = -EOPNOTSUPP; 358171cdb697SChristoph Hellwig 3582956a4025SMike Snitzer bdput(bdev); 358371cdb697SChristoph Hellwig return r; 358471cdb697SChristoph Hellwig } 358571cdb697SChristoph Hellwig 358671cdb697SChristoph Hellwig static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type) 358771cdb697SChristoph Hellwig { 358871cdb697SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data; 358971cdb697SChristoph Hellwig const struct pr_ops *ops; 359071cdb697SChristoph Hellwig fmode_t mode; 3591956a4025SMike Snitzer int r; 359271cdb697SChristoph Hellwig 3593956a4025SMike Snitzer r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); 359471cdb697SChristoph Hellwig if (r < 0) 
358671cdb697SChristoph Hellwig static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
358771cdb697SChristoph Hellwig {
358871cdb697SChristoph Hellwig 	struct mapped_device *md = bdev->bd_disk->private_data;
358971cdb697SChristoph Hellwig 	const struct pr_ops *ops;
359071cdb697SChristoph Hellwig 	fmode_t mode;
3591956a4025SMike Snitzer 	int r;
359271cdb697SChristoph Hellwig
3593956a4025SMike Snitzer 	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
359471cdb697SChristoph Hellwig 	if (r < 0)
359571cdb697SChristoph Hellwig 		return r;
359671cdb697SChristoph Hellwig
359771cdb697SChristoph Hellwig 	ops = bdev->bd_disk->fops->pr_ops;
359871cdb697SChristoph Hellwig 	if (ops && ops->pr_release)
359971cdb697SChristoph Hellwig 		r = ops->pr_release(bdev, key, type);
360071cdb697SChristoph Hellwig 	else
360171cdb697SChristoph Hellwig 		r = -EOPNOTSUPP;
360271cdb697SChristoph Hellwig
3603956a4025SMike Snitzer 	bdput(bdev);
360471cdb697SChristoph Hellwig 	return r;
360571cdb697SChristoph Hellwig }
360671cdb697SChristoph Hellwig
360771cdb697SChristoph Hellwig static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
360871cdb697SChristoph Hellwig 		enum pr_type type, bool abort)
360971cdb697SChristoph Hellwig {
361071cdb697SChristoph Hellwig 	struct mapped_device *md = bdev->bd_disk->private_data;
361171cdb697SChristoph Hellwig 	const struct pr_ops *ops;
361271cdb697SChristoph Hellwig 	fmode_t mode;
3613956a4025SMike Snitzer 	int r;
361471cdb697SChristoph Hellwig
3615956a4025SMike Snitzer 	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
361671cdb697SChristoph Hellwig 	if (r < 0)
361771cdb697SChristoph Hellwig 		return r;
361871cdb697SChristoph Hellwig
361971cdb697SChristoph Hellwig 	ops = bdev->bd_disk->fops->pr_ops;
362071cdb697SChristoph Hellwig 	if (ops && ops->pr_preempt)
362171cdb697SChristoph Hellwig 		r = ops->pr_preempt(bdev, old_key, new_key, type, abort);
362271cdb697SChristoph Hellwig 	else
362371cdb697SChristoph Hellwig 		r = -EOPNOTSUPP;
362471cdb697SChristoph Hellwig
3625956a4025SMike Snitzer 	bdput(bdev);
362671cdb697SChristoph Hellwig 	return r;
362771cdb697SChristoph Hellwig }
362871cdb697SChristoph Hellwig
362971cdb697SChristoph Hellwig static int dm_pr_clear(struct block_device *bdev, u64 key)
363071cdb697SChristoph Hellwig {
363171cdb697SChristoph Hellwig 	struct mapped_device *md = bdev->bd_disk->private_data;
363271cdb697SChristoph Hellwig 	const struct pr_ops *ops;
363371cdb697SChristoph Hellwig 	fmode_t mode;
3634956a4025SMike Snitzer 	int r;
363571cdb697SChristoph Hellwig
3636956a4025SMike Snitzer 	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
363771cdb697SChristoph Hellwig 	if (r < 0)
363871cdb697SChristoph Hellwig 		return r;
363971cdb697SChristoph Hellwig
364071cdb697SChristoph Hellwig 	ops = bdev->bd_disk->fops->pr_ops;
364171cdb697SChristoph Hellwig 	if (ops && ops->pr_clear)
364271cdb697SChristoph Hellwig 		r = ops->pr_clear(bdev, key);
364371cdb697SChristoph Hellwig 	else
364471cdb697SChristoph Hellwig 		r = -EOPNOTSUPP;
364571cdb697SChristoph Hellwig
3646956a4025SMike Snitzer 	bdput(bdev);
364771cdb697SChristoph Hellwig 	return r;
364871cdb697SChristoph Hellwig }
364971cdb697SChristoph Hellwig
365071cdb697SChristoph Hellwig static const struct pr_ops dm_pr_ops = {
365171cdb697SChristoph Hellwig 	.pr_register = dm_pr_register,
365271cdb697SChristoph Hellwig 	.pr_reserve = dm_pr_reserve,
365371cdb697SChristoph Hellwig 	.pr_release = dm_pr_release,
365471cdb697SChristoph Hellwig 	.pr_preempt = dm_pr_preempt,
365571cdb697SChristoph Hellwig 	.pr_clear = dm_pr_clear,
365671cdb697SChristoph Hellwig };
365771cdb697SChristoph Hellwig
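/*
 * Example sketch, not from this file: the handlers above are reached
 * from userspace through the persistent-reservation ioctls declared in
 * the uapi header <linux/pr.h>.  Assuming fd is an open descriptor on a
 * dm block device and KEY is the caller's reservation key:
 *
 *	struct pr_registration reg = { .old_key = 0, .new_key = KEY };
 *	ioctl(fd, IOC_PR_REGISTER, &reg);	(ends up in dm_pr_register())
 *
 * The block layer unpacks the argument struct and calls the matching
 * pr_ops method on the disk, which for dm devices is dm_pr_ops below.
 */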
365883d5cde4SAlexey Dobriyan static const struct block_device_operations dm_blk_dops = {
36591da177e4SLinus Torvalds 	.open = dm_blk_open,
36601da177e4SLinus Torvalds 	.release = dm_blk_close,
3661aa129a22SMilan Broz 	.ioctl = dm_blk_ioctl,
36623ac51e74SDarrick J. Wong 	.getgeo = dm_blk_getgeo,
366371cdb697SChristoph Hellwig 	.pr_ops = &dm_pr_ops,
36641da177e4SLinus Torvalds 	.owner = THIS_MODULE
36651da177e4SLinus Torvalds };
36661da177e4SLinus Torvalds
36671da177e4SLinus Torvalds /*
36681da177e4SLinus Torvalds  * module hooks
36691da177e4SLinus Torvalds  */
36701da177e4SLinus Torvalds module_init(dm_init);
36711da177e4SLinus Torvalds module_exit(dm_exit);
36721da177e4SLinus Torvalds
36731da177e4SLinus Torvalds module_param(major, uint, 0);
36741da177e4SLinus Torvalds MODULE_PARM_DESC(major, "The major number of the device mapper");
3675f4790826SMike Snitzer
3676e8603136SMike Snitzer module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
3677e8603136SMike Snitzer MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
3678e8603136SMike Snitzer
3679f4790826SMike Snitzer module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
3680f4790826SMike Snitzer MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");
3681f4790826SMike Snitzer
368217e149b8SMike Snitzer module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
368317e149b8SMike Snitzer MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");
368417e149b8SMike Snitzer
36851da177e4SLinus Torvalds MODULE_DESCRIPTION(DM_NAME " driver");
36861da177e4SLinus Torvalds MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
36871da177e4SLinus Torvalds MODULE_LICENSE("GPL");
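/*
 * Usage sketch (values illustrative, module name assumed to be dm_mod):
 * the parameters above are set at module load time, and the ones
 * registered with S_IWUSR can also be adjusted at runtime via sysfs:
 *
 *	modprobe dm_mod reserved_bio_based_ios=32 use_blk_mq=1
 *	echo 512 > /sys/module/dm_mod/parameters/reserved_rq_based_ios
 */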