/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/kthread.h>

#include <trace/events/block.h>

#define DM_MSG_PREFIX "core"

#ifdef CONFIG_PRINTK
/*
 * ratelimit state to be used in DMXXX_LIMIT().
 */
DEFINE_RATELIMIT_STATE(dm_ratelimit_state,
		       DEFAULT_RATELIMIT_INTERVAL,
		       DEFAULT_RATELIMIT_BURST);
EXPORT_SYMBOL(dm_ratelimit_state);
#endif

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_IDR(_minor_idr);

static DEFINE_SPINLOCK(_minor_lock);

static void do_deferred_remove(struct work_struct *w);

static DECLARE_WORK(deferred_remove_work, do_deferred_remove);

static struct workqueue_struct *deferred_remove_workqueue;

/*
 * For bio-based dm.
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	atomic_t io_count;
	struct bio *bio;
	unsigned long start_time;
	spinlock_t endio_lock;
	struct dm_stats_aux stats_aux;
};
/*
 * For request-based dm.
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
	struct mapped_device *md;
	struct dm_target *ti;
	struct request *orig, *clone;
	struct kthread_work work;
	int error;
	union map_info info;
};

/*
 * For request-based dm - the bio clones we allocate are embedded in these
 * structs.
 *
 * We allocate these with bio_alloc_bioset, using the front_pad parameter when
 * the bioset is created - this means the bio has to come at the end of the
 * struct.
 */
struct dm_rq_clone_bio_info {
	struct bio *orig;
	struct dm_rq_target_io *tio;
	struct bio clone;
};

union map_info *dm_get_rq_mapinfo(struct request *rq)
{
	if (rq && rq->end_io_data)
		return &((struct dm_rq_target_io *)rq->end_io_data)->info;
	return NULL;
}
EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);
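/*
 * Illustrative sketch (not a helper in this file) of the recovery step the
 * layout above makes possible: because the clone bio is the last member of
 * dm_rq_clone_bio_info, a completion handler that is handed only the clone
 * can get back to the wrapper struct with container_of(), e.g.:
 *
 *	struct dm_rq_clone_bio_info *info =
 *		container_of(clone, struct dm_rq_clone_bio_info, clone);
 *
 * end_clone_bio() below uses exactly this pattern.
 */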
#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_MERGE_IS_OPTIONAL 6
#define DMF_DEFERRED_REMOVE 7
#define DMF_SUSPENDED_INTERNALLY 8

/*
 * A dummy definition to make RCU happy.
 * struct dm_table should never be dereferenced in this file.
 */
struct dm_table {
	int undefined__;
};

/*
 * Work processed by per-device workqueue.
 */
struct mapped_device {
	struct srcu_struct io_barrier;
	struct mutex suspend_lock;
	atomic_t holders;
	atomic_t open_count;

	/*
	 * The current mapping.
	 * Use dm_get_live_table{_fast} or take suspend_lock for
	 * dereference.
	 */
	struct dm_table __rcu *map;

	struct list_head table_devices;
	struct mutex table_devices_lock;

	unsigned long flags;

	struct request_queue *queue;
	unsigned type;
	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;

	struct target_type *immutable_target_type;

	struct gendisk *disk;
	char name[16];

	void *interface_ptr;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	atomic_t pending[2];
	wait_queue_head_t wait;
	struct work_struct work;
	struct bio_list deferred;
	spinlock_t deferred_lock;

	/*
	 * Processing queue (flush)
	 */
	struct workqueue_struct *wq;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;
	mempool_t *rq_pool;

	struct bio_set *bs;

	/*
	 * Event handling.
	 */
	atomic_t event_nr;
	wait_queue_head_t eventq;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/*
	 * freeze/thaw support requires holding onto a super block
	 */
	struct super_block *frozen_sb;
	struct block_device *bdev;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/* kobject and completion */
	struct dm_kobject_holder kobj_holder;

	/* zero-length flush that will be cloned and submitted to targets */
	struct bio flush_bio;

	/* the number of internal suspends */
	unsigned internal_suspend_count;

	struct dm_stats stats;

	struct kthread_worker kworker;
	struct task_struct *kworker_task;
};
/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
	mempool_t *io_pool;
	mempool_t *rq_pool;
	struct bio_set *bs;
};

struct table_device {
	struct list_head list;
	atomic_t count;
	struct dm_dev dm_dev;
};

#define RESERVED_BIO_BASED_IOS		16
#define RESERVED_REQUEST_BASED_IOS	256
#define RESERVED_MAX_IOS		1024
static struct kmem_cache *_io_cache;
static struct kmem_cache *_rq_tio_cache;
static struct kmem_cache *_rq_cache;

/*
 * Bio-based DM's mempools' reserved IOs set by the user.
 */
static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;

/*
 * Request-based DM's mempools' reserved IOs set by the user.
 */
static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;

static unsigned __dm_get_module_param(unsigned *module_param,
				      unsigned def, unsigned max)
{
	unsigned param = ACCESS_ONCE(*module_param);
	unsigned modified_param = 0;

	if (!param)
		modified_param = def;
	else if (param > max)
		modified_param = max;

	if (modified_param) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned dm_get_reserved_bio_based_ios(void)
{
	return __dm_get_module_param(&reserved_bio_based_ios,
				     RESERVED_BIO_BASED_IOS, RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);

unsigned dm_get_reserved_rq_based_ios(void)
{
	return __dm_get_module_param(&reserved_rq_based_ios,
				     RESERVED_REQUEST_BASED_IOS, RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);
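/*
 * A worked example of the clamping above, using the constants in this file:
 * if reserved_rq_based_ios is set to 0, dm_get_reserved_rq_based_ios()
 * returns the default 256; if it is set to 4096, the value is clamped to
 * RESERVED_MAX_IOS (1024). The cmpxchg() writes the clamped value back so
 * a racing reader settles on the same result.
 */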
static int __init local_init(void)
{
	int r = -ENOMEM;

	/* allocate a slab for the dm_ios */
	_io_cache = KMEM_CACHE(dm_io, 0);
	if (!_io_cache)
		return r;

	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
	if (!_rq_tio_cache)
		goto out_free_io_cache;

	_rq_cache = kmem_cache_create("dm_clone_request", sizeof(struct request),
				      __alignof__(struct request), 0, NULL);
	if (!_rq_cache)
		goto out_free_rq_tio_cache;

	r = dm_uevent_init();
	if (r)
		goto out_free_rq_cache;

	deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
	if (!deferred_remove_workqueue) {
		r = -ENOMEM;
		goto out_uevent_exit;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_free_workqueue;

	if (!_major)
		_major = r;

	return 0;

out_free_workqueue:
	destroy_workqueue(deferred_remove_workqueue);
out_uevent_exit:
	dm_uevent_exit();
out_free_rq_cache:
	kmem_cache_destroy(_rq_cache);
out_free_rq_tio_cache:
	kmem_cache_destroy(_rq_tio_cache);
out_free_io_cache:
	kmem_cache_destroy(_io_cache);

	return r;
}

static void local_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(deferred_remove_workqueue);

	kmem_cache_destroy(_rq_cache);
	kmem_cache_destroy(_rq_tio_cache);
	kmem_cache_destroy(_io_cache);
	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_io_init,
	dm_kcopyd_init,
	dm_interface_init,
	dm_statistics_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_io_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
	dm_statistics_exit,
};
static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();

	/*
	 * Should be empty by this point.
	 */
	idr_destroy(&_minor_idr);
}

/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{
	return test_bit(DMF_DELETING, &md->flags);
}

static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);
out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}
static void dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = disk->private_data;
	if (WARN_ON(!md))
		goto out;

	if (atomic_dec_and_test(&md->open_count) &&
	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
		queue_work(deferred_remove_workqueue, &deferred_remove_work);

	dm_put(md);
out:
	spin_unlock(&_minor_lock);
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md)) {
		r = -EBUSY;
		if (mark_deferred)
			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
		r = -EEXIST;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

int dm_cancel_deferred_remove(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (test_bit(DMF_DELETING, &md->flags))
		r = -EBUSY;
	else
		clear_bit(DMF_DEFERRED_REMOVE, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static void do_deferred_remove(struct work_struct *w)
{
	dm_deferred_remove();
}

sector_t dm_get_size(struct mapped_device *md)
{
	return get_capacity(md->disk);
}

struct request_queue *dm_get_md_queue(struct mapped_device *md)
{
	return md->queue;
}
struct dm_stats *dm_get_stats(struct mapped_device *md)
{
	return &md->stats;
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	int srcu_idx;
	struct dm_table *map;
	struct dm_target *tgt;
	int r = -ENOTTY;

retry:
	map = dm_get_live_table(md, &srcu_idx);

	if (!map || !dm_table_get_size(map))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		goto out;

	tgt = dm_table_get_target(map, 0);
	if (!tgt->type->ioctl)
		goto out;

	if (dm_suspended_md(md)) {
		r = -EAGAIN;
		goto out;
	}

	r = tgt->type->ioctl(tgt, cmd, arg);

out:
	dm_put_live_table(md, srcu_idx);

	if (r == -ENOTCONN) {
		msleep(10);
		goto retry;
	}

	return r;
}

static struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
	bio_put(&tio->clone);
}

static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
					    gfp_t gfp_mask)
{
	return mempool_alloc(md->io_pool, gfp_mask);
}

static void free_rq_tio(struct dm_rq_target_io *tio)
{
	mempool_free(tio, tio->md->io_pool);
}
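/*
 * Note on the allocators above: alloc_io()/free_io() and
 * alloc_rq_tio()/free_rq_tio() draw from the same md->io_pool. A given
 * mapped_device is either bio-based or request-based, so the pool only
 * ever holds one of the two object types, sized accordingly when the
 * mempools are created at table load time.
 */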
static struct request *alloc_clone_request(struct mapped_device *md,
					   gfp_t gfp_mask)
{
	return mempool_alloc(md->rq_pool, gfp_mask);
}

static void free_clone_request(struct mapped_device *md, struct request *rq)
{
	mempool_free(rq, md->rq_pool);
}

static int md_in_flight(struct mapped_device *md)
{
	return atomic_read(&md->pending[READ]) +
	       atomic_read(&md->pending[WRITE]);
}

static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	int cpu;
	int rw = bio_data_dir(bio);

	io->start_time = jiffies;

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_unlock();
	atomic_set(&dm_disk(md)->part0.in_flight[rw],
		atomic_inc_return(&md->pending[rw]));

	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
				    bio_sectors(bio), false, 0, &io->stats_aux);
}

static void end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	unsigned long duration = jiffies - io->start_time;
	int pending;
	int rw = bio_data_dir(bio);

	generic_end_io_acct(rw, &dm_disk(md)->part0, io->start_time);

	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
				    bio_sectors(bio), true, duration, &io->stats_aux);

	/*
	 * After this is decremented the bio must not be touched if it is
	 * a flush.
	 */
	pending = atomic_dec_return(&md->pending[rw]);
	atomic_set(&dm_disk(md)->part0.in_flight[rw], pending);
	pending += atomic_read(&md->pending[rw^0x1]);

	/* nudge anyone waiting on suspend queue */
	if (!pending)
		wake_up(&md->wait);
}
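/*
 * The md->pending[] counters maintained above are what quiescing waits on:
 * start_io_acct() and end_io_acct() keep md->pending[rw] and the generic
 * part0 in_flight counters in step, and the wake_up(&md->wait) lets anyone
 * sleeping on md->wait (e.g. a suspend waiting for outstanding I/O to
 * drain) re-check md_in_flight().
 */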
/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&md->deferred_lock, flags);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irqrestore(&md->deferred_lock, flags);
	queue_work(md->wq, &md->work);
}

/*
 * Everyone (including functions in this file) should use this function to
 * access the md->map field, and make sure they call dm_put_live_table()
 * when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
{
	*srcu_idx = srcu_read_lock(&md->io_barrier);

	return srcu_dereference(md->map, &md->io_barrier);
}

void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
{
	srcu_read_unlock(&md->io_barrier, srcu_idx);
}

void dm_sync_table(struct mapped_device *md)
{
	synchronize_srcu(&md->io_barrier);
	synchronize_rcu_expedited();
}

/*
 * A fast alternative to dm_get_live_table/dm_put_live_table.
 * The caller must not block between these two functions.
 */
static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
{
	rcu_read_lock();
	return rcu_dereference(md->map);
}

static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
{
	rcu_read_unlock();
}
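/*
 * A minimal sketch of the calling convention for the helpers above
 * (illustrative only, not a function in this file):
 *
 *	int srcu_idx;
 *	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 *
 *	if (map) {
 *		... look up targets, submit I/O; SRCU readers may sleep ...
 *	}
 *	dm_put_live_table(md, srcu_idx);
 *
 * The _fast variant uses plain RCU instead of SRCU, so its caller must not
 * block between dm_get_live_table_fast() and dm_put_live_table_fast().
 */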
/*
 * Open a table device so we can use it as a map destination.
 */
static int open_table_device(struct table_device *td, dev_t dev,
			     struct mapped_device *md)
{
	static char *_claim_ptr = "I belong to device-mapper";
	struct block_device *bdev;

	int r;

	BUG_ON(td->dm_dev.bdev);

	bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _claim_ptr);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	r = bd_link_disk_holder(bdev, dm_disk(md));
	if (r) {
		blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
		return r;
	}

	td->dm_dev.bdev = bdev;
	return 0;
}

/*
 * Close a table device that we've been using.
 */
static void close_table_device(struct table_device *td, struct mapped_device *md)
{
	if (!td->dm_dev.bdev)
		return;

	bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
	blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
	td->dm_dev.bdev = NULL;
}

static struct table_device *find_table_device(struct list_head *l, dev_t dev,
					      fmode_t mode) {
	struct table_device *td;

	list_for_each_entry(td, l, list)
		if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
			return td;

	return NULL;
}

int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
			struct dm_dev **result) {
	int r;
	struct table_device *td;

	mutex_lock(&md->table_devices_lock);
	td = find_table_device(&md->table_devices, dev, mode);
	if (!td) {
		td = kmalloc(sizeof(*td), GFP_KERNEL);
		if (!td) {
			mutex_unlock(&md->table_devices_lock);
			return -ENOMEM;
		}

		td->dm_dev.mode = mode;
		td->dm_dev.bdev = NULL;

		if ((r = open_table_device(td, dev, md))) {
			mutex_unlock(&md->table_devices_lock);
			kfree(td);
			return r;
		}

		format_dev_t(td->dm_dev.name, dev);

		atomic_set(&td->count, 0);
		list_add(&td->list, &md->table_devices);
	}
	atomic_inc(&td->count);
	mutex_unlock(&md->table_devices_lock);

	*result = &td->dm_dev;
	return 0;
}
EXPORT_SYMBOL_GPL(dm_get_table_device);

void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
{
	struct table_device *td = container_of(d, struct table_device, dm_dev);

	mutex_lock(&md->table_devices_lock);
	if (atomic_dec_and_test(&td->count)) {
		close_table_device(td, md);
		list_del(&td->list);
		kfree(td);
	}
	mutex_unlock(&md->table_devices_lock);
}
EXPORT_SYMBOL(dm_put_table_device);

static void free_table_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct table_device *td = list_entry(tmp, struct table_device, list);

		DMWARN("dm_destroy: %s still exists with %d references",
		       td->dm_dev.name, atomic_read(&td->count));
		kfree(td);
	}
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}
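/*
 * The capacity check in dm_set_geometry() above is plain CHS arithmetic.
 * For instance, a geometry of 1024 cylinders, 255 heads and 63 sectors
 * gives sz = 1024 * 255 * 63 = 16450560 sectors (roughly 7.8 GiB with
 * 512-byte sectors), and a geo->start beyond that is rejected with
 * -EINVAL.
 */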
/*-----------------------------------------------------------------
 * CRUD START:
 *   A more elegant soln is in the works that uses the queue
 *   merge fn, unfortunately there are a couple of changes to
 *   the block layer that I want to make for this.  So in the
 *   interests of getting something for people to use I give
 *   you this clearly demarcated crap.
 *---------------------------------------------------------------*/

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, int error)
{
	unsigned long flags;
	int io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;

	/* Push-back supersedes any I/O errors */
	if (unlikely(error)) {
		spin_lock_irqsave(&io->endio_lock, flags);
		if (!(io->error > 0 && __noflush_suspending(md)))
			io->error = error;
		spin_unlock_irqrestore(&io->endio_lock, flags);
	}

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->error == DM_ENDIO_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md))
				bio_list_add_head(&md->deferred, io->bio);
			else
				/* noflush suspend was interrupted. */
				io->error = -EIO;
			spin_unlock_irqrestore(&md->deferred_lock, flags);
		}

		io_error = io->error;
		bio = io->bio;
		end_io_acct(io);
		free_io(md, io);

		if (io_error == DM_ENDIO_REQUEUE)
			return;

		if ((bio->bi_rw & REQ_FLUSH) && bio->bi_iter.bi_size) {
			/*
			 * Preflush done for flush with data, reissue
			 * without REQ_FLUSH.
			 */
			bio->bi_rw &= ~REQ_FLUSH;
			queue_io(md, bio);
		} else {
			/* done with normal IO or empty flush */
			trace_block_bio_complete(md->queue, bio, io_error);
			bio_endio(bio, io_error);
		}
	}
}
static void disable_write_same(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE SAME, disable it */
	limits->max_write_same_sectors = 0;
}

static void clone_endio(struct bio *bio, int error)
{
	int r = error;
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		error = -EIO;

	if (endio) {
		r = endio(tio->ti, bio, error);
		if (r < 0 || r == DM_ENDIO_REQUEUE)
			/*
			 * error and requeue request are handled
			 * in dec_pending().
			 */
			error = r;
		else if (r == DM_ENDIO_INCOMPLETE)
			/* The target will handle the io */
			return;
		else if (r) {
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	if (unlikely(r == -EREMOTEIO && (bio->bi_rw & REQ_WRITE_SAME) &&
		     !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors))
		disable_write_same(md);

	free_tio(md, tio);
	dec_pending(io, error);
}
/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone, int error)
{
	struct dm_rq_clone_bio_info *info =
		container_of(clone, struct dm_rq_clone_bio_info, clone);
	struct dm_rq_target_io *tio = info->tio;
	struct bio *bio = info->orig;
	unsigned int nr_bytes = info->orig->bi_iter.bi_size;

	bio_put(clone);

	if (tio->error)
		/*
		 * An error has already been detected on the request.
		 * Once an error has occurred, just let clone->end_io() handle
		 * the remainder.
		 */
		return;
	else if (error) {
		/*
		 * Don't report the error to the upper layer yet.
		 * The error handling decision is made by the target driver,
		 * when the request is completed.
		 */
		tio->error = error;
		return;
	}

	/*
	 * I/O for the bio successfully completed.
	 * Report the data completion to the upper layer.
	 */

	/*
	 * bios are processed from the head of the list.
	 * So the completing bio should always be rq->bio.
	 * If it's not, something is wrong.
	 */
	if (tio->orig->bio != bio)
		DMERR("bio completion is going in the middle of the request");

	/*
	 * Update the original request.
	 * Do not use blk_end_request() here, because it may complete
	 * the original request before the clone, and break the ordering.
	 */
	blk_update_request(tio->orig, 0, nr_bytes);
}

/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Or do dm_get() before calling this function and dm_put() later.
 */
static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
{
	int nr_requests_pending;

	atomic_dec(&md->pending[rw]);

	/* nudge anyone waiting on suspend queue */
	nr_requests_pending = md_in_flight(md);
	if (!nr_requests_pending)
		wake_up(&md->wait);

	/*
	 * Run this off this callpath, as drivers could invoke end_io while
	 * inside their request_fn (and holding the queue lock). Calling
	 * back into ->request_fn() could deadlock attempting to grab the
	 * queue lock again.
	 */
	if (run_queue) {
		if (!nr_requests_pending ||
		    (nr_requests_pending >= md->queue->nr_congestion_on))
			blk_run_queue_async(md->queue);
	}

	/*
	 * dm_put() must be at the end of this function. See the comment above.
	 */
	dm_put(md);
}
static void free_rq_clone(struct request *clone)
{
	struct dm_rq_target_io *tio = clone->end_io_data;

	blk_rq_unprep_clone(clone);
	if (clone->q && clone->q->mq_ops)
		tio->ti->type->release_clone_rq(clone);
	else
		free_clone_request(tio->md, clone);
	free_rq_tio(tio);
}

/*
 * Complete the clone and the original request.
 * Must be called without clone's queue lock held,
 * see end_clone_request() for more details.
 */
static void dm_end_request(struct request *clone, int error)
{
	int rw = rq_data_dir(clone);
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;

	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
		rq->errors = clone->errors;
		rq->resid_len = clone->resid_len;

		if (rq->sense)
			/*
			 * We are using the sense buffer of the original
			 * request.
			 * So setting the length of the sense data is enough.
			 */
			rq->sense_len = clone->sense_len;
	}

	free_rq_clone(clone);
	blk_end_request_all(rq, error);
	rq_completed(md, rw, true);
}

static void dm_unprep_request(struct request *rq)
{
	struct dm_rq_target_io *tio = rq->special;
	struct request *clone = tio->clone;

	rq->special = NULL;
	rq->cmd_flags &= ~REQ_DONTPREP;

	if (clone)
		free_rq_clone(clone);
}
/*
 * Requeue the original request of a clone.
 */
static void dm_requeue_unmapped_original_request(struct mapped_device *md,
						 struct request *rq)
{
	int rw = rq_data_dir(rq);
	struct request_queue *q = rq->q;
	unsigned long flags;

	dm_unprep_request(rq);

	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, rq);
	spin_unlock_irqrestore(q->queue_lock, flags);

	rq_completed(md, rw, false);
}

static void dm_requeue_unmapped_request(struct request *clone)
{
	struct dm_rq_target_io *tio = clone->end_io_data;

	dm_requeue_unmapped_original_request(tio->md, tio->orig);
}

static void __stop_queue(struct request_queue *q)
{
	blk_stop_queue(q);
}

static void stop_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__stop_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static void __start_queue(struct request_queue *q)
{
	if (blk_queue_stopped(q))
		blk_start_queue(q);
}

static void start_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
static void dm_done(struct request *clone, int error, bool mapped)
{
	int r = error;
	struct dm_rq_target_io *tio = clone->end_io_data;
	dm_request_endio_fn rq_end_io = NULL;

	if (tio->ti) {
		rq_end_io = tio->ti->type->rq_end_io;

		if (mapped && rq_end_io)
			r = rq_end_io(tio->ti, clone, error, &tio->info);
	}

	if (unlikely(r == -EREMOTEIO && (clone->cmd_flags & REQ_WRITE_SAME) &&
		     !clone->q->limits.max_write_same_sectors))
		disable_write_same(tio->md);

	if (r <= 0)
		/* The target wants to complete the I/O */
		dm_end_request(clone, r);
	else if (r == DM_ENDIO_INCOMPLETE)
		/* The target will handle the I/O */
		return;
	else if (r == DM_ENDIO_REQUEUE)
		/* The target wants to requeue the I/O */
		dm_requeue_unmapped_request(clone);
	else {
		DMWARN("unimplemented target endio return value: %d", r);
		BUG();
	}
}

/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
{
	bool mapped = true;
	struct dm_rq_target_io *tio = rq->special;
	struct request *clone = tio->clone;

	if (!clone) {
		blk_end_request_all(rq, tio->error);
		rq_completed(tio->md, rq_data_dir(rq), false);
		free_rq_tio(tio);
		return;
	}

	if (rq->cmd_flags & REQ_FAILED)
		mapped = false;

	dm_done(clone, tio->error, mapped);
}

/*
 * Complete the clone and the original request with the error status
 * through softirq context.
 */
static void dm_complete_request(struct request *rq, int error)
{
	struct dm_rq_target_io *tio = rq->special;

	tio->error = error;
	blk_complete_request(rq);
}

/*
 * Complete the not-mapped clone and the original request with the error status
 * through softirq context.
 * Target's rq_end_io() function isn't called.
 * This may be used when the target's map_rq() or clone_and_map_rq() functions fail.
 */
static void dm_kill_unmapped_request(struct request *rq, int error)
{
	rq->cmd_flags |= REQ_FAILED;
	dm_complete_request(rq, error);
}
1254cec47e3dSKiyoshi Ueda */ 1255cec47e3dSKiyoshi Ueda __blk_put_request(clone->q, clone); 1256e5863d9aSMike Snitzer } 1257cec47e3dSKiyoshi Ueda 1258cec47e3dSKiyoshi Ueda /* 1259cec47e3dSKiyoshi Ueda * Actual request completion is done in a softirq context which doesn't 1260466d89a6SKeith Busch * hold the clone's queue lock. Otherwise, deadlock could occur because: 1261cec47e3dSKiyoshi Ueda * - another request may be submitted by the upper level driver 1262cec47e3dSKiyoshi Ueda * of the stacking during the completion 1263cec47e3dSKiyoshi Ueda * - the submission which requires queue lock may be done 1264466d89a6SKeith Busch * against this clone's queue 1265cec47e3dSKiyoshi Ueda */ 1266466d89a6SKeith Busch dm_complete_request(tio->orig, error); 1267cec47e3dSKiyoshi Ueda } 1268cec47e3dSKiyoshi Ueda 126956a67df7SMike Snitzer /* 127056a67df7SMike Snitzer * Return maximum size of I/O possible at the supplied sector up to the current 127156a67df7SMike Snitzer * target boundary. 127256a67df7SMike Snitzer */ 127356a67df7SMike Snitzer static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti) 12741da177e4SLinus Torvalds { 127556a67df7SMike Snitzer sector_t target_offset = dm_target_offset(ti, sector); 127656a67df7SMike Snitzer 127756a67df7SMike Snitzer return ti->len - target_offset; 127856a67df7SMike Snitzer } 127956a67df7SMike Snitzer 128056a67df7SMike Snitzer static sector_t max_io_len(sector_t sector, struct dm_target *ti) 128156a67df7SMike Snitzer { 128256a67df7SMike Snitzer sector_t len = max_io_len_target_boundary(sector, ti); 1283542f9038SMike Snitzer sector_t offset, max_len; 12841da177e4SLinus Torvalds 12851da177e4SLinus Torvalds /* 12861da177e4SLinus Torvalds * Does the target need to split even further? 12871da177e4SLinus Torvalds */ 1288542f9038SMike Snitzer if (ti->max_io_len) { 1289542f9038SMike Snitzer offset = dm_target_offset(ti, sector); 1290542f9038SMike Snitzer if (unlikely(ti->max_io_len & (ti->max_io_len - 1))) 1291542f9038SMike Snitzer max_len = sector_div(offset, ti->max_io_len); 1292542f9038SMike Snitzer else 1293542f9038SMike Snitzer max_len = offset & (ti->max_io_len - 1); 1294542f9038SMike Snitzer max_len = ti->max_io_len - max_len; 1295542f9038SMike Snitzer 1296542f9038SMike Snitzer if (len > max_len) 1297542f9038SMike Snitzer len = max_len; 12981da177e4SLinus Torvalds } 12991da177e4SLinus Torvalds 13001da177e4SLinus Torvalds return len; 13011da177e4SLinus Torvalds } 13021da177e4SLinus Torvalds 1303542f9038SMike Snitzer int dm_set_target_max_io_len(struct dm_target *ti, sector_t len) 1304542f9038SMike Snitzer { 1305542f9038SMike Snitzer if (len > UINT_MAX) { 1306542f9038SMike Snitzer DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)", 1307542f9038SMike Snitzer (unsigned long long)len, UINT_MAX); 1308542f9038SMike Snitzer ti->error = "Maximum size of target IO is too large"; 1309542f9038SMike Snitzer return -EINVAL; 1310542f9038SMike Snitzer } 1311542f9038SMike Snitzer 1312542f9038SMike Snitzer ti->max_io_len = (uint32_t) len; 1313542f9038SMike Snitzer 1314542f9038SMike Snitzer return 0; 1315542f9038SMike Snitzer } 1316542f9038SMike Snitzer EXPORT_SYMBOL_GPL(dm_set_target_max_io_len); 1317542f9038SMike Snitzer 13181dd40c3eSMikulas Patocka /* 13191dd40c3eSMikulas Patocka * A target may call dm_accept_partial_bio only from the map routine. It is 13201dd40c3eSMikulas Patocka * allowed for all bio types except REQ_FLUSH. 
13211dd40c3eSMikulas Patocka * 13221dd40c3eSMikulas Patocka * dm_accept_partial_bio informs the dm that the target only wants to process 13231dd40c3eSMikulas Patocka * additional n_sectors sectors of the bio and that the rest of the data should be 13241dd40c3eSMikulas Patocka * sent in the next bio. 13251dd40c3eSMikulas Patocka * 13261dd40c3eSMikulas Patocka * A diagram that explains the arithmetic: 13271dd40c3eSMikulas Patocka * +--------------------+---------------+-------+ 13281dd40c3eSMikulas Patocka * | 1 | 2 | 3 | 13291dd40c3eSMikulas Patocka * +--------------------+---------------+-------+ 13301dd40c3eSMikulas Patocka * 13311dd40c3eSMikulas Patocka * <-------------- *tio->len_ptr ---------------> 13321dd40c3eSMikulas Patocka * <------- bi_size -------> 13331dd40c3eSMikulas Patocka * <-- n_sectors --> 13341dd40c3eSMikulas Patocka * 13351dd40c3eSMikulas Patocka * Region 1 was already iterated over with bio_advance or a similar function. 13361dd40c3eSMikulas Patocka * (it may be empty if the target doesn't use bio_advance) 13371dd40c3eSMikulas Patocka * Region 2 is the remaining bio size that the target wants to process. 13381dd40c3eSMikulas Patocka * (it may be empty if region 1 is non-empty, although there is no reason 13391dd40c3eSMikulas Patocka * to make it empty) 13401dd40c3eSMikulas Patocka * The target requires that region 3 be sent in the next bio. 13411dd40c3eSMikulas Patocka * 13421dd40c3eSMikulas Patocka * If the target wants to receive multiple copies of the bio (via num_*bios, etc), 13431dd40c3eSMikulas Patocka * the partially processed part (the sum of regions 1+2) must be the same for all 13441dd40c3eSMikulas Patocka * copies of the bio. 13451dd40c3eSMikulas Patocka */ 13461dd40c3eSMikulas Patocka void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors) 13471dd40c3eSMikulas Patocka { 13481dd40c3eSMikulas Patocka struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); 13491dd40c3eSMikulas Patocka unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT; 13501dd40c3eSMikulas Patocka BUG_ON(bio->bi_rw & REQ_FLUSH); 13511dd40c3eSMikulas Patocka BUG_ON(bi_size > *tio->len_ptr); 13521dd40c3eSMikulas Patocka BUG_ON(n_sectors > bi_size); 13531dd40c3eSMikulas Patocka *tio->len_ptr -= bi_size - n_sectors; 13541dd40c3eSMikulas Patocka bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT; 13551dd40c3eSMikulas Patocka } 13561dd40c3eSMikulas Patocka EXPORT_SYMBOL_GPL(dm_accept_partial_bio); 13571dd40c3eSMikulas Patocka 1358bd2a49b8SAlasdair G Kergon static void __map_bio(struct dm_target_io *tio) 13591da177e4SLinus Torvalds { 13601da177e4SLinus Torvalds int r; 13612056a782SJens Axboe sector_t sector; 13629faf400fSStefan Bader struct mapped_device *md; 1363dba14160SMikulas Patocka struct bio *clone = &tio->clone; 1364bd2a49b8SAlasdair G Kergon struct dm_target *ti = tio->ti; 13651da177e4SLinus Torvalds 13661da177e4SLinus Torvalds clone->bi_end_io = clone_endio; 13671da177e4SLinus Torvalds 13681da177e4SLinus Torvalds /* 13691da177e4SLinus Torvalds * Map the clone. If r == 0 we don't need to do 13701da177e4SLinus Torvalds * anything, the target has assumed ownership of 13711da177e4SLinus Torvalds * this io.
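 *
 * For illustration only, a hedged sketch of a simple remapping target's
 * map callback ("example_map" and "example_ctx" are hypothetical names,
 * not code from this file); it remaps the clone and returns
 * DM_MAPIO_REMAPPED so that the dispatch path below submits it:
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		struct example_ctx *ec = ti->private;
 *
 *		bio->bi_bdev = ec->dev->bdev;
 *		if (bio_sectors(bio))
 *			bio->bi_iter.bi_sector = ec->start +
 *				dm_target_offset(ti, bio->bi_iter.bi_sector);
 *		return DM_MAPIO_REMAPPED;
 *	}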
13721da177e4SLinus Torvalds */ 13731da177e4SLinus Torvalds atomic_inc(&tio->io->io_count); 13744f024f37SKent Overstreet sector = clone->bi_iter.bi_sector; 13757de3ee57SMikulas Patocka r = ti->type->map(ti, clone); 137645cbcd79SKiyoshi Ueda if (r == DM_MAPIO_REMAPPED) { 13771da177e4SLinus Torvalds /* the bio has been remapped so dispatch it */ 13782056a782SJens Axboe 1379d07335e5SMike Snitzer trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone, 138022a7c31aSAlan D. Brunelle tio->io->bio->bi_bdev->bd_dev, sector); 13812056a782SJens Axboe 13821da177e4SLinus Torvalds generic_make_request(clone); 13832e93ccc1SKiyoshi Ueda } else if (r < 0 || r == DM_MAPIO_REQUEUE) { 13842e93ccc1SKiyoshi Ueda /* error the io and bail out, or requeue it if needed */ 13859faf400fSStefan Bader md = tio->io->md; 13869faf400fSStefan Bader dec_pending(tio->io, r); 13879faf400fSStefan Bader free_tio(md, tio); 138845cbcd79SKiyoshi Ueda } else if (r) { 138945cbcd79SKiyoshi Ueda DMWARN("unimplemented target map return value: %d", r); 139045cbcd79SKiyoshi Ueda BUG(); 13911da177e4SLinus Torvalds } 13921da177e4SLinus Torvalds } 13931da177e4SLinus Torvalds 13941da177e4SLinus Torvalds struct clone_info { 13951da177e4SLinus Torvalds struct mapped_device *md; 13961da177e4SLinus Torvalds struct dm_table *map; 13971da177e4SLinus Torvalds struct bio *bio; 13981da177e4SLinus Torvalds struct dm_io *io; 13991da177e4SLinus Torvalds sector_t sector; 1400e0d6609aSMikulas Patocka unsigned sector_count; 14011da177e4SLinus Torvalds }; 14021da177e4SLinus Torvalds 1403e0d6609aSMikulas Patocka static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len) 1404bd2a49b8SAlasdair G Kergon { 14054f024f37SKent Overstreet bio->bi_iter.bi_sector = sector; 14064f024f37SKent Overstreet bio->bi_iter.bi_size = to_bytes(len); 14071da177e4SLinus Torvalds } 14081da177e4SLinus Torvalds 14091da177e4SLinus Torvalds /* 14101da177e4SLinus Torvalds * Creates a bio that consists of a range of complete bvecs. 14111da177e4SLinus Torvalds */ 1412dba14160SMikulas Patocka static void clone_bio(struct dm_target_io *tio, struct bio *bio, 14131c3b13e6SKent Overstreet sector_t sector, unsigned len) 14141da177e4SLinus Torvalds { 1415dba14160SMikulas Patocka struct bio *clone = &tio->clone; 14161da177e4SLinus Torvalds 14171c3b13e6SKent Overstreet __bio_clone_fast(clone, bio); 14189c47008dSMartin K.
Petersen 14191c3b13e6SKent Overstreet if (bio_integrity(bio)) 14201c3b13e6SKent Overstreet bio_integrity_clone(clone, bio, GFP_NOIO); 14211c3b13e6SKent Overstreet 14221c3b13e6SKent Overstreet bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector)); 14231c3b13e6SKent Overstreet clone->bi_iter.bi_size = to_bytes(len); 14241c3b13e6SKent Overstreet 14251c3b13e6SKent Overstreet if (bio_integrity(bio)) 14261c3b13e6SKent Overstreet bio_integrity_trim(clone, 0, len); 14271da177e4SLinus Torvalds } 14281da177e4SLinus Torvalds 14299015df24SAlasdair G Kergon static struct dm_target_io *alloc_tio(struct clone_info *ci, 143099778273SJunichi Nomura struct dm_target *ti, 143155a62eefSAlasdair G Kergon unsigned target_bio_nr) 1432f9ab94ceSMikulas Patocka { 1433dba14160SMikulas Patocka struct dm_target_io *tio; 1434dba14160SMikulas Patocka struct bio *clone; 1435dba14160SMikulas Patocka 143699778273SJunichi Nomura clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs); 1437dba14160SMikulas Patocka tio = container_of(clone, struct dm_target_io, clone); 1438f9ab94ceSMikulas Patocka 1439f9ab94ceSMikulas Patocka tio->io = ci->io; 1440f9ab94ceSMikulas Patocka tio->ti = ti; 144155a62eefSAlasdair G Kergon tio->target_bio_nr = target_bio_nr; 14429015df24SAlasdair G Kergon 14439015df24SAlasdair G Kergon return tio; 14449015df24SAlasdair G Kergon } 14459015df24SAlasdair G Kergon 144614fe594dSAlasdair G Kergon static void __clone_and_map_simple_bio(struct clone_info *ci, 144714fe594dSAlasdair G Kergon struct dm_target *ti, 14481dd40c3eSMikulas Patocka unsigned target_bio_nr, unsigned *len) 14499015df24SAlasdair G Kergon { 145099778273SJunichi Nomura struct dm_target_io *tio = alloc_tio(ci, ti, target_bio_nr); 1451dba14160SMikulas Patocka struct bio *clone = &tio->clone; 14529015df24SAlasdair G Kergon 14531dd40c3eSMikulas Patocka tio->len_ptr = len; 14541dd40c3eSMikulas Patocka 14551c3b13e6SKent Overstreet __bio_clone_fast(clone, ci->bio); 1456bd2a49b8SAlasdair G Kergon if (len) 14571dd40c3eSMikulas Patocka bio_setup_sector(clone, ci->sector, *len); 1458f9ab94ceSMikulas Patocka 1459bd2a49b8SAlasdair G Kergon __map_bio(tio); 1460f9ab94ceSMikulas Patocka } 1461f9ab94ceSMikulas Patocka 146214fe594dSAlasdair G Kergon static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti, 14631dd40c3eSMikulas Patocka unsigned num_bios, unsigned *len) 146406a426ceSMike Snitzer { 146555a62eefSAlasdair G Kergon unsigned target_bio_nr; 146606a426ceSMike Snitzer 146755a62eefSAlasdair G Kergon for (target_bio_nr = 0; target_bio_nr < num_bios; target_bio_nr++) 146814fe594dSAlasdair G Kergon __clone_and_map_simple_bio(ci, ti, target_bio_nr, len); 146906a426ceSMike Snitzer } 147006a426ceSMike Snitzer 147114fe594dSAlasdair G Kergon static int __send_empty_flush(struct clone_info *ci) 1472f9ab94ceSMikulas Patocka { 147306a426ceSMike Snitzer unsigned target_nr = 0; 1474f9ab94ceSMikulas Patocka struct dm_target *ti; 1475f9ab94ceSMikulas Patocka 1476b372d360SMike Snitzer BUG_ON(bio_has_data(ci->bio)); 1477f9ab94ceSMikulas Patocka while ((ti = dm_table_get_target(ci->map, target_nr++))) 14781dd40c3eSMikulas Patocka __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL); 1479f9ab94ceSMikulas Patocka 1480f9ab94ceSMikulas Patocka return 0; 1481f9ab94ceSMikulas Patocka } 1482f9ab94ceSMikulas Patocka 1483e4c93811SAlasdair G Kergon static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti, 14841dd40c3eSMikulas Patocka sector_t sector, unsigned *len) 14855ae89a87SMike Snitzer { 1486dba14160SMikulas Patocka 
struct bio *bio = ci->bio; 14875ae89a87SMike Snitzer struct dm_target_io *tio; 1488b0d8ed4dSAlasdair G Kergon unsigned target_bio_nr; 1489b0d8ed4dSAlasdair G Kergon unsigned num_target_bios = 1; 14905ae89a87SMike Snitzer 1491b0d8ed4dSAlasdair G Kergon /* 1492b0d8ed4dSAlasdair G Kergon * Does the target want to receive duplicate copies of the bio? 1493b0d8ed4dSAlasdair G Kergon */ 1494b0d8ed4dSAlasdair G Kergon if (bio_data_dir(bio) == WRITE && ti->num_write_bios) 1495b0d8ed4dSAlasdair G Kergon num_target_bios = ti->num_write_bios(ti, bio); 1496e4c93811SAlasdair G Kergon 1497b0d8ed4dSAlasdair G Kergon for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) { 149899778273SJunichi Nomura tio = alloc_tio(ci, ti, target_bio_nr); 14991dd40c3eSMikulas Patocka tio->len_ptr = len; 15001dd40c3eSMikulas Patocka clone_bio(tio, bio, sector, *len); 1501bd2a49b8SAlasdair G Kergon __map_bio(tio); 15025ae89a87SMike Snitzer } 1503b0d8ed4dSAlasdair G Kergon } 15045ae89a87SMike Snitzer 150555a62eefSAlasdair G Kergon typedef unsigned (*get_num_bios_fn)(struct dm_target *ti); 150623508a96SMike Snitzer 150755a62eefSAlasdair G Kergon static unsigned get_num_discard_bios(struct dm_target *ti) 150823508a96SMike Snitzer { 150955a62eefSAlasdair G Kergon return ti->num_discard_bios; 151023508a96SMike Snitzer } 151123508a96SMike Snitzer 151255a62eefSAlasdair G Kergon static unsigned get_num_write_same_bios(struct dm_target *ti) 151323508a96SMike Snitzer { 151455a62eefSAlasdair G Kergon return ti->num_write_same_bios; 151523508a96SMike Snitzer } 151623508a96SMike Snitzer 151723508a96SMike Snitzer typedef bool (*is_split_required_fn)(struct dm_target *ti); 151823508a96SMike Snitzer 151923508a96SMike Snitzer static bool is_split_required_for_discard(struct dm_target *ti) 152023508a96SMike Snitzer { 152155a62eefSAlasdair G Kergon return ti->split_discard_bios; 152223508a96SMike Snitzer } 152323508a96SMike Snitzer 152414fe594dSAlasdair G Kergon static int __send_changing_extent_only(struct clone_info *ci, 152555a62eefSAlasdair G Kergon get_num_bios_fn get_num_bios, 152623508a96SMike Snitzer is_split_required_fn is_split_required) 15275ae89a87SMike Snitzer { 15285ae89a87SMike Snitzer struct dm_target *ti; 1529e0d6609aSMikulas Patocka unsigned len; 153055a62eefSAlasdair G Kergon unsigned num_bios; 15315ae89a87SMike Snitzer 1532a79245b3SMike Snitzer do { 15335ae89a87SMike Snitzer ti = dm_table_find_target(ci->map, ci->sector); 15345ae89a87SMike Snitzer if (!dm_target_is_valid(ti)) 15355ae89a87SMike Snitzer return -EIO; 15365ae89a87SMike Snitzer 15375ae89a87SMike Snitzer /* 153823508a96SMike Snitzer * Even though the device advertised support for this type of 153923508a96SMike Snitzer * request, that does not mean every target supports it, and 1540936688d7SMike Snitzer * reconfiguration might also have changed that since the 15415ae89a87SMike Snitzer * check was performed. 15425ae89a87SMike Snitzer */ 154355a62eefSAlasdair G Kergon num_bios = get_num_bios ? 
get_num_bios(ti) : 0; 154455a62eefSAlasdair G Kergon if (!num_bios) 15455ae89a87SMike Snitzer return -EOPNOTSUPP; 15465ae89a87SMike Snitzer 154723508a96SMike Snitzer if (is_split_required && !is_split_required(ti)) 1548e0d6609aSMikulas Patocka len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti)); 15497acf0277SMikulas Patocka else 1550e0d6609aSMikulas Patocka len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti)); 15515ae89a87SMike Snitzer 15521dd40c3eSMikulas Patocka __send_duplicate_bios(ci, ti, num_bios, &len); 15535ae89a87SMike Snitzer 1554a79245b3SMike Snitzer ci->sector += len; 1555a79245b3SMike Snitzer } while (ci->sector_count -= len); 15565ae89a87SMike Snitzer 15575ae89a87SMike Snitzer return 0; 15585ae89a87SMike Snitzer } 15595ae89a87SMike Snitzer 156014fe594dSAlasdair G Kergon static int __send_discard(struct clone_info *ci) 156123508a96SMike Snitzer { 156214fe594dSAlasdair G Kergon return __send_changing_extent_only(ci, get_num_discard_bios, 156323508a96SMike Snitzer is_split_required_for_discard); 156423508a96SMike Snitzer } 156523508a96SMike Snitzer 156614fe594dSAlasdair G Kergon static int __send_write_same(struct clone_info *ci) 156723508a96SMike Snitzer { 156814fe594dSAlasdair G Kergon return __send_changing_extent_only(ci, get_num_write_same_bios, NULL); 156923508a96SMike Snitzer } 157023508a96SMike Snitzer 1571e4c93811SAlasdair G Kergon /* 1572e4c93811SAlasdair G Kergon * Select the correct strategy for processing a non-flush bio. 1573e4c93811SAlasdair G Kergon */ 1574e4c93811SAlasdair G Kergon static int __split_and_process_non_flush(struct clone_info *ci) 1575e4c93811SAlasdair G Kergon { 1576e4c93811SAlasdair G Kergon struct bio *bio = ci->bio; 1577e4c93811SAlasdair G Kergon struct dm_target *ti; 15781c3b13e6SKent Overstreet unsigned len; 1579e4c93811SAlasdair G Kergon 1580e4c93811SAlasdair G Kergon if (unlikely(bio->bi_rw & REQ_DISCARD)) 1581e4c93811SAlasdair G Kergon return __send_discard(ci); 1582e4c93811SAlasdair G Kergon else if (unlikely(bio->bi_rw & REQ_WRITE_SAME)) 1583e4c93811SAlasdair G Kergon return __send_write_same(ci); 1584e4c93811SAlasdair G Kergon 1585e4c93811SAlasdair G Kergon ti = dm_table_find_target(ci->map, ci->sector); 1586e4c93811SAlasdair G Kergon if (!dm_target_is_valid(ti)) 1587e4c93811SAlasdair G Kergon return -EIO; 1588e4c93811SAlasdair G Kergon 15891c3b13e6SKent Overstreet len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count); 1590e4c93811SAlasdair G Kergon 15911dd40c3eSMikulas Patocka __clone_and_map_data_bio(ci, ti, ci->sector, &len); 1592e4c93811SAlasdair G Kergon 1593e4c93811SAlasdair G Kergon ci->sector += len; 1594e4c93811SAlasdair G Kergon ci->sector_count -= len; 1595e4c93811SAlasdair G Kergon 1596e4c93811SAlasdair G Kergon return 0; 1597e4c93811SAlasdair G Kergon } 1598e4c93811SAlasdair G Kergon 1599e4c93811SAlasdair G Kergon /* 160014fe594dSAlasdair G Kergon * Entry point to split a bio into clones and submit them to the targets. 
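 * A REQ_FLUSH bio is redirected to md->flush_bio and duplicated to every
 * target by __send_empty_flush(); any other bio is split target by
 * target in __split_and_process_non_flush() until ci.sector_count
 * reaches zero.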
16011da177e4SLinus Torvalds */ 160283d5e5b0SMikulas Patocka static void __split_and_process_bio(struct mapped_device *md, 160383d5e5b0SMikulas Patocka struct dm_table *map, struct bio *bio) 16041da177e4SLinus Torvalds { 16051da177e4SLinus Torvalds struct clone_info ci; 1606512875bdSJun'ichi Nomura int error = 0; 16071da177e4SLinus Torvalds 160883d5e5b0SMikulas Patocka if (unlikely(!map)) { 1609f0b9a450SMikulas Patocka bio_io_error(bio); 1610f0b9a450SMikulas Patocka return; 1611f0b9a450SMikulas Patocka } 1612692d0eb9SMikulas Patocka 161383d5e5b0SMikulas Patocka ci.map = map; 16141da177e4SLinus Torvalds ci.md = md; 16151da177e4SLinus Torvalds ci.io = alloc_io(md); 16161da177e4SLinus Torvalds ci.io->error = 0; 16171da177e4SLinus Torvalds atomic_set(&ci.io->io_count, 1); 16181da177e4SLinus Torvalds ci.io->bio = bio; 16191da177e4SLinus Torvalds ci.io->md = md; 1620f88fb981SKiyoshi Ueda spin_lock_init(&ci.io->endio_lock); 16214f024f37SKent Overstreet ci.sector = bio->bi_iter.bi_sector; 16221da177e4SLinus Torvalds 16233eaf840eSJun'ichi "Nick" Nomura start_io_acct(ci.io); 1624bd2a49b8SAlasdair G Kergon 1625b372d360SMike Snitzer if (bio->bi_rw & REQ_FLUSH) { 1626b372d360SMike Snitzer ci.bio = &ci.md->flush_bio; 1627b372d360SMike Snitzer ci.sector_count = 0; 162814fe594dSAlasdair G Kergon error = __send_empty_flush(&ci); 1629b372d360SMike Snitzer /* dec_pending submits any data associated with flush */ 1630b372d360SMike Snitzer } else { 16316a8736d1STejun Heo ci.bio = bio; 1632f6fccb12SMilan Broz ci.sector_count = bio_sectors(bio); 1633512875bdSJun'ichi Nomura while (ci.sector_count && !error) 163414fe594dSAlasdair G Kergon error = __split_and_process_non_flush(&ci); 1635d87f4c14STejun Heo } 16361da177e4SLinus Torvalds 16371da177e4SLinus Torvalds /* drop the extra reference count */ 1638512875bdSJun'ichi Nomura dec_pending(ci.io, error); 16399e4e5f87SMilan Broz } 16409e4e5f87SMilan Broz /*----------------------------------------------------------------- 16411da177e4SLinus Torvalds * CRUD END 16421da177e4SLinus Torvalds *---------------------------------------------------------------*/ 16431da177e4SLinus Torvalds 16441da177e4SLinus Torvalds static int dm_merge_bvec(struct request_queue *q, 16451da177e4SLinus Torvalds struct bvec_merge_data *bvm, 1646f6fccb12SMilan Broz struct bio_vec *biovec) 1647f6fccb12SMilan Broz { 1648f6fccb12SMilan Broz struct mapped_device *md = q->queuedata; 164983d5e5b0SMikulas Patocka struct dm_table *map = dm_get_live_table_fast(md); 1650f6fccb12SMilan Broz struct dm_target *ti; 1651f6fccb12SMilan Broz sector_t max_sectors; 1652f6fccb12SMilan Broz int max_size = 0; 1653f6fccb12SMilan Broz 1654f6fccb12SMilan Broz if (unlikely(!map)) 1655f6fccb12SMilan Broz goto out; 1656f6fccb12SMilan Broz 1657f6fccb12SMilan Broz ti = dm_table_find_target(map, bvm->bi_sector); 1658f6fccb12SMilan Broz if (!dm_target_is_valid(ti)) 165983d5e5b0SMikulas Patocka goto out; 1660f6fccb12SMilan Broz 1661f6fccb12SMilan Broz /* 1662f6fccb12SMilan Broz * Find maximum amount of I/O that won't need splitting 1663f6fccb12SMilan Broz */ 166456a67df7SMike Snitzer max_sectors = min(max_io_len(bvm->bi_sector, ti), 1665148e51baSMike Snitzer (sector_t) queue_max_sectors(q)); 1666f6fccb12SMilan Broz max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size; 1667148e51baSMike Snitzer if (unlikely(max_size < 0)) /* this shouldn't _ever_ happen */ 1668f6fccb12SMilan Broz max_size = 0; 1669f6fccb12SMilan Broz 1670f6fccb12SMilan Broz /* 1671f6fccb12SMilan Broz * merge_bvec_fn() returns number of bytes 1672f6fccb12SMilan 
Broz * it can accept at this offset 1673f6fccb12SMilan Broz * max is precomputed maximal io size 1674f6fccb12SMilan Broz */ 1675f6fccb12SMilan Broz if (max_size && ti->type->merge) 1676f6fccb12SMilan Broz max_size = ti->type->merge(ti, bvm, biovec, max_size); 16778cbeb67aSMikulas Patocka /* 16788cbeb67aSMikulas Patocka * If the target doesn't support merge method and some of the devices 1679148e51baSMike Snitzer * provided their merge_bvec method (we know this by looking for the 1680148e51baSMike Snitzer * max_hw_sectors that dm_set_device_limits may set), then we can't 1681148e51baSMike Snitzer * allow bios with multiple vector entries. So always set max_size 1682148e51baSMike Snitzer * to 0, and the code below allows just one page. 16838cbeb67aSMikulas Patocka */ 16848cbeb67aSMikulas Patocka else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9) 16858cbeb67aSMikulas Patocka max_size = 0; 1686f6fccb12SMilan Broz 16875037108aSMikulas Patocka out: 168883d5e5b0SMikulas Patocka dm_put_live_table_fast(md); 1689f6fccb12SMilan Broz /* 1690f6fccb12SMilan Broz * Always allow an entire first page 1691f6fccb12SMilan Broz */ 1692f6fccb12SMilan Broz if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT)) 1693f6fccb12SMilan Broz max_size = biovec->bv_len; 1694f6fccb12SMilan Broz 1695f6fccb12SMilan Broz return max_size; 1696f6fccb12SMilan Broz } 1697f6fccb12SMilan Broz 16981da177e4SLinus Torvalds /* 16991da177e4SLinus Torvalds * The request function that just remaps the bio built up by 17001da177e4SLinus Torvalds * dm_merge_bvec. 17011da177e4SLinus Torvalds */ 1702ff36ab34SMike Snitzer static void dm_make_request(struct request_queue *q, struct bio *bio) 17031da177e4SLinus Torvalds { 170412f03a49SKevin Corry int rw = bio_data_dir(bio); 17051da177e4SLinus Torvalds struct mapped_device *md = q->queuedata; 170683d5e5b0SMikulas Patocka int srcu_idx; 170783d5e5b0SMikulas Patocka struct dm_table *map; 17081da177e4SLinus Torvalds 170983d5e5b0SMikulas Patocka map = dm_get_live_table(md, &srcu_idx); 17101da177e4SLinus Torvalds 171118c0b223SGu Zheng generic_start_io_acct(rw, bio_sectors(bio), &dm_disk(md)->part0); 171212f03a49SKevin Corry 17136a8736d1STejun Heo /* if we're suspended, we have to queue this io for later */ 17146a8736d1STejun Heo if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { 171583d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 17161da177e4SLinus Torvalds 17176a8736d1STejun Heo if (bio_rw(bio) != READA) 171892c63902SMikulas Patocka queue_io(md, bio); 17196a8736d1STejun Heo else 17206a8736d1STejun Heo bio_io_error(bio); 17215a7bbad2SChristoph Hellwig return; 17221da177e4SLinus Torvalds } 17231da177e4SLinus Torvalds 172483d5e5b0SMikulas Patocka __split_and_process_bio(md, map, bio); 172583d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 17265a7bbad2SChristoph Hellwig return; 1727cec47e3dSKiyoshi Ueda } 1728cec47e3dSKiyoshi Ueda 1729fd2ed4d2SMikulas Patocka int dm_request_based(struct mapped_device *md) 1730cec47e3dSKiyoshi Ueda { 1731cec47e3dSKiyoshi Ueda return blk_queue_stackable(md->queue); 1732cec47e3dSKiyoshi Ueda } 1733cec47e3dSKiyoshi Ueda 1734466d89a6SKeith Busch static void dm_dispatch_clone_request(struct request *clone, struct request *rq) 1735cec47e3dSKiyoshi Ueda { 1736cec47e3dSKiyoshi Ueda int r; 1737cec47e3dSKiyoshi Ueda 1738466d89a6SKeith Busch if (blk_queue_io_stat(clone->q)) 1739466d89a6SKeith Busch clone->cmd_flags |= REQ_IO_STAT; 1740cec47e3dSKiyoshi Ueda 1741466d89a6SKeith Busch clone->start_time = jiffies; 1742466d89a6SKeith Busch r = 
blk_insert_cloned_request(clone->q, clone); 1743cec47e3dSKiyoshi Ueda if (r) 1744466d89a6SKeith Busch /* must complete clone in terms of original request */ 1745cec47e3dSKiyoshi Ueda dm_complete_request(rq, r); 1746cec47e3dSKiyoshi Ueda } 1747cec47e3dSKiyoshi Ueda 1748cec47e3dSKiyoshi Ueda static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig, 1749cec47e3dSKiyoshi Ueda void *data) 1750cec47e3dSKiyoshi Ueda { 1751cec47e3dSKiyoshi Ueda struct dm_rq_target_io *tio = data; 175294818742SKent Overstreet struct dm_rq_clone_bio_info *info = 175394818742SKent Overstreet container_of(bio, struct dm_rq_clone_bio_info, clone); 1754cec47e3dSKiyoshi Ueda 1755cec47e3dSKiyoshi Ueda info->orig = bio_orig; 1756cec47e3dSKiyoshi Ueda info->tio = tio; 1757cec47e3dSKiyoshi Ueda bio->bi_end_io = end_clone_bio; 1758cec47e3dSKiyoshi Ueda 1759cec47e3dSKiyoshi Ueda return 0; 1760cec47e3dSKiyoshi Ueda } 1761cec47e3dSKiyoshi Ueda 1762cec47e3dSKiyoshi Ueda static int setup_clone(struct request *clone, struct request *rq, 17631ae49ea2SMike Snitzer struct dm_rq_target_io *tio, gfp_t gfp_mask) 1764cec47e3dSKiyoshi Ueda { 1765d0bcb878SKiyoshi Ueda int r; 1766cec47e3dSKiyoshi Ueda 17671ae49ea2SMike Snitzer r = blk_rq_prep_clone(clone, rq, tio->md->bs, gfp_mask, 1768d0bcb878SKiyoshi Ueda dm_rq_bio_constructor, tio); 1769cec47e3dSKiyoshi Ueda if (r) 1770cec47e3dSKiyoshi Ueda return r; 1771cec47e3dSKiyoshi Ueda 1772cec47e3dSKiyoshi Ueda clone->cmd = rq->cmd; 1773cec47e3dSKiyoshi Ueda clone->cmd_len = rq->cmd_len; 1774cec47e3dSKiyoshi Ueda clone->sense = rq->sense; 1775cec47e3dSKiyoshi Ueda clone->end_io = end_clone_request; 1776cec47e3dSKiyoshi Ueda clone->end_io_data = tio; 1777cec47e3dSKiyoshi Ueda 17781ae49ea2SMike Snitzer tio->clone = clone; 17791ae49ea2SMike Snitzer 1780cec47e3dSKiyoshi Ueda return 0; 1781cec47e3dSKiyoshi Ueda } 1782cec47e3dSKiyoshi Ueda 17836facdaffSKiyoshi Ueda static struct request *clone_rq(struct request *rq, struct mapped_device *md, 17841ae49ea2SMike Snitzer struct dm_rq_target_io *tio, gfp_t gfp_mask) 17856facdaffSKiyoshi Ueda { 17861ae49ea2SMike Snitzer struct request *clone = alloc_clone_request(md, gfp_mask); 17871ae49ea2SMike Snitzer 17881ae49ea2SMike Snitzer if (!clone) 17891ae49ea2SMike Snitzer return NULL; 17901ae49ea2SMike Snitzer 17911ae49ea2SMike Snitzer blk_rq_init(NULL, clone); 17921ae49ea2SMike Snitzer if (setup_clone(clone, rq, tio, gfp_mask)) { 17931ae49ea2SMike Snitzer /* -ENOMEM */ 17941ae49ea2SMike Snitzer free_clone_request(md, clone); 17951ae49ea2SMike Snitzer return NULL; 17961ae49ea2SMike Snitzer } 17971ae49ea2SMike Snitzer 17981ae49ea2SMike Snitzer return clone; 17991ae49ea2SMike Snitzer } 18001ae49ea2SMike Snitzer 18012eb6e1e3SKeith Busch static void map_tio_request(struct kthread_work *work); 18022eb6e1e3SKeith Busch 1803466d89a6SKeith Busch static struct dm_rq_target_io *prep_tio(struct request *rq, 1804466d89a6SKeith Busch struct mapped_device *md, gfp_t gfp_mask) 18056facdaffSKiyoshi Ueda { 18066facdaffSKiyoshi Ueda struct dm_rq_target_io *tio; 1807e5863d9aSMike Snitzer int srcu_idx; 1808e5863d9aSMike Snitzer struct dm_table *table; 18096facdaffSKiyoshi Ueda 18106facdaffSKiyoshi Ueda tio = alloc_rq_tio(md, gfp_mask); 18116facdaffSKiyoshi Ueda if (!tio) 18126facdaffSKiyoshi Ueda return NULL; 18136facdaffSKiyoshi Ueda 18146facdaffSKiyoshi Ueda tio->md = md; 18156facdaffSKiyoshi Ueda tio->ti = NULL; 18161ae49ea2SMike Snitzer tio->clone = NULL; 18176facdaffSKiyoshi Ueda tio->orig = rq; 18186facdaffSKiyoshi Ueda tio->error = 0; 18196facdaffSKiyoshi Ueda 
memset(&tio->info, 0, sizeof(tio->info)); 18202eb6e1e3SKeith Busch init_kthread_work(&tio->work, map_tio_request); 18216facdaffSKiyoshi Ueda 1822e5863d9aSMike Snitzer table = dm_get_live_table(md, &srcu_idx); 1823e5863d9aSMike Snitzer if (!dm_table_mq_request_based(table)) { 1824466d89a6SKeith Busch if (!clone_rq(rq, md, tio, gfp_mask)) { 1825e5863d9aSMike Snitzer dm_put_live_table(md, srcu_idx); 18266facdaffSKiyoshi Ueda free_rq_tio(tio); 18276facdaffSKiyoshi Ueda return NULL; 18286facdaffSKiyoshi Ueda } 1829e5863d9aSMike Snitzer } 1830e5863d9aSMike Snitzer dm_put_live_table(md, srcu_idx); 18316facdaffSKiyoshi Ueda 1832466d89a6SKeith Busch return tio; 18336facdaffSKiyoshi Ueda } 18346facdaffSKiyoshi Ueda 1835cec47e3dSKiyoshi Ueda /* 1836cec47e3dSKiyoshi Ueda * Called with the queue lock held. 1837cec47e3dSKiyoshi Ueda */ 1838cec47e3dSKiyoshi Ueda static int dm_prep_fn(struct request_queue *q, struct request *rq) 1839cec47e3dSKiyoshi Ueda { 1840cec47e3dSKiyoshi Ueda struct mapped_device *md = q->queuedata; 1841466d89a6SKeith Busch struct dm_rq_target_io *tio; 1842cec47e3dSKiyoshi Ueda 1843cec47e3dSKiyoshi Ueda if (unlikely(rq->special)) { 1844cec47e3dSKiyoshi Ueda DMWARN("Already has something in rq->special."); 1845cec47e3dSKiyoshi Ueda return BLKPREP_KILL; 1846cec47e3dSKiyoshi Ueda } 1847cec47e3dSKiyoshi Ueda 1848466d89a6SKeith Busch tio = prep_tio(rq, md, GFP_ATOMIC); 1849466d89a6SKeith Busch if (!tio) 1850cec47e3dSKiyoshi Ueda return BLKPREP_DEFER; 1851cec47e3dSKiyoshi Ueda 1852466d89a6SKeith Busch rq->special = tio; 1853cec47e3dSKiyoshi Ueda rq->cmd_flags |= REQ_DONTPREP; 1854cec47e3dSKiyoshi Ueda 1855cec47e3dSKiyoshi Ueda return BLKPREP_OK; 1856cec47e3dSKiyoshi Ueda } 1857cec47e3dSKiyoshi Ueda 18589eef87daSKiyoshi Ueda /* 18599eef87daSKiyoshi Ueda * Returns: 1860e5863d9aSMike Snitzer * 0 : the request has been processed 1861e5863d9aSMike Snitzer * DM_MAPIO_REQUEUE : the original request needs to be requeued 1862e5863d9aSMike Snitzer * < 0 : the request was completed due to failure 18639eef87daSKiyoshi Ueda */ 1864466d89a6SKeith Busch static int map_request(struct dm_target *ti, struct request *rq, 1865cec47e3dSKiyoshi Ueda struct mapped_device *md) 1866cec47e3dSKiyoshi Ueda { 1867e5863d9aSMike Snitzer int r; 1868466d89a6SKeith Busch struct dm_rq_target_io *tio = rq->special; 1869e5863d9aSMike Snitzer struct request *clone = NULL; 1870cec47e3dSKiyoshi Ueda 1871e5863d9aSMike Snitzer if (tio->clone) { 1872e5863d9aSMike Snitzer clone = tio->clone; 1873cec47e3dSKiyoshi Ueda r = ti->type->map_rq(ti, clone, &tio->info); 1874e5863d9aSMike Snitzer } else { 1875e5863d9aSMike Snitzer r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone); 1876e5863d9aSMike Snitzer if (r < 0) { 1877e5863d9aSMike Snitzer /* The target wants to complete the I/O */ 1878e5863d9aSMike Snitzer dm_kill_unmapped_request(rq, r); 1879e5863d9aSMike Snitzer return r; 1880e5863d9aSMike Snitzer } 1881e5863d9aSMike Snitzer if (IS_ERR(clone)) 1882e5863d9aSMike Snitzer return DM_MAPIO_REQUEUE; 1883e5863d9aSMike Snitzer if (setup_clone(clone, rq, tio, GFP_KERNEL)) { 1884e5863d9aSMike Snitzer /* -ENOMEM */ 1885e5863d9aSMike Snitzer ti->type->release_clone_rq(clone); 1886e5863d9aSMike Snitzer return DM_MAPIO_REQUEUE; 1887e5863d9aSMike Snitzer } 1888e5863d9aSMike Snitzer } 1889e5863d9aSMike Snitzer 1890cec47e3dSKiyoshi Ueda switch (r) { 1891cec47e3dSKiyoshi Ueda case DM_MAPIO_SUBMITTED: 1892cec47e3dSKiyoshi Ueda /* The target has taken the I/O to submit by itself later */ 1893cec47e3dSKiyoshi Ueda break; 1894cec47e3dSKiyoshi 
Ueda case DM_MAPIO_REMAPPED: 1895cec47e3dSKiyoshi Ueda /* The target has remapped the I/O so dispatch it */ 18966db4ccd6SJun'ichi Nomura trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)), 1897466d89a6SKeith Busch blk_rq_pos(rq)); 1898466d89a6SKeith Busch dm_dispatch_clone_request(clone, rq); 1899cec47e3dSKiyoshi Ueda break; 1900cec47e3dSKiyoshi Ueda case DM_MAPIO_REQUEUE: 1901cec47e3dSKiyoshi Ueda /* The target wants to requeue the I/O */ 1902cec47e3dSKiyoshi Ueda dm_requeue_unmapped_request(clone); 1903cec47e3dSKiyoshi Ueda break; 1904cec47e3dSKiyoshi Ueda default: 1905cec47e3dSKiyoshi Ueda if (r > 0) { 1906cec47e3dSKiyoshi Ueda DMWARN("unimplemented target map return value: %d", r); 1907cec47e3dSKiyoshi Ueda BUG(); 1908cec47e3dSKiyoshi Ueda } 1909cec47e3dSKiyoshi Ueda 1910cec47e3dSKiyoshi Ueda /* The target wants to complete the I/O */ 1911466d89a6SKeith Busch dm_kill_unmapped_request(rq, r); 1912e5863d9aSMike Snitzer return r; 1913cec47e3dSKiyoshi Ueda } 19149eef87daSKiyoshi Ueda 1915e5863d9aSMike Snitzer return 0; 1916cec47e3dSKiyoshi Ueda } 1917cec47e3dSKiyoshi Ueda 19182eb6e1e3SKeith Busch static void map_tio_request(struct kthread_work *work) 1919ba1cbad9SMike Snitzer { 19202eb6e1e3SKeith Busch struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work); 1921e5863d9aSMike Snitzer struct request *rq = tio->orig; 1922e5863d9aSMike Snitzer struct mapped_device *md = tio->md; 1923ba1cbad9SMike Snitzer 1924e5863d9aSMike Snitzer if (map_request(tio->ti, rq, md) == DM_MAPIO_REQUEUE) 1925e5863d9aSMike Snitzer dm_requeue_unmapped_original_request(md, rq); 19262eb6e1e3SKeith Busch } 19272eb6e1e3SKeith Busch 1928466d89a6SKeith Busch static void dm_start_request(struct mapped_device *md, struct request *orig) 1929ba1cbad9SMike Snitzer { 1930ba1cbad9SMike Snitzer blk_start_request(orig); 1931466d89a6SKeith Busch atomic_inc(&md->pending[rq_data_dir(orig)]); 1932ba1cbad9SMike Snitzer 1933ba1cbad9SMike Snitzer /* 1934ba1cbad9SMike Snitzer * Hold the md reference here for the in-flight I/O. 1935ba1cbad9SMike Snitzer * We can't rely on the reference count held by the device opener, 1936ba1cbad9SMike Snitzer * because the device may be closed during the request completion 1937ba1cbad9SMike Snitzer * when all bios are completed. 1938ba1cbad9SMike Snitzer * See the comment in rq_completed() too. 1939ba1cbad9SMike Snitzer */ 1940ba1cbad9SMike Snitzer dm_get(md); 1941ba1cbad9SMike Snitzer } 1942ba1cbad9SMike Snitzer 1943cec47e3dSKiyoshi Ueda /* 1944cec47e3dSKiyoshi Ueda * q->request_fn for request-based dm. 1945cec47e3dSKiyoshi Ueda * Called with the queue lock held. 1946cec47e3dSKiyoshi Ueda */ 1947cec47e3dSKiyoshi Ueda static void dm_request_fn(struct request_queue *q) 1948cec47e3dSKiyoshi Ueda { 1949cec47e3dSKiyoshi Ueda struct mapped_device *md = q->queuedata; 195083d5e5b0SMikulas Patocka int srcu_idx; 195183d5e5b0SMikulas Patocka struct dm_table *map = dm_get_live_table(md, &srcu_idx); 1952cec47e3dSKiyoshi Ueda struct dm_target *ti; 1953466d89a6SKeith Busch struct request *rq; 19542eb6e1e3SKeith Busch struct dm_rq_target_io *tio; 195529e4013dSTejun Heo sector_t pos; 1956cec47e3dSKiyoshi Ueda 1957cec47e3dSKiyoshi Ueda /* 1958b4324feeSKiyoshi Ueda * For suspend, check blk_queue_stopped() and increment 1959b4324feeSKiyoshi Ueda * ->pending within a single queue_lock so that we never increment the 1960b4324feeSKiyoshi Ueda * number of in-flight I/Os after the queue is stopped in 1961b4324feeSKiyoshi Ueda * dm_suspend().
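 *
 * The actual mapping is deferred to the md->kworker kthread via
 * map_tio_request() above, so targets never map a request while
 * this queue_lock is held.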
1962cec47e3dSKiyoshi Ueda */ 19637eaceaccSJens Axboe while (!blk_queue_stopped(q)) { 1964cec47e3dSKiyoshi Ueda rq = blk_peek_request(q); 1965cec47e3dSKiyoshi Ueda if (!rq) 1966*9d1deb83SMike Snitzer goto out; 1967cec47e3dSKiyoshi Ueda 196829e4013dSTejun Heo /* always use block 0 to find the target for flushes for now */ 196929e4013dSTejun Heo pos = 0; 197029e4013dSTejun Heo if (!(rq->cmd_flags & REQ_FLUSH)) 197129e4013dSTejun Heo pos = blk_rq_pos(rq); 1972d0bcb878SKiyoshi Ueda 197329e4013dSTejun Heo ti = dm_table_find_target(map, pos); 1974ba1cbad9SMike Snitzer if (!dm_target_is_valid(ti)) { 1975ba1cbad9SMike Snitzer /* 1976466d89a6SKeith Busch * Must perform the setup that rq_completed() requires 1977ba1cbad9SMike Snitzer * before calling dm_kill_unmapped_request 1978ba1cbad9SMike Snitzer */ 1979ba1cbad9SMike Snitzer DMERR_LIMIT("request attempted access beyond the end of device"); 1980466d89a6SKeith Busch dm_start_request(md, rq); 1981466d89a6SKeith Busch dm_kill_unmapped_request(rq, -EIO); 1982ba1cbad9SMike Snitzer continue; 1983ba1cbad9SMike Snitzer } 198429e4013dSTejun Heo 1985cec47e3dSKiyoshi Ueda if (ti->type->busy && ti->type->busy(ti)) 19867eaceaccSJens Axboe goto delay_and_out; 1987cec47e3dSKiyoshi Ueda 1988466d89a6SKeith Busch dm_start_request(md, rq); 1989b4324feeSKiyoshi Ueda 19902eb6e1e3SKeith Busch tio = rq->special; 19912eb6e1e3SKeith Busch /* Establish tio->ti before queuing work (map_tio_request) */ 19922eb6e1e3SKeith Busch tio->ti = ti; 19932eb6e1e3SKeith Busch queue_kthread_work(&md->kworker, &tio->work); 1994052189a2SKiyoshi Ueda BUG_ON(!irqs_disabled()); 1995cec47e3dSKiyoshi Ueda } 1996cec47e3dSKiyoshi Ueda 1997cec47e3dSKiyoshi Ueda goto out; 1998cec47e3dSKiyoshi Ueda 19997eaceaccSJens Axboe delay_and_out: 20007eaceaccSJens Axboe blk_delay_queue(q, HZ / 10); 2001cec47e3dSKiyoshi Ueda out: 200283d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 2003cec47e3dSKiyoshi Ueda } 2004cec47e3dSKiyoshi Ueda 20051da177e4SLinus Torvalds static int dm_any_congested(void *congested_data, int bdi_bits) 20061da177e4SLinus Torvalds { 20078a57dfc6SChandra Seetharaman int r = bdi_bits; 20088a57dfc6SChandra Seetharaman struct mapped_device *md = congested_data; 20098a57dfc6SChandra Seetharaman struct dm_table *map; 20101da177e4SLinus Torvalds 20111eb787ecSAlasdair G Kergon if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 201283d5e5b0SMikulas Patocka map = dm_get_live_table_fast(md); 20138a57dfc6SChandra Seetharaman if (map) { 2014cec47e3dSKiyoshi Ueda /* 2015cec47e3dSKiyoshi Ueda * Request-based dm cares only about its own queue when 2016cec47e3dSKiyoshi Ueda * queried for the congestion status of the request_queue 2017cec47e3dSKiyoshi Ueda */ 2018cec47e3dSKiyoshi Ueda if (dm_request_based(md)) 2019cec47e3dSKiyoshi Ueda r = md->queue->backing_dev_info.state & 2020cec47e3dSKiyoshi Ueda bdi_bits; 2021cec47e3dSKiyoshi Ueda else 20221da177e4SLinus Torvalds r = dm_table_any_congested(map, bdi_bits); 20238a57dfc6SChandra Seetharaman } 202483d5e5b0SMikulas Patocka dm_put_live_table_fast(md); 20258a57dfc6SChandra Seetharaman } 20268a57dfc6SChandra Seetharaman 20271da177e4SLinus Torvalds return r; 20281da177e4SLinus Torvalds } 20291da177e4SLinus Torvalds 20301da177e4SLinus Torvalds /*----------------------------------------------------------------- 20311da177e4SLinus Torvalds * An IDR is used to keep track of allocated minor numbers.
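 * Minors are allocated with idr_preload() + idr_alloc() under _minor_lock
 * (see specific_minor() and next_free_minor() below), so the GFP_NOWAIT
 * allocation attempted inside the spinlock does not sleep.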
20321da177e4SLinus Torvalds *---------------------------------------------------------------*/ 20332b06cfffSAlasdair G Kergon static void free_minor(int minor) 20341da177e4SLinus Torvalds { 2035f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 20361da177e4SLinus Torvalds idr_remove(&_minor_idr, minor); 2037f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 20381da177e4SLinus Torvalds } 20391da177e4SLinus Torvalds 20401da177e4SLinus Torvalds /* 20411da177e4SLinus Torvalds * See if the device with a specific minor # is free. 20421da177e4SLinus Torvalds */ 2043cf13ab8eSFrederik Deweerdt static int specific_minor(int minor) 20441da177e4SLinus Torvalds { 2045c9d76be6STejun Heo int r; 20461da177e4SLinus Torvalds 20471da177e4SLinus Torvalds if (minor >= (1 << MINORBITS)) 20481da177e4SLinus Torvalds return -EINVAL; 20491da177e4SLinus Torvalds 2050c9d76be6STejun Heo idr_preload(GFP_KERNEL); 2051f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 20521da177e4SLinus Torvalds 2053c9d76be6STejun Heo r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT); 20541da177e4SLinus Torvalds 2055f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 2056c9d76be6STejun Heo idr_preload_end(); 2057c9d76be6STejun Heo if (r < 0) 2058c9d76be6STejun Heo return r == -ENOSPC ? -EBUSY : r; 2059c9d76be6STejun Heo return 0; 20601da177e4SLinus Torvalds } 20611da177e4SLinus Torvalds 2062cf13ab8eSFrederik Deweerdt static int next_free_minor(int *minor) 20631da177e4SLinus Torvalds { 2064c9d76be6STejun Heo int r; 20651da177e4SLinus Torvalds 2066c9d76be6STejun Heo idr_preload(GFP_KERNEL); 2067f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 20681da177e4SLinus Torvalds 2069c9d76be6STejun Heo r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT); 20701da177e4SLinus Torvalds 2071f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 2072c9d76be6STejun Heo idr_preload_end(); 2073c9d76be6STejun Heo if (r < 0) 20741da177e4SLinus Torvalds return r; 2075c9d76be6STejun Heo *minor = r; 2076c9d76be6STejun Heo return 0; 20771da177e4SLinus Torvalds } 20781da177e4SLinus Torvalds 207983d5cde4SAlexey Dobriyan static const struct block_device_operations dm_blk_dops; 20801da177e4SLinus Torvalds 208153d5914fSMikulas Patocka static void dm_wq_work(struct work_struct *work); 208253d5914fSMikulas Patocka 20834a0b4ddfSMike Snitzer static void dm_init_md_queue(struct mapped_device *md) 20844a0b4ddfSMike Snitzer { 20854a0b4ddfSMike Snitzer /* 20864a0b4ddfSMike Snitzer * Request-based dm devices cannot be stacked on top of bio-based dm 20874a0b4ddfSMike Snitzer * devices. The type of this dm device has not been decided yet. 20884a0b4ddfSMike Snitzer * The type is decided at the first table loading time. 20894a0b4ddfSMike Snitzer * To prevent problematic device stacking, clear the queue flag 20904a0b4ddfSMike Snitzer * for request stacking support until then. 20914a0b4ddfSMike Snitzer * 20924a0b4ddfSMike Snitzer * This queue is new, so no concurrency on the queue_flags. 
20934a0b4ddfSMike Snitzer */ 20944a0b4ddfSMike Snitzer queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue); 20954a0b4ddfSMike Snitzer 20964a0b4ddfSMike Snitzer md->queue->queuedata = md; 20974a0b4ddfSMike Snitzer md->queue->backing_dev_info.congested_fn = dm_any_congested; 20984a0b4ddfSMike Snitzer md->queue->backing_dev_info.congested_data = md; 2099ff36ab34SMike Snitzer 21004a0b4ddfSMike Snitzer blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY); 21014a0b4ddfSMike Snitzer } 21024a0b4ddfSMike Snitzer 21031da177e4SLinus Torvalds /* 21041da177e4SLinus Torvalds * Allocate and initialise a blank device with a given minor. 21051da177e4SLinus Torvalds */ 21062b06cfffSAlasdair G Kergon static struct mapped_device *alloc_dev(int minor) 21071da177e4SLinus Torvalds { 21081da177e4SLinus Torvalds int r; 2109cf13ab8eSFrederik Deweerdt struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL); 2110ba61fdd1SJeff Mahoney void *old_md; 21111da177e4SLinus Torvalds 21121da177e4SLinus Torvalds if (!md) { 21131da177e4SLinus Torvalds DMWARN("unable to allocate device, out of memory."); 21141da177e4SLinus Torvalds return NULL; 21151da177e4SLinus Torvalds } 21161da177e4SLinus Torvalds 211710da4f79SJeff Mahoney if (!try_module_get(THIS_MODULE)) 21186ed7ade8SMilan Broz goto bad_module_get; 211910da4f79SJeff Mahoney 21201da177e4SLinus Torvalds /* get a minor number for the dev */ 21212b06cfffSAlasdair G Kergon if (minor == DM_ANY_MINOR) 2122cf13ab8eSFrederik Deweerdt r = next_free_minor(&minor); 21232b06cfffSAlasdair G Kergon else 2124cf13ab8eSFrederik Deweerdt r = specific_minor(minor); 21251da177e4SLinus Torvalds if (r < 0) 21266ed7ade8SMilan Broz goto bad_minor; 21271da177e4SLinus Torvalds 212883d5e5b0SMikulas Patocka r = init_srcu_struct(&md->io_barrier); 212983d5e5b0SMikulas Patocka if (r < 0) 213083d5e5b0SMikulas Patocka goto bad_io_barrier; 213183d5e5b0SMikulas Patocka 2132a5664dadSMike Snitzer md->type = DM_TYPE_NONE; 2133e61290a4SDaniel Walker mutex_init(&md->suspend_lock); 2134a5664dadSMike Snitzer mutex_init(&md->type_lock); 213586f1152bSBenjamin Marzinski mutex_init(&md->table_devices_lock); 2136022c2611SMikulas Patocka spin_lock_init(&md->deferred_lock); 21371da177e4SLinus Torvalds atomic_set(&md->holders, 1); 21385c6bd75dSAlasdair G Kergon atomic_set(&md->open_count, 0); 21391da177e4SLinus Torvalds atomic_set(&md->event_nr, 0); 21407a8c3d3bSMike Anderson atomic_set(&md->uevent_seq, 0); 21417a8c3d3bSMike Anderson INIT_LIST_HEAD(&md->uevent_list); 214286f1152bSBenjamin Marzinski INIT_LIST_HEAD(&md->table_devices); 21437a8c3d3bSMike Anderson spin_lock_init(&md->uevent_lock); 21441da177e4SLinus Torvalds 21454a0b4ddfSMike Snitzer md->queue = blk_alloc_queue(GFP_KERNEL); 21461da177e4SLinus Torvalds if (!md->queue) 21476ed7ade8SMilan Broz goto bad_queue; 21481da177e4SLinus Torvalds 21494a0b4ddfSMike Snitzer dm_init_md_queue(md); 21509faf400fSStefan Bader 21511da177e4SLinus Torvalds md->disk = alloc_disk(1); 21521da177e4SLinus Torvalds if (!md->disk) 21536ed7ade8SMilan Broz goto bad_disk; 21541da177e4SLinus Torvalds 2155316d315bSNikanth Karthikesan atomic_set(&md->pending[0], 0); 2156316d315bSNikanth Karthikesan atomic_set(&md->pending[1], 0); 2157f0b04115SJeff Mahoney init_waitqueue_head(&md->wait); 215853d5914fSMikulas Patocka INIT_WORK(&md->work, dm_wq_work); 2159f0b04115SJeff Mahoney init_waitqueue_head(&md->eventq); 21602995fa78SMikulas Patocka init_completion(&md->kobj_holder.completion); 21612eb6e1e3SKeith Busch md->kworker_task = NULL; 2162f0b04115SJeff Mahoney 21631da177e4SLinus Torvalds 
md->disk->major = _major; 21641da177e4SLinus Torvalds md->disk->first_minor = minor; 21651da177e4SLinus Torvalds md->disk->fops = &dm_blk_dops; 21661da177e4SLinus Torvalds md->disk->queue = md->queue; 21671da177e4SLinus Torvalds md->disk->private_data = md; 21681da177e4SLinus Torvalds sprintf(md->disk->disk_name, "dm-%d", minor); 21691da177e4SLinus Torvalds add_disk(md->disk); 21707e51f257SMike Anderson format_dev_t(md->name, MKDEV(_major, minor)); 21711da177e4SLinus Torvalds 2172670368a8STejun Heo md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0); 2173304f3f6aSMilan Broz if (!md->wq) 2174304f3f6aSMilan Broz goto bad_thread; 2175304f3f6aSMilan Broz 217632a926daSMikulas Patocka md->bdev = bdget_disk(md->disk, 0); 217732a926daSMikulas Patocka if (!md->bdev) 217832a926daSMikulas Patocka goto bad_bdev; 217932a926daSMikulas Patocka 21806a8736d1STejun Heo bio_init(&md->flush_bio); 21816a8736d1STejun Heo md->flush_bio.bi_bdev = md->bdev; 21826a8736d1STejun Heo md->flush_bio.bi_rw = WRITE_FLUSH; 21836a8736d1STejun Heo 2184fd2ed4d2SMikulas Patocka dm_stats_init(&md->stats); 2185fd2ed4d2SMikulas Patocka 2186ba61fdd1SJeff Mahoney /* Populate the mapping, nobody knows we exist yet */ 2187f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 2188ba61fdd1SJeff Mahoney old_md = idr_replace(&_minor_idr, md, minor); 2189f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 2190ba61fdd1SJeff Mahoney 2191ba61fdd1SJeff Mahoney BUG_ON(old_md != MINOR_ALLOCED); 2192ba61fdd1SJeff Mahoney 21931da177e4SLinus Torvalds return md; 21941da177e4SLinus Torvalds 219532a926daSMikulas Patocka bad_bdev: 219632a926daSMikulas Patocka destroy_workqueue(md->wq); 2197304f3f6aSMilan Broz bad_thread: 219803022c54SZdenek Kabelac del_gendisk(md->disk); 2199304f3f6aSMilan Broz put_disk(md->disk); 22006ed7ade8SMilan Broz bad_disk: 22011312f40eSAl Viro blk_cleanup_queue(md->queue); 22026ed7ade8SMilan Broz bad_queue: 220383d5e5b0SMikulas Patocka cleanup_srcu_struct(&md->io_barrier); 220483d5e5b0SMikulas Patocka bad_io_barrier: 22051da177e4SLinus Torvalds free_minor(minor); 22066ed7ade8SMilan Broz bad_minor: 220710da4f79SJeff Mahoney module_put(THIS_MODULE); 22086ed7ade8SMilan Broz bad_module_get: 22091da177e4SLinus Torvalds kfree(md); 22101da177e4SLinus Torvalds return NULL; 22111da177e4SLinus Torvalds } 22121da177e4SLinus Torvalds 2213ae9da83fSJun'ichi Nomura static void unlock_fs(struct mapped_device *md); 2214ae9da83fSJun'ichi Nomura 22151da177e4SLinus Torvalds static void free_dev(struct mapped_device *md) 22161da177e4SLinus Torvalds { 2217f331c029STejun Heo int minor = MINOR(disk_devt(md->disk)); 221863d94e48SJun'ichi Nomura 2219ae9da83fSJun'ichi Nomura unlock_fs(md); 2220304f3f6aSMilan Broz destroy_workqueue(md->wq); 22212eb6e1e3SKeith Busch 22222eb6e1e3SKeith Busch if (md->kworker_task) 22232eb6e1e3SKeith Busch kthread_stop(md->kworker_task); 2224e6ee8c0bSKiyoshi Ueda if (md->io_pool) 22251da177e4SLinus Torvalds mempool_destroy(md->io_pool); 22261ae49ea2SMike Snitzer if (md->rq_pool) 22271ae49ea2SMike Snitzer mempool_destroy(md->rq_pool); 2228e6ee8c0bSKiyoshi Ueda if (md->bs) 22299faf400fSStefan Bader bioset_free(md->bs); 223063a4f065SMike Snitzer 223183d5e5b0SMikulas Patocka cleanup_srcu_struct(&md->io_barrier); 223286f1152bSBenjamin Marzinski free_table_devices(&md->table_devices); 223363a4f065SMike Snitzer dm_stats_cleanup(&md->stats); 2234fba9f90eSJeff Mahoney 2235fba9f90eSJeff Mahoney spin_lock(&_minor_lock); 2236fba9f90eSJeff Mahoney md->disk->private_data = NULL; 2237fba9f90eSJeff Mahoney spin_unlock(&_minor_lock); 
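	/*
	 * With private_data cleared under _minor_lock, a concurrent open
	 * (see dm_blk_open() earlier in this file) can no longer resolve
	 * this gendisk to the mapped_device being freed.
	 */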
223863a4f065SMike Snitzer if (blk_get_integrity(md->disk)) 223963a4f065SMike Snitzer blk_integrity_unregister(md->disk); 224063a4f065SMike Snitzer del_gendisk(md->disk); 22411da177e4SLinus Torvalds put_disk(md->disk); 22421312f40eSAl Viro blk_cleanup_queue(md->queue); 224363a4f065SMike Snitzer bdput(md->bdev); 224463a4f065SMike Snitzer free_minor(minor); 224563a4f065SMike Snitzer 224610da4f79SJeff Mahoney module_put(THIS_MODULE); 22471da177e4SLinus Torvalds kfree(md); 22481da177e4SLinus Torvalds } 22491da177e4SLinus Torvalds 2250e6ee8c0bSKiyoshi Ueda static void __bind_mempools(struct mapped_device *md, struct dm_table *t) 2251e6ee8c0bSKiyoshi Ueda { 2252c0820cf5SMikulas Patocka struct dm_md_mempools *p = dm_table_get_md_mempools(t); 2253e6ee8c0bSKiyoshi Ueda 22545f015204SJun'ichi Nomura if (md->io_pool && md->bs) { 225516245bdcSJun'ichi Nomura /* The md already has the necessary mempools. */ 225616245bdcSJun'ichi Nomura if (dm_table_get_type(t) == DM_TYPE_BIO_BASED) { 2257c0820cf5SMikulas Patocka /* 225816245bdcSJun'ichi Nomura * Reload the bioset because front_pad may have changed 225916245bdcSJun'ichi Nomura * when a different table was loaded. 2260c0820cf5SMikulas Patocka */ 2261c0820cf5SMikulas Patocka bioset_free(md->bs); 2262c0820cf5SMikulas Patocka md->bs = p->bs; 2263c0820cf5SMikulas Patocka p->bs = NULL; 2264466d89a6SKeith Busch } 226516245bdcSJun'ichi Nomura /* 226616245bdcSJun'ichi Nomura * There's no need to reload with request-based dm 226716245bdcSJun'ichi Nomura * because the size of front_pad doesn't change. 226816245bdcSJun'ichi Nomura * Note for the future: if you ever reload the bioset, 226916245bdcSJun'ichi Nomura * prepped requests in the queue may still refer 227016245bdcSJun'ichi Nomura * to bios from the old bioset, so you must walk 227116245bdcSJun'ichi Nomura * through the queue and unprep them. 227216245bdcSJun'ichi Nomura */ 2273e6ee8c0bSKiyoshi Ueda goto out; 2274c0820cf5SMikulas Patocka } 2275e6ee8c0bSKiyoshi Ueda 22761ae49ea2SMike Snitzer BUG_ON(!p || md->io_pool || md->rq_pool || md->bs); 2277e6ee8c0bSKiyoshi Ueda 2278e6ee8c0bSKiyoshi Ueda md->io_pool = p->io_pool; 2279e6ee8c0bSKiyoshi Ueda p->io_pool = NULL; 22801ae49ea2SMike Snitzer md->rq_pool = p->rq_pool; 22811ae49ea2SMike Snitzer p->rq_pool = NULL; 2282e6ee8c0bSKiyoshi Ueda md->bs = p->bs; 2283e6ee8c0bSKiyoshi Ueda p->bs = NULL; 2284e6ee8c0bSKiyoshi Ueda 2285e6ee8c0bSKiyoshi Ueda out: 2286e6ee8c0bSKiyoshi Ueda /* mempool bind completed; the table no longer needs any mempools */ 2287e6ee8c0bSKiyoshi Ueda dm_table_free_md_mempools(t); 2288e6ee8c0bSKiyoshi Ueda } 2289e6ee8c0bSKiyoshi Ueda 22901da177e4SLinus Torvalds /* 22911da177e4SLinus Torvalds * Bind a table to the device.
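 * (The binding itself is done by __bind() below, which registers
 * event_callback() via dm_table_event_callback() so that table events
 * wake md->eventq waiters and are delivered as uevents.)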
22921da177e4SLinus Torvalds */ 22931da177e4SLinus Torvalds static void event_callback(void *context) 22941da177e4SLinus Torvalds { 22957a8c3d3bSMike Anderson unsigned long flags; 22967a8c3d3bSMike Anderson LIST_HEAD(uevents); 22971da177e4SLinus Torvalds struct mapped_device *md = (struct mapped_device *) context; 22981da177e4SLinus Torvalds 22997a8c3d3bSMike Anderson spin_lock_irqsave(&md->uevent_lock, flags); 23007a8c3d3bSMike Anderson list_splice_init(&md->uevent_list, &uevents); 23017a8c3d3bSMike Anderson spin_unlock_irqrestore(&md->uevent_lock, flags); 23027a8c3d3bSMike Anderson 2303ed9e1982STejun Heo dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); 23047a8c3d3bSMike Anderson 23051da177e4SLinus Torvalds atomic_inc(&md->event_nr); 23061da177e4SLinus Torvalds wake_up(&md->eventq); 23071da177e4SLinus Torvalds } 23081da177e4SLinus Torvalds 2309c217649bSMike Snitzer /* 2310c217649bSMike Snitzer * Protected by md->suspend_lock obtained by dm_swap_table(). 2311c217649bSMike Snitzer */ 23124e90188bSAlasdair G Kergon static void __set_size(struct mapped_device *md, sector_t size) 23131da177e4SLinus Torvalds { 23144e90188bSAlasdair G Kergon set_capacity(md->disk, size); 23151da177e4SLinus Torvalds 2316db8fef4fSMikulas Patocka i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT); 23171da177e4SLinus Torvalds } 23181da177e4SLinus Torvalds 2319042d2a9bSAlasdair G Kergon /* 2320d5b9dd04SMikulas Patocka * Return 1 if the queue has a compulsory merge_bvec_fn function. 2321d5b9dd04SMikulas Patocka * 2322d5b9dd04SMikulas Patocka * If this function returns 0, then the device is either a non-dm 2323d5b9dd04SMikulas Patocka * device without a merge_bvec_fn, or it is a dm device that is 2324d5b9dd04SMikulas Patocka * able to split any bios it receives that are too big. 2325d5b9dd04SMikulas Patocka */ 2326d5b9dd04SMikulas Patocka int dm_queue_merge_is_compulsory(struct request_queue *q) 2327d5b9dd04SMikulas Patocka { 2328d5b9dd04SMikulas Patocka struct mapped_device *dev_md; 2329d5b9dd04SMikulas Patocka 2330d5b9dd04SMikulas Patocka if (!q->merge_bvec_fn) 2331d5b9dd04SMikulas Patocka return 0; 2332d5b9dd04SMikulas Patocka 2333ff36ab34SMike Snitzer if (q->make_request_fn == dm_make_request) { 2334d5b9dd04SMikulas Patocka dev_md = q->queuedata; 2335d5b9dd04SMikulas Patocka if (test_bit(DMF_MERGE_IS_OPTIONAL, &dev_md->flags)) 2336d5b9dd04SMikulas Patocka return 0; 2337d5b9dd04SMikulas Patocka } 2338d5b9dd04SMikulas Patocka 2339d5b9dd04SMikulas Patocka return 1; 2340d5b9dd04SMikulas Patocka } 2341d5b9dd04SMikulas Patocka 2342d5b9dd04SMikulas Patocka static int dm_device_merge_is_compulsory(struct dm_target *ti, 2343d5b9dd04SMikulas Patocka struct dm_dev *dev, sector_t start, 2344d5b9dd04SMikulas Patocka sector_t len, void *data) 2345d5b9dd04SMikulas Patocka { 2346d5b9dd04SMikulas Patocka struct block_device *bdev = dev->bdev; 2347d5b9dd04SMikulas Patocka struct request_queue *q = bdev_get_queue(bdev); 2348d5b9dd04SMikulas Patocka 2349d5b9dd04SMikulas Patocka return dm_queue_merge_is_compulsory(q); 2350d5b9dd04SMikulas Patocka } 2351d5b9dd04SMikulas Patocka 2352d5b9dd04SMikulas Patocka /* 2353d5b9dd04SMikulas Patocka * Return 1 if it is acceptable to ignore merge_bvec_fn based 2354d5b9dd04SMikulas Patocka * on the properties of the underlying devices. 
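 * In other words, return 0 as soon as any target reports, through its
 * iterate_devices method, an underlying queue whose merge_bvec_fn is
 * compulsory.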
2355d5b9dd04SMikulas Patocka */ 2356d5b9dd04SMikulas Patocka static int dm_table_merge_is_optional(struct dm_table *table) 2357d5b9dd04SMikulas Patocka { 2358d5b9dd04SMikulas Patocka unsigned i = 0; 2359d5b9dd04SMikulas Patocka struct dm_target *ti; 2360d5b9dd04SMikulas Patocka 2361d5b9dd04SMikulas Patocka while (i < dm_table_get_num_targets(table)) { 2362d5b9dd04SMikulas Patocka ti = dm_table_get_target(table, i++); 2363d5b9dd04SMikulas Patocka 2364d5b9dd04SMikulas Patocka if (ti->type->iterate_devices && 2365d5b9dd04SMikulas Patocka ti->type->iterate_devices(ti, dm_device_merge_is_compulsory, NULL)) 2366d5b9dd04SMikulas Patocka return 0; 2367d5b9dd04SMikulas Patocka } 2368d5b9dd04SMikulas Patocka 2369d5b9dd04SMikulas Patocka return 1; 2370d5b9dd04SMikulas Patocka } 2371d5b9dd04SMikulas Patocka 2372d5b9dd04SMikulas Patocka /* 2373042d2a9bSAlasdair G Kergon * Returns the old map, which the caller must destroy. 2374042d2a9bSAlasdair G Kergon */ 2375042d2a9bSAlasdair G Kergon static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, 2376754c5fc7SMike Snitzer struct queue_limits *limits) 23771da177e4SLinus Torvalds { 2378042d2a9bSAlasdair G Kergon struct dm_table *old_map; 2379165125e1SJens Axboe struct request_queue *q = md->queue; 23801da177e4SLinus Torvalds sector_t size; 2381d5b9dd04SMikulas Patocka int merge_is_optional; 23821da177e4SLinus Torvalds 23831da177e4SLinus Torvalds size = dm_table_get_size(t); 23843ac51e74SDarrick J. Wong 23853ac51e74SDarrick J. Wong /* 23863ac51e74SDarrick J. Wong * Wipe any geometry if the size of the table changed. 23873ac51e74SDarrick J. Wong */ 2388fd2ed4d2SMikulas Patocka if (size != dm_get_size(md)) 23893ac51e74SDarrick J. Wong memset(&md->geometry, 0, sizeof(md->geometry)); 23903ac51e74SDarrick J. Wong 23914e90188bSAlasdair G Kergon __set_size(md, size); 23921da177e4SLinus Torvalds 2393cf222b37SAlasdair G Kergon dm_table_event_callback(t, event_callback, md); 23942ca3310eSAlasdair G Kergon 2395e6ee8c0bSKiyoshi Ueda /* 2396e6ee8c0bSKiyoshi Ueda * The queue hasn't been stopped yet if the old table type wasn't 2397e6ee8c0bSKiyoshi Ueda * request-based during suspension, so stop it now to prevent 2398e6ee8c0bSKiyoshi Ueda * I/O from being mapped before resume. 2399e6ee8c0bSKiyoshi Ueda * This must be done before setting the queue restrictions, 2400e6ee8c0bSKiyoshi Ueda * because request-based dm may start running as soon as they are set.
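 * (The queue is started again when the device is resumed, via
 * start_queue() defined earlier in this file.)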
2401e6ee8c0bSKiyoshi Ueda */ 2402e6ee8c0bSKiyoshi Ueda if (dm_table_request_based(t) && !blk_queue_stopped(q)) 2403e6ee8c0bSKiyoshi Ueda stop_queue(q); 2404e6ee8c0bSKiyoshi Ueda 2405e6ee8c0bSKiyoshi Ueda __bind_mempools(md, t); 2406e6ee8c0bSKiyoshi Ueda 2407d5b9dd04SMikulas Patocka merge_is_optional = dm_table_merge_is_optional(t); 2408d5b9dd04SMikulas Patocka 2409a12f5d48SEric Dumazet old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 241083d5e5b0SMikulas Patocka rcu_assign_pointer(md->map, t); 241136a0456fSAlasdair G Kergon md->immutable_target_type = dm_table_get_immutable_target_type(t); 241236a0456fSAlasdair G Kergon 2413754c5fc7SMike Snitzer dm_table_set_restrictions(t, q, limits); 2414d5b9dd04SMikulas Patocka if (merge_is_optional) 2415d5b9dd04SMikulas Patocka set_bit(DMF_MERGE_IS_OPTIONAL, &md->flags); 2416d5b9dd04SMikulas Patocka else 2417d5b9dd04SMikulas Patocka clear_bit(DMF_MERGE_IS_OPTIONAL, &md->flags); 241841abc4e1SHannes Reinecke if (old_map) 241983d5e5b0SMikulas Patocka dm_sync_table(md); 24202ca3310eSAlasdair G Kergon 2421042d2a9bSAlasdair G Kergon return old_map; 24221da177e4SLinus Torvalds } 24231da177e4SLinus Torvalds 2424a7940155SAlasdair G Kergon /* 2425a7940155SAlasdair G Kergon * Returns unbound table for the caller to free. 2426a7940155SAlasdair G Kergon */ 2427a7940155SAlasdair G Kergon static struct dm_table *__unbind(struct mapped_device *md) 24281da177e4SLinus Torvalds { 2429a12f5d48SEric Dumazet struct dm_table *map = rcu_dereference_protected(md->map, 1); 24301da177e4SLinus Torvalds 24311da177e4SLinus Torvalds if (!map) 2432a7940155SAlasdair G Kergon return NULL; 24331da177e4SLinus Torvalds 24341da177e4SLinus Torvalds dm_table_event_callback(map, NULL, NULL); 24359cdb8520SMonam Agarwal RCU_INIT_POINTER(md->map, NULL); 243683d5e5b0SMikulas Patocka dm_sync_table(md); 2437a7940155SAlasdair G Kergon 2438a7940155SAlasdair G Kergon return map; 24391da177e4SLinus Torvalds } 24401da177e4SLinus Torvalds 24411da177e4SLinus Torvalds /* 24421da177e4SLinus Torvalds * Constructor for a new device. 24431da177e4SLinus Torvalds */ 24442b06cfffSAlasdair G Kergon int dm_create(int minor, struct mapped_device **result) 24451da177e4SLinus Torvalds { 24461da177e4SLinus Torvalds struct mapped_device *md; 24471da177e4SLinus Torvalds 24482b06cfffSAlasdair G Kergon md = alloc_dev(minor); 24491da177e4SLinus Torvalds if (!md) 24501da177e4SLinus Torvalds return -ENXIO; 24511da177e4SLinus Torvalds 2452784aae73SMilan Broz dm_sysfs_init(md); 2453784aae73SMilan Broz 24541da177e4SLinus Torvalds *result = md; 24551da177e4SLinus Torvalds return 0; 24561da177e4SLinus Torvalds } 24571da177e4SLinus Torvalds 2458a5664dadSMike Snitzer /* 2459a5664dadSMike Snitzer * Functions to manage md->type. 2460a5664dadSMike Snitzer * All are required to hold md->type_lock. 
2461a5664dadSMike Snitzer */ 2462a5664dadSMike Snitzer void dm_lock_md_type(struct mapped_device *md) 2463a5664dadSMike Snitzer { 2464a5664dadSMike Snitzer mutex_lock(&md->type_lock); 2465a5664dadSMike Snitzer } 2466a5664dadSMike Snitzer 2467a5664dadSMike Snitzer void dm_unlock_md_type(struct mapped_device *md) 2468a5664dadSMike Snitzer { 2469a5664dadSMike Snitzer mutex_unlock(&md->type_lock); 2470a5664dadSMike Snitzer } 2471a5664dadSMike Snitzer 2472a5664dadSMike Snitzer void dm_set_md_type(struct mapped_device *md, unsigned type) 2473a5664dadSMike Snitzer { 247400c4fc3bSMike Snitzer BUG_ON(!mutex_is_locked(&md->type_lock)); 2475a5664dadSMike Snitzer md->type = type; 2476a5664dadSMike Snitzer } 2477a5664dadSMike Snitzer 2478a5664dadSMike Snitzer unsigned dm_get_md_type(struct mapped_device *md) 2479a5664dadSMike Snitzer { 248000c4fc3bSMike Snitzer BUG_ON(!mutex_is_locked(&md->type_lock)); 2481a5664dadSMike Snitzer return md->type; 2482a5664dadSMike Snitzer } 2483a5664dadSMike Snitzer 2484e5863d9aSMike Snitzer static bool dm_md_type_request_based(struct mapped_device *md) 2485e5863d9aSMike Snitzer { 2486e5863d9aSMike Snitzer unsigned table_type = dm_get_md_type(md); 2487e5863d9aSMike Snitzer 2488e5863d9aSMike Snitzer return (table_type == DM_TYPE_REQUEST_BASED || 2489e5863d9aSMike Snitzer table_type == DM_TYPE_MQ_REQUEST_BASED); 2490e5863d9aSMike Snitzer } 2491e5863d9aSMike Snitzer 249236a0456fSAlasdair G Kergon struct target_type *dm_get_immutable_target_type(struct mapped_device *md) 249336a0456fSAlasdair G Kergon { 249436a0456fSAlasdair G Kergon return md->immutable_target_type; 249536a0456fSAlasdair G Kergon } 249636a0456fSAlasdair G Kergon 24974a0b4ddfSMike Snitzer /* 2498f84cb8a4SMike Snitzer * The queue_limits are only valid as long as you have a reference 2499f84cb8a4SMike Snitzer * count on 'md'. 2500f84cb8a4SMike Snitzer */ 2501f84cb8a4SMike Snitzer struct queue_limits *dm_get_queue_limits(struct mapped_device *md) 2502f84cb8a4SMike Snitzer { 2503f84cb8a4SMike Snitzer BUG_ON(!atomic_read(&md->holders)); 2504f84cb8a4SMike Snitzer return &md->queue->limits; 2505f84cb8a4SMike Snitzer } 2506f84cb8a4SMike Snitzer EXPORT_SYMBOL_GPL(dm_get_queue_limits); 2507f84cb8a4SMike Snitzer 2508f84cb8a4SMike Snitzer /* 25094a0b4ddfSMike Snitzer * Fully initialize a request-based queue (->elevator, ->request_fn, etc). 
25104a0b4ddfSMike Snitzer */ 25114a0b4ddfSMike Snitzer static int dm_init_request_based_queue(struct mapped_device *md) 25124a0b4ddfSMike Snitzer { 25134a0b4ddfSMike Snitzer struct request_queue *q = NULL; 25144a0b4ddfSMike Snitzer 25154a0b4ddfSMike Snitzer if (md->queue->elevator) 25164a0b4ddfSMike Snitzer return 1; 25174a0b4ddfSMike Snitzer 25184a0b4ddfSMike Snitzer /* Fully initialize the queue */ 25194a0b4ddfSMike Snitzer q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL); 25204a0b4ddfSMike Snitzer if (!q) 25214a0b4ddfSMike Snitzer return 0; 25224a0b4ddfSMike Snitzer 25234a0b4ddfSMike Snitzer md->queue = q; 25244a0b4ddfSMike Snitzer dm_init_md_queue(md); 25254a0b4ddfSMike Snitzer blk_queue_softirq_done(md->queue, dm_softirq_done); 25264a0b4ddfSMike Snitzer blk_queue_prep_rq(md->queue, dm_prep_fn); 25274a0b4ddfSMike Snitzer 25282eb6e1e3SKeith Busch /* Also initialize the request-based DM worker thread */ 25292eb6e1e3SKeith Busch init_kthread_worker(&md->kworker); 25302eb6e1e3SKeith Busch md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker, 25312eb6e1e3SKeith Busch "kdmwork-%s", dm_device_name(md)); 25322eb6e1e3SKeith Busch 25334a0b4ddfSMike Snitzer elv_register_queue(md->queue); 25344a0b4ddfSMike Snitzer 25354a0b4ddfSMike Snitzer return 1; 25364a0b4ddfSMike Snitzer } 25374a0b4ddfSMike Snitzer 25384a0b4ddfSMike Snitzer /* 25394a0b4ddfSMike Snitzer * Setup the DM device's queue based on md's type 25404a0b4ddfSMike Snitzer */ 25414a0b4ddfSMike Snitzer int dm_setup_md_queue(struct mapped_device *md) 25424a0b4ddfSMike Snitzer { 2543ff36ab34SMike Snitzer if (dm_md_type_request_based(md)) { 2544ff36ab34SMike Snitzer if (!dm_init_request_based_queue(md)) { 25454a0b4ddfSMike Snitzer DMWARN("Cannot initialize queue for request-based mapped device"); 25464a0b4ddfSMike Snitzer return -EINVAL; 25474a0b4ddfSMike Snitzer } 2548ff36ab34SMike Snitzer } else { 2549ff36ab34SMike Snitzer /* bio-based specific initialization */ 2550ff36ab34SMike Snitzer blk_queue_make_request(md->queue, dm_make_request); 2551ff36ab34SMike Snitzer blk_queue_merge_bvec(md->queue, dm_merge_bvec); 2552ff36ab34SMike Snitzer } 25534a0b4ddfSMike Snitzer 25544a0b4ddfSMike Snitzer return 0; 25554a0b4ddfSMike Snitzer } 25564a0b4ddfSMike Snitzer 25572bec1f4aSMikulas Patocka struct mapped_device *dm_get_md(dev_t dev) 25581da177e4SLinus Torvalds { 25591da177e4SLinus Torvalds struct mapped_device *md; 25601da177e4SLinus Torvalds unsigned minor = MINOR(dev); 25611da177e4SLinus Torvalds 25621da177e4SLinus Torvalds if (MAJOR(dev) != _major || minor >= (1 << MINORBITS)) 25631da177e4SLinus Torvalds return NULL; 25641da177e4SLinus Torvalds 2565f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 25661da177e4SLinus Torvalds 25671da177e4SLinus Torvalds md = idr_find(&_minor_idr, minor); 25682bec1f4aSMikulas Patocka if (md) { 25692bec1f4aSMikulas Patocka if ((md == MINOR_ALLOCED || 2570f331c029STejun Heo (MINOR(disk_devt(dm_disk(md))) != minor) || 2571abdc568bSKiyoshi Ueda dm_deleting_md(md) || 2572fba9f90eSJeff Mahoney test_bit(DMF_FREEING, &md->flags))) { 2573637842cfSDavid Teigland md = NULL; 2574fba9f90eSJeff Mahoney goto out; 2575fba9f90eSJeff Mahoney } 25762bec1f4aSMikulas Patocka dm_get(md); 25772bec1f4aSMikulas Patocka } 25781da177e4SLinus Torvalds 2579fba9f90eSJeff Mahoney out: 2580f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 25811da177e4SLinus Torvalds 2582637842cfSDavid Teigland return md; 2583637842cfSDavid Teigland } 25843cf2e4baSAlasdair G Kergon EXPORT_SYMBOL_GPL(dm_get_md); 2585d229a958SDavid Teigland 
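/*
 * Illustrative sketch only (not part of the original source): looking up
 * a mapped_device by dev_t with dm_get_md(). A successful lookup takes a
 * reference under _minor_lock, so every hit must be balanced with dm_put().
 */
static int example_report_device(dev_t dev)
{
	struct mapped_device *md = dm_get_md(dev);

	if (!md)
		return -ENXIO;	/* wrong major, unallocated, or being freed */

	DMINFO("found mapped device %s", dm_device_name(md));
	dm_put(md);
	return 0;
}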
25869ade92a9SAlasdair G Kergon void *dm_get_mdptr(struct mapped_device *md) 2587637842cfSDavid Teigland { 25889ade92a9SAlasdair G Kergon return md->interface_ptr; 25891da177e4SLinus Torvalds } 25901da177e4SLinus Torvalds 25911da177e4SLinus Torvalds void dm_set_mdptr(struct mapped_device *md, void *ptr) 25921da177e4SLinus Torvalds { 25931da177e4SLinus Torvalds md->interface_ptr = ptr; 25941da177e4SLinus Torvalds } 25951da177e4SLinus Torvalds 25961da177e4SLinus Torvalds void dm_get(struct mapped_device *md) 25971da177e4SLinus Torvalds { 25981da177e4SLinus Torvalds atomic_inc(&md->holders); 25993f77316dSKiyoshi Ueda BUG_ON(test_bit(DMF_FREEING, &md->flags)); 26001da177e4SLinus Torvalds } 26011da177e4SLinus Torvalds 260209ee96b2SMikulas Patocka int dm_hold(struct mapped_device *md) 260309ee96b2SMikulas Patocka { 260409ee96b2SMikulas Patocka spin_lock(&_minor_lock); 260509ee96b2SMikulas Patocka if (test_bit(DMF_FREEING, &md->flags)) { 260609ee96b2SMikulas Patocka spin_unlock(&_minor_lock); 260709ee96b2SMikulas Patocka return -EBUSY; 260809ee96b2SMikulas Patocka } 260909ee96b2SMikulas Patocka dm_get(md); 261009ee96b2SMikulas Patocka spin_unlock(&_minor_lock); 261109ee96b2SMikulas Patocka return 0; 261209ee96b2SMikulas Patocka } 261309ee96b2SMikulas Patocka EXPORT_SYMBOL_GPL(dm_hold); 261409ee96b2SMikulas Patocka 261572d94861SAlasdair G Kergon const char *dm_device_name(struct mapped_device *md) 261672d94861SAlasdair G Kergon { 261772d94861SAlasdair G Kergon return md->name; 261872d94861SAlasdair G Kergon } 261972d94861SAlasdair G Kergon EXPORT_SYMBOL_GPL(dm_device_name); 262072d94861SAlasdair G Kergon 26213f77316dSKiyoshi Ueda static void __dm_destroy(struct mapped_device *md, bool wait) 26221da177e4SLinus Torvalds { 26231134e5aeSMike Anderson struct dm_table *map; 262483d5e5b0SMikulas Patocka int srcu_idx; 26251da177e4SLinus Torvalds 26263f77316dSKiyoshi Ueda might_sleep(); 2627fba9f90eSJeff Mahoney 262883d5e5b0SMikulas Patocka map = dm_get_live_table(md, &srcu_idx); 262963a4f065SMike Snitzer 263063a4f065SMike Snitzer spin_lock(&_minor_lock); 26313f77316dSKiyoshi Ueda idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); 2632fba9f90eSJeff Mahoney set_bit(DMF_FREEING, &md->flags); 2633f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 26343f77316dSKiyoshi Ueda 26352eb6e1e3SKeith Busch if (dm_request_based(md)) 26362eb6e1e3SKeith Busch flush_kthread_worker(&md->kworker); 26372eb6e1e3SKeith Busch 2638ab7c7bb6SMikulas Patocka /* 2639ab7c7bb6SMikulas Patocka * Take suspend_lock so that presuspend and postsuspend methods 2640ab7c7bb6SMikulas Patocka * do not race with internal suspend. 2641ab7c7bb6SMikulas Patocka */ 2642ab7c7bb6SMikulas Patocka mutex_lock(&md->suspend_lock); 26434f186f8bSKiyoshi Ueda if (!dm_suspended_md(md)) { 26441da177e4SLinus Torvalds dm_table_presuspend_targets(map); 26451da177e4SLinus Torvalds dm_table_postsuspend_targets(map); 26461da177e4SLinus Torvalds } 2647ab7c7bb6SMikulas Patocka mutex_unlock(&md->suspend_lock); 26483f77316dSKiyoshi Ueda 264983d5e5b0SMikulas Patocka /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ 265083d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 265183d5e5b0SMikulas Patocka 26523f77316dSKiyoshi Ueda /* 26533f77316dSKiyoshi Ueda * Rare, but there may be I/O requests still going to complete, 26543f77316dSKiyoshi Ueda * for example. Wait for all references to disappear. 
26553f77316dSKiyoshi Ueda * No one should increment the reference count of the mapped_device,
26563f77316dSKiyoshi Ueda * after the mapped_device state becomes DMF_FREEING.
26573f77316dSKiyoshi Ueda */
26583f77316dSKiyoshi Ueda if (wait)
26593f77316dSKiyoshi Ueda while (atomic_read(&md->holders))
26603f77316dSKiyoshi Ueda msleep(1);
26613f77316dSKiyoshi Ueda else if (atomic_read(&md->holders))
26623f77316dSKiyoshi Ueda DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
26633f77316dSKiyoshi Ueda dm_device_name(md), atomic_read(&md->holders));
26643f77316dSKiyoshi Ueda
2665784aae73SMilan Broz dm_sysfs_exit(md);
2666a7940155SAlasdair G Kergon dm_table_destroy(__unbind(md));
26671da177e4SLinus Torvalds free_dev(md);
26681da177e4SLinus Torvalds }
26693f77316dSKiyoshi Ueda
26703f77316dSKiyoshi Ueda void dm_destroy(struct mapped_device *md)
26713f77316dSKiyoshi Ueda {
26723f77316dSKiyoshi Ueda __dm_destroy(md, true);
26733f77316dSKiyoshi Ueda }
26743f77316dSKiyoshi Ueda
26753f77316dSKiyoshi Ueda void dm_destroy_immediate(struct mapped_device *md)
26763f77316dSKiyoshi Ueda {
26773f77316dSKiyoshi Ueda __dm_destroy(md, false);
26783f77316dSKiyoshi Ueda }
26793f77316dSKiyoshi Ueda
26803f77316dSKiyoshi Ueda void dm_put(struct mapped_device *md)
26813f77316dSKiyoshi Ueda {
26823f77316dSKiyoshi Ueda atomic_dec(&md->holders);
26831da177e4SLinus Torvalds }
268479eb885cSEdward Goggin EXPORT_SYMBOL_GPL(dm_put);
26851da177e4SLinus Torvalds
2686401600dfSMikulas Patocka static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
268746125c1cSMilan Broz {
268846125c1cSMilan Broz int r = 0;
2689b44ebeb0SMikulas Patocka DECLARE_WAITQUEUE(wait, current);
2690b44ebeb0SMikulas Patocka
2691b44ebeb0SMikulas Patocka add_wait_queue(&md->wait, &wait);
269246125c1cSMilan Broz
269346125c1cSMilan Broz while (1) {
2694401600dfSMikulas Patocka set_current_state(interruptible);
269546125c1cSMilan Broz
2696b4324feeSKiyoshi Ueda if (!md_in_flight(md))
269746125c1cSMilan Broz break;
269846125c1cSMilan Broz
2699401600dfSMikulas Patocka if (interruptible == TASK_INTERRUPTIBLE &&
2700401600dfSMikulas Patocka signal_pending(current)) {
270146125c1cSMilan Broz r = -EINTR;
270246125c1cSMilan Broz break;
270346125c1cSMilan Broz }
270446125c1cSMilan Broz
270546125c1cSMilan Broz io_schedule();
270646125c1cSMilan Broz }
270746125c1cSMilan Broz set_current_state(TASK_RUNNING);
270846125c1cSMilan Broz
2709b44ebeb0SMikulas Patocka remove_wait_queue(&md->wait, &wait);
2710b44ebeb0SMikulas Patocka
271146125c1cSMilan Broz return r;
271246125c1cSMilan Broz }
271346125c1cSMilan Broz
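/*
 * Illustrative sketch only (not part of the original source): the
 * open-coded loop in dm_wait_for_completion() above is roughly what
 * wait_event() expands to. dm keeps the explicit loop because the task
 * state (TASK_INTERRUPTIBLE vs TASK_UNINTERRUPTIBLE) is chosen at run
 * time by the caller; with a fixed state the same wait could be written:
 */
static void example_wait_for_completion_uninterruptible(struct mapped_device *md)
{
	/* Sleep on md->wait until all in-flight I/O has drained. */
	wait_event(md->wait, !md_in_flight(md));
}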
27141da177e4SLinus Torvalds /*
27151da177e4SLinus Torvalds * Process the deferred bios
27161da177e4SLinus Torvalds */
2717ef208587SMikulas Patocka static void dm_wq_work(struct work_struct *work)
27181da177e4SLinus Torvalds {
2719ef208587SMikulas Patocka struct mapped_device *md = container_of(work, struct mapped_device,
2720ef208587SMikulas Patocka work);
27216d6f10dfSMilan Broz struct bio *c;
272283d5e5b0SMikulas Patocka int srcu_idx;
272383d5e5b0SMikulas Patocka struct dm_table *map;
27241da177e4SLinus Torvalds
272583d5e5b0SMikulas Patocka map = dm_get_live_table(md, &srcu_idx);
2726ef208587SMikulas Patocka
27273b00b203SMikulas Patocka while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
2728022c2611SMikulas Patocka spin_lock_irq(&md->deferred_lock);
2729022c2611SMikulas Patocka c = bio_list_pop(&md->deferred);
2730022c2611SMikulas Patocka spin_unlock_irq(&md->deferred_lock);
2731022c2611SMikulas Patocka
27326a8736d1STejun Heo if (!c)
2733df12ee99SAlasdair G Kergon break;
273473d410c0SMilan Broz
2735e6ee8c0bSKiyoshi Ueda if (dm_request_based(md))
2736e6ee8c0bSKiyoshi Ueda generic_make_request(c);
2737af7e466aSMikulas Patocka else
273883d5e5b0SMikulas Patocka __split_and_process_bio(md, map, c);
2739e6ee8c0bSKiyoshi Ueda }
27403b00b203SMikulas Patocka
274183d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx);
27421da177e4SLinus Torvalds }
27431da177e4SLinus Torvalds
27449a1fb464SMikulas Patocka static void dm_queue_flush(struct mapped_device *md)
2745304f3f6aSMilan Broz {
27463b00b203SMikulas Patocka clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
27474e857c58SPeter Zijlstra smp_mb__after_atomic();
274853d5914fSMikulas Patocka queue_work(md->wq, &md->work);
2749304f3f6aSMilan Broz }
2750304f3f6aSMilan Broz
27511da177e4SLinus Torvalds /*
2752042d2a9bSAlasdair G Kergon * Swap in a new table, returning the old one for the caller to destroy.
27531da177e4SLinus Torvalds */
2754042d2a9bSAlasdair G Kergon struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
27551da177e4SLinus Torvalds {
275687eb5b21SMike Christie struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
2757754c5fc7SMike Snitzer struct queue_limits limits;
2758042d2a9bSAlasdair G Kergon int r;
27591da177e4SLinus Torvalds
2760e61290a4SDaniel Walker mutex_lock(&md->suspend_lock);
27611da177e4SLinus Torvalds
27621da177e4SLinus Torvalds /* device must be suspended */
27634f186f8bSKiyoshi Ueda if (!dm_suspended_md(md))
276493c534aeSAlasdair G Kergon goto out;
27651da177e4SLinus Torvalds
27663ae70656SMike Snitzer /*
27673ae70656SMike Snitzer * If the new table has no data devices, retain the existing limits.
27683ae70656SMike Snitzer * This helps multipath with queue_if_no_path if all paths disappear,
27693ae70656SMike Snitzer * then new I/O is queued based on these limits, and then some paths
27703ae70656SMike Snitzer * reappear.
27713ae70656SMike Snitzer */
27723ae70656SMike Snitzer if (dm_table_has_no_data_devices(table)) {
277383d5e5b0SMikulas Patocka live_map = dm_get_live_table_fast(md);
27743ae70656SMike Snitzer if (live_map)
27753ae70656SMike Snitzer limits = md->queue->limits;
277683d5e5b0SMikulas Patocka dm_put_live_table_fast(md);
27773ae70656SMike Snitzer }
27783ae70656SMike Snitzer
277987eb5b21SMike Christie if (!live_map) {
2780754c5fc7SMike Snitzer r = dm_calculate_queue_limits(table, &limits);
2781042d2a9bSAlasdair G Kergon if (r) {
2782042d2a9bSAlasdair G Kergon map = ERR_PTR(r);
2783754c5fc7SMike Snitzer goto out;
2784042d2a9bSAlasdair G Kergon }
278587eb5b21SMike Christie }
2786754c5fc7SMike Snitzer
2787042d2a9bSAlasdair G Kergon map = __bind(md, table, &limits);
27881da177e4SLinus Torvalds
278993c534aeSAlasdair G Kergon out:
2790e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock);
2791042d2a9bSAlasdair G Kergon return map;
27921da177e4SLinus Torvalds }
27931da177e4SLinus Torvalds
27941da177e4SLinus Torvalds /*
27951da177e4SLinus Torvalds * Functions to lock and unlock any filesystem running on the
27961da177e4SLinus Torvalds * device.
27971da177e4SLinus Torvalds */ 27982ca3310eSAlasdair G Kergon static int lock_fs(struct mapped_device *md) 27991da177e4SLinus Torvalds { 2800e39e2e95SAlasdair G Kergon int r; 28011da177e4SLinus Torvalds 28021da177e4SLinus Torvalds WARN_ON(md->frozen_sb); 2803dfbe03f6SAlasdair G Kergon 2804db8fef4fSMikulas Patocka md->frozen_sb = freeze_bdev(md->bdev); 2805dfbe03f6SAlasdair G Kergon if (IS_ERR(md->frozen_sb)) { 2806cf222b37SAlasdair G Kergon r = PTR_ERR(md->frozen_sb); 2807e39e2e95SAlasdair G Kergon md->frozen_sb = NULL; 2808e39e2e95SAlasdair G Kergon return r; 2809dfbe03f6SAlasdair G Kergon } 2810dfbe03f6SAlasdair G Kergon 2811aa8d7c2fSAlasdair G Kergon set_bit(DMF_FROZEN, &md->flags); 2812aa8d7c2fSAlasdair G Kergon 28131da177e4SLinus Torvalds return 0; 28141da177e4SLinus Torvalds } 28151da177e4SLinus Torvalds 28162ca3310eSAlasdair G Kergon static void unlock_fs(struct mapped_device *md) 28171da177e4SLinus Torvalds { 2818aa8d7c2fSAlasdair G Kergon if (!test_bit(DMF_FROZEN, &md->flags)) 2819aa8d7c2fSAlasdair G Kergon return; 2820aa8d7c2fSAlasdair G Kergon 2821db8fef4fSMikulas Patocka thaw_bdev(md->bdev, md->frozen_sb); 28221da177e4SLinus Torvalds md->frozen_sb = NULL; 2823aa8d7c2fSAlasdair G Kergon clear_bit(DMF_FROZEN, &md->flags); 28241da177e4SLinus Torvalds } 28251da177e4SLinus Torvalds 28261da177e4SLinus Torvalds /* 2827ffcc3936SMike Snitzer * If __dm_suspend returns 0, the device is completely quiescent 2828ffcc3936SMike Snitzer * now. There is no request-processing activity. All new requests 2829ffcc3936SMike Snitzer * are being added to md->deferred list. 2830cec47e3dSKiyoshi Ueda * 2831ffcc3936SMike Snitzer * Caller must hold md->suspend_lock 2832cec47e3dSKiyoshi Ueda */ 2833ffcc3936SMike Snitzer static int __dm_suspend(struct mapped_device *md, struct dm_table *map, 2834ffcc3936SMike Snitzer unsigned suspend_flags, int interruptible) 28351da177e4SLinus Torvalds { 2836ffcc3936SMike Snitzer bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG; 2837ffcc3936SMike Snitzer bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG; 2838ffcc3936SMike Snitzer int r; 2839cf222b37SAlasdair G Kergon 28402e93ccc1SKiyoshi Ueda /* 28412e93ccc1SKiyoshi Ueda * DMF_NOFLUSH_SUSPENDING must be set before presuspend. 28422e93ccc1SKiyoshi Ueda * This flag is cleared before dm_suspend returns. 28432e93ccc1SKiyoshi Ueda */ 28442e93ccc1SKiyoshi Ueda if (noflush) 28452e93ccc1SKiyoshi Ueda set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 28462e93ccc1SKiyoshi Ueda 2847d67ee213SMike Snitzer /* 2848d67ee213SMike Snitzer * This gets reverted if there's an error later and the targets 2849d67ee213SMike Snitzer * provide the .presuspend_undo hook. 2850d67ee213SMike Snitzer */ 28511da177e4SLinus Torvalds dm_table_presuspend_targets(map); 28521da177e4SLinus Torvalds 28532e93ccc1SKiyoshi Ueda /* 28549f518b27SKiyoshi Ueda * Flush I/O to the device. 28559f518b27SKiyoshi Ueda * Any I/O submitted after lock_fs() may not be flushed. 28569f518b27SKiyoshi Ueda * noflush takes precedence over do_lockfs. 28579f518b27SKiyoshi Ueda * (lock_fs() flushes I/Os and waits for them to complete.) 
28582e93ccc1SKiyoshi Ueda */
285932a926daSMikulas Patocka if (!noflush && do_lockfs) {
28602ca3310eSAlasdair G Kergon r = lock_fs(md);
2861d67ee213SMike Snitzer if (r) {
2862d67ee213SMike Snitzer dm_table_presuspend_undo_targets(map);
2863ffcc3936SMike Snitzer return r;
2864aa8d7c2fSAlasdair G Kergon }
2865d67ee213SMike Snitzer }
28661da177e4SLinus Torvalds
28671da177e4SLinus Torvalds /*
28683b00b203SMikulas Patocka * Here we must make sure that no processes are submitting requests
28693b00b203SMikulas Patocka * to target drivers i.e. no one may be executing
28703b00b203SMikulas Patocka * __split_and_process_bio. This is called from dm_request and
28713b00b203SMikulas Patocka * dm_wq_work.
28723b00b203SMikulas Patocka *
28733b00b203SMikulas Patocka * To get all processes out of __split_and_process_bio in dm_request,
28743b00b203SMikulas Patocka * we take the write lock. To prevent any process from reentering
28756a8736d1STejun Heo * __split_and_process_bio from dm_request and quiesce the thread
28766a8736d1STejun Heo * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
28776a8736d1STejun Heo * flush_workqueue(md->wq).
28781da177e4SLinus Torvalds */
28791eb787ecSAlasdair G Kergon set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
288041abc4e1SHannes Reinecke if (map)
288183d5e5b0SMikulas Patocka synchronize_srcu(&md->io_barrier);
28821da177e4SLinus Torvalds
2883d0bcb878SKiyoshi Ueda /*
288429e4013dSTejun Heo * Stop md->queue before flushing md->wq in case request-based
288529e4013dSTejun Heo * dm defers requests to md->wq from md->queue.
2886d0bcb878SKiyoshi Ueda */
28872eb6e1e3SKeith Busch if (dm_request_based(md)) {
28889f518b27SKiyoshi Ueda stop_queue(md->queue);
28892eb6e1e3SKeith Busch flush_kthread_worker(&md->kworker);
28902eb6e1e3SKeith Busch }
2891cec47e3dSKiyoshi Ueda
2892d0bcb878SKiyoshi Ueda flush_workqueue(md->wq);
2893d0bcb878SKiyoshi Ueda
28941da177e4SLinus Torvalds /*
28953b00b203SMikulas Patocka * At this point no more requests are entering target request routines.
28963b00b203SMikulas Patocka * We call dm_wait_for_completion to wait for all existing requests
28973b00b203SMikulas Patocka * to finish.
28981da177e4SLinus Torvalds */
2899ffcc3936SMike Snitzer r = dm_wait_for_completion(md, interruptible);
29001da177e4SLinus Torvalds
29016d6f10dfSMilan Broz if (noflush)
2902022c2611SMikulas Patocka clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
290341abc4e1SHannes Reinecke if (map)
290483d5e5b0SMikulas Patocka synchronize_srcu(&md->io_barrier);
29052e93ccc1SKiyoshi Ueda
29061da177e4SLinus Torvalds /* were we interrupted ? */
290746125c1cSMilan Broz if (r < 0) {
29089a1fb464SMikulas Patocka dm_queue_flush(md);
290973d410c0SMilan Broz
2910cec47e3dSKiyoshi Ueda if (dm_request_based(md))
29119f518b27SKiyoshi Ueda start_queue(md->queue);
2912cec47e3dSKiyoshi Ueda
29132ca3310eSAlasdair G Kergon unlock_fs(md);
2914d67ee213SMike Snitzer dm_table_presuspend_undo_targets(map);
2915ffcc3936SMike Snitzer /* pushback list is already flushed, so skip flush */
2916ffcc3936SMike Snitzer }
2917ffcc3936SMike Snitzer
2918ffcc3936SMike Snitzer return r;
29192ca3310eSAlasdair G Kergon }
29202ca3310eSAlasdair G Kergon
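/*
 * Illustrative sketch only (not from the original source): how an
 * in-kernel caller might drive a full suspend / table-swap / resume
 * cycle with the interfaces above. 'new_table' is assumed to have been
 * created and populated elsewhere; error handling is abbreviated.
 */
static int example_replace_table(struct mapped_device *md,
				 struct dm_table *new_table)
{
	struct dm_table *old_map;
	int r;

	/* Flush the filesystem and quiesce all in-flight I/O first. */
	r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
	if (r)
		return r;

	/* Swap in the new map; the old one is returned for destruction. */
	old_map = dm_swap_table(md, new_table);
	if (IS_ERR(old_map)) {
		dm_resume(md);
		return PTR_ERR(old_map);
	}
	if (old_map)
		dm_table_destroy(old_map);

	return dm_resume(md);
}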
29213b00b203SMikulas Patocka /*
2922ffcc3936SMike Snitzer * We need to be able to change a mapping table under a mounted
2923ffcc3936SMike Snitzer * filesystem. For example we might want to move some data in
2924ffcc3936SMike Snitzer * the background. Before the table can be swapped with
2925ffcc3936SMike Snitzer * dm_bind_table, dm_suspend must be called to flush any in
2926ffcc3936SMike Snitzer * flight bios and ensure that any further io gets deferred.
29273b00b203SMikulas Patocka */
2928ffcc3936SMike Snitzer /*
2929ffcc3936SMike Snitzer * Suspend mechanism in request-based dm.
2930ffcc3936SMike Snitzer *
2931ffcc3936SMike Snitzer * 1. Flush all I/Os by lock_fs() if needed.
2932ffcc3936SMike Snitzer * 2. Stop dispatching any I/O by stopping the request_queue.
2933ffcc3936SMike Snitzer * 3. Wait for all in-flight I/Os to be completed or requeued.
2934ffcc3936SMike Snitzer *
2935ffcc3936SMike Snitzer * To abort suspend, start the request_queue.
2936ffcc3936SMike Snitzer */
2937ffcc3936SMike Snitzer int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
2938ffcc3936SMike Snitzer {
2939ffcc3936SMike Snitzer struct dm_table *map = NULL;
2940ffcc3936SMike Snitzer int r = 0;
2941ffcc3936SMike Snitzer
2942ffcc3936SMike Snitzer retry:
2943ffcc3936SMike Snitzer mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
2944ffcc3936SMike Snitzer
2945ffcc3936SMike Snitzer if (dm_suspended_md(md)) {
2946ffcc3936SMike Snitzer r = -EINVAL;
2947ffcc3936SMike Snitzer goto out_unlock;
2948ffcc3936SMike Snitzer }
2949ffcc3936SMike Snitzer
2950ffcc3936SMike Snitzer if (dm_suspended_internally_md(md)) {
2951ffcc3936SMike Snitzer /* already internally suspended, wait for internal resume */
2952ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock);
2953ffcc3936SMike Snitzer r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
2954ffcc3936SMike Snitzer if (r)
2955ffcc3936SMike Snitzer return r;
2956ffcc3936SMike Snitzer goto retry;
2957ffcc3936SMike Snitzer }
2958ffcc3936SMike Snitzer
2959a12f5d48SEric Dumazet map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2960ffcc3936SMike Snitzer
2961ffcc3936SMike Snitzer r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE);
2962ffcc3936SMike Snitzer if (r)
2963ffcc3936SMike Snitzer goto out_unlock;
29643b00b203SMikulas Patocka
29651da177e4SLinus Torvalds set_bit(DMF_SUSPENDED, &md->flags);
29661da177e4SLinus Torvalds
29674d4471cbSKiyoshi Ueda dm_table_postsuspend_targets(map);
29684d4471cbSKiyoshi Ueda
2969d287483dSAlasdair G Kergon out_unlock:
2970e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock);
2971cf222b37SAlasdair G Kergon return r;
29721da177e4SLinus Torvalds }
29731da177e4SLinus Torvalds
2974ffcc3936SMike Snitzer static int __dm_resume(struct mapped_device *md, struct dm_table *map)
29751da177e4SLinus Torvalds {
2976ffcc3936SMike Snitzer if (map) {
2977ffcc3936SMike Snitzer int r = dm_table_resume_targets(map);
29788757b776SMilan Broz if (r)
2979ffcc3936SMike Snitzer return r;
2980ffcc3936SMike Snitzer }
29812ca3310eSAlasdair G Kergon
29829a1fb464SMikulas Patocka dm_queue_flush(md);
29832ca3310eSAlasdair G Kergon
2984cec47e3dSKiyoshi Ueda /*
2985cec47e3dSKiyoshi Ueda * Flushing deferred I/Os must be done after targets are resumed
2986cec47e3dSKiyoshi Ueda * so that mapping of targets can work correctly.
2987cec47e3dSKiyoshi Ueda * Request-based dm is queueing the deferred I/Os in its request_queue.
2988cec47e3dSKiyoshi Ueda */ 2989cec47e3dSKiyoshi Ueda if (dm_request_based(md)) 2990cec47e3dSKiyoshi Ueda start_queue(md->queue); 2991cec47e3dSKiyoshi Ueda 29922ca3310eSAlasdair G Kergon unlock_fs(md); 29932ca3310eSAlasdair G Kergon 2994ffcc3936SMike Snitzer return 0; 2995ffcc3936SMike Snitzer } 2996ffcc3936SMike Snitzer 2997ffcc3936SMike Snitzer int dm_resume(struct mapped_device *md) 2998ffcc3936SMike Snitzer { 2999ffcc3936SMike Snitzer int r = -EINVAL; 3000ffcc3936SMike Snitzer struct dm_table *map = NULL; 3001ffcc3936SMike Snitzer 3002ffcc3936SMike Snitzer retry: 3003ffcc3936SMike Snitzer mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); 3004ffcc3936SMike Snitzer 3005ffcc3936SMike Snitzer if (!dm_suspended_md(md)) 3006ffcc3936SMike Snitzer goto out; 3007ffcc3936SMike Snitzer 3008ffcc3936SMike Snitzer if (dm_suspended_internally_md(md)) { 3009ffcc3936SMike Snitzer /* already internally suspended, wait for internal resume */ 3010ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 3011ffcc3936SMike Snitzer r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); 3012ffcc3936SMike Snitzer if (r) 3013ffcc3936SMike Snitzer return r; 3014ffcc3936SMike Snitzer goto retry; 3015ffcc3936SMike Snitzer } 3016ffcc3936SMike Snitzer 3017a12f5d48SEric Dumazet map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 3018ffcc3936SMike Snitzer if (!map || !dm_table_get_size(map)) 3019ffcc3936SMike Snitzer goto out; 3020ffcc3936SMike Snitzer 3021ffcc3936SMike Snitzer r = __dm_resume(md, map); 3022ffcc3936SMike Snitzer if (r) 3023ffcc3936SMike Snitzer goto out; 3024ffcc3936SMike Snitzer 30252ca3310eSAlasdair G Kergon clear_bit(DMF_SUSPENDED, &md->flags); 30262ca3310eSAlasdair G Kergon 3027cf222b37SAlasdair G Kergon r = 0; 3028cf222b37SAlasdair G Kergon out: 3029e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock); 30302ca3310eSAlasdair G Kergon 3031cf222b37SAlasdair G Kergon return r; 30321da177e4SLinus Torvalds } 30331da177e4SLinus Torvalds 3034fd2ed4d2SMikulas Patocka /* 3035fd2ed4d2SMikulas Patocka * Internal suspend/resume works like userspace-driven suspend. It waits 3036fd2ed4d2SMikulas Patocka * until all bios finish and prevents issuing new bios to the target drivers. 3037fd2ed4d2SMikulas Patocka * It may be used only from the kernel. 3038fd2ed4d2SMikulas Patocka */ 3039fd2ed4d2SMikulas Patocka 3040ffcc3936SMike Snitzer static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags) 3041ffcc3936SMike Snitzer { 3042ffcc3936SMike Snitzer struct dm_table *map = NULL; 3043ffcc3936SMike Snitzer 304496b26c8cSMikulas Patocka if (md->internal_suspend_count++) 3045ffcc3936SMike Snitzer return; /* nested internal suspend */ 3046ffcc3936SMike Snitzer 3047ffcc3936SMike Snitzer if (dm_suspended_md(md)) { 3048ffcc3936SMike Snitzer set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 3049ffcc3936SMike Snitzer return; /* nest suspend */ 3050ffcc3936SMike Snitzer } 3051ffcc3936SMike Snitzer 3052a12f5d48SEric Dumazet map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 3053ffcc3936SMike Snitzer 3054ffcc3936SMike Snitzer /* 3055ffcc3936SMike Snitzer * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is 3056ffcc3936SMike Snitzer * supported. Properly supporting a TASK_INTERRUPTIBLE internal suspend 3057ffcc3936SMike Snitzer * would require changing .presuspend to return an error -- avoid this 3058ffcc3936SMike Snitzer * until there is a need for more elaborate variants of internal suspend. 
3059ffcc3936SMike Snitzer */ 3060ffcc3936SMike Snitzer (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE); 3061ffcc3936SMike Snitzer 3062ffcc3936SMike Snitzer set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 3063ffcc3936SMike Snitzer 3064ffcc3936SMike Snitzer dm_table_postsuspend_targets(map); 3065ffcc3936SMike Snitzer } 3066ffcc3936SMike Snitzer 3067ffcc3936SMike Snitzer static void __dm_internal_resume(struct mapped_device *md) 3068ffcc3936SMike Snitzer { 306996b26c8cSMikulas Patocka BUG_ON(!md->internal_suspend_count); 307096b26c8cSMikulas Patocka 307196b26c8cSMikulas Patocka if (--md->internal_suspend_count) 3072ffcc3936SMike Snitzer return; /* resume from nested internal suspend */ 3073ffcc3936SMike Snitzer 3074ffcc3936SMike Snitzer if (dm_suspended_md(md)) 3075ffcc3936SMike Snitzer goto done; /* resume from nested suspend */ 3076ffcc3936SMike Snitzer 3077ffcc3936SMike Snitzer /* 3078ffcc3936SMike Snitzer * NOTE: existing callers don't need to call dm_table_resume_targets 3079ffcc3936SMike Snitzer * (which may fail -- so best to avoid it for now by passing NULL map) 3080ffcc3936SMike Snitzer */ 3081ffcc3936SMike Snitzer (void) __dm_resume(md, NULL); 3082ffcc3936SMike Snitzer 3083ffcc3936SMike Snitzer done: 3084ffcc3936SMike Snitzer clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 3085ffcc3936SMike Snitzer smp_mb__after_atomic(); 3086ffcc3936SMike Snitzer wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY); 3087ffcc3936SMike Snitzer } 3088ffcc3936SMike Snitzer 3089ffcc3936SMike Snitzer void dm_internal_suspend_noflush(struct mapped_device *md) 3090fd2ed4d2SMikulas Patocka { 3091fd2ed4d2SMikulas Patocka mutex_lock(&md->suspend_lock); 3092ffcc3936SMike Snitzer __dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG); 3093ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 3094ffcc3936SMike Snitzer } 3095ffcc3936SMike Snitzer EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush); 3096ffcc3936SMike Snitzer 3097ffcc3936SMike Snitzer void dm_internal_resume(struct mapped_device *md) 3098ffcc3936SMike Snitzer { 3099ffcc3936SMike Snitzer mutex_lock(&md->suspend_lock); 3100ffcc3936SMike Snitzer __dm_internal_resume(md); 3101ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 3102ffcc3936SMike Snitzer } 3103ffcc3936SMike Snitzer EXPORT_SYMBOL_GPL(dm_internal_resume); 3104ffcc3936SMike Snitzer 3105ffcc3936SMike Snitzer /* 3106ffcc3936SMike Snitzer * Fast variants of internal suspend/resume hold md->suspend_lock, 3107ffcc3936SMike Snitzer * which prevents interaction with userspace-driven suspend. 
3108ffcc3936SMike Snitzer */ 3109ffcc3936SMike Snitzer 3110ffcc3936SMike Snitzer void dm_internal_suspend_fast(struct mapped_device *md) 3111ffcc3936SMike Snitzer { 3112ffcc3936SMike Snitzer mutex_lock(&md->suspend_lock); 3113ffcc3936SMike Snitzer if (dm_suspended_md(md) || dm_suspended_internally_md(md)) 3114fd2ed4d2SMikulas Patocka return; 3115fd2ed4d2SMikulas Patocka 3116fd2ed4d2SMikulas Patocka set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 3117fd2ed4d2SMikulas Patocka synchronize_srcu(&md->io_barrier); 3118fd2ed4d2SMikulas Patocka flush_workqueue(md->wq); 3119fd2ed4d2SMikulas Patocka dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); 3120fd2ed4d2SMikulas Patocka } 3121b735fedeSMikulas Patocka EXPORT_SYMBOL_GPL(dm_internal_suspend_fast); 3122fd2ed4d2SMikulas Patocka 3123ffcc3936SMike Snitzer void dm_internal_resume_fast(struct mapped_device *md) 3124fd2ed4d2SMikulas Patocka { 3125ffcc3936SMike Snitzer if (dm_suspended_md(md) || dm_suspended_internally_md(md)) 3126fd2ed4d2SMikulas Patocka goto done; 3127fd2ed4d2SMikulas Patocka 3128fd2ed4d2SMikulas Patocka dm_queue_flush(md); 3129fd2ed4d2SMikulas Patocka 3130fd2ed4d2SMikulas Patocka done: 3131fd2ed4d2SMikulas Patocka mutex_unlock(&md->suspend_lock); 3132fd2ed4d2SMikulas Patocka } 3133b735fedeSMikulas Patocka EXPORT_SYMBOL_GPL(dm_internal_resume_fast); 3134fd2ed4d2SMikulas Patocka 31351da177e4SLinus Torvalds /*----------------------------------------------------------------- 31361da177e4SLinus Torvalds * Event notification. 31371da177e4SLinus Torvalds *---------------------------------------------------------------*/ 31383abf85b5SPeter Rajnoha int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, 313960935eb2SMilan Broz unsigned cookie) 314069267a30SAlasdair G Kergon { 314160935eb2SMilan Broz char udev_cookie[DM_COOKIE_LENGTH]; 314260935eb2SMilan Broz char *envp[] = { udev_cookie, NULL }; 314360935eb2SMilan Broz 314460935eb2SMilan Broz if (!cookie) 31453abf85b5SPeter Rajnoha return kobject_uevent(&disk_to_dev(md->disk)->kobj, action); 314660935eb2SMilan Broz else { 314760935eb2SMilan Broz snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u", 314860935eb2SMilan Broz DM_COOKIE_ENV_VAR_NAME, cookie); 31493abf85b5SPeter Rajnoha return kobject_uevent_env(&disk_to_dev(md->disk)->kobj, 31503abf85b5SPeter Rajnoha action, envp); 315160935eb2SMilan Broz } 315269267a30SAlasdair G Kergon } 315369267a30SAlasdair G Kergon 31547a8c3d3bSMike Anderson uint32_t dm_next_uevent_seq(struct mapped_device *md) 31557a8c3d3bSMike Anderson { 31567a8c3d3bSMike Anderson return atomic_add_return(1, &md->uevent_seq); 31577a8c3d3bSMike Anderson } 31587a8c3d3bSMike Anderson 31591da177e4SLinus Torvalds uint32_t dm_get_event_nr(struct mapped_device *md) 31601da177e4SLinus Torvalds { 31611da177e4SLinus Torvalds return atomic_read(&md->event_nr); 31621da177e4SLinus Torvalds } 31631da177e4SLinus Torvalds 31641da177e4SLinus Torvalds int dm_wait_event(struct mapped_device *md, int event_nr) 31651da177e4SLinus Torvalds { 31661da177e4SLinus Torvalds return wait_event_interruptible(md->eventq, 31671da177e4SLinus Torvalds (event_nr != atomic_read(&md->event_nr))); 31681da177e4SLinus Torvalds } 31691da177e4SLinus Torvalds 31707a8c3d3bSMike Anderson void dm_uevent_add(struct mapped_device *md, struct list_head *elist) 31717a8c3d3bSMike Anderson { 31727a8c3d3bSMike Anderson unsigned long flags; 31737a8c3d3bSMike Anderson 31747a8c3d3bSMike Anderson spin_lock_irqsave(&md->uevent_lock, flags); 31757a8c3d3bSMike Anderson list_add(elist, &md->uevent_list); 
31767a8c3d3bSMike Anderson spin_unlock_irqrestore(&md->uevent_lock, flags); 31777a8c3d3bSMike Anderson } 31787a8c3d3bSMike Anderson 31791da177e4SLinus Torvalds /* 31801da177e4SLinus Torvalds * The gendisk is only valid as long as you have a reference 31811da177e4SLinus Torvalds * count on 'md'. 31821da177e4SLinus Torvalds */ 31831da177e4SLinus Torvalds struct gendisk *dm_disk(struct mapped_device *md) 31841da177e4SLinus Torvalds { 31851da177e4SLinus Torvalds return md->disk; 31861da177e4SLinus Torvalds } 31871da177e4SLinus Torvalds 3188784aae73SMilan Broz struct kobject *dm_kobject(struct mapped_device *md) 3189784aae73SMilan Broz { 31902995fa78SMikulas Patocka return &md->kobj_holder.kobj; 3191784aae73SMilan Broz } 3192784aae73SMilan Broz 3193784aae73SMilan Broz struct mapped_device *dm_get_from_kobject(struct kobject *kobj) 3194784aae73SMilan Broz { 3195784aae73SMilan Broz struct mapped_device *md; 3196784aae73SMilan Broz 31972995fa78SMikulas Patocka md = container_of(kobj, struct mapped_device, kobj_holder.kobj); 3198784aae73SMilan Broz 31994d89b7b4SMilan Broz if (test_bit(DMF_FREEING, &md->flags) || 3200432a212cSMike Anderson dm_deleting_md(md)) 32014d89b7b4SMilan Broz return NULL; 32024d89b7b4SMilan Broz 3203784aae73SMilan Broz dm_get(md); 3204784aae73SMilan Broz return md; 3205784aae73SMilan Broz } 3206784aae73SMilan Broz 32074f186f8bSKiyoshi Ueda int dm_suspended_md(struct mapped_device *md) 32081da177e4SLinus Torvalds { 32091da177e4SLinus Torvalds return test_bit(DMF_SUSPENDED, &md->flags); 32101da177e4SLinus Torvalds } 32111da177e4SLinus Torvalds 3212ffcc3936SMike Snitzer int dm_suspended_internally_md(struct mapped_device *md) 3213ffcc3936SMike Snitzer { 3214ffcc3936SMike Snitzer return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 3215ffcc3936SMike Snitzer } 3216ffcc3936SMike Snitzer 32172c140a24SMikulas Patocka int dm_test_deferred_remove_flag(struct mapped_device *md) 32182c140a24SMikulas Patocka { 32192c140a24SMikulas Patocka return test_bit(DMF_DEFERRED_REMOVE, &md->flags); 32202c140a24SMikulas Patocka } 32212c140a24SMikulas Patocka 322264dbce58SKiyoshi Ueda int dm_suspended(struct dm_target *ti) 322364dbce58SKiyoshi Ueda { 3224ecdb2e25SKiyoshi Ueda return dm_suspended_md(dm_table_get_md(ti->table)); 322564dbce58SKiyoshi Ueda } 322664dbce58SKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_suspended); 322764dbce58SKiyoshi Ueda 32282e93ccc1SKiyoshi Ueda int dm_noflush_suspending(struct dm_target *ti) 32292e93ccc1SKiyoshi Ueda { 3230ecdb2e25SKiyoshi Ueda return __noflush_suspending(dm_table_get_md(ti->table)); 32312e93ccc1SKiyoshi Ueda } 32322e93ccc1SKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_noflush_suspending); 32332e93ccc1SKiyoshi Ueda 3234c0820cf5SMikulas Patocka struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size) 3235e6ee8c0bSKiyoshi Ueda { 32365f015204SJun'ichi Nomura struct dm_md_mempools *pools = kzalloc(sizeof(*pools), GFP_KERNEL); 32375f015204SJun'ichi Nomura struct kmem_cache *cachep; 3238e5863d9aSMike Snitzer unsigned int pool_size = 0; 32395f015204SJun'ichi Nomura unsigned int front_pad; 3240e6ee8c0bSKiyoshi Ueda 3241e6ee8c0bSKiyoshi Ueda if (!pools) 3242e6ee8c0bSKiyoshi Ueda return NULL; 3243e6ee8c0bSKiyoshi Ueda 3244e5863d9aSMike Snitzer switch (type) { 3245e5863d9aSMike Snitzer case DM_TYPE_BIO_BASED: 32465f015204SJun'ichi Nomura cachep = _io_cache; 3247e8603136SMike Snitzer pool_size = dm_get_reserved_bio_based_ios(); 32485f015204SJun'ichi Nomura front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + 
offsetof(struct dm_target_io, clone);
3249e5863d9aSMike Snitzer break;
3250e5863d9aSMike Snitzer case DM_TYPE_REQUEST_BASED:
3251f4790826SMike Snitzer pool_size = dm_get_reserved_rq_based_ios();
32521ae49ea2SMike Snitzer pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache);
32531ae49ea2SMike Snitzer if (!pools->rq_pool)
32541ae49ea2SMike Snitzer goto out;
3255e5863d9aSMike Snitzer /* fall through to setup remaining rq-based pools */
3256e5863d9aSMike Snitzer case DM_TYPE_MQ_REQUEST_BASED:
32575f015204SJun'ichi Nomura cachep = _rq_tio_cache;
3258e5863d9aSMike Snitzer if (!pool_size)
32596cfa5857SMike Snitzer pool_size = dm_get_reserved_rq_based_ios();
32605f015204SJun'ichi Nomura front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
32615f015204SJun'ichi Nomura /* per_bio_data_size is not used. See __bind_mempools(). */
32625f015204SJun'ichi Nomura WARN_ON(per_bio_data_size != 0);
3263e5863d9aSMike Snitzer break;
3264e5863d9aSMike Snitzer default:
32655f015204SJun'ichi Nomura goto out;
3266e5863d9aSMike Snitzer }
32675f015204SJun'ichi Nomura
32686cfa5857SMike Snitzer pools->io_pool = mempool_create_slab_pool(pool_size, cachep);
3269e6ee8c0bSKiyoshi Ueda if (!pools->io_pool)
32705f015204SJun'ichi Nomura goto out;
3271e6ee8c0bSKiyoshi Ueda
32723d8aab2dSJunichi Nomura pools->bs = bioset_create_nobvec(pool_size, front_pad);
3273e6ee8c0bSKiyoshi Ueda if (!pools->bs)
32745f015204SJun'ichi Nomura goto out;
3275e6ee8c0bSKiyoshi Ueda
3276a91a2785SMartin K. Petersen if (integrity && bioset_integrity_create(pools->bs, pool_size))
32775f015204SJun'ichi Nomura goto out;
3278a91a2785SMartin K. Petersen
3279e6ee8c0bSKiyoshi Ueda return pools;
3280e6ee8c0bSKiyoshi Ueda
32815f015204SJun'ichi Nomura out:
32825f015204SJun'ichi Nomura dm_free_md_mempools(pools);
3283e6ee8c0bSKiyoshi Ueda
3284e6ee8c0bSKiyoshi Ueda return NULL;
3285e6ee8c0bSKiyoshi Ueda }
3286e6ee8c0bSKiyoshi Ueda
3287e6ee8c0bSKiyoshi Ueda void dm_free_md_mempools(struct dm_md_mempools *pools)
3288e6ee8c0bSKiyoshi Ueda {
3289e6ee8c0bSKiyoshi Ueda if (!pools)
3290e6ee8c0bSKiyoshi Ueda return;
3291e6ee8c0bSKiyoshi Ueda
3292e6ee8c0bSKiyoshi Ueda if (pools->io_pool)
3293e6ee8c0bSKiyoshi Ueda mempool_destroy(pools->io_pool);
3294e6ee8c0bSKiyoshi Ueda
32951ae49ea2SMike Snitzer if (pools->rq_pool)
32961ae49ea2SMike Snitzer mempool_destroy(pools->rq_pool);
32971ae49ea2SMike Snitzer
3298e6ee8c0bSKiyoshi Ueda if (pools->bs)
3299e6ee8c0bSKiyoshi Ueda bioset_free(pools->bs);
3300e6ee8c0bSKiyoshi Ueda
3301e6ee8c0bSKiyoshi Ueda kfree(pools);
3302e6ee8c0bSKiyoshi Ueda }
3303e6ee8c0bSKiyoshi Ueda
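/*
 * Illustrative sketch only (not from the original source): pairing
 * dm_alloc_md_mempools() with dm_free_md_mempools() for a bio-based
 * table. The per_bio_data_size argument is whatever the table's targets
 * actually request; integrity is 0 because no integrity profile is
 * assumed here.
 */
static struct dm_md_mempools *example_pools_for_bio_based_table(unsigned per_bio_data_size)
{
	/* Pools are sized from reserved_bio_based_ios for DM_TYPE_BIO_BASED. */
	struct dm_md_mempools *pools =
		dm_alloc_md_mempools(DM_TYPE_BIO_BASED, 0, per_bio_data_size);

	/* The caller eventually releases everything with dm_free_md_mempools(). */
	return pools;
}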
330483d5cde4SAlexey Dobriyan static const struct block_device_operations dm_blk_dops = {
33051da177e4SLinus Torvalds .open = dm_blk_open,
33061da177e4SLinus Torvalds .release = dm_blk_close,
3307aa129a22SMilan Broz .ioctl = dm_blk_ioctl,
33083ac51e74SDarrick J. Wong .getgeo = dm_blk_getgeo,
33091da177e4SLinus Torvalds .owner = THIS_MODULE
33101da177e4SLinus Torvalds };
33111da177e4SLinus Torvalds
33121da177e4SLinus Torvalds /*
33131da177e4SLinus Torvalds * module hooks
33141da177e4SLinus Torvalds */
33151da177e4SLinus Torvalds module_init(dm_init);
33161da177e4SLinus Torvalds module_exit(dm_exit);
33171da177e4SLinus Torvalds
33181da177e4SLinus Torvalds module_param(major, uint, 0);
33191da177e4SLinus Torvalds MODULE_PARM_DESC(major, "The major number of the device mapper");
3320f4790826SMike Snitzer
3321e8603136SMike Snitzer module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
3322e8603136SMike Snitzer MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
3323e8603136SMike Snitzer
3324f4790826SMike Snitzer module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
3325f4790826SMike Snitzer MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");
3326f4790826SMike Snitzer
33271da177e4SLinus Torvalds MODULE_DESCRIPTION(DM_NAME " driver");
33281da177e4SLinus Torvalds MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
33291da177e4SLinus Torvalds MODULE_LICENSE("GPL");
3330
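/*
 * Illustrative usage note (not part of the original source): because the
 * reserved-IO parameters above are declared S_IRUGO | S_IWUSR, they can
 * be set at module load time or adjusted later through sysfs, e.g.
 * (example values only):
 *
 *   modprobe dm_mod reserved_bio_based_ios=32
 *   echo 32 > /sys/module/dm_mod/parameters/reserved_bio_based_ios
 */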