/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/delay.h>

#include <trace/events/block.h>

#define DM_MSG_PREFIX "core"

#ifdef CONFIG_PRINTK
/*
 * ratelimit state to be used in DMXXX_LIMIT().
 */
DEFINE_RATELIMIT_STATE(dm_ratelimit_state,
		       DEFAULT_RATELIMIT_INTERVAL,
		       DEFAULT_RATELIMIT_BURST);
EXPORT_SYMBOL(dm_ratelimit_state);
#endif

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_IDR(_minor_idr);

static DEFINE_SPINLOCK(_minor_lock);

static void do_deferred_remove(struct work_struct *w);

static DECLARE_WORK(deferred_remove_work, do_deferred_remove);

static struct workqueue_struct *deferred_remove_workqueue;

/*
 * For bio-based dm.
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	atomic_t io_count;
	struct bio *bio;
	unsigned long start_time;
	spinlock_t endio_lock;
	struct dm_stats_aux stats_aux;
};

/*
 * For request-based dm.
 * One of these is allocated per request.
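 * The clone request is embedded here, so it is allocated and freed
 * together with the tio (see free_rq_clone() below).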
 */
struct dm_rq_target_io {
	struct mapped_device *md;
	struct dm_target *ti;
	struct request *orig, clone;
	int error;
	union map_info info;
};

/*
 * For request-based dm - the bio clones we allocate are embedded in these
 * structs.
 *
 * We allocate these with bio_alloc_bioset, using the front_pad parameter when
 * the bioset is created - this means the bio has to come at the end of the
 * struct.
 */
struct dm_rq_clone_bio_info {
	struct bio *orig;
	struct dm_rq_target_io *tio;
	struct bio clone;
};

union map_info *dm_get_rq_mapinfo(struct request *rq)
{
	if (rq && rq->end_io_data)
		return &((struct dm_rq_target_io *)rq->end_io_data)->info;
	return NULL;
}
EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);

#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_MERGE_IS_OPTIONAL 6
#define DMF_DEFERRED_REMOVE 7

/*
 * A dummy definition to make RCU happy.
 * struct dm_table should never be dereferenced in this file.
 */
struct dm_table {
	int undefined__;
};

/*
 * Work processed by per-device workqueue.
 */
struct mapped_device {
	struct srcu_struct io_barrier;
	struct mutex suspend_lock;
	atomic_t holders;
	atomic_t open_count;

	/*
	 * The current mapping.
	 * Use dm_get_live_table{_fast} or take suspend_lock for
	 * dereference.
	 */
	struct dm_table *map;

	unsigned long flags;

	struct request_queue *queue;
	unsigned type;
	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;

	struct target_type *immutable_target_type;

	struct gendisk *disk;
	char name[16];

	void *interface_ptr;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	atomic_t pending[2];
	wait_queue_head_t wait;
	struct work_struct work;
	struct bio_list deferred;
	spinlock_t deferred_lock;

	/*
	 * Processing queue (flush)
	 */
	struct workqueue_struct *wq;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;

	struct bio_set *bs;

	/*
	 * Event handling.
	 */
	atomic_t event_nr;
	wait_queue_head_t eventq;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/*
	 * freeze/thaw support requires holding onto a super block
	 */
	struct super_block *frozen_sb;
	struct block_device *bdev;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/* kobject and completion */
	struct dm_kobject_holder kobj_holder;

	/* zero-length flush that will be cloned and submitted to targets */
	struct bio flush_bio;

	struct dm_stats stats;
};

/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
	mempool_t *io_pool;
	struct bio_set *bs;
};

#define RESERVED_BIO_BASED_IOS		16
#define RESERVED_REQUEST_BASED_IOS	256
#define RESERVED_MAX_IOS		1024
static struct kmem_cache *_io_cache;
static struct kmem_cache *_rq_tio_cache;

/*
 * Bio-based DM's mempools' reserved IOs set by the user.
 */
static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;

/*
 * Request-based DM's mempools' reserved IOs set by the user.
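 *
 * A value of 0 selects the default (RESERVED_REQUEST_BASED_IOS); values
 * above RESERVED_MAX_IOS are clamped.  __dm_get_reserved_ios() below
 * enforces this policy locklessly with cmpxchg().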
 */
static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;

static unsigned __dm_get_reserved_ios(unsigned *reserved_ios,
				      unsigned def, unsigned max)
{
	unsigned ios = ACCESS_ONCE(*reserved_ios);
	unsigned modified_ios = 0;

	if (!ios)
		modified_ios = def;
	else if (ios > max)
		modified_ios = max;

	if (modified_ios) {
		(void)cmpxchg(reserved_ios, ios, modified_ios);
		ios = modified_ios;
	}

	return ios;
}

unsigned dm_get_reserved_bio_based_ios(void)
{
	return __dm_get_reserved_ios(&reserved_bio_based_ios,
				     RESERVED_BIO_BASED_IOS, RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);

unsigned dm_get_reserved_rq_based_ios(void)
{
	return __dm_get_reserved_ios(&reserved_rq_based_ios,
				     RESERVED_REQUEST_BASED_IOS, RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);

static int __init local_init(void)
{
	int r = -ENOMEM;

	/* allocate a slab for the dm_ios */
	_io_cache = KMEM_CACHE(dm_io, 0);
	if (!_io_cache)
		return r;

	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
	if (!_rq_tio_cache)
		goto out_free_io_cache;

	r = dm_uevent_init();
	if (r)
		goto out_free_rq_tio_cache;

	deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
	if (!deferred_remove_workqueue) {
		r = -ENOMEM;
		goto out_uevent_exit;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_free_workqueue;

	if (!_major)
		_major = r;

	return 0;

out_free_workqueue:
	destroy_workqueue(deferred_remove_workqueue);
out_uevent_exit:
	dm_uevent_exit();
out_free_rq_tio_cache:
	kmem_cache_destroy(_rq_tio_cache);
out_free_io_cache:
	kmem_cache_destroy(_io_cache);

	return r;
}

static void local_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(deferred_remove_workqueue);

	kmem_cache_destroy(_rq_tio_cache);
	kmem_cache_destroy(_io_cache);
	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_io_init,
	dm_kcopyd_init,
	dm_interface_init,
	dm_statistics_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_io_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
	dm_statistics_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();

	/*
	 * Should be empty by this point.
	 */
	idr_destroy(&_minor_idr);
}

/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{
	return test_bit(DMF_DELETING, &md->flags);
}

static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);

out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}

static void dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md = disk->private_data;

	spin_lock(&_minor_lock);

	if (atomic_dec_and_test(&md->open_count) &&
	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
		queue_work(deferred_remove_workqueue, &deferred_remove_work);

	dm_put(md);

	spin_unlock(&_minor_lock);
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
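 *
 * If @mark_deferred is set and the device is still open, DMF_DEFERRED_REMOVE
 * is set and -EBUSY is returned; the device is then removed once its last
 * opener closes it (see dm_blk_close() above).  If @only_deferred is set,
 * only a device already marked for deferred removal may be deleted.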
 */
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md)) {
		r = -EBUSY;
		if (mark_deferred)
			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
		r = -EEXIST;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

int dm_cancel_deferred_remove(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (test_bit(DMF_DELETING, &md->flags))
		r = -EBUSY;
	else
		clear_bit(DMF_DEFERRED_REMOVE, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static void do_deferred_remove(struct work_struct *w)
{
	dm_deferred_remove();
}

sector_t dm_get_size(struct mapped_device *md)
{
	return get_capacity(md->disk);
}

struct request_queue *dm_get_md_queue(struct mapped_device *md)
{
	return md->queue;
}

struct dm_stats *dm_get_stats(struct mapped_device *md)
{
	return &md->stats;
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

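/*
 * Forward an ioctl to the device's single target.  A suspended device
 * fails with -EAGAIN; -ENOTCONN from the target is retried after a
 * short sleep.
 */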
static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	int srcu_idx;
	struct dm_table *map;
	struct dm_target *tgt;
	int r = -ENOTTY;

retry:
	map = dm_get_live_table(md, &srcu_idx);

	if (!map || !dm_table_get_size(map))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		goto out;

	tgt = dm_table_get_target(map, 0);

	if (dm_suspended_md(md)) {
		r = -EAGAIN;
		goto out;
	}

	if (tgt->type->ioctl)
		r = tgt->type->ioctl(tgt, cmd, arg);

out:
	dm_put_live_table(md, srcu_idx);

	if (r == -ENOTCONN) {
		msleep(10);
		goto retry;
	}

	return r;
}

static struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
	bio_put(&tio->clone);
}

static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
					    gfp_t gfp_mask)
{
	return mempool_alloc(md->io_pool, gfp_mask);
}

static void free_rq_tio(struct dm_rq_target_io *tio)
{
	mempool_free(tio, tio->md->io_pool);
}

static int md_in_flight(struct mapped_device *md)
{
	return atomic_read(&md->pending[READ]) +
	       atomic_read(&md->pending[WRITE]);
}

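/*
 * Account the start of an I/O: bump md->pending[] and the gendisk's
 * in_flight counter, and record the I/O in dm-stats when enabled.
 */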
static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	int cpu;
	int rw = bio_data_dir(bio);

	io->start_time = jiffies;

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_unlock();
	atomic_set(&dm_disk(md)->part0.in_flight[rw],
		   atomic_inc_return(&md->pending[rw]));

	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
				    bio_sectors(bio), false, 0, &io->stats_aux);
}

static void end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	unsigned long duration = jiffies - io->start_time;
	int pending, cpu;
	int rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
	part_stat_unlock();

	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
				    bio_sectors(bio), true, duration, &io->stats_aux);

	/*
	 * After this is decremented the bio must not be touched if it is
	 * a flush.
	 */
	pending = atomic_dec_return(&md->pending[rw]);
	atomic_set(&dm_disk(md)->part0.in_flight[rw], pending);
	pending += atomic_read(&md->pending[rw^0x1]);

	/* nudge anyone waiting on suspend queue */
	if (!pending)
		wake_up(&md->wait);
}

/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&md->deferred_lock, flags);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irqrestore(&md->deferred_lock, flags);
	queue_work(md->wq, &md->work);
}

/*
 * Everyone (including functions in this file), should use this
 * function to access the md->map field, and make sure they call
 * dm_put_live_table() when finished.
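 *
 * A typical access pattern (as in dm_blk_ioctl() above):
 *
 *	int srcu_idx;
 *	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 *
 *	... use map ...
 *
 *	dm_put_live_table(md, srcu_idx);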
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
{
	*srcu_idx = srcu_read_lock(&md->io_barrier);

	return srcu_dereference(md->map, &md->io_barrier);
}

void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
{
	srcu_read_unlock(&md->io_barrier, srcu_idx);
}

void dm_sync_table(struct mapped_device *md)
{
	synchronize_srcu(&md->io_barrier);
	synchronize_rcu_expedited();
}

/*
 * A fast alternative to dm_get_live_table/dm_put_live_table.
 * The caller must not block between these two functions.
 */
static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
{
	rcu_read_lock();
	return rcu_dereference(md->map);
}

static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
{
	rcu_read_unlock();
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

/*-----------------------------------------------------------------
 * CRUD START:
 *   A more elegant solution is in the works that uses the queue
 *   merge fn, unfortunately there are a couple of changes to
 *   the block layer that I want to make for this.  So in the
 *   interests of getting something for people to use I give
 *   you this clearly demarcated crap.
 *---------------------------------------------------------------*/

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, int error)
{
	unsigned long flags;
	int io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;

	/* Push-back supersedes any I/O errors */
	if (unlikely(error)) {
		spin_lock_irqsave(&io->endio_lock, flags);
		if (!(io->error > 0 && __noflush_suspending(md)))
			io->error = error;
		spin_unlock_irqrestore(&io->endio_lock, flags);
	}

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->error == DM_ENDIO_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md))
				bio_list_add_head(&md->deferred, io->bio);
			else
				/* noflush suspend was interrupted. */
				io->error = -EIO;
			spin_unlock_irqrestore(&md->deferred_lock, flags);
		}

		io_error = io->error;
		bio = io->bio;
		end_io_acct(io);
		free_io(md, io);

		if (io_error == DM_ENDIO_REQUEUE)
			return;

		if ((bio->bi_rw & REQ_FLUSH) && bio->bi_iter.bi_size) {
			/*
			 * Preflush done for flush with data, reissue
			 * without REQ_FLUSH.
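			 * (dm issues the preflush as an empty clone of
			 * md->flush_bio first; the data payload is only
			 * resubmitted here once that preflush completes.)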
			 */
			bio->bi_rw &= ~REQ_FLUSH;
			queue_io(md, bio);
		} else {
			/* done with normal IO or empty flush */
			trace_block_bio_complete(md->queue, bio, io_error);
			bio_endio(bio, io_error);
		}
	}
}

static void disable_write_same(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE SAME, disable it */
	limits->max_write_same_sectors = 0;
}

static void clone_endio(struct bio *bio, int error)
{
	int r = 0;
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		error = -EIO;

	if (endio) {
		r = endio(tio->ti, bio, error);
		if (r < 0 || r == DM_ENDIO_REQUEUE)
			/*
			 * error and requeue request are handled
			 * in dec_pending().
			 */
			error = r;
		else if (r == DM_ENDIO_INCOMPLETE)
			/* The target will handle the io */
			return;
		else if (r) {
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	if (unlikely(r == -EREMOTEIO && (bio->bi_rw & REQ_WRITE_SAME) &&
		     !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors))
		disable_write_same(md);

	free_tio(md, tio);
	dec_pending(io, error);
}

/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone, int error)
{
	struct dm_rq_clone_bio_info *info =
		container_of(clone, struct dm_rq_clone_bio_info, clone);
	struct dm_rq_target_io *tio = info->tio;
	struct bio *bio = info->orig;
	unsigned int nr_bytes = info->orig->bi_iter.bi_size;

	bio_put(clone);

	if (tio->error)
		/*
		 * An error has already been detected on the request.
		 * Once an error has occurred, just let clone->end_io()
		 * handle the remainder.
		 */
		return;
	else if (error) {
		/*
		 * Don't notify the upper layer of the error yet.
		 * The error handling decision is made by the target driver,
		 * when the request is completed.
		 */
		tio->error = error;
		return;
	}

	/*
	 * I/O for the bio successfully completed.
	 * Notify the upper layer of the data completion.
	 */

	/*
	 * bios are processed from the head of the list.
	 * So the completing bio should always be rq->bio.
	 * If it's not, something is wrong.
	 */
	if (tio->orig->bio != bio)
		DMERR("bio completion is going in the middle of the request");

	/*
	 * Update the original request.
	 * Do not use blk_end_request() here, because it may complete
	 * the original request before the clone, and break the ordering.
	 */
	blk_update_request(tio->orig, 0, nr_bytes);
}

/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Or do dm_get() before calling this function and dm_put() later.
 */
static void rq_completed(struct mapped_device *md, int rw, int run_queue)
{
	atomic_dec(&md->pending[rw]);

	/* nudge anyone waiting on suspend queue */
	if (!md_in_flight(md))
		wake_up(&md->wait);

	/*
	 * Run this off this callpath, as drivers could invoke end_io while
	 * inside their request_fn (and holding the queue lock). Calling
	 * back into ->request_fn() could deadlock attempting to grab the
	 * queue lock again.
	 */
	if (run_queue)
		blk_run_queue_async(md->queue);

	/*
	 * dm_put() must be at the end of this function.  See the comment above
See the comment above 888cec47e3dSKiyoshi Ueda */ 889cec47e3dSKiyoshi Ueda dm_put(md); 890cec47e3dSKiyoshi Ueda } 891cec47e3dSKiyoshi Ueda 892a77e28c7SKiyoshi Ueda static void free_rq_clone(struct request *clone) 893a77e28c7SKiyoshi Ueda { 894a77e28c7SKiyoshi Ueda struct dm_rq_target_io *tio = clone->end_io_data; 895a77e28c7SKiyoshi Ueda 896a77e28c7SKiyoshi Ueda blk_rq_unprep_clone(clone); 897a77e28c7SKiyoshi Ueda free_rq_tio(tio); 898a77e28c7SKiyoshi Ueda } 899a77e28c7SKiyoshi Ueda 900980691e5SKiyoshi Ueda /* 901980691e5SKiyoshi Ueda * Complete the clone and the original request. 902980691e5SKiyoshi Ueda * Must be called without queue lock. 903980691e5SKiyoshi Ueda */ 904980691e5SKiyoshi Ueda static void dm_end_request(struct request *clone, int error) 905980691e5SKiyoshi Ueda { 906980691e5SKiyoshi Ueda int rw = rq_data_dir(clone); 907980691e5SKiyoshi Ueda struct dm_rq_target_io *tio = clone->end_io_data; 908980691e5SKiyoshi Ueda struct mapped_device *md = tio->md; 909980691e5SKiyoshi Ueda struct request *rq = tio->orig; 910980691e5SKiyoshi Ueda 91129e4013dSTejun Heo if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { 912980691e5SKiyoshi Ueda rq->errors = clone->errors; 913980691e5SKiyoshi Ueda rq->resid_len = clone->resid_len; 914980691e5SKiyoshi Ueda 915980691e5SKiyoshi Ueda if (rq->sense) 916980691e5SKiyoshi Ueda /* 917980691e5SKiyoshi Ueda * We are using the sense buffer of the original 918980691e5SKiyoshi Ueda * request. 919980691e5SKiyoshi Ueda * So setting the length of the sense data is enough. 920980691e5SKiyoshi Ueda */ 921980691e5SKiyoshi Ueda rq->sense_len = clone->sense_len; 922980691e5SKiyoshi Ueda } 923980691e5SKiyoshi Ueda 924980691e5SKiyoshi Ueda free_rq_clone(clone); 925980691e5SKiyoshi Ueda blk_end_request_all(rq, error); 92629e4013dSTejun Heo rq_completed(md, rw, true); 927980691e5SKiyoshi Ueda } 928980691e5SKiyoshi Ueda 929cec47e3dSKiyoshi Ueda static void dm_unprep_request(struct request *rq) 930cec47e3dSKiyoshi Ueda { 931cec47e3dSKiyoshi Ueda struct request *clone = rq->special; 932cec47e3dSKiyoshi Ueda 933cec47e3dSKiyoshi Ueda rq->special = NULL; 934cec47e3dSKiyoshi Ueda rq->cmd_flags &= ~REQ_DONTPREP; 935cec47e3dSKiyoshi Ueda 936a77e28c7SKiyoshi Ueda free_rq_clone(clone); 937cec47e3dSKiyoshi Ueda } 938cec47e3dSKiyoshi Ueda 939cec47e3dSKiyoshi Ueda /* 940cec47e3dSKiyoshi Ueda * Requeue the original request of a clone. 
 */
void dm_requeue_unmapped_request(struct request *clone)
{
	int rw = rq_data_dir(clone);
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	struct request_queue *q = rq->q;
	unsigned long flags;

	dm_unprep_request(rq);

	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, rq);
	spin_unlock_irqrestore(q->queue_lock, flags);

	rq_completed(md, rw, 0);
}
EXPORT_SYMBOL_GPL(dm_requeue_unmapped_request);

static void __stop_queue(struct request_queue *q)
{
	blk_stop_queue(q);
}

static void stop_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__stop_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static void __start_queue(struct request_queue *q)
{
	if (blk_queue_stopped(q))
		blk_start_queue(q);
}

static void start_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

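/*
 * Finish a completed clone: let the target's rq_end_io() (if any) decide
 * whether to complete, hold or requeue the original request.
 */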
static void dm_done(struct request *clone, int error, bool mapped)
{
	int r = error;
	struct dm_rq_target_io *tio = clone->end_io_data;
	dm_request_endio_fn rq_end_io = NULL;

	if (tio->ti) {
		rq_end_io = tio->ti->type->rq_end_io;

		if (mapped && rq_end_io)
			r = rq_end_io(tio->ti, clone, error, &tio->info);
	}

	if (unlikely(r == -EREMOTEIO && (clone->cmd_flags & REQ_WRITE_SAME) &&
		     !clone->q->limits.max_write_same_sectors))
		disable_write_same(tio->md);

	if (r <= 0)
		/* The target wants to complete the I/O */
		dm_end_request(clone, r);
	else if (r == DM_ENDIO_INCOMPLETE)
		/* The target will handle the I/O */
		return;
	else if (r == DM_ENDIO_REQUEUE)
		/* The target wants to requeue the I/O */
		dm_requeue_unmapped_request(clone);
	else {
		DMWARN("unimplemented target endio return value: %d", r);
		BUG();
	}
}

/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
{
	bool mapped = true;
	struct request *clone = rq->completion_data;
	struct dm_rq_target_io *tio = clone->end_io_data;

	if (rq->cmd_flags & REQ_FAILED)
		mapped = false;

	dm_done(clone, tio->error, mapped);
}

/*
 * Complete the clone and the original request with the error status
 * through softirq context.
 */
static void dm_complete_request(struct request *clone, int error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct request *rq = tio->orig;

	tio->error = error;
	rq->completion_data = clone;
	blk_complete_request(rq);
}

/*
 * Complete the not-mapped clone and the original request with the error status
 * through softirq context.
 * Target's rq_end_io() function isn't called.
 * This may be used when the target's map_rq() function fails.
 */
void dm_kill_unmapped_request(struct request *clone, int error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct request *rq = tio->orig;

	rq->cmd_flags |= REQ_FAILED;
	dm_complete_request(clone, error);
}
EXPORT_SYMBOL_GPL(dm_kill_unmapped_request);

/*
 * Called with the queue lock held
 */
static void end_clone_request(struct request *clone, int error)
{
	/*
	 * Just clean up the queue's bookkeeping for the dispatched clone.
	 * The clone is *NOT* actually freed here, because it is allocated
	 * from dm's own mempool and REQ_ALLOCED isn't set in clone->cmd_flags.
	 */
	__blk_put_request(clone->q, clone);

	/*
	 * Actual request completion is done in a softirq context which doesn't
	 * hold the queue lock.  Otherwise, deadlock could occur because:
Otherwise, deadlock could occur because: 1083cec47e3dSKiyoshi Ueda * - another request may be submitted by the upper level driver 1084cec47e3dSKiyoshi Ueda * of the stacking during the completion 1085cec47e3dSKiyoshi Ueda * - the submission which requires queue lock may be done 1086cec47e3dSKiyoshi Ueda * against this queue 1087cec47e3dSKiyoshi Ueda */ 1088cec47e3dSKiyoshi Ueda dm_complete_request(clone, error); 1089cec47e3dSKiyoshi Ueda } 1090cec47e3dSKiyoshi Ueda 109156a67df7SMike Snitzer /* 109256a67df7SMike Snitzer * Return maximum size of I/O possible at the supplied sector up to the current 109356a67df7SMike Snitzer * target boundary. 109456a67df7SMike Snitzer */ 109556a67df7SMike Snitzer static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti) 10961da177e4SLinus Torvalds { 109756a67df7SMike Snitzer sector_t target_offset = dm_target_offset(ti, sector); 109856a67df7SMike Snitzer 109956a67df7SMike Snitzer return ti->len - target_offset; 110056a67df7SMike Snitzer } 110156a67df7SMike Snitzer 110256a67df7SMike Snitzer static sector_t max_io_len(sector_t sector, struct dm_target *ti) 110356a67df7SMike Snitzer { 110456a67df7SMike Snitzer sector_t len = max_io_len_target_boundary(sector, ti); 1105542f9038SMike Snitzer sector_t offset, max_len; 11061da177e4SLinus Torvalds 11071da177e4SLinus Torvalds /* 11081da177e4SLinus Torvalds * Does the target need to split even further? 11091da177e4SLinus Torvalds */ 1110542f9038SMike Snitzer if (ti->max_io_len) { 1111542f9038SMike Snitzer offset = dm_target_offset(ti, sector); 1112542f9038SMike Snitzer if (unlikely(ti->max_io_len & (ti->max_io_len - 1))) 1113542f9038SMike Snitzer max_len = sector_div(offset, ti->max_io_len); 1114542f9038SMike Snitzer else 1115542f9038SMike Snitzer max_len = offset & (ti->max_io_len - 1); 1116542f9038SMike Snitzer max_len = ti->max_io_len - max_len; 1117542f9038SMike Snitzer 1118542f9038SMike Snitzer if (len > max_len) 1119542f9038SMike Snitzer len = max_len; 11201da177e4SLinus Torvalds } 11211da177e4SLinus Torvalds 11221da177e4SLinus Torvalds return len; 11231da177e4SLinus Torvalds } 11241da177e4SLinus Torvalds 1125542f9038SMike Snitzer int dm_set_target_max_io_len(struct dm_target *ti, sector_t len) 1126542f9038SMike Snitzer { 1127542f9038SMike Snitzer if (len > UINT_MAX) { 1128542f9038SMike Snitzer DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)", 1129542f9038SMike Snitzer (unsigned long long)len, UINT_MAX); 1130542f9038SMike Snitzer ti->error = "Maximum size of target IO is too large"; 1131542f9038SMike Snitzer return -EINVAL; 1132542f9038SMike Snitzer } 1133542f9038SMike Snitzer 1134542f9038SMike Snitzer ti->max_io_len = (uint32_t) len; 1135542f9038SMike Snitzer 1136542f9038SMike Snitzer return 0; 1137542f9038SMike Snitzer } 1138542f9038SMike Snitzer EXPORT_SYMBOL_GPL(dm_set_target_max_io_len); 1139542f9038SMike Snitzer 11401dd40c3eSMikulas Patocka /* 11411dd40c3eSMikulas Patocka * A target may call dm_accept_partial_bio only from the map routine. It is 11421dd40c3eSMikulas Patocka * allowed for all bio types except REQ_FLUSH. 11431dd40c3eSMikulas Patocka * 11441dd40c3eSMikulas Patocka * dm_accept_partial_bio informs the dm that the target only wants to process 11451dd40c3eSMikulas Patocka * additional n_sectors sectors of the bio and the rest of the data should be 11461dd40c3eSMikulas Patocka * sent in a next bio. 

/*
 * A target may call dm_accept_partial_bio only from the map routine.  It is
 * allowed for all bio types except REQ_FLUSH.
 *
 * dm_accept_partial_bio informs the dm that the target only wants to process
 * additional n_sectors sectors of the bio and the rest of the data should be
 * sent in the next bio.
 *
 * A diagram that explains the arithmetic:
 * +--------------------+---------------+-------+
 * |         1          |       2       |   3   |
 * +--------------------+---------------+-------+
 *
 * <-------------- *tio->len_ptr --------------->
 *                      <------- bi_size ------->
 *                      <-- n_sectors -->
 *
 * Region 1 was already iterated over with bio_advance or similar function.
 *	(it may be empty if the target doesn't use bio_advance)
 * Region 2 is the remaining bio size that the target wants to process.
 *	(it may be empty if region 1 is non-empty, although there is no reason
 *	 to make it empty)
 * The target requires that region 3 is to be sent in the next bio.
 *
 * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
 * the partially processed part (the sum of regions 1+2) must be the same for all
 * copies of the bio.
 */
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
	BUG_ON(bio->bi_rw & REQ_FLUSH);
	BUG_ON(bi_size > *tio->len_ptr);
	BUG_ON(n_sectors > bi_size);
	*tio->len_ptr -= bi_size - n_sectors;
	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
}
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
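
/*
 * A sketch of a hypothetical map function that processes at most one chunk
 * per submission; dm core then resubmits a clone for the remaining sectors.
 * (sectors_to_chunk_end() and remap_to_chunk() are made-up helpers.)
 *
 *	static int chunked_target_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		unsigned n_sectors = min(bio_sectors(bio),
 *					 (unsigned)sectors_to_chunk_end(ti, bio));
 *
 *		dm_accept_partial_bio(bio, n_sectors);
 *		remap_to_chunk(ti, bio);
 *		return DM_MAPIO_REMAPPED;
 *	}
 */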
Brunelle tio->io->bio->bi_bdev->bd_dev, sector); 12032056a782SJens Axboe 12041da177e4SLinus Torvalds generic_make_request(clone); 12052e93ccc1SKiyoshi Ueda } else if (r < 0 || r == DM_MAPIO_REQUEUE) { 12062e93ccc1SKiyoshi Ueda /* error the io and bail out, or requeue it if needed */ 12079faf400fSStefan Bader md = tio->io->md; 12089faf400fSStefan Bader dec_pending(tio->io, r); 12099faf400fSStefan Bader free_tio(md, tio); 121045cbcd79SKiyoshi Ueda } else if (r) { 121145cbcd79SKiyoshi Ueda DMWARN("unimplemented target map return value: %d", r); 121245cbcd79SKiyoshi Ueda BUG(); 12131da177e4SLinus Torvalds } 12141da177e4SLinus Torvalds } 12151da177e4SLinus Torvalds 12161da177e4SLinus Torvalds struct clone_info { 12171da177e4SLinus Torvalds struct mapped_device *md; 12181da177e4SLinus Torvalds struct dm_table *map; 12191da177e4SLinus Torvalds struct bio *bio; 12201da177e4SLinus Torvalds struct dm_io *io; 12211da177e4SLinus Torvalds sector_t sector; 1222e0d6609aSMikulas Patocka unsigned sector_count; 12231da177e4SLinus Torvalds }; 12241da177e4SLinus Torvalds 1225e0d6609aSMikulas Patocka static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len) 1226bd2a49b8SAlasdair G Kergon { 12274f024f37SKent Overstreet bio->bi_iter.bi_sector = sector; 12284f024f37SKent Overstreet bio->bi_iter.bi_size = to_bytes(len); 12291da177e4SLinus Torvalds } 12301da177e4SLinus Torvalds 12311da177e4SLinus Torvalds /* 12321da177e4SLinus Torvalds * Creates a bio that consists of range of complete bvecs. 12331da177e4SLinus Torvalds */ 1234dba14160SMikulas Patocka static void clone_bio(struct dm_target_io *tio, struct bio *bio, 12351c3b13e6SKent Overstreet sector_t sector, unsigned len) 12361da177e4SLinus Torvalds { 1237dba14160SMikulas Patocka struct bio *clone = &tio->clone; 12381da177e4SLinus Torvalds 12391c3b13e6SKent Overstreet __bio_clone_fast(clone, bio); 12409c47008dSMartin K. 
Petersen 12411c3b13e6SKent Overstreet if (bio_integrity(bio)) 12421c3b13e6SKent Overstreet bio_integrity_clone(clone, bio, GFP_NOIO); 12431c3b13e6SKent Overstreet 12441c3b13e6SKent Overstreet bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector)); 12451c3b13e6SKent Overstreet clone->bi_iter.bi_size = to_bytes(len); 12461c3b13e6SKent Overstreet 12471c3b13e6SKent Overstreet if (bio_integrity(bio)) 12481c3b13e6SKent Overstreet bio_integrity_trim(clone, 0, len); 12491da177e4SLinus Torvalds } 12501da177e4SLinus Torvalds 12519015df24SAlasdair G Kergon static struct dm_target_io *alloc_tio(struct clone_info *ci, 1252bd2a49b8SAlasdair G Kergon struct dm_target *ti, int nr_iovecs, 125355a62eefSAlasdair G Kergon unsigned target_bio_nr) 1254f9ab94ceSMikulas Patocka { 1255dba14160SMikulas Patocka struct dm_target_io *tio; 1256dba14160SMikulas Patocka struct bio *clone; 1257dba14160SMikulas Patocka 1258dba14160SMikulas Patocka clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, ci->md->bs); 1259dba14160SMikulas Patocka tio = container_of(clone, struct dm_target_io, clone); 1260f9ab94ceSMikulas Patocka 1261f9ab94ceSMikulas Patocka tio->io = ci->io; 1262f9ab94ceSMikulas Patocka tio->ti = ti; 126355a62eefSAlasdair G Kergon tio->target_bio_nr = target_bio_nr; 12649015df24SAlasdair G Kergon 12659015df24SAlasdair G Kergon return tio; 12669015df24SAlasdair G Kergon } 12679015df24SAlasdair G Kergon 126814fe594dSAlasdair G Kergon static void __clone_and_map_simple_bio(struct clone_info *ci, 126914fe594dSAlasdair G Kergon struct dm_target *ti, 12701dd40c3eSMikulas Patocka unsigned target_bio_nr, unsigned *len) 12719015df24SAlasdair G Kergon { 127255a62eefSAlasdair G Kergon struct dm_target_io *tio = alloc_tio(ci, ti, ci->bio->bi_max_vecs, target_bio_nr); 1273dba14160SMikulas Patocka struct bio *clone = &tio->clone; 12749015df24SAlasdair G Kergon 12751dd40c3eSMikulas Patocka tio->len_ptr = len; 12761dd40c3eSMikulas Patocka 127706a426ceSMike Snitzer /* 127806a426ceSMike Snitzer * Discard requests require the bio's inline iovecs be initialized. 127906a426ceSMike Snitzer * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush 128006a426ceSMike Snitzer * and discard, so no need for concern about wasted bvec allocations. 
128106a426ceSMike Snitzer */ 12821c3b13e6SKent Overstreet __bio_clone_fast(clone, ci->bio); 1283bd2a49b8SAlasdair G Kergon if (len) 12841dd40c3eSMikulas Patocka bio_setup_sector(clone, ci->sector, *len); 1285f9ab94ceSMikulas Patocka 1286bd2a49b8SAlasdair G Kergon __map_bio(tio); 1287f9ab94ceSMikulas Patocka } 1288f9ab94ceSMikulas Patocka 128914fe594dSAlasdair G Kergon static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti, 12901dd40c3eSMikulas Patocka unsigned num_bios, unsigned *len) 129106a426ceSMike Snitzer { 129255a62eefSAlasdair G Kergon unsigned target_bio_nr; 129306a426ceSMike Snitzer 129455a62eefSAlasdair G Kergon for (target_bio_nr = 0; target_bio_nr < num_bios; target_bio_nr++) 129514fe594dSAlasdair G Kergon __clone_and_map_simple_bio(ci, ti, target_bio_nr, len); 129606a426ceSMike Snitzer } 129706a426ceSMike Snitzer 129814fe594dSAlasdair G Kergon static int __send_empty_flush(struct clone_info *ci) 1299f9ab94ceSMikulas Patocka { 130006a426ceSMike Snitzer unsigned target_nr = 0; 1301f9ab94ceSMikulas Patocka struct dm_target *ti; 1302f9ab94ceSMikulas Patocka 1303b372d360SMike Snitzer BUG_ON(bio_has_data(ci->bio)); 1304f9ab94ceSMikulas Patocka while ((ti = dm_table_get_target(ci->map, target_nr++))) 13051dd40c3eSMikulas Patocka __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL); 1306f9ab94ceSMikulas Patocka 1307f9ab94ceSMikulas Patocka return 0; 1308f9ab94ceSMikulas Patocka } 1309f9ab94ceSMikulas Patocka 1310e4c93811SAlasdair G Kergon static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti, 13111dd40c3eSMikulas Patocka sector_t sector, unsigned *len) 13125ae89a87SMike Snitzer { 1313dba14160SMikulas Patocka struct bio *bio = ci->bio; 13145ae89a87SMike Snitzer struct dm_target_io *tio; 1315b0d8ed4dSAlasdair G Kergon unsigned target_bio_nr; 1316b0d8ed4dSAlasdair G Kergon unsigned num_target_bios = 1; 13175ae89a87SMike Snitzer 1318b0d8ed4dSAlasdair G Kergon /* 1319b0d8ed4dSAlasdair G Kergon * Does the target want to receive duplicate copies of the bio? 
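 * (Illustrative: a hypothetical mirror-like target could implement
 * ti->num_write_bios to return its number of legs, so each WRITE is
 * cloned once per leg while reads stay single; the hook receives
 * (ti, bio) and returns the desired clone count.)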
1320b0d8ed4dSAlasdair G Kergon */ 1321b0d8ed4dSAlasdair G Kergon if (bio_data_dir(bio) == WRITE && ti->num_write_bios) 1322b0d8ed4dSAlasdair G Kergon num_target_bios = ti->num_write_bios(ti, bio); 1323e4c93811SAlasdair G Kergon 1324b0d8ed4dSAlasdair G Kergon for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) { 13251c3b13e6SKent Overstreet tio = alloc_tio(ci, ti, 0, target_bio_nr); 13261dd40c3eSMikulas Patocka tio->len_ptr = len; 13271dd40c3eSMikulas Patocka clone_bio(tio, bio, sector, *len); 1328bd2a49b8SAlasdair G Kergon __map_bio(tio); 13295ae89a87SMike Snitzer } 1330b0d8ed4dSAlasdair G Kergon } 13315ae89a87SMike Snitzer 133255a62eefSAlasdair G Kergon typedef unsigned (*get_num_bios_fn)(struct dm_target *ti); 133323508a96SMike Snitzer 133455a62eefSAlasdair G Kergon static unsigned get_num_discard_bios(struct dm_target *ti) 133523508a96SMike Snitzer { 133655a62eefSAlasdair G Kergon return ti->num_discard_bios; 133723508a96SMike Snitzer } 133823508a96SMike Snitzer 133955a62eefSAlasdair G Kergon static unsigned get_num_write_same_bios(struct dm_target *ti) 134023508a96SMike Snitzer { 134155a62eefSAlasdair G Kergon return ti->num_write_same_bios; 134223508a96SMike Snitzer } 134323508a96SMike Snitzer 134423508a96SMike Snitzer typedef bool (*is_split_required_fn)(struct dm_target *ti); 134523508a96SMike Snitzer 134623508a96SMike Snitzer static bool is_split_required_for_discard(struct dm_target *ti) 134723508a96SMike Snitzer { 134855a62eefSAlasdair G Kergon return ti->split_discard_bios; 134923508a96SMike Snitzer } 135023508a96SMike Snitzer 135114fe594dSAlasdair G Kergon static int __send_changing_extent_only(struct clone_info *ci, 135255a62eefSAlasdair G Kergon get_num_bios_fn get_num_bios, 135323508a96SMike Snitzer is_split_required_fn is_split_required) 13545ae89a87SMike Snitzer { 13555ae89a87SMike Snitzer struct dm_target *ti; 1356e0d6609aSMikulas Patocka unsigned len; 135755a62eefSAlasdair G Kergon unsigned num_bios; 13585ae89a87SMike Snitzer 1359a79245b3SMike Snitzer do { 13605ae89a87SMike Snitzer ti = dm_table_find_target(ci->map, ci->sector); 13615ae89a87SMike Snitzer if (!dm_target_is_valid(ti)) 13625ae89a87SMike Snitzer return -EIO; 13635ae89a87SMike Snitzer 13645ae89a87SMike Snitzer /* 136523508a96SMike Snitzer * Even though the device advertised support for this type of 136623508a96SMike Snitzer * request, that does not mean every target supports it, and 1367936688d7SMike Snitzer * reconfiguration might also have changed that since the 13685ae89a87SMike Snitzer * check was performed. 13695ae89a87SMike Snitzer */ 137055a62eefSAlasdair G Kergon num_bios = get_num_bios ? get_num_bios(ti) : 0; 137155a62eefSAlasdair G Kergon if (!num_bios) 13725ae89a87SMike Snitzer return -EOPNOTSUPP; 13735ae89a87SMike Snitzer 137423508a96SMike Snitzer if (is_split_required && !is_split_required(ti)) 1375e0d6609aSMikulas Patocka len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti)); 13767acf0277SMikulas Patocka else 1377e0d6609aSMikulas Patocka len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti)); 13785ae89a87SMike Snitzer 13791dd40c3eSMikulas Patocka __send_duplicate_bios(ci, ti, num_bios, &len); 13805ae89a87SMike Snitzer 1381a79245b3SMike Snitzer ci->sector += len; 1382a79245b3SMike Snitzer } while (ci->sector_count -= len); 13835ae89a87SMike Snitzer 13845ae89a87SMike Snitzer return 0; 13855ae89a87SMike Snitzer } 13865ae89a87SMike Snitzer 138714fe594dSAlasdair G Kergon static int __send_discard(struct clone_info *ci) 138823508a96SMike Snitzer { 138914fe594dSAlasdair G Kergon return __send_changing_extent_only(ci, get_num_discard_bios, 139023508a96SMike Snitzer is_split_required_for_discard); 139123508a96SMike Snitzer } 139223508a96SMike Snitzer 139314fe594dSAlasdair G Kergon static int __send_write_same(struct clone_info *ci) 139423508a96SMike Snitzer { 139514fe594dSAlasdair G Kergon return __send_changing_extent_only(ci, get_num_write_same_bios, NULL); 139623508a96SMike Snitzer } 139723508a96SMike Snitzer 1398e4c93811SAlasdair G Kergon /* 1399e4c93811SAlasdair G Kergon * Select the correct strategy for processing a non-flush bio. 1400e4c93811SAlasdair G Kergon */ 1401e4c93811SAlasdair G Kergon static int __split_and_process_non_flush(struct clone_info *ci) 1402e4c93811SAlasdair G Kergon { 1403e4c93811SAlasdair G Kergon struct bio *bio = ci->bio; 1404e4c93811SAlasdair G Kergon struct dm_target *ti; 14051c3b13e6SKent Overstreet unsigned len; 1406e4c93811SAlasdair G Kergon 1407e4c93811SAlasdair G Kergon if (unlikely(bio->bi_rw & REQ_DISCARD)) 1408e4c93811SAlasdair G Kergon return __send_discard(ci); 1409e4c93811SAlasdair G Kergon else if (unlikely(bio->bi_rw & REQ_WRITE_SAME)) 1410e4c93811SAlasdair G Kergon return __send_write_same(ci); 1411e4c93811SAlasdair G Kergon 1412e4c93811SAlasdair G Kergon ti = dm_table_find_target(ci->map, ci->sector); 1413e4c93811SAlasdair G Kergon if (!dm_target_is_valid(ti)) 1414e4c93811SAlasdair G Kergon return -EIO; 1415e4c93811SAlasdair G Kergon 14161c3b13e6SKent Overstreet len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count); 1417e4c93811SAlasdair G Kergon 14181dd40c3eSMikulas Patocka __clone_and_map_data_bio(ci, ti, ci->sector, &len); 1419e4c93811SAlasdair G Kergon 1420e4c93811SAlasdair G Kergon ci->sector += len; 1421e4c93811SAlasdair G Kergon ci->sector_count -= len; 1422e4c93811SAlasdair G Kergon 1423e4c93811SAlasdair G Kergon return 0; 1424e4c93811SAlasdair G Kergon } 1425e4c93811SAlasdair G Kergon 1426e4c93811SAlasdair G Kergon /* 142714fe594dSAlasdair G Kergon * Entry point to split a bio into clones and submit them to the targets.
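 *
 * In outline (matching the code below): allocate and initialise a dm_io,
 * start I/O accounting, then either issue the device's pre-initialised
 * empty flush bio (REQ_FLUSH) or call __split_and_process_non_flush()
 * repeatedly until ci.sector_count is exhausted; the final dec_pending()
 * drops the extra reference taken here.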
14281da177e4SLinus Torvalds */ 142983d5e5b0SMikulas Patocka static void __split_and_process_bio(struct mapped_device *md, 143083d5e5b0SMikulas Patocka struct dm_table *map, struct bio *bio) 14311da177e4SLinus Torvalds { 14321da177e4SLinus Torvalds struct clone_info ci; 1433512875bdSJun'ichi Nomura int error = 0; 14341da177e4SLinus Torvalds 143583d5e5b0SMikulas Patocka if (unlikely(!map)) { 1436f0b9a450SMikulas Patocka bio_io_error(bio); 1437f0b9a450SMikulas Patocka return; 1438f0b9a450SMikulas Patocka } 1439692d0eb9SMikulas Patocka 144083d5e5b0SMikulas Patocka ci.map = map; 14411da177e4SLinus Torvalds ci.md = md; 14421da177e4SLinus Torvalds ci.io = alloc_io(md); 14431da177e4SLinus Torvalds ci.io->error = 0; 14441da177e4SLinus Torvalds atomic_set(&ci.io->io_count, 1); 14451da177e4SLinus Torvalds ci.io->bio = bio; 14461da177e4SLinus Torvalds ci.io->md = md; 1447f88fb981SKiyoshi Ueda spin_lock_init(&ci.io->endio_lock); 14484f024f37SKent Overstreet ci.sector = bio->bi_iter.bi_sector; 14491da177e4SLinus Torvalds 14503eaf840eSJun'ichi "Nick" Nomura start_io_acct(ci.io); 1451bd2a49b8SAlasdair G Kergon 1452b372d360SMike Snitzer if (bio->bi_rw & REQ_FLUSH) { 1453b372d360SMike Snitzer ci.bio = &ci.md->flush_bio; 1454b372d360SMike Snitzer ci.sector_count = 0; 145514fe594dSAlasdair G Kergon error = __send_empty_flush(&ci); 1456b372d360SMike Snitzer /* dec_pending submits any data associated with flush */ 1457b372d360SMike Snitzer } else { 14586a8736d1STejun Heo ci.bio = bio; 1459f6fccb12SMilan Broz ci.sector_count = bio_sectors(bio); 1460512875bdSJun'ichi Nomura while (ci.sector_count && !error) 146114fe594dSAlasdair G Kergon error = __split_and_process_non_flush(&ci); 1462d87f4c14STejun Heo } 14631da177e4SLinus Torvalds 14641da177e4SLinus Torvalds /* drop the extra reference count */ 1465512875bdSJun'ichi Nomura dec_pending(ci.io, error); 14669e4e5f87SMilan Broz } 14679e4e5f87SMilan Broz /*----------------------------------------------------------------- 14681da177e4SLinus Torvalds * CRUD END 14691da177e4SLinus Torvalds *---------------------------------------------------------------*/ 14701da177e4SLinus Torvalds 14711da177e4SLinus Torvalds static int dm_merge_bvec(struct request_queue *q, 14721da177e4SLinus Torvalds struct bvec_merge_data *bvm, 1473f6fccb12SMilan Broz struct bio_vec *biovec) 1474f6fccb12SMilan Broz { 1475f6fccb12SMilan Broz struct mapped_device *md = q->queuedata; 147683d5e5b0SMikulas Patocka struct dm_table *map = dm_get_live_table_fast(md); 1477f6fccb12SMilan Broz struct dm_target *ti; 1478f6fccb12SMilan Broz sector_t max_sectors; 1479f6fccb12SMilan Broz int max_size = 0; 1480f6fccb12SMilan Broz 1481f6fccb12SMilan Broz if (unlikely(!map)) 1482f6fccb12SMilan Broz goto out; 1483f6fccb12SMilan Broz 1484f6fccb12SMilan Broz ti = dm_table_find_target(map, bvm->bi_sector); 1485f6fccb12SMilan Broz if (!dm_target_is_valid(ti)) 148683d5e5b0SMikulas Patocka goto out; 1487f6fccb12SMilan Broz 1488f6fccb12SMilan Broz /* 1489f6fccb12SMilan Broz * Find maximum amount of I/O that won't need splitting 1490f6fccb12SMilan Broz */ 149156a67df7SMike Snitzer max_sectors = min(max_io_len(bvm->bi_sector, ti), 1492f6fccb12SMilan Broz (sector_t) BIO_MAX_SECTORS); 1493f6fccb12SMilan Broz max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size; 1494f6fccb12SMilan Broz if (max_size < 0) 1495f6fccb12SMilan Broz max_size = 0; 1496f6fccb12SMilan Broz 1497f6fccb12SMilan Broz /* 1498f6fccb12SMilan Broz * merge_bvec_fn() returns number of bytes 1499f6fccb12SMilan Broz * it can accept at this offset 
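 * (worked example, illustrative numbers: if max_io_len() allows 8
 * sectors and the bio under construction already holds bvm->bi_size =
 * 2048 bytes, then max_size = (8 << SECTOR_SHIFT) - 2048 = 2048 more
 * bytes may be added)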
1500f6fccb12SMilan Broz * max is precomputed maximal io size 1501f6fccb12SMilan Broz */ 1502f6fccb12SMilan Broz if (max_size && ti->type->merge) 1503f6fccb12SMilan Broz max_size = ti->type->merge(ti, bvm, biovec, max_size); 15048cbeb67aSMikulas Patocka /* 15058cbeb67aSMikulas Patocka * If the target doesn't support merge method and some of the devices 15068cbeb67aSMikulas Patocka * provided their merge_bvec method (we know this by looking at 15078cbeb67aSMikulas Patocka * queue_max_hw_sectors), then we can't allow bios with multiple vector 15088cbeb67aSMikulas Patocka * entries. So always set max_size to 0, and the code below allows 15098cbeb67aSMikulas Patocka * just one page. 15108cbeb67aSMikulas Patocka */ 15118cbeb67aSMikulas Patocka else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9) 15128cbeb67aSMikulas Patocka max_size = 0; 1513f6fccb12SMilan Broz 15145037108aSMikulas Patocka out: 151583d5e5b0SMikulas Patocka dm_put_live_table_fast(md); 1516f6fccb12SMilan Broz /* 1517f6fccb12SMilan Broz * Always allow an entire first page 1518f6fccb12SMilan Broz */ 1519f6fccb12SMilan Broz if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT)) 1520f6fccb12SMilan Broz max_size = biovec->bv_len; 1521f6fccb12SMilan Broz 1522f6fccb12SMilan Broz return max_size; 1523f6fccb12SMilan Broz } 1524f6fccb12SMilan Broz 15251da177e4SLinus Torvalds /* 15261da177e4SLinus Torvalds * The request function that just remaps the bio built up by 15271da177e4SLinus Torvalds * dm_merge_bvec. 15281da177e4SLinus Torvalds */ 15295a7bbad2SChristoph Hellwig static void _dm_request(struct request_queue *q, struct bio *bio) 15301da177e4SLinus Torvalds { 153112f03a49SKevin Corry int rw = bio_data_dir(bio); 15321da177e4SLinus Torvalds struct mapped_device *md = q->queuedata; 1533c9959059STejun Heo int cpu; 153483d5e5b0SMikulas Patocka int srcu_idx; 153583d5e5b0SMikulas Patocka struct dm_table *map; 15361da177e4SLinus Torvalds 153783d5e5b0SMikulas Patocka map = dm_get_live_table(md, &srcu_idx); 15381da177e4SLinus Torvalds 1539074a7acaSTejun Heo cpu = part_stat_lock(); 1540074a7acaSTejun Heo part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]); 1541074a7acaSTejun Heo part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio)); 1542074a7acaSTejun Heo part_stat_unlock(); 154312f03a49SKevin Corry 15446a8736d1STejun Heo /* if we're suspended, we have to queue this io for later */ 15456a8736d1STejun Heo if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { 154683d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 15471da177e4SLinus Torvalds 15486a8736d1STejun Heo if (bio_rw(bio) != READA) 154992c63902SMikulas Patocka queue_io(md, bio); 15506a8736d1STejun Heo else 15516a8736d1STejun Heo bio_io_error(bio); 15525a7bbad2SChristoph Hellwig return; 15531da177e4SLinus Torvalds } 15541da177e4SLinus Torvalds 155583d5e5b0SMikulas Patocka __split_and_process_bio(md, map, bio); 155683d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 15575a7bbad2SChristoph Hellwig return; 1558cec47e3dSKiyoshi Ueda } 1559cec47e3dSKiyoshi Ueda 1560fd2ed4d2SMikulas Patocka int dm_request_based(struct mapped_device *md) 1561cec47e3dSKiyoshi Ueda { 1562cec47e3dSKiyoshi Ueda return blk_queue_stackable(md->queue); 1563cec47e3dSKiyoshi Ueda } 1564cec47e3dSKiyoshi Ueda 15655a7bbad2SChristoph Hellwig static void dm_request(struct request_queue *q, struct bio *bio) 1566cec47e3dSKiyoshi Ueda { 1567cec47e3dSKiyoshi Ueda struct mapped_device *md = q->queuedata; 1568cec47e3dSKiyoshi Ueda 1569cec47e3dSKiyoshi Ueda if (dm_request_based(md)) 
15705a7bbad2SChristoph Hellwig blk_queue_bio(q, bio); 15715a7bbad2SChristoph Hellwig else 15725a7bbad2SChristoph Hellwig _dm_request(q, bio); 1573cec47e3dSKiyoshi Ueda } 1574cec47e3dSKiyoshi Ueda 1575cec47e3dSKiyoshi Ueda void dm_dispatch_request(struct request *rq) 1576cec47e3dSKiyoshi Ueda { 1577cec47e3dSKiyoshi Ueda int r; 1578cec47e3dSKiyoshi Ueda 1579cec47e3dSKiyoshi Ueda if (blk_queue_io_stat(rq->q)) 1580cec47e3dSKiyoshi Ueda rq->cmd_flags |= REQ_IO_STAT; 1581cec47e3dSKiyoshi Ueda 1582cec47e3dSKiyoshi Ueda rq->start_time = jiffies; 1583cec47e3dSKiyoshi Ueda r = blk_insert_cloned_request(rq->q, rq); 1584cec47e3dSKiyoshi Ueda if (r) 1585cec47e3dSKiyoshi Ueda dm_complete_request(rq, r); 1586cec47e3dSKiyoshi Ueda } 1587cec47e3dSKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_dispatch_request); 1588cec47e3dSKiyoshi Ueda 1589cec47e3dSKiyoshi Ueda static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig, 1590cec47e3dSKiyoshi Ueda void *data) 1591cec47e3dSKiyoshi Ueda { 1592cec47e3dSKiyoshi Ueda struct dm_rq_target_io *tio = data; 159394818742SKent Overstreet struct dm_rq_clone_bio_info *info = 159494818742SKent Overstreet container_of(bio, struct dm_rq_clone_bio_info, clone); 1595cec47e3dSKiyoshi Ueda 1596cec47e3dSKiyoshi Ueda info->orig = bio_orig; 1597cec47e3dSKiyoshi Ueda info->tio = tio; 1598cec47e3dSKiyoshi Ueda bio->bi_end_io = end_clone_bio; 1599cec47e3dSKiyoshi Ueda 1600cec47e3dSKiyoshi Ueda return 0; 1601cec47e3dSKiyoshi Ueda } 1602cec47e3dSKiyoshi Ueda 1603cec47e3dSKiyoshi Ueda static int setup_clone(struct request *clone, struct request *rq, 1604cec47e3dSKiyoshi Ueda struct dm_rq_target_io *tio) 1605cec47e3dSKiyoshi Ueda { 1606d0bcb878SKiyoshi Ueda int r; 1607cec47e3dSKiyoshi Ueda 1608d0bcb878SKiyoshi Ueda r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC, 1609d0bcb878SKiyoshi Ueda dm_rq_bio_constructor, tio); 1610cec47e3dSKiyoshi Ueda if (r) 1611cec47e3dSKiyoshi Ueda return r; 1612cec47e3dSKiyoshi Ueda 1613cec47e3dSKiyoshi Ueda clone->cmd = rq->cmd; 1614cec47e3dSKiyoshi Ueda clone->cmd_len = rq->cmd_len; 1615cec47e3dSKiyoshi Ueda clone->sense = rq->sense; 1616cec47e3dSKiyoshi Ueda clone->end_io = end_clone_request; 1617cec47e3dSKiyoshi Ueda clone->end_io_data = tio; 1618cec47e3dSKiyoshi Ueda 1619cec47e3dSKiyoshi Ueda return 0; 1620cec47e3dSKiyoshi Ueda } 1621cec47e3dSKiyoshi Ueda 16226facdaffSKiyoshi Ueda static struct request *clone_rq(struct request *rq, struct mapped_device *md, 16236facdaffSKiyoshi Ueda gfp_t gfp_mask) 16246facdaffSKiyoshi Ueda { 16256facdaffSKiyoshi Ueda struct request *clone; 16266facdaffSKiyoshi Ueda struct dm_rq_target_io *tio; 16276facdaffSKiyoshi Ueda 16286facdaffSKiyoshi Ueda tio = alloc_rq_tio(md, gfp_mask); 16296facdaffSKiyoshi Ueda if (!tio) 16306facdaffSKiyoshi Ueda return NULL; 16316facdaffSKiyoshi Ueda 16326facdaffSKiyoshi Ueda tio->md = md; 16336facdaffSKiyoshi Ueda tio->ti = NULL; 16346facdaffSKiyoshi Ueda tio->orig = rq; 16356facdaffSKiyoshi Ueda tio->error = 0; 16366facdaffSKiyoshi Ueda memset(&tio->info, 0, sizeof(tio->info)); 16376facdaffSKiyoshi Ueda 16386facdaffSKiyoshi Ueda clone = &tio->clone; 16396facdaffSKiyoshi Ueda if (setup_clone(clone, rq, tio)) { 16406facdaffSKiyoshi Ueda /* -ENOMEM */ 16416facdaffSKiyoshi Ueda free_rq_tio(tio); 16426facdaffSKiyoshi Ueda return NULL; 16436facdaffSKiyoshi Ueda } 16446facdaffSKiyoshi Ueda 16456facdaffSKiyoshi Ueda return clone; 16466facdaffSKiyoshi Ueda } 16476facdaffSKiyoshi Ueda 1648cec47e3dSKiyoshi Ueda /* 1649cec47e3dSKiyoshi Ueda * Called with the queue lock held. 
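 * This is the queue's prep_rq_fn: it clones the request with GFP_ATOMIC
 * and stashes the clone in rq->special. Returning BLKPREP_DEFER when the
 * atomic allocation fails makes the block layer retry the request later
 * instead of failing it.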
1650cec47e3dSKiyoshi Ueda */ 1651cec47e3dSKiyoshi Ueda static int dm_prep_fn(struct request_queue *q, struct request *rq) 1652cec47e3dSKiyoshi Ueda { 1653cec47e3dSKiyoshi Ueda struct mapped_device *md = q->queuedata; 1654cec47e3dSKiyoshi Ueda struct request *clone; 1655cec47e3dSKiyoshi Ueda 1656cec47e3dSKiyoshi Ueda if (unlikely(rq->special)) { 1657cec47e3dSKiyoshi Ueda DMWARN("Already has something in rq->special."); 1658cec47e3dSKiyoshi Ueda return BLKPREP_KILL; 1659cec47e3dSKiyoshi Ueda } 1660cec47e3dSKiyoshi Ueda 16616facdaffSKiyoshi Ueda clone = clone_rq(rq, md, GFP_ATOMIC); 16626facdaffSKiyoshi Ueda if (!clone) 1663cec47e3dSKiyoshi Ueda return BLKPREP_DEFER; 1664cec47e3dSKiyoshi Ueda 1665cec47e3dSKiyoshi Ueda rq->special = clone; 1666cec47e3dSKiyoshi Ueda rq->cmd_flags |= REQ_DONTPREP; 1667cec47e3dSKiyoshi Ueda 1668cec47e3dSKiyoshi Ueda return BLKPREP_OK; 1669cec47e3dSKiyoshi Ueda } 1670cec47e3dSKiyoshi Ueda 16719eef87daSKiyoshi Ueda /* 16729eef87daSKiyoshi Ueda * Returns: 16739eef87daSKiyoshi Ueda * 0 : the request has been processed (not requeued) 16749eef87daSKiyoshi Ueda * !0 : the request has been requeued 16759eef87daSKiyoshi Ueda */ 16769eef87daSKiyoshi Ueda static int map_request(struct dm_target *ti, struct request *clone, 1677cec47e3dSKiyoshi Ueda struct mapped_device *md) 1678cec47e3dSKiyoshi Ueda { 16799eef87daSKiyoshi Ueda int r, requeued = 0; 1680cec47e3dSKiyoshi Ueda struct dm_rq_target_io *tio = clone->end_io_data; 1681cec47e3dSKiyoshi Ueda 1682cec47e3dSKiyoshi Ueda tio->ti = ti; 1683cec47e3dSKiyoshi Ueda r = ti->type->map_rq(ti, clone, &tio->info); 1684cec47e3dSKiyoshi Ueda switch (r) { 1685cec47e3dSKiyoshi Ueda case DM_MAPIO_SUBMITTED: 1686cec47e3dSKiyoshi Ueda /* The target has taken the I/O to submit by itself later */ 1687cec47e3dSKiyoshi Ueda break; 1688cec47e3dSKiyoshi Ueda case DM_MAPIO_REMAPPED: 1689cec47e3dSKiyoshi Ueda /* The target has remapped the I/O so dispatch it */ 16906db4ccd6SJun'ichi Nomura trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)), 16916db4ccd6SJun'ichi Nomura blk_rq_pos(tio->orig)); 1692cec47e3dSKiyoshi Ueda dm_dispatch_request(clone); 1693cec47e3dSKiyoshi Ueda break; 1694cec47e3dSKiyoshi Ueda case DM_MAPIO_REQUEUE: 1695cec47e3dSKiyoshi Ueda /* The target wants to requeue the I/O */ 1696cec47e3dSKiyoshi Ueda dm_requeue_unmapped_request(clone); 16979eef87daSKiyoshi Ueda requeued = 1; 1698cec47e3dSKiyoshi Ueda break; 1699cec47e3dSKiyoshi Ueda default: 1700cec47e3dSKiyoshi Ueda if (r > 0) { 1701cec47e3dSKiyoshi Ueda DMWARN("unimplemented target map return value: %d", r); 1702cec47e3dSKiyoshi Ueda BUG(); 1703cec47e3dSKiyoshi Ueda } 1704cec47e3dSKiyoshi Ueda 1705cec47e3dSKiyoshi Ueda /* The target wants to complete the I/O */ 1706cec47e3dSKiyoshi Ueda dm_kill_unmapped_request(clone, r); 1707cec47e3dSKiyoshi Ueda break; 1708cec47e3dSKiyoshi Ueda } 17099eef87daSKiyoshi Ueda 17109eef87daSKiyoshi Ueda return requeued; 1711cec47e3dSKiyoshi Ueda } 1712cec47e3dSKiyoshi Ueda 1713ba1cbad9SMike Snitzer static struct request *dm_start_request(struct mapped_device *md, struct request *orig) 1714ba1cbad9SMike Snitzer { 1715ba1cbad9SMike Snitzer struct request *clone; 1716ba1cbad9SMike Snitzer 1717ba1cbad9SMike Snitzer blk_start_request(orig); 1718ba1cbad9SMike Snitzer clone = orig->special; 1719ba1cbad9SMike Snitzer atomic_inc(&md->pending[rq_data_dir(clone)]); 1720ba1cbad9SMike Snitzer 1721ba1cbad9SMike Snitzer /* 1722ba1cbad9SMike Snitzer * Hold the md reference here for the in-flight I/O. 
1723ba1cbad9SMike Snitzer * We can't rely on the reference count by device opener, 1724ba1cbad9SMike Snitzer * because the device may be closed during the request completion 1725ba1cbad9SMike Snitzer * when all bios are completed. 1726ba1cbad9SMike Snitzer * See the comment in rq_completed() too. 1727ba1cbad9SMike Snitzer */ 1728ba1cbad9SMike Snitzer dm_get(md); 1729ba1cbad9SMike Snitzer 1730ba1cbad9SMike Snitzer return clone; 1731ba1cbad9SMike Snitzer } 1732ba1cbad9SMike Snitzer 1733cec47e3dSKiyoshi Ueda /* 1734cec47e3dSKiyoshi Ueda * q->request_fn for request-based dm. 1735cec47e3dSKiyoshi Ueda * Called with the queue lock held. 1736cec47e3dSKiyoshi Ueda */ 1737cec47e3dSKiyoshi Ueda static void dm_request_fn(struct request_queue *q) 1738cec47e3dSKiyoshi Ueda { 1739cec47e3dSKiyoshi Ueda struct mapped_device *md = q->queuedata; 174083d5e5b0SMikulas Patocka int srcu_idx; 174183d5e5b0SMikulas Patocka struct dm_table *map = dm_get_live_table(md, &srcu_idx); 1742cec47e3dSKiyoshi Ueda struct dm_target *ti; 1743b4324feeSKiyoshi Ueda struct request *rq, *clone; 174429e4013dSTejun Heo sector_t pos; 1745cec47e3dSKiyoshi Ueda 1746cec47e3dSKiyoshi Ueda /* 1747b4324feeSKiyoshi Ueda * For suspend, check blk_queue_stopped() and increment 1748b4324feeSKiyoshi Ueda * ->pending within a single queue_lock not to increment the 1749b4324feeSKiyoshi Ueda * number of in-flight I/Os after the queue is stopped in 1750b4324feeSKiyoshi Ueda * dm_suspend(). 1751cec47e3dSKiyoshi Ueda */ 17527eaceaccSJens Axboe while (!blk_queue_stopped(q)) { 1753cec47e3dSKiyoshi Ueda rq = blk_peek_request(q); 1754cec47e3dSKiyoshi Ueda if (!rq) 17557eaceaccSJens Axboe goto delay_and_out; 1756cec47e3dSKiyoshi Ueda 175729e4013dSTejun Heo /* always use block 0 to find the target for flushes for now */ 175829e4013dSTejun Heo pos = 0; 175929e4013dSTejun Heo if (!(rq->cmd_flags & REQ_FLUSH)) 176029e4013dSTejun Heo pos = blk_rq_pos(rq); 1761d0bcb878SKiyoshi Ueda 176229e4013dSTejun Heo ti = dm_table_find_target(map, pos); 1763ba1cbad9SMike Snitzer if (!dm_target_is_valid(ti)) { 1764ba1cbad9SMike Snitzer /* 1765ba1cbad9SMike Snitzer * Must perform setup, that dm_done() requires, 1766ba1cbad9SMike Snitzer * before calling dm_kill_unmapped_request 1767ba1cbad9SMike Snitzer */ 1768ba1cbad9SMike Snitzer DMERR_LIMIT("request attempted access beyond the end of device"); 1769ba1cbad9SMike Snitzer clone = dm_start_request(md, rq); 1770ba1cbad9SMike Snitzer dm_kill_unmapped_request(clone, -EIO); 1771ba1cbad9SMike Snitzer continue; 1772ba1cbad9SMike Snitzer } 177329e4013dSTejun Heo 1774cec47e3dSKiyoshi Ueda if (ti->type->busy && ti->type->busy(ti)) 17757eaceaccSJens Axboe goto delay_and_out; 1776cec47e3dSKiyoshi Ueda 1777ba1cbad9SMike Snitzer clone = dm_start_request(md, rq); 1778b4324feeSKiyoshi Ueda 1779cec47e3dSKiyoshi Ueda spin_unlock(q->queue_lock); 17809eef87daSKiyoshi Ueda if (map_request(ti, clone, md)) 17819eef87daSKiyoshi Ueda goto requeued; 17829eef87daSKiyoshi Ueda 1783052189a2SKiyoshi Ueda BUG_ON(!irqs_disabled()); 1784052189a2SKiyoshi Ueda spin_lock(q->queue_lock); 1785cec47e3dSKiyoshi Ueda } 1786cec47e3dSKiyoshi Ueda 1787cec47e3dSKiyoshi Ueda goto out; 1788cec47e3dSKiyoshi Ueda 17899eef87daSKiyoshi Ueda requeued: 1790052189a2SKiyoshi Ueda BUG_ON(!irqs_disabled()); 1791052189a2SKiyoshi Ueda spin_lock(q->queue_lock); 17929eef87daSKiyoshi Ueda 17937eaceaccSJens Axboe delay_and_out: 17947eaceaccSJens Axboe blk_delay_queue(q, HZ / 10); 1795cec47e3dSKiyoshi Ueda out: 179683d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 
1797cec47e3dSKiyoshi Ueda } 1798cec47e3dSKiyoshi Ueda 1799cec47e3dSKiyoshi Ueda int dm_underlying_device_busy(struct request_queue *q) 1800cec47e3dSKiyoshi Ueda { 1801cec47e3dSKiyoshi Ueda return blk_lld_busy(q); 1802cec47e3dSKiyoshi Ueda } 1803cec47e3dSKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_underlying_device_busy); 1804cec47e3dSKiyoshi Ueda 1805cec47e3dSKiyoshi Ueda static int dm_lld_busy(struct request_queue *q) 1806cec47e3dSKiyoshi Ueda { 1807cec47e3dSKiyoshi Ueda int r; 1808cec47e3dSKiyoshi Ueda struct mapped_device *md = q->queuedata; 180983d5e5b0SMikulas Patocka struct dm_table *map = dm_get_live_table_fast(md); 1810cec47e3dSKiyoshi Ueda 1811cec47e3dSKiyoshi Ueda if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) 1812cec47e3dSKiyoshi Ueda r = 1; 1813cec47e3dSKiyoshi Ueda else 1814cec47e3dSKiyoshi Ueda r = dm_table_any_busy_target(map); 1815cec47e3dSKiyoshi Ueda 181683d5e5b0SMikulas Patocka dm_put_live_table_fast(md); 1817cec47e3dSKiyoshi Ueda 1818cec47e3dSKiyoshi Ueda return r; 1819cec47e3dSKiyoshi Ueda } 1820cec47e3dSKiyoshi Ueda 18211da177e4SLinus Torvalds static int dm_any_congested(void *congested_data, int bdi_bits) 18221da177e4SLinus Torvalds { 18238a57dfc6SChandra Seetharaman int r = bdi_bits; 18248a57dfc6SChandra Seetharaman struct mapped_device *md = congested_data; 18258a57dfc6SChandra Seetharaman struct dm_table *map; 18261da177e4SLinus Torvalds 18271eb787ecSAlasdair G Kergon if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 182883d5e5b0SMikulas Patocka map = dm_get_live_table_fast(md); 18298a57dfc6SChandra Seetharaman if (map) { 1830cec47e3dSKiyoshi Ueda /* 1831cec47e3dSKiyoshi Ueda * Request-based dm cares about only own queue for 1832cec47e3dSKiyoshi Ueda * the query about congestion status of request_queue 1833cec47e3dSKiyoshi Ueda */ 1834cec47e3dSKiyoshi Ueda if (dm_request_based(md)) 1835cec47e3dSKiyoshi Ueda r = md->queue->backing_dev_info.state & 1836cec47e3dSKiyoshi Ueda bdi_bits; 1837cec47e3dSKiyoshi Ueda else 18381da177e4SLinus Torvalds r = dm_table_any_congested(map, bdi_bits); 18398a57dfc6SChandra Seetharaman } 184083d5e5b0SMikulas Patocka dm_put_live_table_fast(md); 18418a57dfc6SChandra Seetharaman } 18428a57dfc6SChandra Seetharaman 18431da177e4SLinus Torvalds return r; 18441da177e4SLinus Torvalds } 18451da177e4SLinus Torvalds 18461da177e4SLinus Torvalds /*----------------------------------------------------------------- 18471da177e4SLinus Torvalds * An IDR is used to keep track of allocated minor numbers. 18481da177e4SLinus Torvalds *---------------------------------------------------------------*/ 18492b06cfffSAlasdair G Kergon static void free_minor(int minor) 18501da177e4SLinus Torvalds { 1851f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 18521da177e4SLinus Torvalds idr_remove(&_minor_idr, minor); 1853f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 18541da177e4SLinus Torvalds } 18551da177e4SLinus Torvalds 18561da177e4SLinus Torvalds /* 18571da177e4SLinus Torvalds * See if the device with a specific minor # is free. 
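 * (idr_alloc() over the range [minor, minor + 1) returns -ENOSPC when
 * that minor is already taken, which specific_minor() reports as -EBUSY;
 * e.g. asking for an existing dm-5 fails with -EBUSY.)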
18581da177e4SLinus Torvalds */ 1859cf13ab8eSFrederik Deweerdt static int specific_minor(int minor) 18601da177e4SLinus Torvalds { 1861c9d76be6STejun Heo int r; 18621da177e4SLinus Torvalds 18631da177e4SLinus Torvalds if (minor >= (1 << MINORBITS)) 18641da177e4SLinus Torvalds return -EINVAL; 18651da177e4SLinus Torvalds 1866c9d76be6STejun Heo idr_preload(GFP_KERNEL); 1867f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 18681da177e4SLinus Torvalds 1869c9d76be6STejun Heo r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT); 18701da177e4SLinus Torvalds 1871f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 1872c9d76be6STejun Heo idr_preload_end(); 1873c9d76be6STejun Heo if (r < 0) 1874c9d76be6STejun Heo return r == -ENOSPC ? -EBUSY : r; 1875c9d76be6STejun Heo return 0; 18761da177e4SLinus Torvalds } 18771da177e4SLinus Torvalds 1878cf13ab8eSFrederik Deweerdt static int next_free_minor(int *minor) 18791da177e4SLinus Torvalds { 1880c9d76be6STejun Heo int r; 18811da177e4SLinus Torvalds 1882c9d76be6STejun Heo idr_preload(GFP_KERNEL); 1883f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 18841da177e4SLinus Torvalds 1885c9d76be6STejun Heo r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT); 18861da177e4SLinus Torvalds 1887f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 1888c9d76be6STejun Heo idr_preload_end(); 1889c9d76be6STejun Heo if (r < 0) 18901da177e4SLinus Torvalds return r; 1891c9d76be6STejun Heo *minor = r; 1892c9d76be6STejun Heo return 0; 18931da177e4SLinus Torvalds } 18941da177e4SLinus Torvalds 189583d5cde4SAlexey Dobriyan static const struct block_device_operations dm_blk_dops; 18961da177e4SLinus Torvalds 189753d5914fSMikulas Patocka static void dm_wq_work(struct work_struct *work); 189853d5914fSMikulas Patocka 18994a0b4ddfSMike Snitzer static void dm_init_md_queue(struct mapped_device *md) 19004a0b4ddfSMike Snitzer { 19014a0b4ddfSMike Snitzer /* 19024a0b4ddfSMike Snitzer * Request-based dm devices cannot be stacked on top of bio-based dm 19034a0b4ddfSMike Snitzer * devices. The type of this dm device has not been decided yet. 19044a0b4ddfSMike Snitzer * The type is decided at the first table loading time. 19054a0b4ddfSMike Snitzer * To prevent problematic device stacking, clear the queue flag 19064a0b4ddfSMike Snitzer * for request stacking support until then. 19074a0b4ddfSMike Snitzer * 19084a0b4ddfSMike Snitzer * This queue is new, so no concurrency on the queue_flags. 19094a0b4ddfSMike Snitzer */ 19104a0b4ddfSMike Snitzer queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue); 19114a0b4ddfSMike Snitzer 19124a0b4ddfSMike Snitzer md->queue->queuedata = md; 19134a0b4ddfSMike Snitzer md->queue->backing_dev_info.congested_fn = dm_any_congested; 19144a0b4ddfSMike Snitzer md->queue->backing_dev_info.congested_data = md; 19154a0b4ddfSMike Snitzer blk_queue_make_request(md->queue, dm_request); 19164a0b4ddfSMike Snitzer blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY); 19174a0b4ddfSMike Snitzer blk_queue_merge_bvec(md->queue, dm_merge_bvec); 19184a0b4ddfSMike Snitzer } 19194a0b4ddfSMike Snitzer 19201da177e4SLinus Torvalds /* 19211da177e4SLinus Torvalds * Allocate and initialise a blank device with a given minor. 
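 * On failure, the bad_* labels at the bottom unwind in exact reverse
 * order of allocation, so each label releases only what was successfully
 * set up before the failing step.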
19221da177e4SLinus Torvalds */ 19232b06cfffSAlasdair G Kergon static struct mapped_device *alloc_dev(int minor) 19241da177e4SLinus Torvalds { 19251da177e4SLinus Torvalds int r; 1926cf13ab8eSFrederik Deweerdt struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL); 1927ba61fdd1SJeff Mahoney void *old_md; 19281da177e4SLinus Torvalds 19291da177e4SLinus Torvalds if (!md) { 19301da177e4SLinus Torvalds DMWARN("unable to allocate device, out of memory."); 19311da177e4SLinus Torvalds return NULL; 19321da177e4SLinus Torvalds } 19331da177e4SLinus Torvalds 193410da4f79SJeff Mahoney if (!try_module_get(THIS_MODULE)) 19356ed7ade8SMilan Broz goto bad_module_get; 193610da4f79SJeff Mahoney 19371da177e4SLinus Torvalds /* get a minor number for the dev */ 19382b06cfffSAlasdair G Kergon if (minor == DM_ANY_MINOR) 1939cf13ab8eSFrederik Deweerdt r = next_free_minor(&minor); 19402b06cfffSAlasdair G Kergon else 1941cf13ab8eSFrederik Deweerdt r = specific_minor(minor); 19421da177e4SLinus Torvalds if (r < 0) 19436ed7ade8SMilan Broz goto bad_minor; 19441da177e4SLinus Torvalds 194583d5e5b0SMikulas Patocka r = init_srcu_struct(&md->io_barrier); 194683d5e5b0SMikulas Patocka if (r < 0) 194783d5e5b0SMikulas Patocka goto bad_io_barrier; 194883d5e5b0SMikulas Patocka 1949a5664dadSMike Snitzer md->type = DM_TYPE_NONE; 1950e61290a4SDaniel Walker mutex_init(&md->suspend_lock); 1951a5664dadSMike Snitzer mutex_init(&md->type_lock); 1952022c2611SMikulas Patocka spin_lock_init(&md->deferred_lock); 19531da177e4SLinus Torvalds atomic_set(&md->holders, 1); 19545c6bd75dSAlasdair G Kergon atomic_set(&md->open_count, 0); 19551da177e4SLinus Torvalds atomic_set(&md->event_nr, 0); 19567a8c3d3bSMike Anderson atomic_set(&md->uevent_seq, 0); 19577a8c3d3bSMike Anderson INIT_LIST_HEAD(&md->uevent_list); 19587a8c3d3bSMike Anderson spin_lock_init(&md->uevent_lock); 19591da177e4SLinus Torvalds 19604a0b4ddfSMike Snitzer md->queue = blk_alloc_queue(GFP_KERNEL); 19611da177e4SLinus Torvalds if (!md->queue) 19626ed7ade8SMilan Broz goto bad_queue; 19631da177e4SLinus Torvalds 19644a0b4ddfSMike Snitzer dm_init_md_queue(md); 19659faf400fSStefan Bader 19661da177e4SLinus Torvalds md->disk = alloc_disk(1); 19671da177e4SLinus Torvalds if (!md->disk) 19686ed7ade8SMilan Broz goto bad_disk; 19691da177e4SLinus Torvalds 1970316d315bSNikanth Karthikesan atomic_set(&md->pending[0], 0); 1971316d315bSNikanth Karthikesan atomic_set(&md->pending[1], 0); 1972f0b04115SJeff Mahoney init_waitqueue_head(&md->wait); 197353d5914fSMikulas Patocka INIT_WORK(&md->work, dm_wq_work); 1974f0b04115SJeff Mahoney init_waitqueue_head(&md->eventq); 19752995fa78SMikulas Patocka init_completion(&md->kobj_holder.completion); 1976f0b04115SJeff Mahoney 19771da177e4SLinus Torvalds md->disk->major = _major; 19781da177e4SLinus Torvalds md->disk->first_minor = minor; 19791da177e4SLinus Torvalds md->disk->fops = &dm_blk_dops; 19801da177e4SLinus Torvalds md->disk->queue = md->queue; 19811da177e4SLinus Torvalds md->disk->private_data = md; 19821da177e4SLinus Torvalds sprintf(md->disk->disk_name, "dm-%d", minor); 19831da177e4SLinus Torvalds add_disk(md->disk); 19847e51f257SMike Anderson format_dev_t(md->name, MKDEV(_major, minor)); 19851da177e4SLinus Torvalds 1986670368a8STejun Heo md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0); 1987304f3f6aSMilan Broz if (!md->wq) 1988304f3f6aSMilan Broz goto bad_thread; 1989304f3f6aSMilan Broz 199032a926daSMikulas Patocka md->bdev = bdget_disk(md->disk, 0); 199132a926daSMikulas Patocka if (!md->bdev) 199232a926daSMikulas Patocka goto bad_bdev; 
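	/*
	 * md->flush_bio is initialised once here and then reused as ci.bio
	 * for every empty REQ_FLUSH handled by __split_and_process_bio() /
	 * __send_empty_flush().
	 */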
199332a926daSMikulas Patocka 19946a8736d1STejun Heo bio_init(&md->flush_bio); 19956a8736d1STejun Heo md->flush_bio.bi_bdev = md->bdev; 19966a8736d1STejun Heo md->flush_bio.bi_rw = WRITE_FLUSH; 19976a8736d1STejun Heo 1998fd2ed4d2SMikulas Patocka dm_stats_init(&md->stats); 1999fd2ed4d2SMikulas Patocka 2000ba61fdd1SJeff Mahoney /* Populate the mapping, nobody knows we exist yet */ 2001f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 2002ba61fdd1SJeff Mahoney old_md = idr_replace(&_minor_idr, md, minor); 2003f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 2004ba61fdd1SJeff Mahoney 2005ba61fdd1SJeff Mahoney BUG_ON(old_md != MINOR_ALLOCED); 2006ba61fdd1SJeff Mahoney 20071da177e4SLinus Torvalds return md; 20081da177e4SLinus Torvalds 200932a926daSMikulas Patocka bad_bdev: 201032a926daSMikulas Patocka destroy_workqueue(md->wq); 2011304f3f6aSMilan Broz bad_thread: 201203022c54SZdenek Kabelac del_gendisk(md->disk); 2013304f3f6aSMilan Broz put_disk(md->disk); 20146ed7ade8SMilan Broz bad_disk: 20151312f40eSAl Viro blk_cleanup_queue(md->queue); 20166ed7ade8SMilan Broz bad_queue: 201783d5e5b0SMikulas Patocka cleanup_srcu_struct(&md->io_barrier); 201883d5e5b0SMikulas Patocka bad_io_barrier: 20191da177e4SLinus Torvalds free_minor(minor); 20206ed7ade8SMilan Broz bad_minor: 202110da4f79SJeff Mahoney module_put(THIS_MODULE); 20226ed7ade8SMilan Broz bad_module_get: 20231da177e4SLinus Torvalds kfree(md); 20241da177e4SLinus Torvalds return NULL; 20251da177e4SLinus Torvalds } 20261da177e4SLinus Torvalds 2027ae9da83fSJun'ichi Nomura static void unlock_fs(struct mapped_device *md); 2028ae9da83fSJun'ichi Nomura 20291da177e4SLinus Torvalds static void free_dev(struct mapped_device *md) 20301da177e4SLinus Torvalds { 2031f331c029STejun Heo int minor = MINOR(disk_devt(md->disk)); 203263d94e48SJun'ichi Nomura 2033ae9da83fSJun'ichi Nomura unlock_fs(md); 2034db8fef4fSMikulas Patocka bdput(md->bdev); 2035304f3f6aSMilan Broz destroy_workqueue(md->wq); 2036e6ee8c0bSKiyoshi Ueda if (md->io_pool) 20371da177e4SLinus Torvalds mempool_destroy(md->io_pool); 2038e6ee8c0bSKiyoshi Ueda if (md->bs) 20399faf400fSStefan Bader bioset_free(md->bs); 20409c47008dSMartin K. Petersen blk_integrity_unregister(md->disk); 20411da177e4SLinus Torvalds del_gendisk(md->disk); 204283d5e5b0SMikulas Patocka cleanup_srcu_struct(&md->io_barrier); 204363d94e48SJun'ichi Nomura free_minor(minor); 2044fba9f90eSJeff Mahoney 2045fba9f90eSJeff Mahoney spin_lock(&_minor_lock); 2046fba9f90eSJeff Mahoney md->disk->private_data = NULL; 2047fba9f90eSJeff Mahoney spin_unlock(&_minor_lock); 2048fba9f90eSJeff Mahoney 20491da177e4SLinus Torvalds put_disk(md->disk); 20501312f40eSAl Viro blk_cleanup_queue(md->queue); 2051fd2ed4d2SMikulas Patocka dm_stats_cleanup(&md->stats); 205210da4f79SJeff Mahoney module_put(THIS_MODULE); 20531da177e4SLinus Torvalds kfree(md); 20541da177e4SLinus Torvalds } 20551da177e4SLinus Torvalds 2056e6ee8c0bSKiyoshi Ueda static void __bind_mempools(struct mapped_device *md, struct dm_table *t) 2057e6ee8c0bSKiyoshi Ueda { 2058c0820cf5SMikulas Patocka struct dm_md_mempools *p = dm_table_get_md_mempools(t); 2059e6ee8c0bSKiyoshi Ueda 20605f015204SJun'ichi Nomura if (md->io_pool && md->bs) { 206116245bdcSJun'ichi Nomura /* The md already has necessary mempools. */ 206216245bdcSJun'ichi Nomura if (dm_table_get_type(t) == DM_TYPE_BIO_BASED) { 2063c0820cf5SMikulas Patocka /* 206416245bdcSJun'ichi Nomura * Reload bioset because front_pad may have changed 206516245bdcSJun'ichi Nomura * because a different table was loaded. 
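		 * (front_pad is the per-bio padding that embeds struct
		 * dm_target_io, plus any per-bio target data, ahead of each
		 * bio -- see the container_of() in alloc_tio() -- so targets
		 * of the new table may need a differently sized pad.)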
2066c0820cf5SMikulas Patocka */ 2067c0820cf5SMikulas Patocka bioset_free(md->bs); 2068c0820cf5SMikulas Patocka md->bs = p->bs; 2069c0820cf5SMikulas Patocka p->bs = NULL; 207016245bdcSJun'ichi Nomura } else if (dm_table_get_type(t) == DM_TYPE_REQUEST_BASED) { 207116245bdcSJun'ichi Nomura /* 207216245bdcSJun'ichi Nomura * There's no need to reload with request-based dm 207316245bdcSJun'ichi Nomura * because the size of front_pad doesn't change. 207416245bdcSJun'ichi Nomura * Note for future: If you are to reload bioset, 207516245bdcSJun'ichi Nomura * prep-ed requests in the queue may refer 207616245bdcSJun'ichi Nomura * to bio from the old bioset, so you must walk 207716245bdcSJun'ichi Nomura * through the queue to unprep. 207816245bdcSJun'ichi Nomura */ 207916245bdcSJun'ichi Nomura } 2080e6ee8c0bSKiyoshi Ueda goto out; 2081c0820cf5SMikulas Patocka } 2082e6ee8c0bSKiyoshi Ueda 20835f015204SJun'ichi Nomura BUG_ON(!p || md->io_pool || md->bs); 2084e6ee8c0bSKiyoshi Ueda 2085e6ee8c0bSKiyoshi Ueda md->io_pool = p->io_pool; 2086e6ee8c0bSKiyoshi Ueda p->io_pool = NULL; 2087e6ee8c0bSKiyoshi Ueda md->bs = p->bs; 2088e6ee8c0bSKiyoshi Ueda p->bs = NULL; 2089e6ee8c0bSKiyoshi Ueda 2090e6ee8c0bSKiyoshi Ueda out: 2091e6ee8c0bSKiyoshi Ueda /* mempool bind completed, now no need any mempools in the table */ 2092e6ee8c0bSKiyoshi Ueda dm_table_free_md_mempools(t); 2093e6ee8c0bSKiyoshi Ueda } 2094e6ee8c0bSKiyoshi Ueda 20951da177e4SLinus Torvalds /* 20961da177e4SLinus Torvalds * Bind a table to the device. 20971da177e4SLinus Torvalds */ 20981da177e4SLinus Torvalds static void event_callback(void *context) 20991da177e4SLinus Torvalds { 21007a8c3d3bSMike Anderson unsigned long flags; 21017a8c3d3bSMike Anderson LIST_HEAD(uevents); 21021da177e4SLinus Torvalds struct mapped_device *md = (struct mapped_device *) context; 21031da177e4SLinus Torvalds 21047a8c3d3bSMike Anderson spin_lock_irqsave(&md->uevent_lock, flags); 21057a8c3d3bSMike Anderson list_splice_init(&md->uevent_list, &uevents); 21067a8c3d3bSMike Anderson spin_unlock_irqrestore(&md->uevent_lock, flags); 21077a8c3d3bSMike Anderson 2108ed9e1982STejun Heo dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); 21097a8c3d3bSMike Anderson 21101da177e4SLinus Torvalds atomic_inc(&md->event_nr); 21111da177e4SLinus Torvalds wake_up(&md->eventq); 21121da177e4SLinus Torvalds } 21131da177e4SLinus Torvalds 2114c217649bSMike Snitzer /* 2115c217649bSMike Snitzer * Protected by md->suspend_lock obtained by dm_swap_table(). 2116c217649bSMike Snitzer */ 21174e90188bSAlasdair G Kergon static void __set_size(struct mapped_device *md, sector_t size) 21181da177e4SLinus Torvalds { 21194e90188bSAlasdair G Kergon set_capacity(md->disk, size); 21201da177e4SLinus Torvalds 2121db8fef4fSMikulas Patocka i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT); 21221da177e4SLinus Torvalds } 21231da177e4SLinus Torvalds 2124042d2a9bSAlasdair G Kergon /* 2125d5b9dd04SMikulas Patocka * Return 1 if the queue has a compulsory merge_bvec_fn function. 2126d5b9dd04SMikulas Patocka * 2127d5b9dd04SMikulas Patocka * If this function returns 0, then the device is either a non-dm 2128d5b9dd04SMikulas Patocka * device without a merge_bvec_fn, or it is a dm device that is 2129d5b9dd04SMikulas Patocka * able to split any bios it receives that are too big. 
2130d5b9dd04SMikulas Patocka */ 2131d5b9dd04SMikulas Patocka int dm_queue_merge_is_compulsory(struct request_queue *q) 2132d5b9dd04SMikulas Patocka { 2133d5b9dd04SMikulas Patocka struct mapped_device *dev_md; 2134d5b9dd04SMikulas Patocka 2135d5b9dd04SMikulas Patocka if (!q->merge_bvec_fn) 2136d5b9dd04SMikulas Patocka return 0; 2137d5b9dd04SMikulas Patocka 2138d5b9dd04SMikulas Patocka if (q->make_request_fn == dm_request) { 2139d5b9dd04SMikulas Patocka dev_md = q->queuedata; 2140d5b9dd04SMikulas Patocka if (test_bit(DMF_MERGE_IS_OPTIONAL, &dev_md->flags)) 2141d5b9dd04SMikulas Patocka return 0; 2142d5b9dd04SMikulas Patocka } 2143d5b9dd04SMikulas Patocka 2144d5b9dd04SMikulas Patocka return 1; 2145d5b9dd04SMikulas Patocka } 2146d5b9dd04SMikulas Patocka 2147d5b9dd04SMikulas Patocka static int dm_device_merge_is_compulsory(struct dm_target *ti, 2148d5b9dd04SMikulas Patocka struct dm_dev *dev, sector_t start, 2149d5b9dd04SMikulas Patocka sector_t len, void *data) 2150d5b9dd04SMikulas Patocka { 2151d5b9dd04SMikulas Patocka struct block_device *bdev = dev->bdev; 2152d5b9dd04SMikulas Patocka struct request_queue *q = bdev_get_queue(bdev); 2153d5b9dd04SMikulas Patocka 2154d5b9dd04SMikulas Patocka return dm_queue_merge_is_compulsory(q); 2155d5b9dd04SMikulas Patocka } 2156d5b9dd04SMikulas Patocka 2157d5b9dd04SMikulas Patocka /* 2158d5b9dd04SMikulas Patocka * Return 1 if it is acceptable to ignore merge_bvec_fn based 2159d5b9dd04SMikulas Patocka * on the properties of the underlying devices. 2160d5b9dd04SMikulas Patocka */ 2161d5b9dd04SMikulas Patocka static int dm_table_merge_is_optional(struct dm_table *table) 2162d5b9dd04SMikulas Patocka { 2163d5b9dd04SMikulas Patocka unsigned i = 0; 2164d5b9dd04SMikulas Patocka struct dm_target *ti; 2165d5b9dd04SMikulas Patocka 2166d5b9dd04SMikulas Patocka while (i < dm_table_get_num_targets(table)) { 2167d5b9dd04SMikulas Patocka ti = dm_table_get_target(table, i++); 2168d5b9dd04SMikulas Patocka 2169d5b9dd04SMikulas Patocka if (ti->type->iterate_devices && 2170d5b9dd04SMikulas Patocka ti->type->iterate_devices(ti, dm_device_merge_is_compulsory, NULL)) 2171d5b9dd04SMikulas Patocka return 0; 2172d5b9dd04SMikulas Patocka } 2173d5b9dd04SMikulas Patocka 2174d5b9dd04SMikulas Patocka return 1; 2175d5b9dd04SMikulas Patocka } 2176d5b9dd04SMikulas Patocka 2177d5b9dd04SMikulas Patocka /* 2178042d2a9bSAlasdair G Kergon * Returns old map, which caller must destroy. 2179042d2a9bSAlasdair G Kergon */ 2180042d2a9bSAlasdair G Kergon static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, 2181754c5fc7SMike Snitzer struct queue_limits *limits) 21821da177e4SLinus Torvalds { 2183042d2a9bSAlasdair G Kergon struct dm_table *old_map; 2184165125e1SJens Axboe struct request_queue *q = md->queue; 21851da177e4SLinus Torvalds sector_t size; 2186d5b9dd04SMikulas Patocka int merge_is_optional; 21871da177e4SLinus Torvalds 21881da177e4SLinus Torvalds size = dm_table_get_size(t); 21893ac51e74SDarrick J. Wong 21903ac51e74SDarrick J. Wong /* 21913ac51e74SDarrick J. Wong * Wipe any geometry if the size of the table changed. 21923ac51e74SDarrick J. Wong */ 2193fd2ed4d2SMikulas Patocka if (size != dm_get_size(md)) 21943ac51e74SDarrick J. Wong memset(&md->geometry, 0, sizeof(md->geometry)); 21953ac51e74SDarrick J. 
Wong 21964e90188bSAlasdair G Kergon __set_size(md, size); 21971da177e4SLinus Torvalds 2198cf222b37SAlasdair G Kergon dm_table_event_callback(t, event_callback, md); 21992ca3310eSAlasdair G Kergon 2200e6ee8c0bSKiyoshi Ueda /* 2201e6ee8c0bSKiyoshi Ueda * The queue hasn't been stopped yet, if the old table type wasn't 2202e6ee8c0bSKiyoshi Ueda * for request-based during suspension. So stop it to prevent 2203e6ee8c0bSKiyoshi Ueda * I/O mapping before resume. 2204e6ee8c0bSKiyoshi Ueda * This must be done before setting the queue restrictions, 2205e6ee8c0bSKiyoshi Ueda * because request-based dm may be run just after the setting. 2206e6ee8c0bSKiyoshi Ueda */ 2207e6ee8c0bSKiyoshi Ueda if (dm_table_request_based(t) && !blk_queue_stopped(q)) 2208e6ee8c0bSKiyoshi Ueda stop_queue(q); 2209e6ee8c0bSKiyoshi Ueda 2210e6ee8c0bSKiyoshi Ueda __bind_mempools(md, t); 2211e6ee8c0bSKiyoshi Ueda 2212d5b9dd04SMikulas Patocka merge_is_optional = dm_table_merge_is_optional(t); 2213d5b9dd04SMikulas Patocka 2214042d2a9bSAlasdair G Kergon old_map = md->map; 221583d5e5b0SMikulas Patocka rcu_assign_pointer(md->map, t); 221636a0456fSAlasdair G Kergon md->immutable_target_type = dm_table_get_immutable_target_type(t); 221736a0456fSAlasdair G Kergon 2218754c5fc7SMike Snitzer dm_table_set_restrictions(t, q, limits); 2219d5b9dd04SMikulas Patocka if (merge_is_optional) 2220d5b9dd04SMikulas Patocka set_bit(DMF_MERGE_IS_OPTIONAL, &md->flags); 2221d5b9dd04SMikulas Patocka else 2222d5b9dd04SMikulas Patocka clear_bit(DMF_MERGE_IS_OPTIONAL, &md->flags); 222383d5e5b0SMikulas Patocka dm_sync_table(md); 22242ca3310eSAlasdair G Kergon 2225042d2a9bSAlasdair G Kergon return old_map; 22261da177e4SLinus Torvalds } 22271da177e4SLinus Torvalds 2228a7940155SAlasdair G Kergon /* 2229a7940155SAlasdair G Kergon * Returns unbound table for the caller to free. 2230a7940155SAlasdair G Kergon */ 2231a7940155SAlasdair G Kergon static struct dm_table *__unbind(struct mapped_device *md) 22321da177e4SLinus Torvalds { 22331da177e4SLinus Torvalds struct dm_table *map = md->map; 22341da177e4SLinus Torvalds 22351da177e4SLinus Torvalds if (!map) 2236a7940155SAlasdair G Kergon return NULL; 22371da177e4SLinus Torvalds 22381da177e4SLinus Torvalds dm_table_event_callback(map, NULL, NULL); 22399cdb8520SMonam Agarwal RCU_INIT_POINTER(md->map, NULL); 224083d5e5b0SMikulas Patocka dm_sync_table(md); 2241a7940155SAlasdair G Kergon 2242a7940155SAlasdair G Kergon return map; 22431da177e4SLinus Torvalds } 22441da177e4SLinus Torvalds 22451da177e4SLinus Torvalds /* 22461da177e4SLinus Torvalds * Constructor for a new device. 22471da177e4SLinus Torvalds */ 22482b06cfffSAlasdair G Kergon int dm_create(int minor, struct mapped_device **result) 22491da177e4SLinus Torvalds { 22501da177e4SLinus Torvalds struct mapped_device *md; 22511da177e4SLinus Torvalds 22522b06cfffSAlasdair G Kergon md = alloc_dev(minor); 22531da177e4SLinus Torvalds if (!md) 22541da177e4SLinus Torvalds return -ENXIO; 22551da177e4SLinus Torvalds 2256784aae73SMilan Broz dm_sysfs_init(md); 2257784aae73SMilan Broz 22581da177e4SLinus Torvalds *result = md; 22591da177e4SLinus Torvalds return 0; 22601da177e4SLinus Torvalds } 22611da177e4SLinus Torvalds 2262a5664dadSMike Snitzer /* 2263a5664dadSMike Snitzer * Functions to manage md->type. 2264a5664dadSMike Snitzer * All are required to hold md->type_lock. 
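 *
 * Typical caller pattern (illustrative sketch, cf. the ioctl layer's
 * table load path):
 *
 *	dm_lock_md_type(md);
 *	if (dm_get_md_type(md) == DM_TYPE_NONE)
 *		dm_set_md_type(md, dm_table_get_type(t));
 *	dm_unlock_md_type(md);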
2265a5664dadSMike Snitzer */ 2266a5664dadSMike Snitzer void dm_lock_md_type(struct mapped_device *md) 2267a5664dadSMike Snitzer { 2268a5664dadSMike Snitzer mutex_lock(&md->type_lock); 2269a5664dadSMike Snitzer } 2270a5664dadSMike Snitzer 2271a5664dadSMike Snitzer void dm_unlock_md_type(struct mapped_device *md) 2272a5664dadSMike Snitzer { 2273a5664dadSMike Snitzer mutex_unlock(&md->type_lock); 2274a5664dadSMike Snitzer } 2275a5664dadSMike Snitzer 2276a5664dadSMike Snitzer void dm_set_md_type(struct mapped_device *md, unsigned type) 2277a5664dadSMike Snitzer { 227800c4fc3bSMike Snitzer BUG_ON(!mutex_is_locked(&md->type_lock)); 2279a5664dadSMike Snitzer md->type = type; 2280a5664dadSMike Snitzer } 2281a5664dadSMike Snitzer 2282a5664dadSMike Snitzer unsigned dm_get_md_type(struct mapped_device *md) 2283a5664dadSMike Snitzer { 228400c4fc3bSMike Snitzer BUG_ON(!mutex_is_locked(&md->type_lock)); 2285a5664dadSMike Snitzer return md->type; 2286a5664dadSMike Snitzer } 2287a5664dadSMike Snitzer 228836a0456fSAlasdair G Kergon struct target_type *dm_get_immutable_target_type(struct mapped_device *md) 228936a0456fSAlasdair G Kergon { 229036a0456fSAlasdair G Kergon return md->immutable_target_type; 229136a0456fSAlasdair G Kergon } 229236a0456fSAlasdair G Kergon 22934a0b4ddfSMike Snitzer /* 2294f84cb8a4SMike Snitzer * The queue_limits are only valid as long as you have a reference 2295f84cb8a4SMike Snitzer * count on 'md'. 2296f84cb8a4SMike Snitzer */ 2297f84cb8a4SMike Snitzer struct queue_limits *dm_get_queue_limits(struct mapped_device *md) 2298f84cb8a4SMike Snitzer { 2299f84cb8a4SMike Snitzer BUG_ON(!atomic_read(&md->holders)); 2300f84cb8a4SMike Snitzer return &md->queue->limits; 2301f84cb8a4SMike Snitzer } 2302f84cb8a4SMike Snitzer EXPORT_SYMBOL_GPL(dm_get_queue_limits); 2303f84cb8a4SMike Snitzer 2304f84cb8a4SMike Snitzer /* 23054a0b4ddfSMike Snitzer * Fully initialize a request-based queue (->elevator, ->request_fn, etc). 
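 * (Bio-based devices keep the lightweight queue configured in
 * dm_init_md_queue(); only a DM_TYPE_REQUEST_BASED device gets the
 * elevator, dm_request_fn and the prep/lld_busy hooks, via
 * dm_setup_md_queue() below.)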
23064a0b4ddfSMike Snitzer */ 23074a0b4ddfSMike Snitzer static int dm_init_request_based_queue(struct mapped_device *md) 23084a0b4ddfSMike Snitzer { 23094a0b4ddfSMike Snitzer struct request_queue *q = NULL; 23104a0b4ddfSMike Snitzer 23114a0b4ddfSMike Snitzer if (md->queue->elevator) 23124a0b4ddfSMike Snitzer return 1; 23134a0b4ddfSMike Snitzer 23144a0b4ddfSMike Snitzer /* Fully initialize the queue */ 23154a0b4ddfSMike Snitzer q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL); 23164a0b4ddfSMike Snitzer if (!q) 23174a0b4ddfSMike Snitzer return 0; 23184a0b4ddfSMike Snitzer 23194a0b4ddfSMike Snitzer md->queue = q; 23204a0b4ddfSMike Snitzer dm_init_md_queue(md); 23214a0b4ddfSMike Snitzer blk_queue_softirq_done(md->queue, dm_softirq_done); 23224a0b4ddfSMike Snitzer blk_queue_prep_rq(md->queue, dm_prep_fn); 23234a0b4ddfSMike Snitzer blk_queue_lld_busy(md->queue, dm_lld_busy); 23244a0b4ddfSMike Snitzer 23254a0b4ddfSMike Snitzer elv_register_queue(md->queue); 23264a0b4ddfSMike Snitzer 23274a0b4ddfSMike Snitzer return 1; 23284a0b4ddfSMike Snitzer } 23294a0b4ddfSMike Snitzer 23304a0b4ddfSMike Snitzer /* 23314a0b4ddfSMike Snitzer * Setup the DM device's queue based on md's type 23324a0b4ddfSMike Snitzer */ 23334a0b4ddfSMike Snitzer int dm_setup_md_queue(struct mapped_device *md) 23344a0b4ddfSMike Snitzer { 23354a0b4ddfSMike Snitzer if ((dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) && 23364a0b4ddfSMike Snitzer !dm_init_request_based_queue(md)) { 23374a0b4ddfSMike Snitzer DMWARN("Cannot initialize queue for request-based mapped device"); 23384a0b4ddfSMike Snitzer return -EINVAL; 23394a0b4ddfSMike Snitzer } 23404a0b4ddfSMike Snitzer 23414a0b4ddfSMike Snitzer return 0; 23424a0b4ddfSMike Snitzer } 23434a0b4ddfSMike Snitzer 2344637842cfSDavid Teigland static struct mapped_device *dm_find_md(dev_t dev) 23451da177e4SLinus Torvalds { 23461da177e4SLinus Torvalds struct mapped_device *md; 23471da177e4SLinus Torvalds unsigned minor = MINOR(dev); 23481da177e4SLinus Torvalds 23491da177e4SLinus Torvalds if (MAJOR(dev) != _major || minor >= (1 << MINORBITS)) 23501da177e4SLinus Torvalds return NULL; 23511da177e4SLinus Torvalds 2352f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 23531da177e4SLinus Torvalds 23541da177e4SLinus Torvalds md = idr_find(&_minor_idr, minor); 2355fba9f90eSJeff Mahoney if (md && (md == MINOR_ALLOCED || 2356f331c029STejun Heo (MINOR(disk_devt(dm_disk(md))) != minor) || 2357abdc568bSKiyoshi Ueda dm_deleting_md(md) || 2358fba9f90eSJeff Mahoney test_bit(DMF_FREEING, &md->flags))) { 2359637842cfSDavid Teigland md = NULL; 2360fba9f90eSJeff Mahoney goto out; 2361fba9f90eSJeff Mahoney } 23621da177e4SLinus Torvalds 2363fba9f90eSJeff Mahoney out: 2364f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 23651da177e4SLinus Torvalds 2366637842cfSDavid Teigland return md; 2367637842cfSDavid Teigland } 2368637842cfSDavid Teigland 2369d229a958SDavid Teigland struct mapped_device *dm_get_md(dev_t dev) 2370d229a958SDavid Teigland { 2371d229a958SDavid Teigland struct mapped_device *md = dm_find_md(dev); 2372d229a958SDavid Teigland 2373d229a958SDavid Teigland if (md) 2374d229a958SDavid Teigland dm_get(md); 2375d229a958SDavid Teigland 2376d229a958SDavid Teigland return md; 2377d229a958SDavid Teigland } 23783cf2e4baSAlasdair G Kergon EXPORT_SYMBOL_GPL(dm_get_md); 2379d229a958SDavid Teigland 23809ade92a9SAlasdair G Kergon void *dm_get_mdptr(struct mapped_device *md) 2381637842cfSDavid Teigland { 23829ade92a9SAlasdair G Kergon return md->interface_ptr; 23831da177e4SLinus Torvalds } 23841da177e4SLinus Torvalds 
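/*
 * Illustrative pairing of the mdptr accessors (a sketch; in-tree, the
 * ioctl layer keeps its hash_cell here):
 *
 *	dm_set_mdptr(md, private_data);		at registration
 *	...
 *	private_data = dm_get_mdptr(md);	on each lookup
 */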
23851da177e4SLinus Torvalds void dm_set_mdptr(struct mapped_device *md, void *ptr) 23861da177e4SLinus Torvalds { 23871da177e4SLinus Torvalds md->interface_ptr = ptr; 23881da177e4SLinus Torvalds } 23891da177e4SLinus Torvalds 23901da177e4SLinus Torvalds void dm_get(struct mapped_device *md) 23911da177e4SLinus Torvalds { 23921da177e4SLinus Torvalds atomic_inc(&md->holders); 23933f77316dSKiyoshi Ueda BUG_ON(test_bit(DMF_FREEING, &md->flags)); 23941da177e4SLinus Torvalds } 23951da177e4SLinus Torvalds 239672d94861SAlasdair G Kergon const char *dm_device_name(struct mapped_device *md) 239772d94861SAlasdair G Kergon { 239872d94861SAlasdair G Kergon return md->name; 239972d94861SAlasdair G Kergon } 240072d94861SAlasdair G Kergon EXPORT_SYMBOL_GPL(dm_device_name); 240172d94861SAlasdair G Kergon 24023f77316dSKiyoshi Ueda static void __dm_destroy(struct mapped_device *md, bool wait) 24031da177e4SLinus Torvalds { 24041134e5aeSMike Anderson struct dm_table *map; 240583d5e5b0SMikulas Patocka int srcu_idx; 24061da177e4SLinus Torvalds 24073f77316dSKiyoshi Ueda might_sleep(); 2408fba9f90eSJeff Mahoney 24093f77316dSKiyoshi Ueda spin_lock(&_minor_lock); 241083d5e5b0SMikulas Patocka map = dm_get_live_table(md, &srcu_idx); 24113f77316dSKiyoshi Ueda idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); 2412fba9f90eSJeff Mahoney set_bit(DMF_FREEING, &md->flags); 2413f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 24143f77316dSKiyoshi Ueda 24154f186f8bSKiyoshi Ueda if (!dm_suspended_md(md)) { 24161da177e4SLinus Torvalds dm_table_presuspend_targets(map); 24171da177e4SLinus Torvalds dm_table_postsuspend_targets(map); 24181da177e4SLinus Torvalds } 24193f77316dSKiyoshi Ueda 242083d5e5b0SMikulas Patocka /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ 242183d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 242283d5e5b0SMikulas Patocka 24233f77316dSKiyoshi Ueda /* 24243f77316dSKiyoshi Ueda * Rare, but there may be I/O requests still going to complete, 24253f77316dSKiyoshi Ueda * for example. Wait for all references to disappear. 24263f77316dSKiyoshi Ueda * No one should increment the reference count of the mapped_device, 24273f77316dSKiyoshi Ueda * after the mapped_device state becomes DMF_FREEING. 24283f77316dSKiyoshi Ueda */ 24293f77316dSKiyoshi Ueda if (wait) 24303f77316dSKiyoshi Ueda while (atomic_read(&md->holders)) 24313f77316dSKiyoshi Ueda msleep(1); 24323f77316dSKiyoshi Ueda else if (atomic_read(&md->holders)) 24333f77316dSKiyoshi Ueda DMWARN("%s: Forcibly removing mapped_device still in use! 
(%d users)", 24343f77316dSKiyoshi Ueda dm_device_name(md), atomic_read(&md->holders)); 24353f77316dSKiyoshi Ueda 2436784aae73SMilan Broz dm_sysfs_exit(md); 2437a7940155SAlasdair G Kergon dm_table_destroy(__unbind(md)); 24381da177e4SLinus Torvalds free_dev(md); 24391da177e4SLinus Torvalds } 24403f77316dSKiyoshi Ueda 24413f77316dSKiyoshi Ueda void dm_destroy(struct mapped_device *md) 24423f77316dSKiyoshi Ueda { 24433f77316dSKiyoshi Ueda __dm_destroy(md, true); 24443f77316dSKiyoshi Ueda } 24453f77316dSKiyoshi Ueda 24463f77316dSKiyoshi Ueda void dm_destroy_immediate(struct mapped_device *md) 24473f77316dSKiyoshi Ueda { 24483f77316dSKiyoshi Ueda __dm_destroy(md, false); 24493f77316dSKiyoshi Ueda } 24503f77316dSKiyoshi Ueda 24513f77316dSKiyoshi Ueda void dm_put(struct mapped_device *md) 24523f77316dSKiyoshi Ueda { 24533f77316dSKiyoshi Ueda atomic_dec(&md->holders); 24541da177e4SLinus Torvalds } 245579eb885cSEdward Goggin EXPORT_SYMBOL_GPL(dm_put); 24561da177e4SLinus Torvalds 2457401600dfSMikulas Patocka static int dm_wait_for_completion(struct mapped_device *md, int interruptible) 245846125c1cSMilan Broz { 245946125c1cSMilan Broz int r = 0; 2460b44ebeb0SMikulas Patocka DECLARE_WAITQUEUE(wait, current); 2461b44ebeb0SMikulas Patocka 2462b44ebeb0SMikulas Patocka add_wait_queue(&md->wait, &wait); 246346125c1cSMilan Broz 246446125c1cSMilan Broz while (1) { 2465401600dfSMikulas Patocka set_current_state(interruptible); 246646125c1cSMilan Broz 2467b4324feeSKiyoshi Ueda if (!md_in_flight(md)) 246846125c1cSMilan Broz break; 246946125c1cSMilan Broz 2470401600dfSMikulas Patocka if (interruptible == TASK_INTERRUPTIBLE && 2471401600dfSMikulas Patocka signal_pending(current)) { 247246125c1cSMilan Broz r = -EINTR; 247346125c1cSMilan Broz break; 247446125c1cSMilan Broz } 247546125c1cSMilan Broz 247646125c1cSMilan Broz io_schedule(); 247746125c1cSMilan Broz } 247846125c1cSMilan Broz set_current_state(TASK_RUNNING); 247946125c1cSMilan Broz 2480b44ebeb0SMikulas Patocka remove_wait_queue(&md->wait, &wait); 2481b44ebeb0SMikulas Patocka 248246125c1cSMilan Broz return r; 248346125c1cSMilan Broz } 248446125c1cSMilan Broz 24851da177e4SLinus Torvalds /* 24861da177e4SLinus Torvalds * Process the deferred bios 24871da177e4SLinus Torvalds */ 2488ef208587SMikulas Patocka static void dm_wq_work(struct work_struct *work) 24891da177e4SLinus Torvalds { 2490ef208587SMikulas Patocka struct mapped_device *md = container_of(work, struct mapped_device, 2491ef208587SMikulas Patocka work); 24926d6f10dfSMilan Broz struct bio *c; 249383d5e5b0SMikulas Patocka int srcu_idx; 249483d5e5b0SMikulas Patocka struct dm_table *map; 24951da177e4SLinus Torvalds 249683d5e5b0SMikulas Patocka map = dm_get_live_table(md, &srcu_idx); 2497ef208587SMikulas Patocka 24983b00b203SMikulas Patocka while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 2499022c2611SMikulas Patocka spin_lock_irq(&md->deferred_lock); 2500022c2611SMikulas Patocka c = bio_list_pop(&md->deferred); 2501022c2611SMikulas Patocka spin_unlock_irq(&md->deferred_lock); 2502022c2611SMikulas Patocka 25036a8736d1STejun Heo if (!c) 2504df12ee99SAlasdair G Kergon break; 250573d410c0SMilan Broz 2506e6ee8c0bSKiyoshi Ueda if (dm_request_based(md)) 2507e6ee8c0bSKiyoshi Ueda generic_make_request(c); 2508af7e466aSMikulas Patocka else 250983d5e5b0SMikulas Patocka __split_and_process_bio(md, map, c); 2510e6ee8c0bSKiyoshi Ueda } 25113b00b203SMikulas Patocka 251283d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 25131da177e4SLinus Torvalds } 25141da177e4SLinus Torvalds 25159a1fb464SMikulas 
Patocka static void dm_queue_flush(struct mapped_device *md) 2516304f3f6aSMilan Broz { 25173b00b203SMikulas Patocka clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 25184e857c58SPeter Zijlstra smp_mb__after_atomic(); 251953d5914fSMikulas Patocka queue_work(md->wq, &md->work); 2520304f3f6aSMilan Broz } 2521304f3f6aSMilan Broz 25221da177e4SLinus Torvalds /* 2523042d2a9bSAlasdair G Kergon * Swap in a new table, returning the old one for the caller to destroy. 25241da177e4SLinus Torvalds */ 2525042d2a9bSAlasdair G Kergon struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) 25261da177e4SLinus Torvalds { 252787eb5b21SMike Christie struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL); 2528754c5fc7SMike Snitzer struct queue_limits limits; 2529042d2a9bSAlasdair G Kergon int r; 25301da177e4SLinus Torvalds 2531e61290a4SDaniel Walker mutex_lock(&md->suspend_lock); 25321da177e4SLinus Torvalds 25331da177e4SLinus Torvalds /* device must be suspended */ 25344f186f8bSKiyoshi Ueda if (!dm_suspended_md(md)) 253593c534aeSAlasdair G Kergon goto out; 25361da177e4SLinus Torvalds 25373ae70656SMike Snitzer /* 25383ae70656SMike Snitzer * If the new table has no data devices, retain the existing limits. 25393ae70656SMike Snitzer * This helps multipath with queue_if_no_path if all paths disappear, 25403ae70656SMike Snitzer * then new I/O is queued based on these limits, and then some paths 25413ae70656SMike Snitzer * reappear. 25423ae70656SMike Snitzer */ 25433ae70656SMike Snitzer if (dm_table_has_no_data_devices(table)) { 254483d5e5b0SMikulas Patocka live_map = dm_get_live_table_fast(md); 25453ae70656SMike Snitzer if (live_map) 25463ae70656SMike Snitzer limits = md->queue->limits; 254783d5e5b0SMikulas Patocka dm_put_live_table_fast(md); 25483ae70656SMike Snitzer } 25493ae70656SMike Snitzer 255087eb5b21SMike Christie if (!live_map) { 2551754c5fc7SMike Snitzer r = dm_calculate_queue_limits(table, &limits); 2552042d2a9bSAlasdair G Kergon if (r) { 2553042d2a9bSAlasdair G Kergon map = ERR_PTR(r); 2554754c5fc7SMike Snitzer goto out; 2555042d2a9bSAlasdair G Kergon } 255687eb5b21SMike Christie } 2557754c5fc7SMike Snitzer 2558042d2a9bSAlasdair G Kergon map = __bind(md, table, &limits); 25591da177e4SLinus Torvalds 256093c534aeSAlasdair G Kergon out: 2561e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock); 2562042d2a9bSAlasdair G Kergon return map; 25631da177e4SLinus Torvalds } 25641da177e4SLinus Torvalds 25651da177e4SLinus Torvalds /* 25661da177e4SLinus Torvalds * Functions to lock and unlock any filesystem running on the 25671da177e4SLinus Torvalds * device. 
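 *
 * These are thin wrappers around freeze_bdev()/thaw_bdev(); the pairing,
 * in sketch form (error handling as in lock_fs() below):
 *
 *	sb = freeze_bdev(bdev);
 *	if (IS_ERR(sb))
 *		return PTR_ERR(sb);
 *	...
 *	thaw_bdev(bdev, sb);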
25681da177e4SLinus Torvalds */ 25692ca3310eSAlasdair G Kergon static int lock_fs(struct mapped_device *md) 25701da177e4SLinus Torvalds { 2571e39e2e95SAlasdair G Kergon int r; 25721da177e4SLinus Torvalds 25731da177e4SLinus Torvalds WARN_ON(md->frozen_sb); 2574dfbe03f6SAlasdair G Kergon 2575db8fef4fSMikulas Patocka md->frozen_sb = freeze_bdev(md->bdev); 2576dfbe03f6SAlasdair G Kergon if (IS_ERR(md->frozen_sb)) { 2577cf222b37SAlasdair G Kergon r = PTR_ERR(md->frozen_sb); 2578e39e2e95SAlasdair G Kergon md->frozen_sb = NULL; 2579e39e2e95SAlasdair G Kergon return r; 2580dfbe03f6SAlasdair G Kergon } 2581dfbe03f6SAlasdair G Kergon 2582aa8d7c2fSAlasdair G Kergon set_bit(DMF_FROZEN, &md->flags); 2583aa8d7c2fSAlasdair G Kergon 25841da177e4SLinus Torvalds return 0; 25851da177e4SLinus Torvalds } 25861da177e4SLinus Torvalds 25872ca3310eSAlasdair G Kergon static void unlock_fs(struct mapped_device *md) 25881da177e4SLinus Torvalds { 2589aa8d7c2fSAlasdair G Kergon if (!test_bit(DMF_FROZEN, &md->flags)) 2590aa8d7c2fSAlasdair G Kergon return; 2591aa8d7c2fSAlasdair G Kergon 2592db8fef4fSMikulas Patocka thaw_bdev(md->bdev, md->frozen_sb); 25931da177e4SLinus Torvalds md->frozen_sb = NULL; 2594aa8d7c2fSAlasdair G Kergon clear_bit(DMF_FROZEN, &md->flags); 25951da177e4SLinus Torvalds } 25961da177e4SLinus Torvalds 25971da177e4SLinus Torvalds /* 25981da177e4SLinus Torvalds * We need to be able to change a mapping table under a mounted 25991da177e4SLinus Torvalds * filesystem. For example we might want to move some data in 26001da177e4SLinus Torvalds * the background. Before the table can be swapped with 26011da177e4SLinus Torvalds * dm_bind_table, dm_suspend must be called to flush any in 26021da177e4SLinus Torvalds * flight bios and ensure that any further io gets deferred. 26031da177e4SLinus Torvalds */ 2604cec47e3dSKiyoshi Ueda /* 2605cec47e3dSKiyoshi Ueda * Suspend mechanism in request-based dm. 2606cec47e3dSKiyoshi Ueda * 26079f518b27SKiyoshi Ueda * 1. Flush all I/Os by lock_fs() if needed. 26089f518b27SKiyoshi Ueda * 2. Stop dispatching any I/O by stopping the request_queue. 26099f518b27SKiyoshi Ueda * 3. Wait for all in-flight I/Os to be completed or requeued. 2610cec47e3dSKiyoshi Ueda * 26119f518b27SKiyoshi Ueda * To abort suspend, start the request_queue. 2612cec47e3dSKiyoshi Ueda */ 2613a3d77d35SKiyoshi Ueda int dm_suspend(struct mapped_device *md, unsigned suspend_flags) 26141da177e4SLinus Torvalds { 26152ca3310eSAlasdair G Kergon struct dm_table *map = NULL; 261646125c1cSMilan Broz int r = 0; 2617a3d77d35SKiyoshi Ueda int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0; 26182e93ccc1SKiyoshi Ueda int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0; 26191da177e4SLinus Torvalds 2620e61290a4SDaniel Walker mutex_lock(&md->suspend_lock); 26212ca3310eSAlasdair G Kergon 26224f186f8bSKiyoshi Ueda if (dm_suspended_md(md)) { 262373d410c0SMilan Broz r = -EINVAL; 2624d287483dSAlasdair G Kergon goto out_unlock; 262573d410c0SMilan Broz } 26261da177e4SLinus Torvalds 262783d5e5b0SMikulas Patocka map = md->map; 2628cf222b37SAlasdair G Kergon 26292e93ccc1SKiyoshi Ueda /* 26302e93ccc1SKiyoshi Ueda * DMF_NOFLUSH_SUSPENDING must be set before presuspend. 26312e93ccc1SKiyoshi Ueda * This flag is cleared before dm_suspend returns. 26322e93ccc1SKiyoshi Ueda */ 26332e93ccc1SKiyoshi Ueda if (noflush) 26342e93ccc1SKiyoshi Ueda set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 26352e93ccc1SKiyoshi Ueda 2636436d4108SAlasdair G Kergon /* This does not get reverted if there's an error later. 
*/ 26371da177e4SLinus Torvalds dm_table_presuspend_targets(map); 26381da177e4SLinus Torvalds 26392e93ccc1SKiyoshi Ueda /* 26409f518b27SKiyoshi Ueda * Flush I/O to the device. 26419f518b27SKiyoshi Ueda * Any I/O submitted after lock_fs() may not be flushed. 26429f518b27SKiyoshi Ueda * noflush takes precedence over do_lockfs. 26439f518b27SKiyoshi Ueda * (lock_fs() flushes I/Os and waits for them to complete.) 26442e93ccc1SKiyoshi Ueda */ 264532a926daSMikulas Patocka if (!noflush && do_lockfs) { 26462ca3310eSAlasdair G Kergon r = lock_fs(md); 26472ca3310eSAlasdair G Kergon if (r) 264883d5e5b0SMikulas Patocka goto out_unlock; 2649aa8d7c2fSAlasdair G Kergon } 26501da177e4SLinus Torvalds 26511da177e4SLinus Torvalds /* 26523b00b203SMikulas Patocka * Here we must make sure that no processes are submitting requests 26533b00b203SMikulas Patocka * to target drivers i.e. no one may be executing 26543b00b203SMikulas Patocka * __split_and_process_bio. This is called from dm_request and 26553b00b203SMikulas Patocka * dm_wq_work. 26563b00b203SMikulas Patocka * 26573b00b203SMikulas Patocka * To get all processes out of __split_and_process_bio in dm_request, 26583b00b203SMikulas Patocka * we take the write lock. To prevent any process from reentering 26596a8736d1STejun Heo * __split_and_process_bio from dm_request and quiesce the thread 26606a8736d1STejun Heo * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call 26616a8736d1STejun Heo * flush_workqueue(md->wq). 26621da177e4SLinus Torvalds */ 26631eb787ecSAlasdair G Kergon set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 266483d5e5b0SMikulas Patocka synchronize_srcu(&md->io_barrier); 26651da177e4SLinus Torvalds 2666d0bcb878SKiyoshi Ueda /* 266729e4013dSTejun Heo * Stop md->queue before flushing md->wq in case request-based 266829e4013dSTejun Heo * dm defers requests to md->wq from md->queue. 2669d0bcb878SKiyoshi Ueda */ 2670cec47e3dSKiyoshi Ueda if (dm_request_based(md)) 26719f518b27SKiyoshi Ueda stop_queue(md->queue); 2672cec47e3dSKiyoshi Ueda 2673d0bcb878SKiyoshi Ueda flush_workqueue(md->wq); 2674d0bcb878SKiyoshi Ueda 26751da177e4SLinus Torvalds /* 26763b00b203SMikulas Patocka * At this point no more requests are entering target request routines. 26773b00b203SMikulas Patocka * We call dm_wait_for_completion to wait for all existing requests 26783b00b203SMikulas Patocka * to finish. 26791da177e4SLinus Torvalds */ 2680401600dfSMikulas Patocka r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE); 26811da177e4SLinus Torvalds 26826d6f10dfSMilan Broz if (noflush) 2683022c2611SMikulas Patocka clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 268483d5e5b0SMikulas Patocka synchronize_srcu(&md->io_barrier); 26852e93ccc1SKiyoshi Ueda 26861da177e4SLinus Torvalds /* were we interrupted? */ 268746125c1cSMilan Broz if (r < 0) { 26889a1fb464SMikulas Patocka dm_queue_flush(md); 268973d410c0SMilan Broz 2690cec47e3dSKiyoshi Ueda if (dm_request_based(md)) 26919f518b27SKiyoshi Ueda start_queue(md->queue); 2692cec47e3dSKiyoshi Ueda 26932ca3310eSAlasdair G Kergon unlock_fs(md); 269483d5e5b0SMikulas Patocka goto out_unlock; /* pushback list is already flushed, so skip flush */ 26952ca3310eSAlasdair G Kergon } 26962ca3310eSAlasdair G Kergon 26973b00b203SMikulas Patocka /* 26983b00b203SMikulas Patocka * If dm_wait_for_completion returned 0, the device is completely 26993b00b203SMikulas Patocka * quiescent now. There is no request-processing activity. All new 27003b00b203SMikulas Patocka * requests are being added to md->deferred list. 
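 *
 * Once dm_suspend() returns 0, a caller may, for example, swap in a new
 * table and resume.  A hedged sketch of that cycle, as a hypothetical
 * caller would drive it (further error handling elided):
 *
 *	r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
 *	if (!r) {
 *		old = dm_swap_table(md, new_table);
 *		if (!IS_ERR(old) && old)
 *			dm_table_destroy(old);
 *		dm_resume(md);
 *	}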
27013b00b203SMikulas Patocka */ 27023b00b203SMikulas Patocka 27031da177e4SLinus Torvalds set_bit(DMF_SUSPENDED, &md->flags); 27041da177e4SLinus Torvalds 27054d4471cbSKiyoshi Ueda dm_table_postsuspend_targets(map); 27064d4471cbSKiyoshi Ueda 2707d287483dSAlasdair G Kergon out_unlock: 2708e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock); 2709cf222b37SAlasdair G Kergon return r; 27101da177e4SLinus Torvalds } 27111da177e4SLinus Torvalds 27121da177e4SLinus Torvalds int dm_resume(struct mapped_device *md) 27131da177e4SLinus Torvalds { 2714cf222b37SAlasdair G Kergon int r = -EINVAL; 2715cf222b37SAlasdair G Kergon struct dm_table *map = NULL; 27161da177e4SLinus Torvalds 2717e61290a4SDaniel Walker mutex_lock(&md->suspend_lock); 27184f186f8bSKiyoshi Ueda if (!dm_suspended_md(md)) 2719cf222b37SAlasdair G Kergon goto out; 2720cf222b37SAlasdair G Kergon 272183d5e5b0SMikulas Patocka map = md->map; 27222ca3310eSAlasdair G Kergon if (!map || !dm_table_get_size(map)) 2723cf222b37SAlasdair G Kergon goto out; 27241da177e4SLinus Torvalds 27258757b776SMilan Broz r = dm_table_resume_targets(map); 27268757b776SMilan Broz if (r) 27278757b776SMilan Broz goto out; 27282ca3310eSAlasdair G Kergon 27299a1fb464SMikulas Patocka dm_queue_flush(md); 27302ca3310eSAlasdair G Kergon 2731cec47e3dSKiyoshi Ueda /* 2732cec47e3dSKiyoshi Ueda * Flushing deferred I/Os must be done after targets are resumed 2733cec47e3dSKiyoshi Ueda * so that mapping of targets can work correctly. 2734cec47e3dSKiyoshi Ueda * Request-based dm is queueing the deferred I/Os in its request_queue. 2735cec47e3dSKiyoshi Ueda */ 2736cec47e3dSKiyoshi Ueda if (dm_request_based(md)) 2737cec47e3dSKiyoshi Ueda start_queue(md->queue); 2738cec47e3dSKiyoshi Ueda 27392ca3310eSAlasdair G Kergon unlock_fs(md); 27402ca3310eSAlasdair G Kergon 27412ca3310eSAlasdair G Kergon clear_bit(DMF_SUSPENDED, &md->flags); 27422ca3310eSAlasdair G Kergon 2743cf222b37SAlasdair G Kergon r = 0; 2744cf222b37SAlasdair G Kergon out: 2745e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock); 27462ca3310eSAlasdair G Kergon 2747cf222b37SAlasdair G Kergon return r; 27481da177e4SLinus Torvalds } 27491da177e4SLinus Torvalds 2750fd2ed4d2SMikulas Patocka /* 2751fd2ed4d2SMikulas Patocka * Internal suspend/resume works like userspace-driven suspend. It waits 2752fd2ed4d2SMikulas Patocka * until all bios finish and prevents issuing new bios to the target drivers. 2753fd2ed4d2SMikulas Patocka * It may be used only from the kernel. 2754fd2ed4d2SMikulas Patocka * 2755fd2ed4d2SMikulas Patocka * Internal suspend holds md->suspend_lock, which prevents interaction with 2756fd2ed4d2SMikulas Patocka * userspace-driven suspend. 
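 *
 * Note the asymmetry: the lock is taken in dm_internal_suspend() and
 * only released in dm_internal_resume(), so the two must always be
 * paired, e.g. (sketch, hypothetical caller):
 *
 *	dm_internal_suspend(md);
 *	... touch state that must not race with I/O ...
 *	dm_internal_resume(md);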
2757fd2ed4d2SMikulas Patocka */ 2758fd2ed4d2SMikulas Patocka 2759fd2ed4d2SMikulas Patocka void dm_internal_suspend(struct mapped_device *md) 2760fd2ed4d2SMikulas Patocka { 2761fd2ed4d2SMikulas Patocka mutex_lock(&md->suspend_lock); 2762fd2ed4d2SMikulas Patocka if (dm_suspended_md(md)) 2763fd2ed4d2SMikulas Patocka return; 2764fd2ed4d2SMikulas Patocka 2765fd2ed4d2SMikulas Patocka set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 2766fd2ed4d2SMikulas Patocka synchronize_srcu(&md->io_barrier); 2767fd2ed4d2SMikulas Patocka flush_workqueue(md->wq); 2768fd2ed4d2SMikulas Patocka dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); 2769fd2ed4d2SMikulas Patocka } 2770fd2ed4d2SMikulas Patocka 2771fd2ed4d2SMikulas Patocka void dm_internal_resume(struct mapped_device *md) 2772fd2ed4d2SMikulas Patocka { 2773fd2ed4d2SMikulas Patocka if (dm_suspended_md(md)) 2774fd2ed4d2SMikulas Patocka goto done; 2775fd2ed4d2SMikulas Patocka 2776fd2ed4d2SMikulas Patocka dm_queue_flush(md); 2777fd2ed4d2SMikulas Patocka 2778fd2ed4d2SMikulas Patocka done: 2779fd2ed4d2SMikulas Patocka mutex_unlock(&md->suspend_lock); 2780fd2ed4d2SMikulas Patocka } 2781fd2ed4d2SMikulas Patocka 27821da177e4SLinus Torvalds /*----------------------------------------------------------------- 27831da177e4SLinus Torvalds * Event notification. 27841da177e4SLinus Torvalds *---------------------------------------------------------------*/ 27853abf85b5SPeter Rajnoha int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, 278660935eb2SMilan Broz unsigned cookie) 278769267a30SAlasdair G Kergon { 278860935eb2SMilan Broz char udev_cookie[DM_COOKIE_LENGTH]; 278960935eb2SMilan Broz char *envp[] = { udev_cookie, NULL }; 279060935eb2SMilan Broz 279160935eb2SMilan Broz if (!cookie) 27923abf85b5SPeter Rajnoha return kobject_uevent(&disk_to_dev(md->disk)->kobj, action); 279360935eb2SMilan Broz else { 279460935eb2SMilan Broz snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u", 279560935eb2SMilan Broz DM_COOKIE_ENV_VAR_NAME, cookie); 27963abf85b5SPeter Rajnoha return kobject_uevent_env(&disk_to_dev(md->disk)->kobj, 27973abf85b5SPeter Rajnoha action, envp); 279860935eb2SMilan Broz } 279969267a30SAlasdair G Kergon } 280069267a30SAlasdair G Kergon 28017a8c3d3bSMike Anderson uint32_t dm_next_uevent_seq(struct mapped_device *md) 28027a8c3d3bSMike Anderson { 28037a8c3d3bSMike Anderson return atomic_add_return(1, &md->uevent_seq); 28047a8c3d3bSMike Anderson } 28057a8c3d3bSMike Anderson 28061da177e4SLinus Torvalds uint32_t dm_get_event_nr(struct mapped_device *md) 28071da177e4SLinus Torvalds { 28081da177e4SLinus Torvalds return atomic_read(&md->event_nr); 28091da177e4SLinus Torvalds } 28101da177e4SLinus Torvalds 28111da177e4SLinus Torvalds int dm_wait_event(struct mapped_device *md, int event_nr) 28121da177e4SLinus Torvalds { 28131da177e4SLinus Torvalds return wait_event_interruptible(md->eventq, 28141da177e4SLinus Torvalds (event_nr != atomic_read(&md->event_nr))); 28151da177e4SLinus Torvalds } 28161da177e4SLinus Torvalds 28177a8c3d3bSMike Anderson void dm_uevent_add(struct mapped_device *md, struct list_head *elist) 28187a8c3d3bSMike Anderson { 28197a8c3d3bSMike Anderson unsigned long flags; 28207a8c3d3bSMike Anderson 28217a8c3d3bSMike Anderson spin_lock_irqsave(&md->uevent_lock, flags); 28227a8c3d3bSMike Anderson list_add(elist, &md->uevent_list); 28237a8c3d3bSMike Anderson spin_unlock_irqrestore(&md->uevent_lock, flags); 28247a8c3d3bSMike Anderson } 28257a8c3d3bSMike Anderson 28261da177e4SLinus Torvalds /* 28271da177e4SLinus Torvalds * The gendisk is only 
valid as long as you have a reference 28281da177e4SLinus Torvalds * count on 'md'. 28291da177e4SLinus Torvalds */ 28301da177e4SLinus Torvalds struct gendisk *dm_disk(struct mapped_device *md) 28311da177e4SLinus Torvalds { 28321da177e4SLinus Torvalds return md->disk; 28331da177e4SLinus Torvalds } 28341da177e4SLinus Torvalds 2835784aae73SMilan Broz struct kobject *dm_kobject(struct mapped_device *md) 2836784aae73SMilan Broz { 28372995fa78SMikulas Patocka return &md->kobj_holder.kobj; 2838784aae73SMilan Broz } 2839784aae73SMilan Broz 2840784aae73SMilan Broz struct mapped_device *dm_get_from_kobject(struct kobject *kobj) 2841784aae73SMilan Broz { 2842784aae73SMilan Broz struct mapped_device *md; 2843784aae73SMilan Broz 28442995fa78SMikulas Patocka md = container_of(kobj, struct mapped_device, kobj_holder.kobj); 2845784aae73SMilan Broz 28464d89b7b4SMilan Broz if (test_bit(DMF_FREEING, &md->flags) || 2847432a212cSMike Anderson dm_deleting_md(md)) 28484d89b7b4SMilan Broz return NULL; 28494d89b7b4SMilan Broz 2850784aae73SMilan Broz dm_get(md); 2851784aae73SMilan Broz return md; 2852784aae73SMilan Broz } 2853784aae73SMilan Broz 28544f186f8bSKiyoshi Ueda int dm_suspended_md(struct mapped_device *md) 28551da177e4SLinus Torvalds { 28561da177e4SLinus Torvalds return test_bit(DMF_SUSPENDED, &md->flags); 28571da177e4SLinus Torvalds } 28581da177e4SLinus Torvalds 28592c140a24SMikulas Patocka int dm_test_deferred_remove_flag(struct mapped_device *md) 28602c140a24SMikulas Patocka { 28612c140a24SMikulas Patocka return test_bit(DMF_DEFERRED_REMOVE, &md->flags); 28622c140a24SMikulas Patocka } 28632c140a24SMikulas Patocka 286464dbce58SKiyoshi Ueda int dm_suspended(struct dm_target *ti) 286564dbce58SKiyoshi Ueda { 2866ecdb2e25SKiyoshi Ueda return dm_suspended_md(dm_table_get_md(ti->table)); 286764dbce58SKiyoshi Ueda } 286864dbce58SKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_suspended); 286964dbce58SKiyoshi Ueda 28702e93ccc1SKiyoshi Ueda int dm_noflush_suspending(struct dm_target *ti) 28712e93ccc1SKiyoshi Ueda { 2872ecdb2e25SKiyoshi Ueda return __noflush_suspending(dm_table_get_md(ti->table)); 28732e93ccc1SKiyoshi Ueda } 28742e93ccc1SKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_noflush_suspending); 28752e93ccc1SKiyoshi Ueda 2876c0820cf5SMikulas Patocka struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size) 2877e6ee8c0bSKiyoshi Ueda { 28785f015204SJun'ichi Nomura struct dm_md_mempools *pools = kzalloc(sizeof(*pools), GFP_KERNEL); 28795f015204SJun'ichi Nomura struct kmem_cache *cachep; 28805f015204SJun'ichi Nomura unsigned int pool_size; 28815f015204SJun'ichi Nomura unsigned int front_pad; 2882e6ee8c0bSKiyoshi Ueda 2883e6ee8c0bSKiyoshi Ueda if (!pools) 2884e6ee8c0bSKiyoshi Ueda return NULL; 2885e6ee8c0bSKiyoshi Ueda 288623e5083bSJun'ichi Nomura if (type == DM_TYPE_BIO_BASED) { 28875f015204SJun'ichi Nomura cachep = _io_cache; 2888e8603136SMike Snitzer pool_size = dm_get_reserved_bio_based_ios(); 28895f015204SJun'ichi Nomura front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone); 28905f015204SJun'ichi Nomura } else if (type == DM_TYPE_REQUEST_BASED) { 28915f015204SJun'ichi Nomura cachep = _rq_tio_cache; 2892f4790826SMike Snitzer pool_size = dm_get_reserved_rq_based_ios(); 28935f015204SJun'ichi Nomura front_pad = offsetof(struct dm_rq_clone_bio_info, clone); 28945f015204SJun'ichi Nomura /* per_bio_data_size is not used. See __bind_mempools(). 
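 *
 * (For the bio-based case above, front_pad lays each allocation out as
 * [per-bio data | struct dm_target_io | embedded bio], which is what
 * lets dm_per_bio_data() step back from a bio to the target's data.)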
*/ 28955f015204SJun'ichi Nomura WARN_ON(per_bio_data_size != 0); 28965f015204SJun'ichi Nomura } else 28975f015204SJun'ichi Nomura goto out; 28985f015204SJun'ichi Nomura 28996cfa5857SMike Snitzer pools->io_pool = mempool_create_slab_pool(pool_size, cachep); 2900e6ee8c0bSKiyoshi Ueda if (!pools->io_pool) 29015f015204SJun'ichi Nomura goto out; 2902e6ee8c0bSKiyoshi Ueda 29035f015204SJun'ichi Nomura pools->bs = bioset_create(pool_size, front_pad); 2904e6ee8c0bSKiyoshi Ueda if (!pools->bs) 29055f015204SJun'ichi Nomura goto out; 2906e6ee8c0bSKiyoshi Ueda 2907a91a2785SMartin K. Petersen if (integrity && bioset_integrity_create(pools->bs, pool_size)) 29085f015204SJun'ichi Nomura goto out; 2909a91a2785SMartin K. Petersen 2910e6ee8c0bSKiyoshi Ueda return pools; 2911e6ee8c0bSKiyoshi Ueda 29125f015204SJun'ichi Nomura out: 29135f015204SJun'ichi Nomura dm_free_md_mempools(pools); 2914e6ee8c0bSKiyoshi Ueda 2915e6ee8c0bSKiyoshi Ueda return NULL; 2916e6ee8c0bSKiyoshi Ueda } 2917e6ee8c0bSKiyoshi Ueda 2918e6ee8c0bSKiyoshi Ueda void dm_free_md_mempools(struct dm_md_mempools *pools) 2919e6ee8c0bSKiyoshi Ueda { 2920e6ee8c0bSKiyoshi Ueda if (!pools) 2921e6ee8c0bSKiyoshi Ueda return; 2922e6ee8c0bSKiyoshi Ueda 2923e6ee8c0bSKiyoshi Ueda if (pools->io_pool) 2924e6ee8c0bSKiyoshi Ueda mempool_destroy(pools->io_pool); 2925e6ee8c0bSKiyoshi Ueda 2926e6ee8c0bSKiyoshi Ueda if (pools->bs) 2927e6ee8c0bSKiyoshi Ueda bioset_free(pools->bs); 2928e6ee8c0bSKiyoshi Ueda 2929e6ee8c0bSKiyoshi Ueda kfree(pools); 2930e6ee8c0bSKiyoshi Ueda } 2931e6ee8c0bSKiyoshi Ueda 293283d5cde4SAlexey Dobriyan static const struct block_device_operations dm_blk_dops = { 29331da177e4SLinus Torvalds .open = dm_blk_open, 29341da177e4SLinus Torvalds .release = dm_blk_close, 2935aa129a22SMilan Broz .ioctl = dm_blk_ioctl, 29363ac51e74SDarrick J. Wong .getgeo = dm_blk_getgeo, 29371da177e4SLinus Torvalds .owner = THIS_MODULE 29381da177e4SLinus Torvalds }; 29391da177e4SLinus Torvalds 29401da177e4SLinus Torvalds /* 29411da177e4SLinus Torvalds * module hooks 29421da177e4SLinus Torvalds */ 29431da177e4SLinus Torvalds module_init(dm_init); 29441da177e4SLinus Torvalds module_exit(dm_exit); 29451da177e4SLinus Torvalds 29461da177e4SLinus Torvalds module_param(major, uint, 0); 29471da177e4SLinus Torvalds MODULE_PARM_DESC(major, "The major number of the device mapper"); 2948f4790826SMike Snitzer 2949e8603136SMike Snitzer module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR); 2950e8603136SMike Snitzer MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools"); 2951e8603136SMike Snitzer 2952f4790826SMike Snitzer module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR); 2953f4790826SMike Snitzer MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools"); 2954f4790826SMike Snitzer 29551da177e4SLinus Torvalds MODULE_DESCRIPTION(DM_NAME " driver"); 29561da177e4SLinus Torvalds MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); 29571da177e4SLinus Torvalds MODULE_LICENSE("GPL");
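/*
 * The reserved IO counts above are runtime-tunable: with the
 * S_IRUGO | S_IWUSR permissions they appear under
 * /sys/module/dm_mod/parameters/.  Illustrative values only:
 *
 *	echo 64 > /sys/module/dm_mod/parameters/reserved_rq_based_ios
 *
 * or at module load time: modprobe dm_mod reserved_bio_based_ios=32
 */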