/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/delay.h>

#include <trace/events/block.h>

#define DM_MSG_PREFIX "core"

#ifdef CONFIG_PRINTK
/*
 * ratelimit state to be used in DMXXX_LIMIT().
 */
DEFINE_RATELIMIT_STATE(dm_ratelimit_state,
		       DEFAULT_RATELIMIT_INTERVAL,
		       DEFAULT_RATELIMIT_BURST);
EXPORT_SYMBOL(dm_ratelimit_state);
#endif

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_IDR(_minor_idr);

static DEFINE_SPINLOCK(_minor_lock);

static void do_deferred_remove(struct work_struct *w);

static DECLARE_WORK(deferred_remove_work, do_deferred_remove);

/*
 * For bio-based dm.
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	atomic_t io_count;
	struct bio *bio;
	unsigned long start_time;
	spinlock_t endio_lock;
	struct dm_stats_aux stats_aux;
};
/*
 * For request-based dm.
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
	struct mapped_device *md;
	struct dm_target *ti;
	struct request *orig, clone;
	int error;
	union map_info info;
};

/*
 * For request-based dm - the bio clones we allocate are embedded in these
 * structs.
 *
 * We allocate these with bio_alloc_bioset, using the front_pad parameter when
 * the bioset is created - this means the bio has to come at the end of the
 * struct.
 */
struct dm_rq_clone_bio_info {
	struct bio *orig;
	struct dm_rq_target_io *tio;
	struct bio clone;
};
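/*
 * Illustrative sketch (not part of this driver) of the front_pad trick
 * described above: if a bioset is created with a front_pad of
 * offsetof(struct dm_rq_clone_bio_info, clone), every bio handed out by
 * bio_alloc_bioset() sits at the tail of a dm_rq_clone_bio_info, so the
 * wrapper can be recovered with container_of():
 *
 *	bs = bioset_create(pool_size,
 *			   offsetof(struct dm_rq_clone_bio_info, clone));
 *	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, bs);
 *	info = container_of(clone, struct dm_rq_clone_bio_info, clone);
 *
 * This layout is also why 'clone' must be the last member of the struct.
 */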
union map_info *dm_get_rq_mapinfo(struct request *rq)
{
	if (rq && rq->end_io_data)
		return &((struct dm_rq_target_io *)rq->end_io_data)->info;
	return NULL;
}
EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);

#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_MERGE_IS_OPTIONAL 6
#define DMF_DEFERRED_REMOVE 7

/*
 * A dummy definition to make RCU happy.
 * struct dm_table should never be dereferenced in this file.
 */
struct dm_table {
	int undefined__;
};

/*
 * Work processed by per-device workqueue.
 */
struct mapped_device {
	struct srcu_struct io_barrier;
	struct mutex suspend_lock;
	atomic_t holders;
	atomic_t open_count;

	/*
	 * The current mapping.
	 * Use dm_get_live_table{_fast} or take suspend_lock for
	 * dereference.
	 */
	struct dm_table *map;

	unsigned long flags;

	struct request_queue *queue;
	unsigned type;
	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;

	struct target_type *immutable_target_type;

	struct gendisk *disk;
	char name[16];

	void *interface_ptr;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	atomic_t pending[2];
	wait_queue_head_t wait;
	struct work_struct work;
	struct bio_list deferred;
	spinlock_t deferred_lock;

	/*
	 * Processing queue (flush)
	 */
	struct workqueue_struct *wq;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;

	struct bio_set *bs;

	/*
	 * Event handling.
	 */
	atomic_t event_nr;
	wait_queue_head_t eventq;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/*
	 * freeze/thaw support requires holding onto a super block
	 */
	struct super_block *frozen_sb;
	struct block_device *bdev;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/* kobject and completion */
	struct dm_kobject_holder kobj_holder;

	/* zero-length flush that will be cloned and submitted to targets */
	struct bio flush_bio;

	struct dm_stats stats;
};

/*
 * For mempool pre-allocation at table load time.
 */
struct dm_md_mempools {
	mempool_t *io_pool;
	struct bio_set *bs;
};

#define RESERVED_BIO_BASED_IOS		16
#define RESERVED_REQUEST_BASED_IOS	256
#define RESERVED_MAX_IOS		1024
static struct kmem_cache *_io_cache;
static struct kmem_cache *_rq_tio_cache;

/*
 * Bio-based DM's mempools' reserved IOs set by the user.
 */
static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
/*
 * Request-based DM's mempools' reserved IOs set by the user.
 */
static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;

static unsigned __dm_get_reserved_ios(unsigned *reserved_ios,
				      unsigned def, unsigned max)
{
	unsigned ios = ACCESS_ONCE(*reserved_ios);
	unsigned modified_ios = 0;

	if (!ios)
		modified_ios = def;
	else if (ios > max)
		modified_ios = max;

	if (modified_ios) {
		(void)cmpxchg(reserved_ios, ios, modified_ios);
		ios = modified_ios;
	}

	return ios;
}

unsigned dm_get_reserved_bio_based_ios(void)
{
	return __dm_get_reserved_ios(&reserved_bio_based_ios,
				     RESERVED_BIO_BASED_IOS, RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);

unsigned dm_get_reserved_rq_based_ios(void)
{
	return __dm_get_reserved_ios(&reserved_rq_based_ios,
				     RESERVED_REQUEST_BASED_IOS, RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);
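/*
 * Worked examples of the clamping above (values are hypothetical): with
 * reserved_rq_based_ios set to 0 via the module parameter, the getter
 * returns the default of 256; with it set to 4096, the getter returns the
 * RESERVED_MAX_IOS limit of 1024 and writes the clamped value back:
 *
 *	reserved_rq_based_ios = 0;	dm_get_reserved_rq_based_ios();	// 256
 *	reserved_rq_based_ios = 4096;	dm_get_reserved_rq_based_ios();	// 1024
 *
 * cmpxchg() rather than a plain store is used for the write-back so that a
 * concurrent update of the parameter is not silently overwritten.
 */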
static int __init local_init(void)
{
	int r = -ENOMEM;

	/* allocate a slab for the dm_ios */
	_io_cache = KMEM_CACHE(dm_io, 0);
	if (!_io_cache)
		return r;

	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
	if (!_rq_tio_cache)
		goto out_free_io_cache;

	r = dm_uevent_init();
	if (r)
		goto out_free_rq_tio_cache;

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_uevent_exit;

	if (!_major)
		_major = r;

	return 0;

out_uevent_exit:
	dm_uevent_exit();
out_free_rq_tio_cache:
	kmem_cache_destroy(_rq_tio_cache);
out_free_io_cache:
	kmem_cache_destroy(_io_cache);

	return r;
}

static void local_exit(void)
{
	flush_scheduled_work();

	kmem_cache_destroy(_rq_tio_cache);
	kmem_cache_destroy(_io_cache);
	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_io_init,
	dm_kcopyd_init,
	dm_interface_init,
	dm_statistics_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_io_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
	dm_statistics_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();
	/*
	 * Should be empty by this point.
	 */
	idr_destroy(&_minor_idr);
}

/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{
	return test_bit(DMF_DELETING, &md->flags);
}

static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);

out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}

static void dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md = disk->private_data;

	spin_lock(&_minor_lock);

	if (atomic_dec_and_test(&md->open_count) &&
	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
		schedule_work(&deferred_remove_work);

	dm_put(md);

	spin_unlock(&_minor_lock);
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}
/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md)) {
		r = -EBUSY;
		if (mark_deferred)
			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
		r = -EEXIST;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

int dm_cancel_deferred_remove(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (test_bit(DMF_DELETING, &md->flags))
		r = -EBUSY;
	else
		clear_bit(DMF_DEFERRED_REMOVE, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}
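/*
 * Sketch of the intended caller-side remove flow (e.g. from the ioctl
 * code; illustrative only):
 *
 *	r = dm_lock_for_deletion(md, true, false);
 *	if (r == -EBUSY)
 *		return r;	// device still open; DMF_DEFERRED_REMOVE is
 *				// now set and dm_blk_close() will schedule
 *				// deferred_remove_work on the last close
 *
 * dm_cancel_deferred_remove() undoes the deferral, provided the actual
 * delete has not begun (DMF_DELETING not yet set).
 */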
static void do_deferred_remove(struct work_struct *w)
{
	dm_deferred_remove();
}

sector_t dm_get_size(struct mapped_device *md)
{
	return get_capacity(md->disk);
}

struct request_queue *dm_get_md_queue(struct mapped_device *md)
{
	return md->queue;
}

struct dm_stats *dm_get_stats(struct mapped_device *md)
{
	return &md->stats;
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	int srcu_idx;
	struct dm_table *map;
	struct dm_target *tgt;
	int r = -ENOTTY;

retry:
	map = dm_get_live_table(md, &srcu_idx);

	if (!map || !dm_table_get_size(map))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		goto out;

	tgt = dm_table_get_target(map, 0);

	if (dm_suspended_md(md)) {
		r = -EAGAIN;
		goto out;
	}

	if (tgt->type->ioctl)
		r = tgt->type->ioctl(tgt, cmd, arg);

out:
	dm_put_live_table(md, srcu_idx);

	if (r == -ENOTCONN) {
		msleep(10);
		goto retry;
	}

	return r;
}

static struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
	bio_put(&tio->clone);
}

static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
					    gfp_t gfp_mask)
{
	return mempool_alloc(md->io_pool, gfp_mask);
}

static void free_rq_tio(struct dm_rq_target_io *tio)
{
	mempool_free(tio, tio->md->io_pool);
}

static int md_in_flight(struct mapped_device *md)
{
	return atomic_read(&md->pending[READ]) +
	       atomic_read(&md->pending[WRITE]);
}
static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	int cpu;
	int rw = bio_data_dir(bio);

	io->start_time = jiffies;

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_unlock();
	atomic_set(&dm_disk(md)->part0.in_flight[rw],
		   atomic_inc_return(&md->pending[rw]));

	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
				    bio_sectors(bio), false, 0, &io->stats_aux);
}

static void end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	unsigned long duration = jiffies - io->start_time;
	int pending, cpu;
	int rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
	part_stat_unlock();

	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
				    bio_sectors(bio), true, duration, &io->stats_aux);

	/*
	 * After this is decremented the bio must not be touched if it is
	 * a flush.
	 */
	pending = atomic_dec_return(&md->pending[rw]);
	atomic_set(&dm_disk(md)->part0.in_flight[rw], pending);
	pending += atomic_read(&md->pending[rw^0x1]);

	/* nudge anyone waiting on suspend queue */
	if (!pending)
		wake_up(&md->wait);
}

/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&md->deferred_lock, flags);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irqrestore(&md->deferred_lock, flags);
	queue_work(md->wq, &md->work);
}
/*
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_put_live_table() when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
{
	*srcu_idx = srcu_read_lock(&md->io_barrier);

	return srcu_dereference(md->map, &md->io_barrier);
}

void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
{
	srcu_read_unlock(&md->io_barrier, srcu_idx);
}

void dm_sync_table(struct mapped_device *md)
{
	synchronize_srcu(&md->io_barrier);
	synchronize_rcu_expedited();
}

/*
 * A fast alternative to dm_get_live_table/dm_put_live_table.
 * The caller must not block between these two functions.
 */
static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
{
	rcu_read_lock();
	return rcu_dereference(md->map);
}

static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
{
	rcu_read_unlock();
}
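/*
 * Canonical usage of the accessors above (illustrative):
 *
 *	int srcu_idx;
 *	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 *
 *	if (map) {
 *		... use map; SRCU readers may block ...
 *	}
 *	dm_put_live_table(md, srcu_idx);
 *
 * The _fast variants replace the SRCU bookkeeping with plain RCU and are
 * only safe when the caller cannot block between get and put.
 */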
/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

/*-----------------------------------------------------------------
 * CRUD START:
 *   A more elegant soln is in the works that uses the queue
 *   merge fn, unfortunately there are a couple of changes to
 *   the block layer that I want to make for this.  So in the
 *   interests of getting something for people to use I give
 *   you this clearly demarcated crap.
 *---------------------------------------------------------------*/

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, int error)
{
	unsigned long flags;
	int io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;

	/* Push-back supersedes any I/O errors */
	if (unlikely(error)) {
		spin_lock_irqsave(&io->endio_lock, flags);
		if (!(io->error > 0 && __noflush_suspending(md)))
			io->error = error;
		spin_unlock_irqrestore(&io->endio_lock, flags);
	}

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->error == DM_ENDIO_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md))
				bio_list_add_head(&md->deferred, io->bio);
			else
				/* noflush suspend was interrupted. */
				io->error = -EIO;
			spin_unlock_irqrestore(&md->deferred_lock, flags);
		}

		io_error = io->error;
		bio = io->bio;
		end_io_acct(io);
		free_io(md, io);

		if (io_error == DM_ENDIO_REQUEUE)
			return;

		if ((bio->bi_rw & REQ_FLUSH) && bio->bi_iter.bi_size) {
			/*
			 * Preflush done for flush with data, reissue
			 * without REQ_FLUSH.
			 */
			bio->bi_rw &= ~REQ_FLUSH;
			queue_io(md, bio);
		} else {
			/* done with normal IO or empty flush */
			trace_block_bio_complete(md->queue, bio, io_error);
			bio_endio(bio, io_error);
		}
	}
}
static void clone_endio(struct bio *bio, int error)
{
	int r = 0;
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		error = -EIO;

	if (endio) {
		r = endio(tio->ti, bio, error);
		if (r < 0 || r == DM_ENDIO_REQUEUE)
			/*
			 * error and requeue request are handled
			 * in dec_pending().
			 */
			error = r;
		else if (r == DM_ENDIO_INCOMPLETE)
			/* The target will handle the io */
			return;
		else if (r) {
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	free_tio(md, tio);
	dec_pending(io, error);
}
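/*
 * Sketch of a bio-based target's ->end_io hook and the return codes that
 * clone_endio() dispatches on (hypothetical target, illustrative only):
 *
 *	static int my_end_io(struct dm_target *ti, struct bio *bio, int error)
 *	{
 *		if (error && my_path_is_recoverable(ti))  // hypothetical test
 *			return DM_ENDIO_REQUEUE;  // push back for retry
 *		return error;			  // complete with 0/-Exxx
 *	}
 *
 * Returning DM_ENDIO_INCOMPLETE instead tells dm that the target has taken
 * over the bio and will complete it itself later.
 */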
/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone, int error)
{
	struct dm_rq_clone_bio_info *info =
		container_of(clone, struct dm_rq_clone_bio_info, clone);
	struct dm_rq_target_io *tio = info->tio;
	struct bio *bio = info->orig;
	unsigned int nr_bytes = info->orig->bi_iter.bi_size;

	bio_put(clone);

	if (tio->error)
		/*
		 * An error has already been detected on the request.
		 * Once an error occurs, just let clone->end_io() handle
		 * the remainder.
		 */
		return;
	else if (error) {
		/*
		 * Don't report the error to the upper layer yet.
		 * The error handling decision is made by the target driver,
		 * when the request is completed.
		 */
		tio->error = error;
		return;
	}

	/*
	 * I/O for the bio successfully completed.
	 * Report the data completion to the upper layer.
	 */

	/*
	 * bios are processed from the head of the list.
	 * So the completing bio should always be rq->bio.
	 * If it's not, something wrong is happening.
	 */
	if (tio->orig->bio != bio)
		DMERR("bio completion is going in the middle of the request");

	/*
	 * Update the original request.
	 * Do not use blk_end_request() here, because it may complete
	 * the original request before the clone, and break the ordering.
	 */
	blk_update_request(tio->orig, 0, nr_bytes);
}

/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Or do dm_get() before calling this function and dm_put() later.
 */
static void rq_completed(struct mapped_device *md, int rw, int run_queue)
{
	atomic_dec(&md->pending[rw]);

	/* nudge anyone waiting on suspend queue */
	if (!md_in_flight(md))
		wake_up(&md->wait);

	/*
	 * Run this off this callpath, as drivers could invoke end_io while
	 * inside their request_fn (and holding the queue lock). Calling
	 * back into ->request_fn() could deadlock attempting to grab the
	 * queue lock again.
	 */
	if (run_queue)
		blk_run_queue_async(md->queue);

	/*
	 * dm_put() must be at the end of this function. See the comment above.
	 */
	dm_put(md);
}

static void free_rq_clone(struct request *clone)
{
	struct dm_rq_target_io *tio = clone->end_io_data;

	blk_rq_unprep_clone(clone);
	free_rq_tio(tio);
}
/*
 * Complete the clone and the original request.
 * Must be called without queue lock.
 */
static void dm_end_request(struct request *clone, int error)
{
	int rw = rq_data_dir(clone);
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;

	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
		rq->errors = clone->errors;
		rq->resid_len = clone->resid_len;

		if (rq->sense)
			/*
			 * We are using the sense buffer of the original
			 * request.
			 * So setting the length of the sense data is enough.
			 */
			rq->sense_len = clone->sense_len;
	}

	free_rq_clone(clone);
	blk_end_request_all(rq, error);
	rq_completed(md, rw, true);
}

static void dm_unprep_request(struct request *rq)
{
	struct request *clone = rq->special;

	rq->special = NULL;
	rq->cmd_flags &= ~REQ_DONTPREP;

	free_rq_clone(clone);
}

/*
 * Requeue the original request of a clone.
 */
void dm_requeue_unmapped_request(struct request *clone)
{
	int rw = rq_data_dir(clone);
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	struct request_queue *q = rq->q;
	unsigned long flags;

	dm_unprep_request(rq);

	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, rq);
	spin_unlock_irqrestore(q->queue_lock, flags);

	rq_completed(md, rw, 0);
}
EXPORT_SYMBOL_GPL(dm_requeue_unmapped_request);

static void __stop_queue(struct request_queue *q)
{
	blk_stop_queue(q);
}

static void stop_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__stop_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static void __start_queue(struct request_queue *q)
{
	if (blk_queue_stopped(q))
		blk_start_queue(q);
}
static void start_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static void dm_done(struct request *clone, int error, bool mapped)
{
	int r = error;
	struct dm_rq_target_io *tio = clone->end_io_data;
	dm_request_endio_fn rq_end_io = NULL;

	if (tio->ti) {
		rq_end_io = tio->ti->type->rq_end_io;

		if (mapped && rq_end_io)
			r = rq_end_io(tio->ti, clone, error, &tio->info);
	}

	if (r <= 0)
		/* The target wants to complete the I/O */
		dm_end_request(clone, r);
	else if (r == DM_ENDIO_INCOMPLETE)
		/* The target will handle the I/O */
		return;
	else if (r == DM_ENDIO_REQUEUE)
		/* The target wants to requeue the I/O */
		dm_requeue_unmapped_request(clone);
	else {
		DMWARN("unimplemented target endio return value: %d", r);
		BUG();
	}
}
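/*
 * The request-based counterpart of the bio ->end_io protocol: a target's
 * ->rq_end_io may return DM_ENDIO_REQUEUE to have the original request
 * requeued, DM_ENDIO_INCOMPLETE to retain ownership, or <= 0 to complete
 * it.  Hypothetical sketch (illustrative only):
 *
 *	static int my_rq_end_io(struct dm_target *ti, struct request *clone,
 *				int error, union map_info *map_context)
 *	{
 *		if (error && my_error_is_transient(error))  // hypothetical
 *			return DM_ENDIO_REQUEUE;
 *		return error;
 *	}
 */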
/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
{
	bool mapped = true;
	struct request *clone = rq->completion_data;
	struct dm_rq_target_io *tio = clone->end_io_data;

	if (rq->cmd_flags & REQ_FAILED)
		mapped = false;

	dm_done(clone, tio->error, mapped);
}

/*
 * Complete the clone and the original request with the error status
 * through softirq context.
 */
static void dm_complete_request(struct request *clone, int error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct request *rq = tio->orig;

	tio->error = error;
	rq->completion_data = clone;
	blk_complete_request(rq);
}

/*
 * Complete the not-mapped clone and the original request with the error status
 * through softirq context.
 * The target's rq_end_io() function isn't called.
 * This may be used when the target's map_rq() function fails.
 */
void dm_kill_unmapped_request(struct request *clone, int error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct request *rq = tio->orig;

	rq->cmd_flags |= REQ_FAILED;
	dm_complete_request(clone, error);
}
EXPORT_SYMBOL_GPL(dm_kill_unmapped_request);

/*
 * Called with the queue lock held
 */
static void end_clone_request(struct request *clone, int error)
{
	/*
	 * This is just for cleaning up the bookkeeping of the queue in
	 * which the clone was dispatched.
	 * The clone is *NOT* actually freed here because it is allocated
	 * from dm's own mempool and REQ_ALLOCED isn't set in
	 * clone->cmd_flags.
	 */
	__blk_put_request(clone->q, clone);

	/*
	 * Actual request completion is done in a softirq context which doesn't
	 * hold the queue lock. Otherwise, deadlock could occur because:
	 *   - another request may be submitted by the upper level driver
	 *     of the stacking during the completion
	 *   - the submission which requires queue lock may be done
	 *     against this queue
	 */
	dm_complete_request(clone, error);
}

/*
 * Return the maximum size of I/O possible at the supplied sector up to the
 * current target boundary.
 */
static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
{
	sector_t target_offset = dm_target_offset(ti, sector);

	return ti->len - target_offset;
}
static sector_t max_io_len(sector_t sector, struct dm_target *ti)
{
	sector_t len = max_io_len_target_boundary(sector, ti);
	sector_t offset, max_len;

	/*
	 * Does the target need to split even further?
	 */
	if (ti->max_io_len) {
		offset = dm_target_offset(ti, sector);
		if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
			max_len = sector_div(offset, ti->max_io_len);
		else
			max_len = offset & (ti->max_io_len - 1);
		max_len = ti->max_io_len - max_len;

		if (len > max_len)
			len = max_len;
	}

	return len;
}
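/*
 * Worked example of the split arithmetic above: for a target with
 * max_io_len = 8 (a power of two) and an I/O starting at target offset 13,
 * offset & (8 - 1) = 5, so max_len = 8 - 5 = 3 sectors remain before the
 * next 8-sector boundary and a 6-sector request is capped at 3.  For a
 * non-power-of-two max_io_len the same remainder is computed with
 * sector_div() instead of the mask.
 */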
int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
{
	if (len > UINT_MAX) {
		DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
		      (unsigned long long)len, UINT_MAX);
		ti->error = "Maximum size of target IO is too large";
		return -EINVAL;
	}

	ti->max_io_len = (uint32_t) len;

	return 0;
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
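/*
 * Typical use is from a target's ctr, clamping I/O to the target's
 * internal chunking (hypothetical variable names, illustrative only):
 *
 *	r = dm_set_target_max_io_len(ti, chunk_size_sectors);
 *	if (r)
 *		return r;	// ti->error has already been set
 *
 * Lengths above UINT_MAX sectors are rejected because ti->max_io_len is
 * a uint32_t.
 */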
static void __map_bio(struct dm_target_io *tio)
{
	int r;
	sector_t sector;
	struct mapped_device *md;
	struct bio *clone = &tio->clone;
	struct dm_target *ti = tio->ti;

	clone->bi_end_io = clone_endio;

	/*
	 * Map the clone.  If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&tio->io->io_count);
	sector = clone->bi_iter.bi_sector;
	r = ti->type->map(ti, clone);
	if (r == DM_MAPIO_REMAPPED) {
		/* the bio has been remapped so dispatch it */

		trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone,
				      tio->io->bio->bi_bdev->bd_dev, sector);

		generic_make_request(clone);
	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
		/* error the io and bail out, or requeue it if needed */
		md = tio->io->md;
		dec_pending(tio->io, r);
		free_tio(md, tio);
	} else if (r) {
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}
}

struct clone_info {
	struct mapped_device *md;
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	sector_t sector_count;
};

static void bio_setup_sector(struct bio *bio, sector_t sector, sector_t len)
{
	bio->bi_iter.bi_sector = sector;
	bio->bi_iter.bi_size = to_bytes(len);
}

/*
 * Creates a bio that consists of a range of complete bvecs.
 */
static void clone_bio(struct dm_target_io *tio, struct bio *bio,
		      sector_t sector, unsigned len)
{
	struct bio *clone = &tio->clone;

	__bio_clone_fast(clone, bio);

	if (bio_integrity(bio))
		bio_integrity_clone(clone, bio, GFP_NOIO);

	bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
	clone->bi_iter.bi_size = to_bytes(len);

	if (bio_integrity(bio))
		bio_integrity_trim(clone, 0, len);
}
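/*
 * Worked example for clone_bio() (illustrative): cloning sectors
 * [12, 16) of a bio whose bi_iter.bi_sector is 8 and whose bi_size
 * covers 16 sectors.  __bio_clone_fast() copies the parent's iterator,
 * bio_advance() then skips to_bytes(12 - 8) = 4 sectors of payload, and
 * bi_size is cut to to_bytes(4), leaving a clone that covers exactly the
 * requested range without copying any bvecs.
 */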
121206a426ceSMike Snitzer */ 12131c3b13e6SKent Overstreet __bio_clone_fast(clone, ci->bio); 1214bd2a49b8SAlasdair G Kergon if (len) 1215bd2a49b8SAlasdair G Kergon bio_setup_sector(clone, ci->sector, len); 1216f9ab94ceSMikulas Patocka 1217bd2a49b8SAlasdair G Kergon __map_bio(tio); 1218f9ab94ceSMikulas Patocka } 1219f9ab94ceSMikulas Patocka 122014fe594dSAlasdair G Kergon static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti, 122155a62eefSAlasdair G Kergon unsigned num_bios, sector_t len) 122206a426ceSMike Snitzer { 122355a62eefSAlasdair G Kergon unsigned target_bio_nr; 122406a426ceSMike Snitzer 122555a62eefSAlasdair G Kergon for (target_bio_nr = 0; target_bio_nr < num_bios; target_bio_nr++) 122614fe594dSAlasdair G Kergon __clone_and_map_simple_bio(ci, ti, target_bio_nr, len); 122706a426ceSMike Snitzer } 122806a426ceSMike Snitzer 122914fe594dSAlasdair G Kergon static int __send_empty_flush(struct clone_info *ci) 1230f9ab94ceSMikulas Patocka { 123106a426ceSMike Snitzer unsigned target_nr = 0; 1232f9ab94ceSMikulas Patocka struct dm_target *ti; 1233f9ab94ceSMikulas Patocka 1234b372d360SMike Snitzer BUG_ON(bio_has_data(ci->bio)); 1235f9ab94ceSMikulas Patocka while ((ti = dm_table_get_target(ci->map, target_nr++))) 123614fe594dSAlasdair G Kergon __send_duplicate_bios(ci, ti, ti->num_flush_bios, 0); 1237f9ab94ceSMikulas Patocka 1238f9ab94ceSMikulas Patocka return 0; 1239f9ab94ceSMikulas Patocka } 1240f9ab94ceSMikulas Patocka 1241e4c93811SAlasdair G Kergon static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti, 12421c3b13e6SKent Overstreet sector_t sector, unsigned len) 12435ae89a87SMike Snitzer { 1244dba14160SMikulas Patocka struct bio *bio = ci->bio; 12455ae89a87SMike Snitzer struct dm_target_io *tio; 1246b0d8ed4dSAlasdair G Kergon unsigned target_bio_nr; 1247b0d8ed4dSAlasdair G Kergon unsigned num_target_bios = 1; 12485ae89a87SMike Snitzer 1249b0d8ed4dSAlasdair G Kergon /* 1250b0d8ed4dSAlasdair G Kergon * Does the target want to receive duplicate copies of the bio? 
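 *
 * For example, a hypothetical target keeping N replicas could ask for one
 * clone per replica.  Illustrative sketch only: the ti->num_write_bios hook
 * and ti->private are real, but replicate_ctx, nr_replicas and the function
 * name are made up:
 *
 *	static unsigned replicate_num_write_bios(struct dm_target *ti,
 *						 struct bio *bio)
 *	{
 *		struct replicate_ctx *rc = ti->private;
 *
 *		return rc->nr_replicas;
 *	}
 *
 * with ti->num_write_bios = replicate_num_write_bios set in the target's
 * constructor.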
1251b0d8ed4dSAlasdair G Kergon */ 1252b0d8ed4dSAlasdair G Kergon if (bio_data_dir(bio) == WRITE && ti->num_write_bios) 1253b0d8ed4dSAlasdair G Kergon num_target_bios = ti->num_write_bios(ti, bio); 1254e4c93811SAlasdair G Kergon 1255b0d8ed4dSAlasdair G Kergon for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) { 12561c3b13e6SKent Overstreet tio = alloc_tio(ci, ti, 0, target_bio_nr); 12571c3b13e6SKent Overstreet clone_bio(tio, bio, sector, len); 1258bd2a49b8SAlasdair G Kergon __map_bio(tio); 12595ae89a87SMike Snitzer } 1260b0d8ed4dSAlasdair G Kergon } 12615ae89a87SMike Snitzer 126255a62eefSAlasdair G Kergon typedef unsigned (*get_num_bios_fn)(struct dm_target *ti); 126323508a96SMike Snitzer 126455a62eefSAlasdair G Kergon static unsigned get_num_discard_bios(struct dm_target *ti) 126523508a96SMike Snitzer { 126655a62eefSAlasdair G Kergon return ti->num_discard_bios; 126723508a96SMike Snitzer } 126823508a96SMike Snitzer 126955a62eefSAlasdair G Kergon static unsigned get_num_write_same_bios(struct dm_target *ti) 127023508a96SMike Snitzer { 127155a62eefSAlasdair G Kergon return ti->num_write_same_bios; 127223508a96SMike Snitzer } 127323508a96SMike Snitzer 127423508a96SMike Snitzer typedef bool (*is_split_required_fn)(struct dm_target *ti); 127523508a96SMike Snitzer 127623508a96SMike Snitzer static bool is_split_required_for_discard(struct dm_target *ti) 127723508a96SMike Snitzer { 127855a62eefSAlasdair G Kergon return ti->split_discard_bios; 127923508a96SMike Snitzer } 128023508a96SMike Snitzer 128114fe594dSAlasdair G Kergon static int __send_changing_extent_only(struct clone_info *ci, 128255a62eefSAlasdair G Kergon get_num_bios_fn get_num_bios, 128323508a96SMike Snitzer is_split_required_fn is_split_required) 12845ae89a87SMike Snitzer { 12855ae89a87SMike Snitzer struct dm_target *ti; 1286a79245b3SMike Snitzer sector_t len; 128755a62eefSAlasdair G Kergon unsigned num_bios; 12885ae89a87SMike Snitzer 1289a79245b3SMike Snitzer do { 12905ae89a87SMike Snitzer ti = dm_table_find_target(ci->map, ci->sector); 12915ae89a87SMike Snitzer if (!dm_target_is_valid(ti)) 12925ae89a87SMike Snitzer return -EIO; 12935ae89a87SMike Snitzer 12945ae89a87SMike Snitzer /* 129523508a96SMike Snitzer * Even though the device advertised support for this type of 129623508a96SMike Snitzer * request, that does not mean every target supports it, and 1297936688d7SMike Snitzer * reconfiguration might also have changed that since the 12985ae89a87SMike Snitzer * check was performed. 12995ae89a87SMike Snitzer */ 130055a62eefSAlasdair G Kergon num_bios = get_num_bios ? 
get_num_bios(ti) : 0; 130155a62eefSAlasdair G Kergon if (!num_bios) 13025ae89a87SMike Snitzer return -EOPNOTSUPP; 13035ae89a87SMike Snitzer 130423508a96SMike Snitzer if (is_split_required && !is_split_required(ti)) 1305a79245b3SMike Snitzer len = min(ci->sector_count, max_io_len_target_boundary(ci->sector, ti)); 13067acf0277SMikulas Patocka else 13077acf0277SMikulas Patocka len = min(ci->sector_count, max_io_len(ci->sector, ti)); 13085ae89a87SMike Snitzer 130914fe594dSAlasdair G Kergon __send_duplicate_bios(ci, ti, num_bios, len); 13105ae89a87SMike Snitzer 1311a79245b3SMike Snitzer ci->sector += len; 1312a79245b3SMike Snitzer } while (ci->sector_count -= len); 13135ae89a87SMike Snitzer 13145ae89a87SMike Snitzer return 0; 13155ae89a87SMike Snitzer } 13165ae89a87SMike Snitzer 131714fe594dSAlasdair G Kergon static int __send_discard(struct clone_info *ci) 131823508a96SMike Snitzer { 131914fe594dSAlasdair G Kergon return __send_changing_extent_only(ci, get_num_discard_bios, 132023508a96SMike Snitzer is_split_required_for_discard); 132123508a96SMike Snitzer } 132223508a96SMike Snitzer 132314fe594dSAlasdair G Kergon static int __send_write_same(struct clone_info *ci) 132423508a96SMike Snitzer { 132514fe594dSAlasdair G Kergon return __send_changing_extent_only(ci, get_num_write_same_bios, NULL); 132623508a96SMike Snitzer } 132723508a96SMike Snitzer 1328e4c93811SAlasdair G Kergon /* 1329e4c93811SAlasdair G Kergon * Select the correct strategy for processing a non-flush bio. 1330e4c93811SAlasdair G Kergon */ 1331e4c93811SAlasdair G Kergon static int __split_and_process_non_flush(struct clone_info *ci) 1332e4c93811SAlasdair G Kergon { 1333e4c93811SAlasdair G Kergon struct bio *bio = ci->bio; 1334e4c93811SAlasdair G Kergon struct dm_target *ti; 13351c3b13e6SKent Overstreet unsigned len; 1336e4c93811SAlasdair G Kergon 1337e4c93811SAlasdair G Kergon if (unlikely(bio->bi_rw & REQ_DISCARD)) 1338e4c93811SAlasdair G Kergon return __send_discard(ci); 1339e4c93811SAlasdair G Kergon else if (unlikely(bio->bi_rw & REQ_WRITE_SAME)) 1340e4c93811SAlasdair G Kergon return __send_write_same(ci); 1341e4c93811SAlasdair G Kergon 1342e4c93811SAlasdair G Kergon ti = dm_table_find_target(ci->map, ci->sector); 1343e4c93811SAlasdair G Kergon if (!dm_target_is_valid(ti)) 1344e4c93811SAlasdair G Kergon return -EIO; 1345e4c93811SAlasdair G Kergon 13461c3b13e6SKent Overstreet len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count); 1347e4c93811SAlasdair G Kergon 13481c3b13e6SKent Overstreet __clone_and_map_data_bio(ci, ti, ci->sector, len); 1349e4c93811SAlasdair G Kergon 1350e4c93811SAlasdair G Kergon ci->sector += len; 1351e4c93811SAlasdair G Kergon ci->sector_count -= len; 1352e4c93811SAlasdair G Kergon 1353e4c93811SAlasdair G Kergon return 0; 1354e4c93811SAlasdair G Kergon } 1355e4c93811SAlasdair G Kergon 1356e4c93811SAlasdair G Kergon /* 135714fe594dSAlasdair G Kergon * Entry point to split a bio into clones and submit them to the targets. 
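 *
 * Roughly: a dm_io is allocated to account for the whole original bio;
 * a REQ_FLUSH bio becomes one empty flush clone per target via
 * __send_empty_flush(); anything else is cloned and mapped in pieces no
 * larger than max_io_len() until ci.sector_count reaches zero.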
13581da177e4SLinus Torvalds */ 135983d5e5b0SMikulas Patocka static void __split_and_process_bio(struct mapped_device *md, 136083d5e5b0SMikulas Patocka struct dm_table *map, struct bio *bio) 13611da177e4SLinus Torvalds { 13621da177e4SLinus Torvalds struct clone_info ci; 1363512875bdSJun'ichi Nomura int error = 0; 13641da177e4SLinus Torvalds 136583d5e5b0SMikulas Patocka if (unlikely(!map)) { 1366f0b9a450SMikulas Patocka bio_io_error(bio); 1367f0b9a450SMikulas Patocka return; 1368f0b9a450SMikulas Patocka } 1369692d0eb9SMikulas Patocka 137083d5e5b0SMikulas Patocka ci.map = map; 13711da177e4SLinus Torvalds ci.md = md; 13721da177e4SLinus Torvalds ci.io = alloc_io(md); 13731da177e4SLinus Torvalds ci.io->error = 0; 13741da177e4SLinus Torvalds atomic_set(&ci.io->io_count, 1); 13751da177e4SLinus Torvalds ci.io->bio = bio; 13761da177e4SLinus Torvalds ci.io->md = md; 1377f88fb981SKiyoshi Ueda spin_lock_init(&ci.io->endio_lock); 13784f024f37SKent Overstreet ci.sector = bio->bi_iter.bi_sector; 13791da177e4SLinus Torvalds 13803eaf840eSJun'ichi "Nick" Nomura start_io_acct(ci.io); 1381bd2a49b8SAlasdair G Kergon 1382b372d360SMike Snitzer if (bio->bi_rw & REQ_FLUSH) { 1383b372d360SMike Snitzer ci.bio = &ci.md->flush_bio; 1384b372d360SMike Snitzer ci.sector_count = 0; 138514fe594dSAlasdair G Kergon error = __send_empty_flush(&ci); 1386b372d360SMike Snitzer /* dec_pending submits any data associated with flush */ 1387b372d360SMike Snitzer } else { 13886a8736d1STejun Heo ci.bio = bio; 1389f6fccb12SMilan Broz ci.sector_count = bio_sectors(bio); 1390512875bdSJun'ichi Nomura while (ci.sector_count && !error) 139114fe594dSAlasdair G Kergon error = __split_and_process_non_flush(&ci); 1392d87f4c14STejun Heo } 13931da177e4SLinus Torvalds 13941da177e4SLinus Torvalds /* drop the extra reference count */ 1395512875bdSJun'ichi Nomura dec_pending(ci.io, error); 13969e4e5f87SMilan Broz } 13979e4e5f87SMilan Broz /*----------------------------------------------------------------- 13981da177e4SLinus Torvalds * CRUD END 13991da177e4SLinus Torvalds *---------------------------------------------------------------*/ 14001da177e4SLinus Torvalds 14011da177e4SLinus Torvalds static int dm_merge_bvec(struct request_queue *q, 14021da177e4SLinus Torvalds struct bvec_merge_data *bvm, 1403f6fccb12SMilan Broz struct bio_vec *biovec) 1404f6fccb12SMilan Broz { 1405f6fccb12SMilan Broz struct mapped_device *md = q->queuedata; 140683d5e5b0SMikulas Patocka struct dm_table *map = dm_get_live_table_fast(md); 1407f6fccb12SMilan Broz struct dm_target *ti; 1408f6fccb12SMilan Broz sector_t max_sectors; 1409f6fccb12SMilan Broz int max_size = 0; 1410f6fccb12SMilan Broz 1411f6fccb12SMilan Broz if (unlikely(!map)) 1412f6fccb12SMilan Broz goto out; 1413f6fccb12SMilan Broz 1414f6fccb12SMilan Broz ti = dm_table_find_target(map, bvm->bi_sector); 1415f6fccb12SMilan Broz if (!dm_target_is_valid(ti)) 141683d5e5b0SMikulas Patocka goto out; 1417f6fccb12SMilan Broz 1418f6fccb12SMilan Broz /* 1419f6fccb12SMilan Broz * Find maximum amount of I/O that won't need splitting 1420f6fccb12SMilan Broz */ 142156a67df7SMike Snitzer max_sectors = min(max_io_len(bvm->bi_sector, ti), 1422f6fccb12SMilan Broz (sector_t) BIO_MAX_SECTORS); 1423f6fccb12SMilan Broz max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size; 1424f6fccb12SMilan Broz if (max_size < 0) 1425f6fccb12SMilan Broz max_size = 0; 1426f6fccb12SMilan Broz 1427f6fccb12SMilan Broz /* 1428f6fccb12SMilan Broz * merge_bvec_fn() returns number of bytes 1429f6fccb12SMilan Broz * it can accept at this offset 
1430f6fccb12SMilan Broz * max is precomputed maximal io size 1431f6fccb12SMilan Broz */ 1432f6fccb12SMilan Broz if (max_size && ti->type->merge) 1433f6fccb12SMilan Broz max_size = ti->type->merge(ti, bvm, biovec, max_size); 14348cbeb67aSMikulas Patocka /* 14358cbeb67aSMikulas Patocka * If the target doesn't support merge method and some of the devices 14368cbeb67aSMikulas Patocka * provided their merge_bvec method (we know this by looking at 14378cbeb67aSMikulas Patocka * queue_max_hw_sectors), then we can't allow bios with multiple vector 14388cbeb67aSMikulas Patocka * entries. So always set max_size to 0, and the code below allows 14398cbeb67aSMikulas Patocka * just one page. 14408cbeb67aSMikulas Patocka */ 14418cbeb67aSMikulas Patocka else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9) 14428cbeb67aSMikulas Patocka 14438cbeb67aSMikulas Patocka max_size = 0; 1444f6fccb12SMilan Broz 14455037108aSMikulas Patocka out: 144683d5e5b0SMikulas Patocka dm_put_live_table_fast(md); 1447f6fccb12SMilan Broz /* 1448f6fccb12SMilan Broz * Always allow an entire first page 1449f6fccb12SMilan Broz */ 1450f6fccb12SMilan Broz if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT)) 1451f6fccb12SMilan Broz max_size = biovec->bv_len; 1452f6fccb12SMilan Broz 1453f6fccb12SMilan Broz return max_size; 1454f6fccb12SMilan Broz } 1455f6fccb12SMilan Broz 14561da177e4SLinus Torvalds /* 14571da177e4SLinus Torvalds * The request function that just remaps the bio built up by 14581da177e4SLinus Torvalds * dm_merge_bvec. 14591da177e4SLinus Torvalds */ 14605a7bbad2SChristoph Hellwig static void _dm_request(struct request_queue *q, struct bio *bio) 14611da177e4SLinus Torvalds { 146212f03a49SKevin Corry int rw = bio_data_dir(bio); 14631da177e4SLinus Torvalds struct mapped_device *md = q->queuedata; 1464c9959059STejun Heo int cpu; 146583d5e5b0SMikulas Patocka int srcu_idx; 146683d5e5b0SMikulas Patocka struct dm_table *map; 14671da177e4SLinus Torvalds 146883d5e5b0SMikulas Patocka map = dm_get_live_table(md, &srcu_idx); 14691da177e4SLinus Torvalds 1470074a7acaSTejun Heo cpu = part_stat_lock(); 1471074a7acaSTejun Heo part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]); 1472074a7acaSTejun Heo part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio)); 1473074a7acaSTejun Heo part_stat_unlock(); 147412f03a49SKevin Corry 14756a8736d1STejun Heo /* if we're suspended, we have to queue this io for later */ 14766a8736d1STejun Heo if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { 147783d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 14781da177e4SLinus Torvalds 14796a8736d1STejun Heo if (bio_rw(bio) != READA) 148092c63902SMikulas Patocka queue_io(md, bio); 14816a8736d1STejun Heo else 14826a8736d1STejun Heo bio_io_error(bio); 14835a7bbad2SChristoph Hellwig return; 14841da177e4SLinus Torvalds } 14851da177e4SLinus Torvalds 148683d5e5b0SMikulas Patocka __split_and_process_bio(md, map, bio); 148783d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 14885a7bbad2SChristoph Hellwig return; 1489cec47e3dSKiyoshi Ueda } 1490cec47e3dSKiyoshi Ueda 1491fd2ed4d2SMikulas Patocka int dm_request_based(struct mapped_device *md) 1492cec47e3dSKiyoshi Ueda { 1493cec47e3dSKiyoshi Ueda return blk_queue_stackable(md->queue); 1494cec47e3dSKiyoshi Ueda } 1495cec47e3dSKiyoshi Ueda 14965a7bbad2SChristoph Hellwig static void dm_request(struct request_queue *q, struct bio *bio) 1497cec47e3dSKiyoshi Ueda { 1498cec47e3dSKiyoshi Ueda struct mapped_device *md = q->queuedata; 1499cec47e3dSKiyoshi Ueda 1500cec47e3dSKiyoshi Ueda if 
(dm_request_based(md)) 15015a7bbad2SChristoph Hellwig blk_queue_bio(q, bio); 15025a7bbad2SChristoph Hellwig else 15035a7bbad2SChristoph Hellwig _dm_request(q, bio); 1504cec47e3dSKiyoshi Ueda } 1505cec47e3dSKiyoshi Ueda 1506cec47e3dSKiyoshi Ueda void dm_dispatch_request(struct request *rq) 1507cec47e3dSKiyoshi Ueda { 1508cec47e3dSKiyoshi Ueda int r; 1509cec47e3dSKiyoshi Ueda 1510cec47e3dSKiyoshi Ueda if (blk_queue_io_stat(rq->q)) 1511cec47e3dSKiyoshi Ueda rq->cmd_flags |= REQ_IO_STAT; 1512cec47e3dSKiyoshi Ueda 1513cec47e3dSKiyoshi Ueda rq->start_time = jiffies; 1514cec47e3dSKiyoshi Ueda r = blk_insert_cloned_request(rq->q, rq); 1515cec47e3dSKiyoshi Ueda if (r) 1516cec47e3dSKiyoshi Ueda dm_complete_request(rq, r); 1517cec47e3dSKiyoshi Ueda } 1518cec47e3dSKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_dispatch_request); 1519cec47e3dSKiyoshi Ueda 1520cec47e3dSKiyoshi Ueda static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig, 1521cec47e3dSKiyoshi Ueda void *data) 1522cec47e3dSKiyoshi Ueda { 1523cec47e3dSKiyoshi Ueda struct dm_rq_target_io *tio = data; 152494818742SKent Overstreet struct dm_rq_clone_bio_info *info = 152594818742SKent Overstreet container_of(bio, struct dm_rq_clone_bio_info, clone); 1526cec47e3dSKiyoshi Ueda 1527cec47e3dSKiyoshi Ueda info->orig = bio_orig; 1528cec47e3dSKiyoshi Ueda info->tio = tio; 1529cec47e3dSKiyoshi Ueda bio->bi_end_io = end_clone_bio; 1530cec47e3dSKiyoshi Ueda 1531cec47e3dSKiyoshi Ueda return 0; 1532cec47e3dSKiyoshi Ueda } 1533cec47e3dSKiyoshi Ueda 1534cec47e3dSKiyoshi Ueda static int setup_clone(struct request *clone, struct request *rq, 1535cec47e3dSKiyoshi Ueda struct dm_rq_target_io *tio) 1536cec47e3dSKiyoshi Ueda { 1537d0bcb878SKiyoshi Ueda int r; 1538cec47e3dSKiyoshi Ueda 1539d0bcb878SKiyoshi Ueda r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC, 1540d0bcb878SKiyoshi Ueda dm_rq_bio_constructor, tio); 1541cec47e3dSKiyoshi Ueda if (r) 1542cec47e3dSKiyoshi Ueda return r; 1543cec47e3dSKiyoshi Ueda 1544cec47e3dSKiyoshi Ueda clone->cmd = rq->cmd; 1545cec47e3dSKiyoshi Ueda clone->cmd_len = rq->cmd_len; 1546cec47e3dSKiyoshi Ueda clone->sense = rq->sense; 1547cec47e3dSKiyoshi Ueda clone->buffer = rq->buffer; 1548cec47e3dSKiyoshi Ueda clone->end_io = end_clone_request; 1549cec47e3dSKiyoshi Ueda clone->end_io_data = tio; 1550cec47e3dSKiyoshi Ueda 1551cec47e3dSKiyoshi Ueda return 0; 1552cec47e3dSKiyoshi Ueda } 1553cec47e3dSKiyoshi Ueda 15546facdaffSKiyoshi Ueda static struct request *clone_rq(struct request *rq, struct mapped_device *md, 15556facdaffSKiyoshi Ueda gfp_t gfp_mask) 15566facdaffSKiyoshi Ueda { 15576facdaffSKiyoshi Ueda struct request *clone; 15586facdaffSKiyoshi Ueda struct dm_rq_target_io *tio; 15596facdaffSKiyoshi Ueda 15606facdaffSKiyoshi Ueda tio = alloc_rq_tio(md, gfp_mask); 15616facdaffSKiyoshi Ueda if (!tio) 15626facdaffSKiyoshi Ueda return NULL; 15636facdaffSKiyoshi Ueda 15646facdaffSKiyoshi Ueda tio->md = md; 15656facdaffSKiyoshi Ueda tio->ti = NULL; 15666facdaffSKiyoshi Ueda tio->orig = rq; 15676facdaffSKiyoshi Ueda tio->error = 0; 15686facdaffSKiyoshi Ueda memset(&tio->info, 0, sizeof(tio->info)); 15696facdaffSKiyoshi Ueda 15706facdaffSKiyoshi Ueda clone = &tio->clone; 15716facdaffSKiyoshi Ueda if (setup_clone(clone, rq, tio)) { 15726facdaffSKiyoshi Ueda /* -ENOMEM */ 15736facdaffSKiyoshi Ueda free_rq_tio(tio); 15746facdaffSKiyoshi Ueda return NULL; 15756facdaffSKiyoshi Ueda } 15766facdaffSKiyoshi Ueda 15776facdaffSKiyoshi Ueda return clone; 15786facdaffSKiyoshi Ueda } 15796facdaffSKiyoshi Ueda 1580cec47e3dSKiyoshi Ueda /* 
1581cec47e3dSKiyoshi Ueda * Called with the queue lock held. 1582cec47e3dSKiyoshi Ueda */ 1583cec47e3dSKiyoshi Ueda static int dm_prep_fn(struct request_queue *q, struct request *rq) 1584cec47e3dSKiyoshi Ueda { 1585cec47e3dSKiyoshi Ueda struct mapped_device *md = q->queuedata; 1586cec47e3dSKiyoshi Ueda struct request *clone; 1587cec47e3dSKiyoshi Ueda 1588cec47e3dSKiyoshi Ueda if (unlikely(rq->special)) { 1589cec47e3dSKiyoshi Ueda DMWARN("Already has something in rq->special."); 1590cec47e3dSKiyoshi Ueda return BLKPREP_KILL; 1591cec47e3dSKiyoshi Ueda } 1592cec47e3dSKiyoshi Ueda 15936facdaffSKiyoshi Ueda clone = clone_rq(rq, md, GFP_ATOMIC); 15946facdaffSKiyoshi Ueda if (!clone) 1595cec47e3dSKiyoshi Ueda return BLKPREP_DEFER; 1596cec47e3dSKiyoshi Ueda 1597cec47e3dSKiyoshi Ueda rq->special = clone; 1598cec47e3dSKiyoshi Ueda rq->cmd_flags |= REQ_DONTPREP; 1599cec47e3dSKiyoshi Ueda 1600cec47e3dSKiyoshi Ueda return BLKPREP_OK; 1601cec47e3dSKiyoshi Ueda } 1602cec47e3dSKiyoshi Ueda 16039eef87daSKiyoshi Ueda /* 16049eef87daSKiyoshi Ueda * Returns: 16059eef87daSKiyoshi Ueda * 0 : the request has been processed (not requeued) 16069eef87daSKiyoshi Ueda * !0 : the request has been requeued 16079eef87daSKiyoshi Ueda */ 16089eef87daSKiyoshi Ueda static int map_request(struct dm_target *ti, struct request *clone, 1609cec47e3dSKiyoshi Ueda struct mapped_device *md) 1610cec47e3dSKiyoshi Ueda { 16119eef87daSKiyoshi Ueda int r, requeued = 0; 1612cec47e3dSKiyoshi Ueda struct dm_rq_target_io *tio = clone->end_io_data; 1613cec47e3dSKiyoshi Ueda 1614cec47e3dSKiyoshi Ueda tio->ti = ti; 1615cec47e3dSKiyoshi Ueda r = ti->type->map_rq(ti, clone, &tio->info); 1616cec47e3dSKiyoshi Ueda switch (r) { 1617cec47e3dSKiyoshi Ueda case DM_MAPIO_SUBMITTED: 1618cec47e3dSKiyoshi Ueda /* The target has taken the I/O to submit by itself later */ 1619cec47e3dSKiyoshi Ueda break; 1620cec47e3dSKiyoshi Ueda case DM_MAPIO_REMAPPED: 1621cec47e3dSKiyoshi Ueda /* The target has remapped the I/O so dispatch it */ 16226db4ccd6SJun'ichi Nomura trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)), 16236db4ccd6SJun'ichi Nomura blk_rq_pos(tio->orig)); 1624cec47e3dSKiyoshi Ueda dm_dispatch_request(clone); 1625cec47e3dSKiyoshi Ueda break; 1626cec47e3dSKiyoshi Ueda case DM_MAPIO_REQUEUE: 1627cec47e3dSKiyoshi Ueda /* The target wants to requeue the I/O */ 1628cec47e3dSKiyoshi Ueda dm_requeue_unmapped_request(clone); 16299eef87daSKiyoshi Ueda requeued = 1; 1630cec47e3dSKiyoshi Ueda break; 1631cec47e3dSKiyoshi Ueda default: 1632cec47e3dSKiyoshi Ueda if (r > 0) { 1633cec47e3dSKiyoshi Ueda DMWARN("unimplemented target map return value: %d", r); 1634cec47e3dSKiyoshi Ueda BUG(); 1635cec47e3dSKiyoshi Ueda } 1636cec47e3dSKiyoshi Ueda 1637cec47e3dSKiyoshi Ueda /* The target wants to complete the I/O */ 1638cec47e3dSKiyoshi Ueda dm_kill_unmapped_request(clone, r); 1639cec47e3dSKiyoshi Ueda break; 1640cec47e3dSKiyoshi Ueda } 16419eef87daSKiyoshi Ueda 16429eef87daSKiyoshi Ueda return requeued; 1643cec47e3dSKiyoshi Ueda } 1644cec47e3dSKiyoshi Ueda 1645ba1cbad9SMike Snitzer static struct request *dm_start_request(struct mapped_device *md, struct request *orig) 1646ba1cbad9SMike Snitzer { 1647ba1cbad9SMike Snitzer struct request *clone; 1648ba1cbad9SMike Snitzer 1649ba1cbad9SMike Snitzer blk_start_request(orig); 1650ba1cbad9SMike Snitzer clone = orig->special; 1651ba1cbad9SMike Snitzer atomic_inc(&md->pending[rq_data_dir(clone)]); 1652ba1cbad9SMike Snitzer 1653ba1cbad9SMike Snitzer /* 1654ba1cbad9SMike Snitzer * Hold the md reference here for the 
in-flight I/O. 1655ba1cbad9SMike Snitzer * We can't rely on the reference count by device opener, 1656ba1cbad9SMike Snitzer * because the device may be closed during the request completion 1657ba1cbad9SMike Snitzer * when all bios are completed. 1658ba1cbad9SMike Snitzer * See the comment in rq_completed() too. 1659ba1cbad9SMike Snitzer */ 1660ba1cbad9SMike Snitzer dm_get(md); 1661ba1cbad9SMike Snitzer 1662ba1cbad9SMike Snitzer return clone; 1663ba1cbad9SMike Snitzer } 1664ba1cbad9SMike Snitzer 1665cec47e3dSKiyoshi Ueda /* 1666cec47e3dSKiyoshi Ueda * q->request_fn for request-based dm. 1667cec47e3dSKiyoshi Ueda * Called with the queue lock held. 1668cec47e3dSKiyoshi Ueda */ 1669cec47e3dSKiyoshi Ueda static void dm_request_fn(struct request_queue *q) 1670cec47e3dSKiyoshi Ueda { 1671cec47e3dSKiyoshi Ueda struct mapped_device *md = q->queuedata; 167283d5e5b0SMikulas Patocka int srcu_idx; 167383d5e5b0SMikulas Patocka struct dm_table *map = dm_get_live_table(md, &srcu_idx); 1674cec47e3dSKiyoshi Ueda struct dm_target *ti; 1675b4324feeSKiyoshi Ueda struct request *rq, *clone; 167629e4013dSTejun Heo sector_t pos; 1677cec47e3dSKiyoshi Ueda 1678cec47e3dSKiyoshi Ueda /* 1679b4324feeSKiyoshi Ueda * For suspend, check blk_queue_stopped() and increment 1680b4324feeSKiyoshi Ueda * ->pending within a single queue_lock not to increment the 1681b4324feeSKiyoshi Ueda * number of in-flight I/Os after the queue is stopped in 1682b4324feeSKiyoshi Ueda * dm_suspend(). 1683cec47e3dSKiyoshi Ueda */ 16847eaceaccSJens Axboe while (!blk_queue_stopped(q)) { 1685cec47e3dSKiyoshi Ueda rq = blk_peek_request(q); 1686cec47e3dSKiyoshi Ueda if (!rq) 16877eaceaccSJens Axboe goto delay_and_out; 1688cec47e3dSKiyoshi Ueda 168929e4013dSTejun Heo /* always use block 0 to find the target for flushes for now */ 169029e4013dSTejun Heo pos = 0; 169129e4013dSTejun Heo if (!(rq->cmd_flags & REQ_FLUSH)) 169229e4013dSTejun Heo pos = blk_rq_pos(rq); 1693d0bcb878SKiyoshi Ueda 169429e4013dSTejun Heo ti = dm_table_find_target(map, pos); 1695ba1cbad9SMike Snitzer if (!dm_target_is_valid(ti)) { 1696ba1cbad9SMike Snitzer /* 1697ba1cbad9SMike Snitzer * Must perform setup, that dm_done() requires, 1698ba1cbad9SMike Snitzer * before calling dm_kill_unmapped_request 1699ba1cbad9SMike Snitzer */ 1700ba1cbad9SMike Snitzer DMERR_LIMIT("request attempted access beyond the end of device"); 1701ba1cbad9SMike Snitzer clone = dm_start_request(md, rq); 1702ba1cbad9SMike Snitzer dm_kill_unmapped_request(clone, -EIO); 1703ba1cbad9SMike Snitzer continue; 1704ba1cbad9SMike Snitzer } 170529e4013dSTejun Heo 1706cec47e3dSKiyoshi Ueda if (ti->type->busy && ti->type->busy(ti)) 17077eaceaccSJens Axboe goto delay_and_out; 1708cec47e3dSKiyoshi Ueda 1709ba1cbad9SMike Snitzer clone = dm_start_request(md, rq); 1710b4324feeSKiyoshi Ueda 1711cec47e3dSKiyoshi Ueda spin_unlock(q->queue_lock); 17129eef87daSKiyoshi Ueda if (map_request(ti, clone, md)) 17139eef87daSKiyoshi Ueda goto requeued; 17149eef87daSKiyoshi Ueda 1715052189a2SKiyoshi Ueda BUG_ON(!irqs_disabled()); 1716052189a2SKiyoshi Ueda spin_lock(q->queue_lock); 1717cec47e3dSKiyoshi Ueda } 1718cec47e3dSKiyoshi Ueda 1719cec47e3dSKiyoshi Ueda goto out; 1720cec47e3dSKiyoshi Ueda 17219eef87daSKiyoshi Ueda requeued: 1722052189a2SKiyoshi Ueda BUG_ON(!irqs_disabled()); 1723052189a2SKiyoshi Ueda spin_lock(q->queue_lock); 17249eef87daSKiyoshi Ueda 17257eaceaccSJens Axboe delay_and_out: 17267eaceaccSJens Axboe blk_delay_queue(q, HZ / 10); 1727cec47e3dSKiyoshi Ueda out: 172883d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 
1729cec47e3dSKiyoshi Ueda } 1730cec47e3dSKiyoshi Ueda 1731cec47e3dSKiyoshi Ueda int dm_underlying_device_busy(struct request_queue *q) 1732cec47e3dSKiyoshi Ueda { 1733cec47e3dSKiyoshi Ueda return blk_lld_busy(q); 1734cec47e3dSKiyoshi Ueda } 1735cec47e3dSKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_underlying_device_busy); 1736cec47e3dSKiyoshi Ueda 1737cec47e3dSKiyoshi Ueda static int dm_lld_busy(struct request_queue *q) 1738cec47e3dSKiyoshi Ueda { 1739cec47e3dSKiyoshi Ueda int r; 1740cec47e3dSKiyoshi Ueda struct mapped_device *md = q->queuedata; 174183d5e5b0SMikulas Patocka struct dm_table *map = dm_get_live_table_fast(md); 1742cec47e3dSKiyoshi Ueda 1743cec47e3dSKiyoshi Ueda if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) 1744cec47e3dSKiyoshi Ueda r = 1; 1745cec47e3dSKiyoshi Ueda else 1746cec47e3dSKiyoshi Ueda r = dm_table_any_busy_target(map); 1747cec47e3dSKiyoshi Ueda 174883d5e5b0SMikulas Patocka dm_put_live_table_fast(md); 1749cec47e3dSKiyoshi Ueda 1750cec47e3dSKiyoshi Ueda return r; 1751cec47e3dSKiyoshi Ueda } 1752cec47e3dSKiyoshi Ueda 17531da177e4SLinus Torvalds static int dm_any_congested(void *congested_data, int bdi_bits) 17541da177e4SLinus Torvalds { 17558a57dfc6SChandra Seetharaman int r = bdi_bits; 17568a57dfc6SChandra Seetharaman struct mapped_device *md = congested_data; 17578a57dfc6SChandra Seetharaman struct dm_table *map; 17581da177e4SLinus Torvalds 17591eb787ecSAlasdair G Kergon if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 176083d5e5b0SMikulas Patocka map = dm_get_live_table_fast(md); 17618a57dfc6SChandra Seetharaman if (map) { 1762cec47e3dSKiyoshi Ueda /* 1763cec47e3dSKiyoshi Ueda * Request-based dm cares about only own queue for 1764cec47e3dSKiyoshi Ueda * the query about congestion status of request_queue 1765cec47e3dSKiyoshi Ueda */ 1766cec47e3dSKiyoshi Ueda if (dm_request_based(md)) 1767cec47e3dSKiyoshi Ueda r = md->queue->backing_dev_info.state & 1768cec47e3dSKiyoshi Ueda bdi_bits; 1769cec47e3dSKiyoshi Ueda else 17701da177e4SLinus Torvalds r = dm_table_any_congested(map, bdi_bits); 17718a57dfc6SChandra Seetharaman } 177283d5e5b0SMikulas Patocka dm_put_live_table_fast(md); 17738a57dfc6SChandra Seetharaman } 17748a57dfc6SChandra Seetharaman 17751da177e4SLinus Torvalds return r; 17761da177e4SLinus Torvalds } 17771da177e4SLinus Torvalds 17781da177e4SLinus Torvalds /*----------------------------------------------------------------- 17791da177e4SLinus Torvalds * An IDR is used to keep track of allocated minor numbers. 17801da177e4SLinus Torvalds *---------------------------------------------------------------*/ 17812b06cfffSAlasdair G Kergon static void free_minor(int minor) 17821da177e4SLinus Torvalds { 1783f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 17841da177e4SLinus Torvalds idr_remove(&_minor_idr, minor); 1785f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 17861da177e4SLinus Torvalds } 17871da177e4SLinus Torvalds 17881da177e4SLinus Torvalds /* 17891da177e4SLinus Torvalds * See if the device with a specific minor # is free. 
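 *
 * idr_alloc() is asked for the exact range [minor, minor + 1), so it fails
 * with -ENOSPC when that minor is already in use; the caller sees that
 * as -EBUSY.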
17901da177e4SLinus Torvalds */ 1791cf13ab8eSFrederik Deweerdt static int specific_minor(int minor) 17921da177e4SLinus Torvalds { 1793c9d76be6STejun Heo int r; 17941da177e4SLinus Torvalds 17951da177e4SLinus Torvalds if (minor >= (1 << MINORBITS)) 17961da177e4SLinus Torvalds return -EINVAL; 17971da177e4SLinus Torvalds 1798c9d76be6STejun Heo idr_preload(GFP_KERNEL); 1799f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 18001da177e4SLinus Torvalds 1801c9d76be6STejun Heo r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT); 18021da177e4SLinus Torvalds 1803f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 1804c9d76be6STejun Heo idr_preload_end(); 1805c9d76be6STejun Heo if (r < 0) 1806c9d76be6STejun Heo return r == -ENOSPC ? -EBUSY : r; 1807c9d76be6STejun Heo return 0; 18081da177e4SLinus Torvalds } 18091da177e4SLinus Torvalds 1810cf13ab8eSFrederik Deweerdt static int next_free_minor(int *minor) 18111da177e4SLinus Torvalds { 1812c9d76be6STejun Heo int r; 18131da177e4SLinus Torvalds 1814c9d76be6STejun Heo idr_preload(GFP_KERNEL); 1815f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 18161da177e4SLinus Torvalds 1817c9d76be6STejun Heo r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT); 18181da177e4SLinus Torvalds 1819f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 1820c9d76be6STejun Heo idr_preload_end(); 1821c9d76be6STejun Heo if (r < 0) 18221da177e4SLinus Torvalds return r; 1823c9d76be6STejun Heo *minor = r; 1824c9d76be6STejun Heo return 0; 18251da177e4SLinus Torvalds } 18261da177e4SLinus Torvalds 182783d5cde4SAlexey Dobriyan static const struct block_device_operations dm_blk_dops; 18281da177e4SLinus Torvalds 182953d5914fSMikulas Patocka static void dm_wq_work(struct work_struct *work); 183053d5914fSMikulas Patocka 18314a0b4ddfSMike Snitzer static void dm_init_md_queue(struct mapped_device *md) 18324a0b4ddfSMike Snitzer { 18334a0b4ddfSMike Snitzer /* 18344a0b4ddfSMike Snitzer * Request-based dm devices cannot be stacked on top of bio-based dm 18354a0b4ddfSMike Snitzer * devices. The type of this dm device has not been decided yet. 18364a0b4ddfSMike Snitzer * The type is decided at the first table loading time. 18374a0b4ddfSMike Snitzer * To prevent problematic device stacking, clear the queue flag 18384a0b4ddfSMike Snitzer * for request stacking support until then. 18394a0b4ddfSMike Snitzer * 18404a0b4ddfSMike Snitzer * This queue is new, so no concurrency on the queue_flags. 18414a0b4ddfSMike Snitzer */ 18424a0b4ddfSMike Snitzer queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue); 18434a0b4ddfSMike Snitzer 18444a0b4ddfSMike Snitzer md->queue->queuedata = md; 18454a0b4ddfSMike Snitzer md->queue->backing_dev_info.congested_fn = dm_any_congested; 18464a0b4ddfSMike Snitzer md->queue->backing_dev_info.congested_data = md; 18474a0b4ddfSMike Snitzer blk_queue_make_request(md->queue, dm_request); 18484a0b4ddfSMike Snitzer blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY); 18494a0b4ddfSMike Snitzer blk_queue_merge_bvec(md->queue, dm_merge_bvec); 18504a0b4ddfSMike Snitzer } 18514a0b4ddfSMike Snitzer 18521da177e4SLinus Torvalds /* 18531da177e4SLinus Torvalds * Allocate and initialise a blank device with a given minor. 
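 *
 * Construction is a straight-line sequence of steps; on failure the bad_*
 * labels below unwind, in reverse order, exactly what had been set up
 * before the failing step.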
18541da177e4SLinus Torvalds */ 18552b06cfffSAlasdair G Kergon static struct mapped_device *alloc_dev(int minor) 18561da177e4SLinus Torvalds { 18571da177e4SLinus Torvalds int r; 1858cf13ab8eSFrederik Deweerdt struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL); 1859ba61fdd1SJeff Mahoney void *old_md; 18601da177e4SLinus Torvalds 18611da177e4SLinus Torvalds if (!md) { 18621da177e4SLinus Torvalds DMWARN("unable to allocate device, out of memory."); 18631da177e4SLinus Torvalds return NULL; 18641da177e4SLinus Torvalds } 18651da177e4SLinus Torvalds 186610da4f79SJeff Mahoney if (!try_module_get(THIS_MODULE)) 18676ed7ade8SMilan Broz goto bad_module_get; 186810da4f79SJeff Mahoney 18691da177e4SLinus Torvalds /* get a minor number for the dev */ 18702b06cfffSAlasdair G Kergon if (minor == DM_ANY_MINOR) 1871cf13ab8eSFrederik Deweerdt r = next_free_minor(&minor); 18722b06cfffSAlasdair G Kergon else 1873cf13ab8eSFrederik Deweerdt r = specific_minor(minor); 18741da177e4SLinus Torvalds if (r < 0) 18756ed7ade8SMilan Broz goto bad_minor; 18761da177e4SLinus Torvalds 187783d5e5b0SMikulas Patocka r = init_srcu_struct(&md->io_barrier); 187883d5e5b0SMikulas Patocka if (r < 0) 187983d5e5b0SMikulas Patocka goto bad_io_barrier; 188083d5e5b0SMikulas Patocka 1881a5664dadSMike Snitzer md->type = DM_TYPE_NONE; 1882e61290a4SDaniel Walker mutex_init(&md->suspend_lock); 1883a5664dadSMike Snitzer mutex_init(&md->type_lock); 1884022c2611SMikulas Patocka spin_lock_init(&md->deferred_lock); 18851da177e4SLinus Torvalds atomic_set(&md->holders, 1); 18865c6bd75dSAlasdair G Kergon atomic_set(&md->open_count, 0); 18871da177e4SLinus Torvalds atomic_set(&md->event_nr, 0); 18887a8c3d3bSMike Anderson atomic_set(&md->uevent_seq, 0); 18897a8c3d3bSMike Anderson INIT_LIST_HEAD(&md->uevent_list); 18907a8c3d3bSMike Anderson spin_lock_init(&md->uevent_lock); 18911da177e4SLinus Torvalds 18924a0b4ddfSMike Snitzer md->queue = blk_alloc_queue(GFP_KERNEL); 18931da177e4SLinus Torvalds if (!md->queue) 18946ed7ade8SMilan Broz goto bad_queue; 18951da177e4SLinus Torvalds 18964a0b4ddfSMike Snitzer dm_init_md_queue(md); 18979faf400fSStefan Bader 18981da177e4SLinus Torvalds md->disk = alloc_disk(1); 18991da177e4SLinus Torvalds if (!md->disk) 19006ed7ade8SMilan Broz goto bad_disk; 19011da177e4SLinus Torvalds 1902316d315bSNikanth Karthikesan atomic_set(&md->pending[0], 0); 1903316d315bSNikanth Karthikesan atomic_set(&md->pending[1], 0); 1904f0b04115SJeff Mahoney init_waitqueue_head(&md->wait); 190553d5914fSMikulas Patocka INIT_WORK(&md->work, dm_wq_work); 1906f0b04115SJeff Mahoney init_waitqueue_head(&md->eventq); 19072995fa78SMikulas Patocka init_completion(&md->kobj_holder.completion); 1908f0b04115SJeff Mahoney 19091da177e4SLinus Torvalds md->disk->major = _major; 19101da177e4SLinus Torvalds md->disk->first_minor = minor; 19111da177e4SLinus Torvalds md->disk->fops = &dm_blk_dops; 19121da177e4SLinus Torvalds md->disk->queue = md->queue; 19131da177e4SLinus Torvalds md->disk->private_data = md; 19141da177e4SLinus Torvalds sprintf(md->disk->disk_name, "dm-%d", minor); 19151da177e4SLinus Torvalds add_disk(md->disk); 19167e51f257SMike Anderson format_dev_t(md->name, MKDEV(_major, minor)); 19171da177e4SLinus Torvalds 1918670368a8STejun Heo md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0); 1919304f3f6aSMilan Broz if (!md->wq) 1920304f3f6aSMilan Broz goto bad_thread; 1921304f3f6aSMilan Broz 192232a926daSMikulas Patocka md->bdev = bdget_disk(md->disk, 0); 192332a926daSMikulas Patocka if (!md->bdev) 192432a926daSMikulas Patocka goto bad_bdev; 
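/*
 * md->flush_bio is initialised once here and then serves as the source bio
 * that __send_empty_flush() clones from for every empty REQ_FLUSH submitted
 * to this device.
 */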
192532a926daSMikulas Patocka 19266a8736d1STejun Heo bio_init(&md->flush_bio); 19276a8736d1STejun Heo md->flush_bio.bi_bdev = md->bdev; 19286a8736d1STejun Heo md->flush_bio.bi_rw = WRITE_FLUSH; 19296a8736d1STejun Heo 1930fd2ed4d2SMikulas Patocka dm_stats_init(&md->stats); 1931fd2ed4d2SMikulas Patocka 1932ba61fdd1SJeff Mahoney /* Populate the mapping, nobody knows we exist yet */ 1933f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 1934ba61fdd1SJeff Mahoney old_md = idr_replace(&_minor_idr, md, minor); 1935f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 1936ba61fdd1SJeff Mahoney 1937ba61fdd1SJeff Mahoney BUG_ON(old_md != MINOR_ALLOCED); 1938ba61fdd1SJeff Mahoney 19391da177e4SLinus Torvalds return md; 19401da177e4SLinus Torvalds 194132a926daSMikulas Patocka bad_bdev: 194232a926daSMikulas Patocka destroy_workqueue(md->wq); 1943304f3f6aSMilan Broz bad_thread: 194403022c54SZdenek Kabelac del_gendisk(md->disk); 1945304f3f6aSMilan Broz put_disk(md->disk); 19466ed7ade8SMilan Broz bad_disk: 19471312f40eSAl Viro blk_cleanup_queue(md->queue); 19486ed7ade8SMilan Broz bad_queue: 194983d5e5b0SMikulas Patocka cleanup_srcu_struct(&md->io_barrier); 195083d5e5b0SMikulas Patocka bad_io_barrier: 19511da177e4SLinus Torvalds free_minor(minor); 19526ed7ade8SMilan Broz bad_minor: 195310da4f79SJeff Mahoney module_put(THIS_MODULE); 19546ed7ade8SMilan Broz bad_module_get: 19551da177e4SLinus Torvalds kfree(md); 19561da177e4SLinus Torvalds return NULL; 19571da177e4SLinus Torvalds } 19581da177e4SLinus Torvalds 1959ae9da83fSJun'ichi Nomura static void unlock_fs(struct mapped_device *md); 1960ae9da83fSJun'ichi Nomura 19611da177e4SLinus Torvalds static void free_dev(struct mapped_device *md) 19621da177e4SLinus Torvalds { 1963f331c029STejun Heo int minor = MINOR(disk_devt(md->disk)); 196463d94e48SJun'ichi Nomura 1965ae9da83fSJun'ichi Nomura unlock_fs(md); 1966db8fef4fSMikulas Patocka bdput(md->bdev); 1967304f3f6aSMilan Broz destroy_workqueue(md->wq); 1968e6ee8c0bSKiyoshi Ueda if (md->io_pool) 19691da177e4SLinus Torvalds mempool_destroy(md->io_pool); 1970e6ee8c0bSKiyoshi Ueda if (md->bs) 19719faf400fSStefan Bader bioset_free(md->bs); 19729c47008dSMartin K. Petersen blk_integrity_unregister(md->disk); 19731da177e4SLinus Torvalds del_gendisk(md->disk); 197483d5e5b0SMikulas Patocka cleanup_srcu_struct(&md->io_barrier); 197563d94e48SJun'ichi Nomura free_minor(minor); 1976fba9f90eSJeff Mahoney 1977fba9f90eSJeff Mahoney spin_lock(&_minor_lock); 1978fba9f90eSJeff Mahoney md->disk->private_data = NULL; 1979fba9f90eSJeff Mahoney spin_unlock(&_minor_lock); 1980fba9f90eSJeff Mahoney 19811da177e4SLinus Torvalds put_disk(md->disk); 19821312f40eSAl Viro blk_cleanup_queue(md->queue); 1983fd2ed4d2SMikulas Patocka dm_stats_cleanup(&md->stats); 198410da4f79SJeff Mahoney module_put(THIS_MODULE); 19851da177e4SLinus Torvalds kfree(md); 19861da177e4SLinus Torvalds } 19871da177e4SLinus Torvalds 1988e6ee8c0bSKiyoshi Ueda static void __bind_mempools(struct mapped_device *md, struct dm_table *t) 1989e6ee8c0bSKiyoshi Ueda { 1990c0820cf5SMikulas Patocka struct dm_md_mempools *p = dm_table_get_md_mempools(t); 1991e6ee8c0bSKiyoshi Ueda 19925f015204SJun'ichi Nomura if (md->io_pool && md->bs) { 199316245bdcSJun'ichi Nomura /* The md already has necessary mempools. */ 199416245bdcSJun'ichi Nomura if (dm_table_get_type(t) == DM_TYPE_BIO_BASED) { 1995c0820cf5SMikulas Patocka /* 199616245bdcSJun'ichi Nomura * Reload bioset because front_pad may have changed 199716245bdcSJun'ichi Nomura * because a different table was loaded. 
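 * (front_pad is sized from the largest per_bio_data_size any target in
 * the table requested, so two tables can legitimately need differently
 * sized biosets.)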
1998c0820cf5SMikulas Patocka */ 1999c0820cf5SMikulas Patocka bioset_free(md->bs); 2000c0820cf5SMikulas Patocka md->bs = p->bs; 2001c0820cf5SMikulas Patocka p->bs = NULL; 200216245bdcSJun'ichi Nomura } else if (dm_table_get_type(t) == DM_TYPE_REQUEST_BASED) { 200316245bdcSJun'ichi Nomura /* 200416245bdcSJun'ichi Nomura * There's no need to reload with request-based dm 200516245bdcSJun'ichi Nomura * because the size of front_pad doesn't change. 200616245bdcSJun'ichi Nomura * Note for future: If you are to reload bioset, 200716245bdcSJun'ichi Nomura * prep-ed requests in the queue may refer 200816245bdcSJun'ichi Nomura * to bio from the old bioset, so you must walk 200916245bdcSJun'ichi Nomura * through the queue to unprep. 201016245bdcSJun'ichi Nomura */ 201116245bdcSJun'ichi Nomura } 2012e6ee8c0bSKiyoshi Ueda goto out; 2013c0820cf5SMikulas Patocka } 2014e6ee8c0bSKiyoshi Ueda 20155f015204SJun'ichi Nomura BUG_ON(!p || md->io_pool || md->bs); 2016e6ee8c0bSKiyoshi Ueda 2017e6ee8c0bSKiyoshi Ueda md->io_pool = p->io_pool; 2018e6ee8c0bSKiyoshi Ueda p->io_pool = NULL; 2019e6ee8c0bSKiyoshi Ueda md->bs = p->bs; 2020e6ee8c0bSKiyoshi Ueda p->bs = NULL; 2021e6ee8c0bSKiyoshi Ueda 2022e6ee8c0bSKiyoshi Ueda out: 2023e6ee8c0bSKiyoshi Ueda /* mempool bind completed, now no need any mempools in the table */ 2024e6ee8c0bSKiyoshi Ueda dm_table_free_md_mempools(t); 2025e6ee8c0bSKiyoshi Ueda } 2026e6ee8c0bSKiyoshi Ueda 20271da177e4SLinus Torvalds /* 20281da177e4SLinus Torvalds * Bind a table to the device. 20291da177e4SLinus Torvalds */ 20301da177e4SLinus Torvalds static void event_callback(void *context) 20311da177e4SLinus Torvalds { 20327a8c3d3bSMike Anderson unsigned long flags; 20337a8c3d3bSMike Anderson LIST_HEAD(uevents); 20341da177e4SLinus Torvalds struct mapped_device *md = (struct mapped_device *) context; 20351da177e4SLinus Torvalds 20367a8c3d3bSMike Anderson spin_lock_irqsave(&md->uevent_lock, flags); 20377a8c3d3bSMike Anderson list_splice_init(&md->uevent_list, &uevents); 20387a8c3d3bSMike Anderson spin_unlock_irqrestore(&md->uevent_lock, flags); 20397a8c3d3bSMike Anderson 2040ed9e1982STejun Heo dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); 20417a8c3d3bSMike Anderson 20421da177e4SLinus Torvalds atomic_inc(&md->event_nr); 20431da177e4SLinus Torvalds wake_up(&md->eventq); 20441da177e4SLinus Torvalds } 20451da177e4SLinus Torvalds 2046c217649bSMike Snitzer /* 2047c217649bSMike Snitzer * Protected by md->suspend_lock obtained by dm_swap_table(). 2048c217649bSMike Snitzer */ 20494e90188bSAlasdair G Kergon static void __set_size(struct mapped_device *md, sector_t size) 20501da177e4SLinus Torvalds { 20514e90188bSAlasdair G Kergon set_capacity(md->disk, size); 20521da177e4SLinus Torvalds 2053db8fef4fSMikulas Patocka i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT); 20541da177e4SLinus Torvalds } 20551da177e4SLinus Torvalds 2056042d2a9bSAlasdair G Kergon /* 2057d5b9dd04SMikulas Patocka * Return 1 if the queue has a compulsory merge_bvec_fn function. 2058d5b9dd04SMikulas Patocka * 2059d5b9dd04SMikulas Patocka * If this function returns 0, then the device is either a non-dm 2060d5b9dd04SMikulas Patocka * device without a merge_bvec_fn, or it is a dm device that is 2061d5b9dd04SMikulas Patocka * able to split any bios it receives that are too big. 
2062d5b9dd04SMikulas Patocka */ 2063d5b9dd04SMikulas Patocka int dm_queue_merge_is_compulsory(struct request_queue *q) 2064d5b9dd04SMikulas Patocka { 2065d5b9dd04SMikulas Patocka struct mapped_device *dev_md; 2066d5b9dd04SMikulas Patocka 2067d5b9dd04SMikulas Patocka if (!q->merge_bvec_fn) 2068d5b9dd04SMikulas Patocka return 0; 2069d5b9dd04SMikulas Patocka 2070d5b9dd04SMikulas Patocka if (q->make_request_fn == dm_request) { 2071d5b9dd04SMikulas Patocka dev_md = q->queuedata; 2072d5b9dd04SMikulas Patocka if (test_bit(DMF_MERGE_IS_OPTIONAL, &dev_md->flags)) 2073d5b9dd04SMikulas Patocka return 0; 2074d5b9dd04SMikulas Patocka } 2075d5b9dd04SMikulas Patocka 2076d5b9dd04SMikulas Patocka return 1; 2077d5b9dd04SMikulas Patocka } 2078d5b9dd04SMikulas Patocka 2079d5b9dd04SMikulas Patocka static int dm_device_merge_is_compulsory(struct dm_target *ti, 2080d5b9dd04SMikulas Patocka struct dm_dev *dev, sector_t start, 2081d5b9dd04SMikulas Patocka sector_t len, void *data) 2082d5b9dd04SMikulas Patocka { 2083d5b9dd04SMikulas Patocka struct block_device *bdev = dev->bdev; 2084d5b9dd04SMikulas Patocka struct request_queue *q = bdev_get_queue(bdev); 2085d5b9dd04SMikulas Patocka 2086d5b9dd04SMikulas Patocka return dm_queue_merge_is_compulsory(q); 2087d5b9dd04SMikulas Patocka } 2088d5b9dd04SMikulas Patocka 2089d5b9dd04SMikulas Patocka /* 2090d5b9dd04SMikulas Patocka * Return 1 if it is acceptable to ignore merge_bvec_fn based 2091d5b9dd04SMikulas Patocka * on the properties of the underlying devices. 2092d5b9dd04SMikulas Patocka */ 2093d5b9dd04SMikulas Patocka static int dm_table_merge_is_optional(struct dm_table *table) 2094d5b9dd04SMikulas Patocka { 2095d5b9dd04SMikulas Patocka unsigned i = 0; 2096d5b9dd04SMikulas Patocka struct dm_target *ti; 2097d5b9dd04SMikulas Patocka 2098d5b9dd04SMikulas Patocka while (i < dm_table_get_num_targets(table)) { 2099d5b9dd04SMikulas Patocka ti = dm_table_get_target(table, i++); 2100d5b9dd04SMikulas Patocka 2101d5b9dd04SMikulas Patocka if (ti->type->iterate_devices && 2102d5b9dd04SMikulas Patocka ti->type->iterate_devices(ti, dm_device_merge_is_compulsory, NULL)) 2103d5b9dd04SMikulas Patocka return 0; 2104d5b9dd04SMikulas Patocka } 2105d5b9dd04SMikulas Patocka 2106d5b9dd04SMikulas Patocka return 1; 2107d5b9dd04SMikulas Patocka } 2108d5b9dd04SMikulas Patocka 2109d5b9dd04SMikulas Patocka /* 2110042d2a9bSAlasdair G Kergon * Returns old map, which caller must destroy. 2111042d2a9bSAlasdair G Kergon */ 2112042d2a9bSAlasdair G Kergon static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, 2113754c5fc7SMike Snitzer struct queue_limits *limits) 21141da177e4SLinus Torvalds { 2115042d2a9bSAlasdair G Kergon struct dm_table *old_map; 2116165125e1SJens Axboe struct request_queue *q = md->queue; 21171da177e4SLinus Torvalds sector_t size; 2118d5b9dd04SMikulas Patocka int merge_is_optional; 21191da177e4SLinus Torvalds 21201da177e4SLinus Torvalds size = dm_table_get_size(t); 21213ac51e74SDarrick J. Wong 21223ac51e74SDarrick J. Wong /* 21233ac51e74SDarrick J. Wong * Wipe any geometry if the size of the table changed. 21243ac51e74SDarrick J. Wong */ 2125fd2ed4d2SMikulas Patocka if (size != dm_get_size(md)) 21263ac51e74SDarrick J. Wong memset(&md->geometry, 0, sizeof(md->geometry)); 21273ac51e74SDarrick J. 
Wong 21284e90188bSAlasdair G Kergon __set_size(md, size); 21291da177e4SLinus Torvalds 2130cf222b37SAlasdair G Kergon dm_table_event_callback(t, event_callback, md); 21312ca3310eSAlasdair G Kergon 2132e6ee8c0bSKiyoshi Ueda /* 2133e6ee8c0bSKiyoshi Ueda * The queue hasn't been stopped yet, if the old table type wasn't 2134e6ee8c0bSKiyoshi Ueda * for request-based during suspension. So stop it to prevent 2135e6ee8c0bSKiyoshi Ueda * I/O mapping before resume. 2136e6ee8c0bSKiyoshi Ueda * This must be done before setting the queue restrictions, 2137e6ee8c0bSKiyoshi Ueda * because request-based dm may be run just after the setting. 2138e6ee8c0bSKiyoshi Ueda */ 2139e6ee8c0bSKiyoshi Ueda if (dm_table_request_based(t) && !blk_queue_stopped(q)) 2140e6ee8c0bSKiyoshi Ueda stop_queue(q); 2141e6ee8c0bSKiyoshi Ueda 2142e6ee8c0bSKiyoshi Ueda __bind_mempools(md, t); 2143e6ee8c0bSKiyoshi Ueda 2144d5b9dd04SMikulas Patocka merge_is_optional = dm_table_merge_is_optional(t); 2145d5b9dd04SMikulas Patocka 2146042d2a9bSAlasdair G Kergon old_map = md->map; 214783d5e5b0SMikulas Patocka rcu_assign_pointer(md->map, t); 214836a0456fSAlasdair G Kergon md->immutable_target_type = dm_table_get_immutable_target_type(t); 214936a0456fSAlasdair G Kergon 2150754c5fc7SMike Snitzer dm_table_set_restrictions(t, q, limits); 2151d5b9dd04SMikulas Patocka if (merge_is_optional) 2152d5b9dd04SMikulas Patocka set_bit(DMF_MERGE_IS_OPTIONAL, &md->flags); 2153d5b9dd04SMikulas Patocka else 2154d5b9dd04SMikulas Patocka clear_bit(DMF_MERGE_IS_OPTIONAL, &md->flags); 215583d5e5b0SMikulas Patocka dm_sync_table(md); 21562ca3310eSAlasdair G Kergon 2157042d2a9bSAlasdair G Kergon return old_map; 21581da177e4SLinus Torvalds } 21591da177e4SLinus Torvalds 2160a7940155SAlasdair G Kergon /* 2161a7940155SAlasdair G Kergon * Returns unbound table for the caller to free. 2162a7940155SAlasdair G Kergon */ 2163a7940155SAlasdair G Kergon static struct dm_table *__unbind(struct mapped_device *md) 21641da177e4SLinus Torvalds { 21651da177e4SLinus Torvalds struct dm_table *map = md->map; 21661da177e4SLinus Torvalds 21671da177e4SLinus Torvalds if (!map) 2168a7940155SAlasdair G Kergon return NULL; 21691da177e4SLinus Torvalds 21701da177e4SLinus Torvalds dm_table_event_callback(map, NULL, NULL); 21719cdb8520SMonam Agarwal RCU_INIT_POINTER(md->map, NULL); 217283d5e5b0SMikulas Patocka dm_sync_table(md); 2173a7940155SAlasdair G Kergon 2174a7940155SAlasdair G Kergon return map; 21751da177e4SLinus Torvalds } 21761da177e4SLinus Torvalds 21771da177e4SLinus Torvalds /* 21781da177e4SLinus Torvalds * Constructor for a new device. 21791da177e4SLinus Torvalds */ 21802b06cfffSAlasdair G Kergon int dm_create(int minor, struct mapped_device **result) 21811da177e4SLinus Torvalds { 21821da177e4SLinus Torvalds struct mapped_device *md; 21831da177e4SLinus Torvalds 21842b06cfffSAlasdair G Kergon md = alloc_dev(minor); 21851da177e4SLinus Torvalds if (!md) 21861da177e4SLinus Torvalds return -ENXIO; 21871da177e4SLinus Torvalds 2188784aae73SMilan Broz dm_sysfs_init(md); 2189784aae73SMilan Broz 21901da177e4SLinus Torvalds *result = md; 21911da177e4SLinus Torvalds return 0; 21921da177e4SLinus Torvalds } 21931da177e4SLinus Torvalds 2194a5664dadSMike Snitzer /* 2195a5664dadSMike Snitzer * Functions to manage md->type. 2196a5664dadSMike Snitzer * All are required to hold md->type_lock. 
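 *
 * The usual caller pattern (cf. table_load() in dm-ioctl.c) is a sketch
 * like the following, with the error branch shown as pseudocode:
 *
 *	dm_lock_md_type(md);
 *	if (dm_get_md_type(md) == DM_TYPE_NONE)
 *		dm_set_md_type(md, dm_table_get_type(t));
 *	else if (dm_get_md_type(md) != dm_table_get_type(t))
 *		reject the table as inconsistent;
 *	dm_unlock_md_type(md);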
2197a5664dadSMike Snitzer */ 2198a5664dadSMike Snitzer void dm_lock_md_type(struct mapped_device *md) 2199a5664dadSMike Snitzer { 2200a5664dadSMike Snitzer mutex_lock(&md->type_lock); 2201a5664dadSMike Snitzer } 2202a5664dadSMike Snitzer 2203a5664dadSMike Snitzer void dm_unlock_md_type(struct mapped_device *md) 2204a5664dadSMike Snitzer { 2205a5664dadSMike Snitzer mutex_unlock(&md->type_lock); 2206a5664dadSMike Snitzer } 2207a5664dadSMike Snitzer 2208a5664dadSMike Snitzer void dm_set_md_type(struct mapped_device *md, unsigned type) 2209a5664dadSMike Snitzer { 221000c4fc3bSMike Snitzer BUG_ON(!mutex_is_locked(&md->type_lock)); 2211a5664dadSMike Snitzer md->type = type; 2212a5664dadSMike Snitzer } 2213a5664dadSMike Snitzer 2214a5664dadSMike Snitzer unsigned dm_get_md_type(struct mapped_device *md) 2215a5664dadSMike Snitzer { 221600c4fc3bSMike Snitzer BUG_ON(!mutex_is_locked(&md->type_lock)); 2217a5664dadSMike Snitzer return md->type; 2218a5664dadSMike Snitzer } 2219a5664dadSMike Snitzer 222036a0456fSAlasdair G Kergon struct target_type *dm_get_immutable_target_type(struct mapped_device *md) 222136a0456fSAlasdair G Kergon { 222236a0456fSAlasdair G Kergon return md->immutable_target_type; 222336a0456fSAlasdair G Kergon } 222436a0456fSAlasdair G Kergon 22254a0b4ddfSMike Snitzer /* 2226f84cb8a4SMike Snitzer * The queue_limits are only valid as long as you have a reference 2227f84cb8a4SMike Snitzer * count on 'md'. 2228f84cb8a4SMike Snitzer */ 2229f84cb8a4SMike Snitzer struct queue_limits *dm_get_queue_limits(struct mapped_device *md) 2230f84cb8a4SMike Snitzer { 2231f84cb8a4SMike Snitzer BUG_ON(!atomic_read(&md->holders)); 2232f84cb8a4SMike Snitzer return &md->queue->limits; 2233f84cb8a4SMike Snitzer } 2234f84cb8a4SMike Snitzer EXPORT_SYMBOL_GPL(dm_get_queue_limits); 2235f84cb8a4SMike Snitzer 2236f84cb8a4SMike Snitzer /* 22374a0b4ddfSMike Snitzer * Fully initialize a request-based queue (->elevator, ->request_fn, etc). 
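 *
 * The queue was allocated bio-based in alloc_dev(); this upgrades it in
 * place via blk_init_allocated_queue() and runs at most once, with
 * md->queue->elevator doubling as the already-initialized marker.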
22384a0b4ddfSMike Snitzer */ 22394a0b4ddfSMike Snitzer static int dm_init_request_based_queue(struct mapped_device *md) 22404a0b4ddfSMike Snitzer { 22414a0b4ddfSMike Snitzer struct request_queue *q = NULL; 22424a0b4ddfSMike Snitzer 22434a0b4ddfSMike Snitzer if (md->queue->elevator) 22444a0b4ddfSMike Snitzer return 1; 22454a0b4ddfSMike Snitzer 22464a0b4ddfSMike Snitzer /* Fully initialize the queue */ 22474a0b4ddfSMike Snitzer q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL); 22484a0b4ddfSMike Snitzer if (!q) 22494a0b4ddfSMike Snitzer return 0; 22504a0b4ddfSMike Snitzer 22514a0b4ddfSMike Snitzer md->queue = q; 22524a0b4ddfSMike Snitzer dm_init_md_queue(md); 22534a0b4ddfSMike Snitzer blk_queue_softirq_done(md->queue, dm_softirq_done); 22544a0b4ddfSMike Snitzer blk_queue_prep_rq(md->queue, dm_prep_fn); 22554a0b4ddfSMike Snitzer blk_queue_lld_busy(md->queue, dm_lld_busy); 22564a0b4ddfSMike Snitzer 22574a0b4ddfSMike Snitzer elv_register_queue(md->queue); 22584a0b4ddfSMike Snitzer 22594a0b4ddfSMike Snitzer return 1; 22604a0b4ddfSMike Snitzer } 22614a0b4ddfSMike Snitzer 22624a0b4ddfSMike Snitzer /* 22634a0b4ddfSMike Snitzer * Setup the DM device's queue based on md's type 22644a0b4ddfSMike Snitzer */ 22654a0b4ddfSMike Snitzer int dm_setup_md_queue(struct mapped_device *md) 22664a0b4ddfSMike Snitzer { 22674a0b4ddfSMike Snitzer if ((dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) && 22684a0b4ddfSMike Snitzer !dm_init_request_based_queue(md)) { 22694a0b4ddfSMike Snitzer DMWARN("Cannot initialize queue for request-based mapped device"); 22704a0b4ddfSMike Snitzer return -EINVAL; 22714a0b4ddfSMike Snitzer } 22724a0b4ddfSMike Snitzer 22734a0b4ddfSMike Snitzer return 0; 22744a0b4ddfSMike Snitzer } 22754a0b4ddfSMike Snitzer 2276637842cfSDavid Teigland static struct mapped_device *dm_find_md(dev_t dev) 22771da177e4SLinus Torvalds { 22781da177e4SLinus Torvalds struct mapped_device *md; 22791da177e4SLinus Torvalds unsigned minor = MINOR(dev); 22801da177e4SLinus Torvalds 22811da177e4SLinus Torvalds if (MAJOR(dev) != _major || minor >= (1 << MINORBITS)) 22821da177e4SLinus Torvalds return NULL; 22831da177e4SLinus Torvalds 2284f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 22851da177e4SLinus Torvalds 22861da177e4SLinus Torvalds md = idr_find(&_minor_idr, minor); 2287fba9f90eSJeff Mahoney if (md && (md == MINOR_ALLOCED || 2288f331c029STejun Heo (MINOR(disk_devt(dm_disk(md))) != minor) || 2289abdc568bSKiyoshi Ueda dm_deleting_md(md) || 2290fba9f90eSJeff Mahoney test_bit(DMF_FREEING, &md->flags))) { 2291637842cfSDavid Teigland md = NULL; 2292fba9f90eSJeff Mahoney goto out; 2293fba9f90eSJeff Mahoney } 22941da177e4SLinus Torvalds 2295fba9f90eSJeff Mahoney out: 2296f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 22971da177e4SLinus Torvalds 2298637842cfSDavid Teigland return md; 2299637842cfSDavid Teigland } 2300637842cfSDavid Teigland 2301d229a958SDavid Teigland struct mapped_device *dm_get_md(dev_t dev) 2302d229a958SDavid Teigland { 2303d229a958SDavid Teigland struct mapped_device *md = dm_find_md(dev); 2304d229a958SDavid Teigland 2305d229a958SDavid Teigland if (md) 2306d229a958SDavid Teigland dm_get(md); 2307d229a958SDavid Teigland 2308d229a958SDavid Teigland return md; 2309d229a958SDavid Teigland } 23103cf2e4baSAlasdair G Kergon EXPORT_SYMBOL_GPL(dm_get_md); 2311d229a958SDavid Teigland 23129ade92a9SAlasdair G Kergon void *dm_get_mdptr(struct mapped_device *md) 2313637842cfSDavid Teigland { 23149ade92a9SAlasdair G Kergon return md->interface_ptr; 23151da177e4SLinus Torvalds } 23161da177e4SLinus Torvalds 
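/*
 * Minimal lookup/use/release sketch for the reference interface above
 * (illustrative only, not in-tree code; 'dev' stands for some dev_t the
 * caller already holds):
 *
 *	struct mapped_device *md = dm_get_md(dev);
 *
 *	if (md) {
 *		void *ptr = dm_get_mdptr(md);
 *		...
 *		dm_put(md);
 *	}
 */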
23171da177e4SLinus Torvalds void dm_set_mdptr(struct mapped_device *md, void *ptr) 23181da177e4SLinus Torvalds { 23191da177e4SLinus Torvalds md->interface_ptr = ptr; 23201da177e4SLinus Torvalds } 23211da177e4SLinus Torvalds 23221da177e4SLinus Torvalds void dm_get(struct mapped_device *md) 23231da177e4SLinus Torvalds { 23241da177e4SLinus Torvalds atomic_inc(&md->holders); 23253f77316dSKiyoshi Ueda BUG_ON(test_bit(DMF_FREEING, &md->flags)); 23261da177e4SLinus Torvalds } 23271da177e4SLinus Torvalds 232872d94861SAlasdair G Kergon const char *dm_device_name(struct mapped_device *md) 232972d94861SAlasdair G Kergon { 233072d94861SAlasdair G Kergon return md->name; 233172d94861SAlasdair G Kergon } 233272d94861SAlasdair G Kergon EXPORT_SYMBOL_GPL(dm_device_name); 233372d94861SAlasdair G Kergon 23343f77316dSKiyoshi Ueda static void __dm_destroy(struct mapped_device *md, bool wait) 23351da177e4SLinus Torvalds { 23361134e5aeSMike Anderson struct dm_table *map; 233783d5e5b0SMikulas Patocka int srcu_idx; 23381da177e4SLinus Torvalds 23393f77316dSKiyoshi Ueda might_sleep(); 2340fba9f90eSJeff Mahoney 23413f77316dSKiyoshi Ueda spin_lock(&_minor_lock); 234283d5e5b0SMikulas Patocka map = dm_get_live_table(md, &srcu_idx); 23433f77316dSKiyoshi Ueda idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); 2344fba9f90eSJeff Mahoney set_bit(DMF_FREEING, &md->flags); 2345f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 23463f77316dSKiyoshi Ueda 23474f186f8bSKiyoshi Ueda if (!dm_suspended_md(md)) { 23481da177e4SLinus Torvalds dm_table_presuspend_targets(map); 23491da177e4SLinus Torvalds dm_table_postsuspend_targets(map); 23501da177e4SLinus Torvalds } 23513f77316dSKiyoshi Ueda 235283d5e5b0SMikulas Patocka /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ 235383d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 235483d5e5b0SMikulas Patocka 23553f77316dSKiyoshi Ueda /* 23563f77316dSKiyoshi Ueda * Rare, but there may be I/O requests still going to complete, 23573f77316dSKiyoshi Ueda * for example. Wait for all references to disappear. 23583f77316dSKiyoshi Ueda * No one should increment the reference count of the mapped_device, 23593f77316dSKiyoshi Ueda * after the mapped_device state becomes DMF_FREEING. 23603f77316dSKiyoshi Ueda */ 23613f77316dSKiyoshi Ueda if (wait) 23623f77316dSKiyoshi Ueda while (atomic_read(&md->holders)) 23633f77316dSKiyoshi Ueda msleep(1); 23643f77316dSKiyoshi Ueda else if (atomic_read(&md->holders)) 23653f77316dSKiyoshi Ueda DMWARN("%s: Forcibly removing mapped_device still in use! 
(%d users)", 23663f77316dSKiyoshi Ueda dm_device_name(md), atomic_read(&md->holders)); 23673f77316dSKiyoshi Ueda 2368784aae73SMilan Broz dm_sysfs_exit(md); 2369a7940155SAlasdair G Kergon dm_table_destroy(__unbind(md)); 23701da177e4SLinus Torvalds free_dev(md); 23711da177e4SLinus Torvalds } 23723f77316dSKiyoshi Ueda 23733f77316dSKiyoshi Ueda void dm_destroy(struct mapped_device *md) 23743f77316dSKiyoshi Ueda { 23753f77316dSKiyoshi Ueda __dm_destroy(md, true); 23763f77316dSKiyoshi Ueda } 23773f77316dSKiyoshi Ueda 23783f77316dSKiyoshi Ueda void dm_destroy_immediate(struct mapped_device *md) 23793f77316dSKiyoshi Ueda { 23803f77316dSKiyoshi Ueda __dm_destroy(md, false); 23813f77316dSKiyoshi Ueda } 23823f77316dSKiyoshi Ueda 23833f77316dSKiyoshi Ueda void dm_put(struct mapped_device *md) 23843f77316dSKiyoshi Ueda { 23853f77316dSKiyoshi Ueda atomic_dec(&md->holders); 23861da177e4SLinus Torvalds } 238779eb885cSEdward Goggin EXPORT_SYMBOL_GPL(dm_put); 23881da177e4SLinus Torvalds 2389401600dfSMikulas Patocka static int dm_wait_for_completion(struct mapped_device *md, int interruptible) 239046125c1cSMilan Broz { 239146125c1cSMilan Broz int r = 0; 2392b44ebeb0SMikulas Patocka DECLARE_WAITQUEUE(wait, current); 2393b44ebeb0SMikulas Patocka 2394b44ebeb0SMikulas Patocka add_wait_queue(&md->wait, &wait); 239546125c1cSMilan Broz 239646125c1cSMilan Broz while (1) { 2397401600dfSMikulas Patocka set_current_state(interruptible); 239846125c1cSMilan Broz 2399b4324feeSKiyoshi Ueda if (!md_in_flight(md)) 240046125c1cSMilan Broz break; 240146125c1cSMilan Broz 2402401600dfSMikulas Patocka if (interruptible == TASK_INTERRUPTIBLE && 2403401600dfSMikulas Patocka signal_pending(current)) { 240446125c1cSMilan Broz r = -EINTR; 240546125c1cSMilan Broz break; 240646125c1cSMilan Broz } 240746125c1cSMilan Broz 240846125c1cSMilan Broz io_schedule(); 240946125c1cSMilan Broz } 241046125c1cSMilan Broz set_current_state(TASK_RUNNING); 241146125c1cSMilan Broz 2412b44ebeb0SMikulas Patocka remove_wait_queue(&md->wait, &wait); 2413b44ebeb0SMikulas Patocka 241446125c1cSMilan Broz return r; 241546125c1cSMilan Broz } 241646125c1cSMilan Broz 24171da177e4SLinus Torvalds /* 24181da177e4SLinus Torvalds * Process the deferred bios 24191da177e4SLinus Torvalds */ 2420ef208587SMikulas Patocka static void dm_wq_work(struct work_struct *work) 24211da177e4SLinus Torvalds { 2422ef208587SMikulas Patocka struct mapped_device *md = container_of(work, struct mapped_device, 2423ef208587SMikulas Patocka work); 24246d6f10dfSMilan Broz struct bio *c; 242583d5e5b0SMikulas Patocka int srcu_idx; 242683d5e5b0SMikulas Patocka struct dm_table *map; 24271da177e4SLinus Torvalds 242883d5e5b0SMikulas Patocka map = dm_get_live_table(md, &srcu_idx); 2429ef208587SMikulas Patocka 24303b00b203SMikulas Patocka while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 2431022c2611SMikulas Patocka spin_lock_irq(&md->deferred_lock); 2432022c2611SMikulas Patocka c = bio_list_pop(&md->deferred); 2433022c2611SMikulas Patocka spin_unlock_irq(&md->deferred_lock); 2434022c2611SMikulas Patocka 24356a8736d1STejun Heo if (!c) 2436df12ee99SAlasdair G Kergon break; 243773d410c0SMilan Broz 2438e6ee8c0bSKiyoshi Ueda if (dm_request_based(md)) 2439e6ee8c0bSKiyoshi Ueda generic_make_request(c); 2440af7e466aSMikulas Patocka else 244183d5e5b0SMikulas Patocka __split_and_process_bio(md, map, c); 2442e6ee8c0bSKiyoshi Ueda } 24433b00b203SMikulas Patocka 244483d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 24451da177e4SLinus Torvalds } 24461da177e4SLinus Torvalds 24479a1fb464SMikulas 
24541da177e4SLinus Torvalds /*
2455042d2a9bSAlasdair G Kergon * Swap in a new table, returning the old one for the caller to destroy.
24561da177e4SLinus Torvalds */
2457042d2a9bSAlasdair G Kergon struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
24581da177e4SLinus Torvalds {
245987eb5b21SMike Christie struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
2460754c5fc7SMike Snitzer struct queue_limits limits;
2461042d2a9bSAlasdair G Kergon int r;
24621da177e4SLinus Torvalds 
2463e61290a4SDaniel Walker mutex_lock(&md->suspend_lock);
24641da177e4SLinus Torvalds 
24651da177e4SLinus Torvalds /* device must be suspended */
24664f186f8bSKiyoshi Ueda if (!dm_suspended_md(md))
246793c534aeSAlasdair G Kergon goto out;
24681da177e4SLinus Torvalds 
24693ae70656SMike Snitzer /*
24703ae70656SMike Snitzer * If the new table has no data devices, retain the existing limits.
24713ae70656SMike Snitzer * This helps multipath with queue_if_no_path: if all paths disappear,
24723ae70656SMike Snitzer * new I/O is queued based on these limits, and the queued I/O can
24733ae70656SMike Snitzer * still be issued when some paths reappear.
24743ae70656SMike Snitzer */
24753ae70656SMike Snitzer if (dm_table_has_no_data_devices(table)) {
247683d5e5b0SMikulas Patocka live_map = dm_get_live_table_fast(md);
24773ae70656SMike Snitzer if (live_map)
24783ae70656SMike Snitzer limits = md->queue->limits;
247983d5e5b0SMikulas Patocka dm_put_live_table_fast(md);
24803ae70656SMike Snitzer }
24813ae70656SMike Snitzer 
248287eb5b21SMike Christie if (!live_map) {
2483754c5fc7SMike Snitzer r = dm_calculate_queue_limits(table, &limits);
2484042d2a9bSAlasdair G Kergon if (r) {
2485042d2a9bSAlasdair G Kergon map = ERR_PTR(r);
2486754c5fc7SMike Snitzer goto out;
2487042d2a9bSAlasdair G Kergon }
248887eb5b21SMike Christie }
2489754c5fc7SMike Snitzer 
2490042d2a9bSAlasdair G Kergon map = __bind(md, table, &limits);
24911da177e4SLinus Torvalds 
249293c534aeSAlasdair G Kergon out:
2493e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock);
2494042d2a9bSAlasdair G Kergon return map;
24951da177e4SLinus Torvalds }
24961da177e4SLinus Torvalds 
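/*
 * A sketch of the calling sequence around dm_swap_table(), in the style
 * of the dm-ioctl layer; error unwinding is abbreviated and the function
 * name is hypothetical.
 */
#if 0
static int table_swap_sketch(struct mapped_device *md, struct dm_table *new_table)
{
	struct dm_table *old_map;
	int r;

	r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);	/* device must be suspended */
	if (r)
		return r;

	old_map = dm_swap_table(md, new_table);
	if (IS_ERR(old_map))
		return PTR_ERR(old_map);
	if (old_map)
		dm_table_destroy(old_map);	/* the old table is ours to destroy */

	return dm_resume(md);
}
#endif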
24971da177e4SLinus Torvalds /*
24981da177e4SLinus Torvalds * Functions to lock and unlock any filesystem running on the
24991da177e4SLinus Torvalds * device.
25001da177e4SLinus Torvalds */
25012ca3310eSAlasdair G Kergon static int lock_fs(struct mapped_device *md)
25021da177e4SLinus Torvalds {
2503e39e2e95SAlasdair G Kergon int r;
25041da177e4SLinus Torvalds 
25051da177e4SLinus Torvalds WARN_ON(md->frozen_sb);
2506dfbe03f6SAlasdair G Kergon 
2507db8fef4fSMikulas Patocka md->frozen_sb = freeze_bdev(md->bdev);
2508dfbe03f6SAlasdair G Kergon if (IS_ERR(md->frozen_sb)) {
2509cf222b37SAlasdair G Kergon r = PTR_ERR(md->frozen_sb);
2510e39e2e95SAlasdair G Kergon md->frozen_sb = NULL;
2511e39e2e95SAlasdair G Kergon return r;
2512dfbe03f6SAlasdair G Kergon }
2513dfbe03f6SAlasdair G Kergon 
2514aa8d7c2fSAlasdair G Kergon set_bit(DMF_FROZEN, &md->flags);
2515aa8d7c2fSAlasdair G Kergon 
25161da177e4SLinus Torvalds return 0;
25171da177e4SLinus Torvalds }
25181da177e4SLinus Torvalds 
25192ca3310eSAlasdair G Kergon static void unlock_fs(struct mapped_device *md)
25201da177e4SLinus Torvalds {
2521aa8d7c2fSAlasdair G Kergon if (!test_bit(DMF_FROZEN, &md->flags))
2522aa8d7c2fSAlasdair G Kergon return;
2523aa8d7c2fSAlasdair G Kergon 
2524db8fef4fSMikulas Patocka thaw_bdev(md->bdev, md->frozen_sb);
25251da177e4SLinus Torvalds md->frozen_sb = NULL;
2526aa8d7c2fSAlasdair G Kergon clear_bit(DMF_FROZEN, &md->flags);
25271da177e4SLinus Torvalds }
25281da177e4SLinus Torvalds 
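/*
 * The freeze/thaw pairing that lock_fs()/unlock_fs() wrap, shown against
 * a bare block_device; a sketch only, with a hypothetical caller.
 */
#if 0
static int freeze_thaw_sketch(struct block_device *bdev)
{
	struct super_block *sb = freeze_bdev(bdev);	/* flushes and blocks writes */

	if (IS_ERR(sb))
		return PTR_ERR(sb);

	/* ... any filesystem on bdev is now quiescent ... */

	return thaw_bdev(bdev, sb);	/* sb may be NULL if no fs was mounted */
}
#endif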
25291da177e4SLinus Torvalds /*
25301da177e4SLinus Torvalds * We need to be able to change a mapping table under a mounted
25311da177e4SLinus Torvalds * filesystem. For example we might want to move some data in
25321da177e4SLinus Torvalds * the background. Before the table can be swapped with
25331da177e4SLinus Torvalds * dm_bind_table, dm_suspend must be called to flush any in
25341da177e4SLinus Torvalds * flight bios and ensure that any further I/O gets deferred.
25351da177e4SLinus Torvalds */
2536cec47e3dSKiyoshi Ueda /*
2537cec47e3dSKiyoshi Ueda * Suspend mechanism in request-based dm.
2538cec47e3dSKiyoshi Ueda *
25399f518b27SKiyoshi Ueda * 1. Flush all I/Os by lock_fs() if needed.
25409f518b27SKiyoshi Ueda * 2. Stop dispatching any I/O by stopping the request_queue.
25419f518b27SKiyoshi Ueda * 3. Wait for all in-flight I/Os to be completed or requeued.
2542cec47e3dSKiyoshi Ueda *
25439f518b27SKiyoshi Ueda * To abort suspend, start the request_queue.
2544cec47e3dSKiyoshi Ueda */
2545a3d77d35SKiyoshi Ueda int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
25461da177e4SLinus Torvalds {
25472ca3310eSAlasdair G Kergon struct dm_table *map = NULL;
254846125c1cSMilan Broz int r = 0;
2549a3d77d35SKiyoshi Ueda int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
25502e93ccc1SKiyoshi Ueda int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;
25511da177e4SLinus Torvalds 
2552e61290a4SDaniel Walker mutex_lock(&md->suspend_lock);
25532ca3310eSAlasdair G Kergon 
25544f186f8bSKiyoshi Ueda if (dm_suspended_md(md)) {
255573d410c0SMilan Broz r = -EINVAL;
2556d287483dSAlasdair G Kergon goto out_unlock;
255773d410c0SMilan Broz }
25581da177e4SLinus Torvalds 
255983d5e5b0SMikulas Patocka map = md->map;
2560cf222b37SAlasdair G Kergon 
25612e93ccc1SKiyoshi Ueda /*
25622e93ccc1SKiyoshi Ueda * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
25632e93ccc1SKiyoshi Ueda * This flag is cleared before dm_suspend returns.
25642e93ccc1SKiyoshi Ueda */
25652e93ccc1SKiyoshi Ueda if (noflush)
25662e93ccc1SKiyoshi Ueda set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
25672e93ccc1SKiyoshi Ueda 
2568436d4108SAlasdair G Kergon /* This does not get reverted if there's an error later. */
25691da177e4SLinus Torvalds dm_table_presuspend_targets(map);
25701da177e4SLinus Torvalds 
25712e93ccc1SKiyoshi Ueda /*
25729f518b27SKiyoshi Ueda * Flush I/O to the device.
25739f518b27SKiyoshi Ueda * Any I/O submitted after lock_fs() may not be flushed.
25749f518b27SKiyoshi Ueda * noflush takes precedence over do_lockfs.
25759f518b27SKiyoshi Ueda * (lock_fs() flushes I/Os and waits for them to complete.)
25762e93ccc1SKiyoshi Ueda */
257732a926daSMikulas Patocka if (!noflush && do_lockfs) {
25782ca3310eSAlasdair G Kergon r = lock_fs(md);
25792ca3310eSAlasdair G Kergon if (r)
258083d5e5b0SMikulas Patocka goto out_unlock;
2581aa8d7c2fSAlasdair G Kergon }
25821da177e4SLinus Torvalds 
25831da177e4SLinus Torvalds /*
25843b00b203SMikulas Patocka * Here we must make sure that no processes are submitting requests
25853b00b203SMikulas Patocka * to target drivers, i.e. no one may be executing
25863b00b203SMikulas Patocka * __split_and_process_bio. This is called from dm_request and
25873b00b203SMikulas Patocka * dm_wq_work.
25883b00b203SMikulas Patocka *
25893b00b203SMikulas Patocka * To get all processes out of __split_and_process_bio in dm_request,
25903b00b203SMikulas Patocka * we take the write lock. To prevent any process from reentering
25916a8736d1STejun Heo * __split_and_process_bio from dm_request and quiesce the thread
25926a8736d1STejun Heo * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
25936a8736d1STejun Heo * flush_workqueue(md->wq).
25941da177e4SLinus Torvalds */
25951eb787ecSAlasdair G Kergon set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
259683d5e5b0SMikulas Patocka synchronize_srcu(&md->io_barrier);
25971da177e4SLinus Torvalds 
2598d0bcb878SKiyoshi Ueda /*
259929e4013dSTejun Heo * Stop md->queue before flushing md->wq in case request-based
260029e4013dSTejun Heo * dm defers requests to md->wq from md->queue.
2601d0bcb878SKiyoshi Ueda */
2602cec47e3dSKiyoshi Ueda if (dm_request_based(md))
26039f518b27SKiyoshi Ueda stop_queue(md->queue);
2604cec47e3dSKiyoshi Ueda 
2605d0bcb878SKiyoshi Ueda flush_workqueue(md->wq);
2606d0bcb878SKiyoshi Ueda 
26071da177e4SLinus Torvalds /*
26083b00b203SMikulas Patocka * At this point no more requests are entering target request routines.
26093b00b203SMikulas Patocka * We call dm_wait_for_completion to wait for all existing requests
26103b00b203SMikulas Patocka * to finish.
26111da177e4SLinus Torvalds */
2612401600dfSMikulas Patocka r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE);
26131da177e4SLinus Torvalds 
26146d6f10dfSMilan Broz if (noflush)
2615022c2611SMikulas Patocka clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
261683d5e5b0SMikulas Patocka synchronize_srcu(&md->io_barrier);
26172e93ccc1SKiyoshi Ueda 
26181da177e4SLinus Torvalds /* were we interrupted? */
261946125c1cSMilan Broz if (r < 0) {
26209a1fb464SMikulas Patocka dm_queue_flush(md);
262173d410c0SMilan Broz 
2622cec47e3dSKiyoshi Ueda if (dm_request_based(md))
26239f518b27SKiyoshi Ueda start_queue(md->queue);
2624cec47e3dSKiyoshi Ueda 
26252ca3310eSAlasdair G Kergon unlock_fs(md);
262683d5e5b0SMikulas Patocka goto out_unlock; /* pushback list is already flushed, so skip flush */
26272ca3310eSAlasdair G Kergon }
26282ca3310eSAlasdair G Kergon 
26293b00b203SMikulas Patocka /*
26303b00b203SMikulas Patocka * If dm_wait_for_completion returned 0, the device is completely
26313b00b203SMikulas Patocka * quiescent now. There is no request-processing activity. All new
26323b00b203SMikulas Patocka * requests are being added to md->deferred list.
26333b00b203SMikulas Patocka */
26343b00b203SMikulas Patocka 
26351da177e4SLinus Torvalds set_bit(DMF_SUSPENDED, &md->flags);
26361da177e4SLinus Torvalds 
26374d4471cbSKiyoshi Ueda dm_table_postsuspend_targets(map);
26384d4471cbSKiyoshi Ueda 
2639d287483dSAlasdair G Kergon out_unlock:
2640e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock);
2641cf222b37SAlasdair G Kergon return r;
26421da177e4SLinus Torvalds }
26431da177e4SLinus Torvalds 
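/*
 * How the two suspend_flags bits are meant to be combined; a sketch with
 * a hypothetical wrapper, mirroring what the dm-ioctl layer passes in.
 */
#if 0
static int suspend_flags_sketch(struct mapped_device *md, bool noflush)
{
	if (noflush)
		/*
		 * Noflush suspend: lock_fs() is skipped (noflush takes
		 * precedence over lockfs) and targets may park
		 * outstanding I/O instead of flushing it to the devices.
		 */
		return dm_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);

	/* Default suspend: freeze the fs, then flush and wait for all I/O. */
	return dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
}
#endif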
26441da177e4SLinus Torvalds int dm_resume(struct mapped_device *md)
26451da177e4SLinus Torvalds {
2646cf222b37SAlasdair G Kergon int r = -EINVAL;
2647cf222b37SAlasdair G Kergon struct dm_table *map = NULL;
26481da177e4SLinus Torvalds 
2649e61290a4SDaniel Walker mutex_lock(&md->suspend_lock);
26504f186f8bSKiyoshi Ueda if (!dm_suspended_md(md))
2651cf222b37SAlasdair G Kergon goto out;
2652cf222b37SAlasdair G Kergon 
265383d5e5b0SMikulas Patocka map = md->map;
26542ca3310eSAlasdair G Kergon if (!map || !dm_table_get_size(map))
2655cf222b37SAlasdair G Kergon goto out;
26561da177e4SLinus Torvalds 
26578757b776SMilan Broz r = dm_table_resume_targets(map);
26588757b776SMilan Broz if (r)
26598757b776SMilan Broz goto out;
26602ca3310eSAlasdair G Kergon 
26619a1fb464SMikulas Patocka dm_queue_flush(md);
26622ca3310eSAlasdair G Kergon 
2663cec47e3dSKiyoshi Ueda /*
2664cec47e3dSKiyoshi Ueda * Flushing deferred I/Os must be done after targets are resumed
2665cec47e3dSKiyoshi Ueda * so that mapping of targets can work correctly.
2666cec47e3dSKiyoshi Ueda * Request-based dm is queueing the deferred I/Os in its request_queue.
2667cec47e3dSKiyoshi Ueda */
2668cec47e3dSKiyoshi Ueda if (dm_request_based(md))
2669cec47e3dSKiyoshi Ueda start_queue(md->queue);
2670cec47e3dSKiyoshi Ueda 
26712ca3310eSAlasdair G Kergon unlock_fs(md);
26722ca3310eSAlasdair G Kergon 
26732ca3310eSAlasdair G Kergon clear_bit(DMF_SUSPENDED, &md->flags);
26742ca3310eSAlasdair G Kergon 
2675cf222b37SAlasdair G Kergon r = 0;
2676cf222b37SAlasdair G Kergon out:
2677e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock);
26782ca3310eSAlasdair G Kergon 
2679cf222b37SAlasdair G Kergon return r;
26801da177e4SLinus Torvalds }
26811da177e4SLinus Torvalds 
2682fd2ed4d2SMikulas Patocka /*
2683fd2ed4d2SMikulas Patocka * Internal suspend/resume works like userspace-driven suspend. It waits
2684fd2ed4d2SMikulas Patocka * until all bios finish and prevents issuing new bios to the target drivers.
2685fd2ed4d2SMikulas Patocka * It may be used only from the kernel.
2686fd2ed4d2SMikulas Patocka *
2687fd2ed4d2SMikulas Patocka * Internal suspend holds md->suspend_lock, which prevents interaction with
2688fd2ed4d2SMikulas Patocka * userspace-driven suspend.
2689fd2ed4d2SMikulas Patocka */
2690fd2ed4d2SMikulas Patocka 
2691fd2ed4d2SMikulas Patocka void dm_internal_suspend(struct mapped_device *md)
2692fd2ed4d2SMikulas Patocka {
2693fd2ed4d2SMikulas Patocka mutex_lock(&md->suspend_lock);
2694fd2ed4d2SMikulas Patocka if (dm_suspended_md(md))
2695fd2ed4d2SMikulas Patocka return;
2696fd2ed4d2SMikulas Patocka 
2697fd2ed4d2SMikulas Patocka set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2698fd2ed4d2SMikulas Patocka synchronize_srcu(&md->io_barrier);
2699fd2ed4d2SMikulas Patocka flush_workqueue(md->wq);
2700fd2ed4d2SMikulas Patocka dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
2701fd2ed4d2SMikulas Patocka }
2702fd2ed4d2SMikulas Patocka 
2703fd2ed4d2SMikulas Patocka void dm_internal_resume(struct mapped_device *md)
2704fd2ed4d2SMikulas Patocka {
2705fd2ed4d2SMikulas Patocka if (dm_suspended_md(md))
2706fd2ed4d2SMikulas Patocka goto done;
2707fd2ed4d2SMikulas Patocka 
2708fd2ed4d2SMikulas Patocka dm_queue_flush(md);
2709fd2ed4d2SMikulas Patocka 
2710fd2ed4d2SMikulas Patocka done:
2711fd2ed4d2SMikulas Patocka mutex_unlock(&md->suspend_lock);
2712fd2ed4d2SMikulas Patocka }
2713fd2ed4d2SMikulas Patocka 
27141da177e4SLinus Torvalds /*-----------------------------------------------------------------
27151da177e4SLinus Torvalds * Event notification.
27161da177e4SLinus Torvalds *---------------------------------------------------------------*/
27173abf85b5SPeter Rajnoha int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
271860935eb2SMilan Broz unsigned cookie)
271969267a30SAlasdair G Kergon {
272060935eb2SMilan Broz char udev_cookie[DM_COOKIE_LENGTH];
272160935eb2SMilan Broz char *envp[] = { udev_cookie, NULL };
272260935eb2SMilan Broz 
272360935eb2SMilan Broz if (!cookie)
27243abf85b5SPeter Rajnoha return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
272560935eb2SMilan Broz else {
272660935eb2SMilan Broz snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
272760935eb2SMilan Broz DM_COOKIE_ENV_VAR_NAME, cookie);
27283abf85b5SPeter Rajnoha return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
27293abf85b5SPeter Rajnoha action, envp);
273060935eb2SMilan Broz }
273169267a30SAlasdair G Kergon }
273269267a30SAlasdair G Kergon 
27337a8c3d3bSMike Anderson uint32_t dm_next_uevent_seq(struct mapped_device *md)
27347a8c3d3bSMike Anderson {
27357a8c3d3bSMike Anderson return atomic_add_return(1, &md->uevent_seq);
27367a8c3d3bSMike Anderson }
27377a8c3d3bSMike Anderson 
27381da177e4SLinus Torvalds uint32_t dm_get_event_nr(struct mapped_device *md)
27391da177e4SLinus Torvalds {
27401da177e4SLinus Torvalds return atomic_read(&md->event_nr);
27411da177e4SLinus Torvalds }
27421da177e4SLinus Torvalds 
27431da177e4SLinus Torvalds int dm_wait_event(struct mapped_device *md, int event_nr)
27441da177e4SLinus Torvalds {
27451da177e4SLinus Torvalds return wait_event_interruptible(md->eventq,
27461da177e4SLinus Torvalds (event_nr != atomic_read(&md->event_nr)));
27471da177e4SLinus Torvalds }
27481da177e4SLinus Torvalds 
27497a8c3d3bSMike Anderson void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
27507a8c3d3bSMike Anderson {
27517a8c3d3bSMike Anderson unsigned long flags;
27527a8c3d3bSMike Anderson 
27537a8c3d3bSMike Anderson spin_lock_irqsave(&md->uevent_lock, flags);
27547a8c3d3bSMike Anderson list_add(elist, &md->uevent_list);
27557a8c3d3bSMike Anderson spin_unlock_irqrestore(&md->uevent_lock, flags);
27567a8c3d3bSMike Anderson }
27577a8c3d3bSMike Anderson 
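/*
 * What the cookie formatting in dm_kobject_uevent() above produces: the
 * single environment string handed to udev. The cookie value is
 * illustrative.
 */
#if 0
static void uevent_cookie_sketch(void)
{
	char udev_cookie[DM_COOKIE_LENGTH];

	/* cookie 123456 yields the string "DM_COOKIE=123456" */
	snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
		 DM_COOKIE_ENV_VAR_NAME, 123456U);
}
#endif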
27581da177e4SLinus Torvalds /*
27591da177e4SLinus Torvalds * The gendisk is only valid as long as you have a reference
27601da177e4SLinus Torvalds * count on 'md'.
27611da177e4SLinus Torvalds */
27621da177e4SLinus Torvalds struct gendisk *dm_disk(struct mapped_device *md)
27631da177e4SLinus Torvalds {
27641da177e4SLinus Torvalds return md->disk;
27651da177e4SLinus Torvalds }
27661da177e4SLinus Torvalds 
2767784aae73SMilan Broz struct kobject *dm_kobject(struct mapped_device *md)
2768784aae73SMilan Broz {
27692995fa78SMikulas Patocka return &md->kobj_holder.kobj;
2770784aae73SMilan Broz }
2771784aae73SMilan Broz 
2772784aae73SMilan Broz struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
2773784aae73SMilan Broz {
2774784aae73SMilan Broz struct mapped_device *md;
2775784aae73SMilan Broz 
27762995fa78SMikulas Patocka md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
2777784aae73SMilan Broz 
27784d89b7b4SMilan Broz if (test_bit(DMF_FREEING, &md->flags) ||
2779432a212cSMike Anderson dm_deleting_md(md))
27804d89b7b4SMilan Broz return NULL;
27814d89b7b4SMilan Broz 
2782784aae73SMilan Broz dm_get(md);
2783784aae73SMilan Broz return md;
2784784aae73SMilan Broz }
2785784aae73SMilan Broz 
27864f186f8bSKiyoshi Ueda int dm_suspended_md(struct mapped_device *md)
27871da177e4SLinus Torvalds {
27881da177e4SLinus Torvalds return test_bit(DMF_SUSPENDED, &md->flags);
27891da177e4SLinus Torvalds }
27901da177e4SLinus Torvalds 
27912c140a24SMikulas Patocka int dm_test_deferred_remove_flag(struct mapped_device *md)
27922c140a24SMikulas Patocka {
27932c140a24SMikulas Patocka return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
27942c140a24SMikulas Patocka }
27952c140a24SMikulas Patocka 
279664dbce58SKiyoshi Ueda int dm_suspended(struct dm_target *ti)
279764dbce58SKiyoshi Ueda {
2798ecdb2e25SKiyoshi Ueda return dm_suspended_md(dm_table_get_md(ti->table));
279964dbce58SKiyoshi Ueda }
280064dbce58SKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_suspended);
280164dbce58SKiyoshi Ueda 
28022e93ccc1SKiyoshi Ueda int dm_noflush_suspending(struct dm_target *ti)
28032e93ccc1SKiyoshi Ueda {
2804ecdb2e25SKiyoshi Ueda return __noflush_suspending(dm_table_get_md(ti->table));
28052e93ccc1SKiyoshi Ueda }
28062e93ccc1SKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_noflush_suspending);
28072e93ccc1SKiyoshi Ueda 
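/*
 * A sketch of how a target's completion path can consult
 * dm_noflush_suspending(), in the style of dm-multipath: rather than
 * failing I/O during a noflush suspend, ask the core to requeue it.
 * The target and its end_io hook are hypothetical.
 */
#if 0
static int sketch_target_end_io(struct dm_target *ti, struct bio *bio, int error)
{
	if (error && dm_noflush_suspending(ti))
		return DM_ENDIO_REQUEUE;	/* park the bio until resume */

	return error;	/* pass the result through otherwise */
}
#endif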
2808c0820cf5SMikulas Patocka struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size)
2809e6ee8c0bSKiyoshi Ueda {
28105f015204SJun'ichi Nomura struct dm_md_mempools *pools = kzalloc(sizeof(*pools), GFP_KERNEL);
28115f015204SJun'ichi Nomura struct kmem_cache *cachep;
28125f015204SJun'ichi Nomura unsigned int pool_size;
28135f015204SJun'ichi Nomura unsigned int front_pad;
2814e6ee8c0bSKiyoshi Ueda 
2815e6ee8c0bSKiyoshi Ueda if (!pools)
2816e6ee8c0bSKiyoshi Ueda return NULL;
2817e6ee8c0bSKiyoshi Ueda 
281823e5083bSJun'ichi Nomura if (type == DM_TYPE_BIO_BASED) {
28195f015204SJun'ichi Nomura cachep = _io_cache;
2820e8603136SMike Snitzer pool_size = dm_get_reserved_bio_based_ios();
28215f015204SJun'ichi Nomura front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
28225f015204SJun'ichi Nomura } else if (type == DM_TYPE_REQUEST_BASED) {
28235f015204SJun'ichi Nomura cachep = _rq_tio_cache;
2824f4790826SMike Snitzer pool_size = dm_get_reserved_rq_based_ios();
28255f015204SJun'ichi Nomura front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
28265f015204SJun'ichi Nomura /* per_bio_data_size is not used. See __bind_mempools(). */
28275f015204SJun'ichi Nomura WARN_ON(per_bio_data_size != 0);
28285f015204SJun'ichi Nomura } else
28295f015204SJun'ichi Nomura goto out;
28305f015204SJun'ichi Nomura 
28316cfa5857SMike Snitzer pools->io_pool = mempool_create_slab_pool(pool_size, cachep);
2832e6ee8c0bSKiyoshi Ueda if (!pools->io_pool)
28335f015204SJun'ichi Nomura goto out;
2834e6ee8c0bSKiyoshi Ueda 
28355f015204SJun'ichi Nomura pools->bs = bioset_create(pool_size, front_pad);
2836e6ee8c0bSKiyoshi Ueda if (!pools->bs)
28375f015204SJun'ichi Nomura goto out;
2838e6ee8c0bSKiyoshi Ueda 
2839a91a2785SMartin K. Petersen if (integrity && bioset_integrity_create(pools->bs, pool_size))
28405f015204SJun'ichi Nomura goto out;
2841a91a2785SMartin K. Petersen 
2842e6ee8c0bSKiyoshi Ueda return pools;
2843e6ee8c0bSKiyoshi Ueda 
28445f015204SJun'ichi Nomura out:
28455f015204SJun'ichi Nomura dm_free_md_mempools(pools);
2846e6ee8c0bSKiyoshi Ueda 
2847e6ee8c0bSKiyoshi Ueda return NULL;
2848e6ee8c0bSKiyoshi Ueda }
2849e6ee8c0bSKiyoshi Ueda 
2850e6ee8c0bSKiyoshi Ueda void dm_free_md_mempools(struct dm_md_mempools *pools)
2851e6ee8c0bSKiyoshi Ueda {
2852e6ee8c0bSKiyoshi Ueda if (!pools)
2853e6ee8c0bSKiyoshi Ueda return;
2854e6ee8c0bSKiyoshi Ueda 
2855e6ee8c0bSKiyoshi Ueda if (pools->io_pool)
2856e6ee8c0bSKiyoshi Ueda mempool_destroy(pools->io_pool);
2857e6ee8c0bSKiyoshi Ueda 
2858e6ee8c0bSKiyoshi Ueda if (pools->bs)
2859e6ee8c0bSKiyoshi Ueda bioset_free(pools->bs);
2860e6ee8c0bSKiyoshi Ueda 
2861e6ee8c0bSKiyoshi Ueda kfree(pools);
2862e6ee8c0bSKiyoshi Ueda }
2863e6ee8c0bSKiyoshi Ueda 
286483d5cde4SAlexey Dobriyan static const struct block_device_operations dm_blk_dops = {
28651da177e4SLinus Torvalds .open = dm_blk_open,
28661da177e4SLinus Torvalds .release = dm_blk_close,
2867aa129a22SMilan Broz .ioctl = dm_blk_ioctl,
28683ac51e74SDarrick J. Wong .getgeo = dm_blk_getgeo,
28691da177e4SLinus Torvalds .owner = THIS_MODULE
28701da177e4SLinus Torvalds };
28711da177e4SLinus Torvalds 
28721da177e4SLinus Torvalds /*
28731da177e4SLinus Torvalds * module hooks
28741da177e4SLinus Torvalds */
28751da177e4SLinus Torvalds module_init(dm_init);
28761da177e4SLinus Torvalds module_exit(dm_exit);
28771da177e4SLinus Torvalds 
28781da177e4SLinus Torvalds module_param(major, uint, 0);
28791da177e4SLinus Torvalds MODULE_PARM_DESC(major, "The major number of the device mapper");
2880f4790826SMike Snitzer 
2881e8603136SMike Snitzer module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
2882e8603136SMike Snitzer MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
2883e8603136SMike Snitzer 
2884f4790826SMike Snitzer module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
2885f4790826SMike Snitzer MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");
2886f4790826SMike Snitzer 
28871da177e4SLinus Torvalds MODULE_DESCRIPTION(DM_NAME " driver");
28881da177e4SLinus Torvalds MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
28891da177e4SLinus Torvalds MODULE_LICENSE("GPL");
2890
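/*
 * Because the two reserved-IOs parameters above are declared with
 * S_IRUGO | S_IWUSR, they can be inspected and (by root) tuned at
 * runtime through the standard module-parameter sysfs files, e.g.
 *
 *	/sys/module/dm_mod/parameters/reserved_bio_based_ios
 *	/sys/module/dm_mod/parameters/reserved_rq_based_ios
 *
 * (assuming device-mapper is built as the dm_mod module).
 */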