/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/smp_lock.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/delay.h>

#include <trace/events/block.h>

#define DM_MSG_PREFIX "core"

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_SPINLOCK(_minor_lock);
/*
 * For bio-based dm.
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	atomic_t io_count;
	struct bio *bio;
	unsigned long start_time;
	spinlock_t endio_lock;
};

/*
 * For bio-based dm.
 * One of these is allocated per target within a bio.  Hopefully
 * this will be simplified out one day.
 */
struct dm_target_io {
	struct dm_io *io;
	struct dm_target *ti;
	union map_info info;
};
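
/*
 * How the two structures above fit together: each original bio gets one
 * struct dm_io, and every clone submitted to a target carries a
 * struct dm_target_io pointing back at it.  io_count tracks the
 * outstanding clones; dec_pending() below completes the original bio
 * once it drops to zero.
 */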

/*
 * For request-based dm.
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
	struct mapped_device *md;
	struct dm_target *ti;
	struct request *orig, clone;
	int error;
	union map_info info;
};

/*
 * For request-based dm.
 * One of these is allocated per bio.
 */
struct dm_rq_clone_bio_info {
	struct bio *orig;
	struct dm_rq_target_io *tio;
};

union map_info *dm_get_mapinfo(struct bio *bio)
{
	if (bio && bio->bi_private)
		return &((struct dm_target_io *)bio->bi_private)->info;
	return NULL;
}

union map_info *dm_get_rq_mapinfo(struct request *rq)
{
	if (rq && rq->end_io_data)
		return &((struct dm_rq_target_io *)rq->end_io_data)->info;
	return NULL;
}
EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);

#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
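
/*
 * The DMF_* bits above live in md->flags and are manipulated atomically
 * with set_bit()/clear_bit()/test_bit(), along the lines of (sketch):
 *
 *	if (test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))
 *		queue_io(md, bio);	(defer the bio until resume)
 */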

/*
 * Work processed by per-device workqueue.
 */
struct mapped_device {
	struct rw_semaphore io_lock;
	struct mutex suspend_lock;
	rwlock_t map_lock;
	atomic_t holders;
	atomic_t open_count;

	unsigned long flags;

	struct request_queue *queue;
	unsigned type;
	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;

	struct gendisk *disk;
	char name[16];

	void *interface_ptr;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	atomic_t pending[2];
	wait_queue_head_t wait;
	struct work_struct work;
	struct bio_list deferred;
	spinlock_t deferred_lock;

	/*
	 * Processing queue (flush)
	 */
	struct workqueue_struct *wq;

	/*
	 * The current mapping.
	 */
	struct dm_table *map;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;
	mempool_t *tio_pool;

	struct bio_set *bs;

	/*
	 * Event handling.
	 */
	atomic_t event_nr;
	wait_queue_head_t eventq;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/*
	 * freeze/thaw support requires holding onto a super block
	 */
	struct super_block *frozen_sb;
	struct block_device *bdev;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/* For saving the address of __make_request for request based dm */
	make_request_fn *saved_make_request_fn;

	/* sysfs handle */
	struct kobject kobj;

	/* zero-length flush that will be cloned and submitted to targets */
	struct bio flush_bio;
};

/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
	mempool_t *io_pool;
	mempool_t *tio_pool;
	struct bio_set *bs;
};

#define MIN_IOS 256
static struct kmem_cache *_io_cache;
static struct kmem_cache *_tio_cache;
static struct kmem_cache *_rq_tio_cache;
static struct kmem_cache *_rq_bio_info_cache;

static int __init local_init(void)
{
	int r = -ENOMEM;

	/* allocate a slab for the dm_ios */
	_io_cache = KMEM_CACHE(dm_io, 0);
	if (!_io_cache)
		return r;

	/* allocate a slab for the target ios */
	_tio_cache = KMEM_CACHE(dm_target_io, 0);
	if (!_tio_cache)
		goto out_free_io_cache;

	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
	if (!_rq_tio_cache)
		goto out_free_tio_cache;

	_rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0);
	if (!_rq_bio_info_cache)
		goto out_free_rq_tio_cache;

	r = dm_uevent_init();
	if (r)
		goto out_free_rq_bio_info_cache;

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_uevent_exit;

	if (!_major)
		_major = r;

	return 0;

out_uevent_exit:
	dm_uevent_exit();
out_free_rq_bio_info_cache:
	kmem_cache_destroy(_rq_bio_info_cache);
out_free_rq_tio_cache:
	kmem_cache_destroy(_rq_tio_cache);
out_free_tio_cache:
	kmem_cache_destroy(_tio_cache);
out_free_io_cache:
	kmem_cache_destroy(_io_cache);

	return r;
}

static void local_exit(void)
{
	kmem_cache_destroy(_rq_bio_info_cache);
	kmem_cache_destroy(_rq_tio_cache);
	kmem_cache_destroy(_tio_cache);
	kmem_cache_destroy(_io_cache);
	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}
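
/*
 * dm_init() below walks _inits[] in order and, on failure, unwinds the
 * entries that already succeeded via the matching slots in _exits[],
 * so the two tables must be kept in sync.
 */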
static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_io_init,
	dm_kcopyd_init,
	dm_interface_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_io_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();
}

/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{
	return test_bit(DMF_DELETING, &md->flags);
}

static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	lock_kernel();
	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);

out:
	spin_unlock(&_minor_lock);
	unlock_kernel();

	return md ? 0 : -ENXIO;
}
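
/*
 * The open_count taken in dm_blk_open() is dropped in dm_blk_close()
 * and checked by dm_lock_for_deletion() below, so an open device
 * cannot be deleted out from under its users.
 */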

static int dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md = disk->private_data;

	lock_kernel();
	atomic_dec(&md->open_count);
	dm_put(md);
	unlock_kernel();

	return 0;
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md))
		r = -EBUSY;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	struct dm_table *map = dm_get_live_table(md);
	struct dm_target *tgt;
	int r = -ENOTTY;

	if (!map || !dm_table_get_size(map))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		goto out;

	tgt = dm_table_get_target(map, 0);

	if (dm_suspended_md(md)) {
		r = -EAGAIN;
		goto out;
	}

	if (tgt->type->ioctl)
		r = tgt->type->ioctl(tgt, cmd, arg);

out:
	dm_table_put(map);

	return r;
}

static struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
	mempool_free(tio, md->tio_pool);
}

static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
					    gfp_t gfp_mask)
{
	return mempool_alloc(md->tio_pool, gfp_mask);
}

static void free_rq_tio(struct dm_rq_target_io *tio)
{
	mempool_free(tio, tio->md->tio_pool);
}

static struct dm_rq_clone_bio_info *alloc_bio_info(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_ATOMIC);
}

static void free_bio_info(struct dm_rq_clone_bio_info *info)
{
	mempool_free(info, info->tio->md->io_pool);
}
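
/*
 * Number of ios currently in flight on this device, reads plus writes.
 * Suspend waits on md->wait until this drains (see the wake_up() calls
 * in end_io_acct() and rq_completed()).
 */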
static int md_in_flight(struct mapped_device *md)
{
	return atomic_read(&md->pending[READ]) +
	       atomic_read(&md->pending[WRITE]);
}

static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	int cpu;
	int rw = bio_data_dir(io->bio);

	io->start_time = jiffies;

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_unlock();
	dm_disk(md)->part0.in_flight[rw] = atomic_inc_return(&md->pending[rw]);
}

static void end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	unsigned long duration = jiffies - io->start_time;
	int pending, cpu;
	int rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
	part_stat_unlock();

	/*
	 * After this is decremented the bio must not be touched if it is
	 * a flush.
	 */
	dm_disk(md)->part0.in_flight[rw] = pending =
		atomic_dec_return(&md->pending[rw]);
	pending += atomic_read(&md->pending[rw^0x1]);

	/* nudge anyone waiting on suspend queue */
	if (!pending)
		wake_up(&md->wait);
}

/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&md->deferred_lock, flags);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irqrestore(&md->deferred_lock, flags);
	queue_work(md->wq, &md->work);
}

/*
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_table_put() when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md)
{
	struct dm_table *t;
	unsigned long flags;

	read_lock_irqsave(&md->map_lock, flags);
	t = md->map;
	if (t)
		dm_table_get(t);
	read_unlock_irqrestore(&md->map_lock, flags);

	return t;
}
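
/*
 * Typical calling pattern (sketch):
 *
 *	map = dm_get_live_table(md);
 *	if (map) {
 *		(act on the live table)
 *		dm_table_put(map);
 *	}
 */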

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}
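
/*
 * Example with hypothetical numbers: for a 1 GiB device (2097152
 * sectors), heads = 64 and sectors = 32 give 2048 sectors per cylinder,
 * so cylinders = capacity >> 11 = 1024 and cylinders * heads * sectors
 * covers the device exactly; any geo->start beyond that product is
 * rejected above.
 */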

/*-----------------------------------------------------------------
 * CRUD START:
 *   A more elegant soln is in the works that uses the queue
 *   merge fn, unfortunately there are a couple of changes to
 *   the block layer that I want to make for this.  So in the
 *   interests of getting something for people to use I give
 *   you this clearly demarcated crap.
 *---------------------------------------------------------------*/

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, int error)
{
	unsigned long flags;
	int io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;

	/* Push-back supersedes any I/O errors */
	if (unlikely(error)) {
		spin_lock_irqsave(&io->endio_lock, flags);
		if (!(io->error > 0 && __noflush_suspending(md)))
			io->error = error;
		spin_unlock_irqrestore(&io->endio_lock, flags);
	}

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->error == DM_ENDIO_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md))
				bio_list_add_head(&md->deferred, io->bio);
			else
				/* noflush suspend was interrupted. */
				io->error = -EIO;
			spin_unlock_irqrestore(&md->deferred_lock, flags);
		}

		io_error = io->error;
		bio = io->bio;
		end_io_acct(io);
		free_io(md, io);

		if (io_error == DM_ENDIO_REQUEUE)
			return;

		if (!(bio->bi_rw & REQ_FLUSH) || !bio->bi_size) {
			trace_block_bio_complete(md->queue, bio);
			bio_endio(bio, io_error);
		} else {
			/*
			 * Preflush done for flush with data, reissue
			 * without REQ_FLUSH.
			 */
			bio->bi_rw &= ~REQ_FLUSH;
			queue_io(md, bio);
		}
	}
}

static void clone_endio(struct bio *bio, int error)
{
	int r = 0;
	struct dm_target_io *tio = bio->bi_private;
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		error = -EIO;

	if (endio) {
		r = endio(tio->ti, bio, error, &tio->info);
		if (r < 0 || r == DM_ENDIO_REQUEUE)
			/*
			 * error and requeue request are handled
			 * in dec_pending().
			 */
			error = r;
		else if (r == DM_ENDIO_INCOMPLETE)
			/* The target will handle the io */
			return;
		else if (r) {
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	/*
	 * Store md for cleanup instead of tio which is about to get freed.
	 */
	bio->bi_private = md->bs;

	free_tio(md, tio);
	bio_put(bio);
	dec_pending(io, error);
}

/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone, int error)
{
	struct dm_rq_clone_bio_info *info = clone->bi_private;
	struct dm_rq_target_io *tio = info->tio;
	struct bio *bio = info->orig;
	unsigned int nr_bytes = info->orig->bi_size;

	bio_put(clone);

	if (tio->error)
		/*
		 * An error has already been detected on the request.
		 * Once an error has occurred, just let clone->end_io()
		 * handle the remainder.
		 */
		return;
	else if (error) {
		/*
		 * Don't report the error to the upper layer yet.
		 * The error handling decision is made by the target driver
		 * when the request is completed.
		 */
		tio->error = error;
		return;
	}

	/*
	 * I/O for the bio successfully completed.
	 * Report the data completion to the upper layer.
	 */

	/*
	 * bios are processed from the head of the list.
	 * So the completing bio should always be rq->bio.
	 * If it's not, something wrong is happening.
	 */
	if (tio->orig->bio != bio)
		DMERR("bio completion is going in the middle of the request");

	/*
	 * Update the original request.
	 * Do not use blk_end_request() here, because it may complete
	 * the original request before the clone, and break the ordering.
	 */
	blk_update_request(tio->orig, 0, nr_bytes);
}

/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Or do dm_get() before calling this function and dm_put() later.
 */
static void rq_completed(struct mapped_device *md, int rw, int run_queue)
{
	atomic_dec(&md->pending[rw]);

	/* nudge anyone waiting on suspend queue */
	if (!md_in_flight(md))
		wake_up(&md->wait);

	if (run_queue)
		blk_run_queue(md->queue);

	/*
	 * dm_put() must be at the end of this function. See the comment above
	 */
	dm_put(md);
}

static void free_rq_clone(struct request *clone)
{
	struct dm_rq_target_io *tio = clone->end_io_data;

	blk_rq_unprep_clone(clone);
	free_rq_tio(tio);
}

/*
 * Complete the clone and the original request.
 * Must be called without queue lock.
 */
static void dm_end_request(struct request *clone, int error)
{
	int rw = rq_data_dir(clone);
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;

	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
		rq->errors = clone->errors;
		rq->resid_len = clone->resid_len;

		if (rq->sense)
			/*
			 * We are using the sense buffer of the original
			 * request.
			 * So setting the length of the sense data is enough.
			 */
			rq->sense_len = clone->sense_len;
	}

	free_rq_clone(clone);
	blk_end_request_all(rq, error);
	rq_completed(md, rw, true);
}

static void dm_unprep_request(struct request *rq)
{
	struct request *clone = rq->special;

	rq->special = NULL;
	rq->cmd_flags &= ~REQ_DONTPREP;

	free_rq_clone(clone);
}

/*
 * Requeue the original request of a clone.
 */
void dm_requeue_unmapped_request(struct request *clone)
{
	int rw = rq_data_dir(clone);
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	struct request_queue *q = rq->q;
	unsigned long flags;

	dm_unprep_request(rq);

	spin_lock_irqsave(q->queue_lock, flags);
	if (elv_queue_empty(q))
		blk_plug_device(q);
	blk_requeue_request(q, rq);
	spin_unlock_irqrestore(q->queue_lock, flags);

	rq_completed(md, rw, 0);
}
EXPORT_SYMBOL_GPL(dm_requeue_unmapped_request);

static void __stop_queue(struct request_queue *q)
{
	blk_stop_queue(q);
}

static void stop_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__stop_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static void __start_queue(struct request_queue *q)
{
	if (blk_queue_stopped(q))
		blk_start_queue(q);
}

static void start_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
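
/*
 * Dispatch on the target's rq_end_io return value: r <= 0 completes the
 * original request with that status, DM_ENDIO_INCOMPLETE leaves the I/O
 * to the target, and DM_ENDIO_REQUEUE puts the original request back on
 * the queue.
 */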
static void dm_done(struct request *clone, int error, bool mapped)
{
	int r = error;
	struct dm_rq_target_io *tio = clone->end_io_data;
	dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io;

	if (mapped && rq_end_io)
		r = rq_end_io(tio->ti, clone, error, &tio->info);

	if (r <= 0)
		/* The target wants to complete the I/O */
		dm_end_request(clone, r);
	else if (r == DM_ENDIO_INCOMPLETE)
		/* The target will handle the I/O */
		return;
	else if (r == DM_ENDIO_REQUEUE)
		/* The target wants to requeue the I/O */
		dm_requeue_unmapped_request(clone);
	else {
		DMWARN("unimplemented target endio return value: %d", r);
		BUG();
	}
}

/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
{
	bool mapped = true;
	struct request *clone = rq->completion_data;
	struct dm_rq_target_io *tio = clone->end_io_data;

	if (rq->cmd_flags & REQ_FAILED)
		mapped = false;

	dm_done(clone, tio->error, mapped);
}

/*
 * Complete the clone and the original request with the error status
 * through softirq context.
 */
static void dm_complete_request(struct request *clone, int error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct request *rq = tio->orig;

	tio->error = error;
	rq->completion_data = clone;
	blk_complete_request(rq);
}

/*
 * Complete the not-mapped clone and the original request with the error status
 * through softirq context.
 * Target's rq_end_io() function isn't called.
 * This may be used when the target's map_rq() function fails.
 */
void dm_kill_unmapped_request(struct request *clone, int error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct request *rq = tio->orig;

	rq->cmd_flags |= REQ_FAILED;
	dm_complete_request(clone, error);
}
EXPORT_SYMBOL_GPL(dm_kill_unmapped_request);

/*
 * Called with the queue lock held
 */
static void end_clone_request(struct request *clone, int error)
{
	/*
	 * This just cleans up the bookkeeping of the queue the clone was
	 * dispatched on.  The clone is *NOT* actually freed here, because
	 * it was allocated from dm's own mempool and REQ_ALLOCED isn't set
	 * in clone->cmd_flags.
	 */
	__blk_put_request(clone->q, clone);

	/*
	 * Actual request completion is done in a softirq context which doesn't
	 * hold the queue lock.  Otherwise, deadlock could occur because:
	 *     - another request may be submitted by the upper level driver
	 *       of the stacking during the completion
	 *     - the submission which requires queue lock may be done
	 *       against this queue
	 */
	dm_complete_request(clone, error);
}

/*
 * Return maximum size of I/O possible at the supplied sector up to the current
 * target boundary.
 */
static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
{
	sector_t target_offset = dm_target_offset(ti, sector);

	return ti->len - target_offset;
}

static sector_t max_io_len(sector_t sector, struct dm_target *ti)
{
	sector_t len = max_io_len_target_boundary(sector, ti);

	/*
	 * Does the target need to split even further ?
	 */
	if (ti->split_io) {
		sector_t boundary;
		sector_t offset = dm_target_offset(ti, sector);
		boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
			   - offset;
		if (len > boundary)
			len = boundary;
	}

	return len;
}
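
/*
 * Worked example for the split_io clamp above (hypothetical numbers):
 * with ti->split_io = 8 sectors (a power of two) and a target offset
 * of 5, (5 + 8) & ~7 = 8, so boundary = 3 and at most 3 sectors are
 * issued before the next 8-sector boundary.
 */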

static void __map_bio(struct dm_target *ti, struct bio *clone,
		      struct dm_target_io *tio)
{
	int r;
	sector_t sector;
	struct mapped_device *md;

	clone->bi_end_io = clone_endio;
	clone->bi_private = tio;

	/*
	 * Map the clone.  If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&tio->io->io_count);
	sector = clone->bi_sector;
	r = ti->type->map(ti, clone, &tio->info);
	if (r == DM_MAPIO_REMAPPED) {
		/* the bio has been remapped so dispatch it */

		trace_block_remap(bdev_get_queue(clone->bi_bdev), clone,
				  tio->io->bio->bi_bdev->bd_dev, sector);

		generic_make_request(clone);
	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
		/* error the io and bail out, or requeue it if needed */
		md = tio->io->md;
		dec_pending(tio->io, r);
		/*
		 * Store bio_set for cleanup.
		 */
		clone->bi_private = md->bs;
		bio_put(clone);
		free_tio(md, tio);
	} else if (r) {
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}
}

struct clone_info {
	struct mapped_device *md;
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	sector_t sector_count;
	unsigned short idx;
};

static void dm_bio_destructor(struct bio *bio)
{
	struct bio_set *bs = bio->bi_private;

	bio_free(bio, bs);
}

/*
 * Creates a little bio that just does part of a bvec.
 */
static struct bio *split_bvec(struct bio *bio, sector_t sector,
			      unsigned short idx, unsigned int offset,
			      unsigned int len, struct bio_set *bs)
{
	struct bio *clone;
	struct bio_vec *bv = bio->bi_io_vec + idx;

	clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
	clone->bi_destructor = dm_bio_destructor;
	*clone->bi_io_vec = *bv;

	clone->bi_sector = sector;
	clone->bi_bdev = bio->bi_bdev;
	clone->bi_rw = bio->bi_rw;
	clone->bi_vcnt = 1;
	clone->bi_size = to_bytes(len);
	clone->bi_io_vec->bv_offset = offset;
	clone->bi_io_vec->bv_len = clone->bi_size;
	clone->bi_flags |= 1 << BIO_CLONED;

	if (bio_integrity(bio)) {
		bio_integrity_clone(clone, bio, GFP_NOIO, bs);
		bio_integrity_trim(clone,
				   bio_sector_offset(bio, idx, offset), len);
	}

	return clone;
}

/*
 * Creates a bio that consists of a range of complete bvecs.
 */
static struct bio *clone_bio(struct bio *bio, sector_t sector,
			     unsigned short idx, unsigned short bv_count,
			     unsigned int len, struct bio_set *bs)
{
	struct bio *clone;

	clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
	__bio_clone(clone, bio);
	clone->bi_destructor = dm_bio_destructor;
	clone->bi_sector = sector;
	clone->bi_idx = idx;
	clone->bi_vcnt = idx + bv_count;
	clone->bi_size = to_bytes(len);
	clone->bi_flags &= ~(1 << BIO_SEG_VALID);

	if (bio_integrity(bio)) {
		bio_integrity_clone(clone, bio, GFP_NOIO, bs);

		if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
			bio_integrity_trim(clone,
					   bio_sector_offset(bio, idx, 0), len);
	}

	return clone;
}

static struct dm_target_io *alloc_tio(struct clone_info *ci,
				      struct dm_target *ti)
{
	struct dm_target_io *tio = mempool_alloc(ci->md->tio_pool, GFP_NOIO);

	tio->io = ci->io;
	tio->ti = ti;
	memset(&tio->info, 0, sizeof(tio->info));

	return tio;
}
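
/*
 * Clone ci->bio and hand copy number request_nr to the target.  Used
 * below to replicate flushes and discards across however many requests
 * the target asked for (num_flush_requests / num_discard_requests).
 */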
static void __issue_target_request(struct clone_info *ci, struct dm_target *ti,
				   unsigned request_nr, sector_t len)
{
	struct dm_target_io *tio = alloc_tio(ci, ti);
	struct bio *clone;

	tio->info.target_request_nr = request_nr;

	/*
	 * Discard requests require the bio's inline iovecs be initialized.
	 * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush
	 * and discard, so no need for concern about wasted bvec allocations.
	 */
	clone = bio_alloc_bioset(GFP_NOIO, ci->bio->bi_max_vecs, ci->md->bs);
	__bio_clone(clone, ci->bio);
	clone->bi_destructor = dm_bio_destructor;
	if (len) {
		clone->bi_sector = ci->sector;
		clone->bi_size = to_bytes(len);
	}

	__map_bio(ti, clone, tio);
}

static void __issue_target_requests(struct clone_info *ci, struct dm_target *ti,
				    unsigned num_requests, sector_t len)
{
	unsigned request_nr;

	for (request_nr = 0; request_nr < num_requests; request_nr++)
		__issue_target_request(ci, ti, request_nr, len);
}

static int __clone_and_map_flush(struct clone_info *ci)
{
	unsigned target_nr = 0;
	struct dm_target *ti;

	while ((ti = dm_table_get_target(ci->map, target_nr++)))
		__issue_target_requests(ci, ti, ti->num_flush_requests, 0);

	ci->sector_count = 0;

	return 0;
}

/*
 * Perform all io with a single clone.
 */
static void __clone_and_map_simple(struct clone_info *ci, struct dm_target *ti)
{
	struct bio *clone, *bio = ci->bio;
	struct dm_target_io *tio;

	tio = alloc_tio(ci, ti);
	clone = clone_bio(bio, ci->sector, ci->idx,
			  bio->bi_vcnt - ci->idx, ci->sector_count,
			  ci->md->bs);
	__map_bio(ti, clone, tio);
	ci->sector_count = 0;
}
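
/*
 * Discards are processed target by target: each loop iteration below
 * clamps the range to the current target's boundary and replicates the
 * request ti->num_discard_requests times before moving on.
 */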
static int __clone_and_map_discard(struct clone_info *ci)
{
	struct dm_target *ti;
	sector_t len;

	do {
		ti = dm_table_find_target(ci->map, ci->sector);
		if (!dm_target_is_valid(ti))
			return -EIO;

		/*
		 * Even though the device advertised discard support,
		 * reconfiguration might have changed that since the
		 * check was performed.
		 */
		if (!ti->num_discard_requests)
			return -EOPNOTSUPP;

		len = min(ci->sector_count, max_io_len_target_boundary(ci->sector, ti));

		__issue_target_requests(ci, ti, ti->num_discard_requests, len);

		ci->sector += len;
	} while (ci->sector_count -= len);

	return 0;
}
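
/*
 * Non-flush, non-discard bios take one of three paths below: a single
 * clone when the whole remainder fits in the current target, a clone of
 * as many complete bvecs as fit, or a bvec split across two or more
 * targets via split_bvec().
 */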
12461da177e4SLinus Torvalds */ 12471da177e4SLinus Torvalds struct bio_vec *bv = bio->bi_io_vec + ci->idx; 1248d2044a94SAlasdair G Kergon sector_t remaining = to_sector(bv->bv_len); 1249d2044a94SAlasdair G Kergon unsigned int offset = 0; 12501da177e4SLinus Torvalds 1251d2044a94SAlasdair G Kergon do { 1252d2044a94SAlasdair G Kergon if (offset) { 12531da177e4SLinus Torvalds ti = dm_table_find_target(ci->map, ci->sector); 1254512875bdSJun'ichi Nomura if (!dm_target_is_valid(ti)) 1255512875bdSJun'ichi Nomura return -EIO; 1256512875bdSJun'ichi Nomura 125756a67df7SMike Snitzer max = max_io_len(ci->sector, ti); 1258d2044a94SAlasdair G Kergon } 1259d2044a94SAlasdair G Kergon 1260d2044a94SAlasdair G Kergon len = min(remaining, max); 1261d2044a94SAlasdair G Kergon 12625ae89a87SMike Snitzer tio = alloc_tio(ci, ti); 1263d2044a94SAlasdair G Kergon clone = split_bvec(bio, ci->sector, ci->idx, 12649faf400fSStefan Bader bv->bv_offset + offset, len, 12659faf400fSStefan Bader ci->md->bs); 1266d2044a94SAlasdair G Kergon 12671da177e4SLinus Torvalds __map_bio(ti, clone, tio); 12681da177e4SLinus Torvalds 12691da177e4SLinus Torvalds ci->sector += len; 12701da177e4SLinus Torvalds ci->sector_count -= len; 1271d2044a94SAlasdair G Kergon offset += to_bytes(len); 1272d2044a94SAlasdair G Kergon } while (remaining -= len); 1273d2044a94SAlasdair G Kergon 12741da177e4SLinus Torvalds ci->idx++; 12751da177e4SLinus Torvalds } 1276512875bdSJun'ichi Nomura 1277512875bdSJun'ichi Nomura return 0; 12781da177e4SLinus Torvalds } 12791da177e4SLinus Torvalds 12801da177e4SLinus Torvalds /* 12818a53c28dSMikulas Patocka * Split the bio into several clones and submit it to targets. 12821da177e4SLinus Torvalds */ 1283f0b9a450SMikulas Patocka static void __split_and_process_bio(struct mapped_device *md, struct bio *bio) 12841da177e4SLinus Torvalds { 12856a8736d1STejun Heo bool is_flush = bio->bi_rw & REQ_FLUSH; 12861da177e4SLinus Torvalds struct clone_info ci; 1287512875bdSJun'ichi Nomura int error = 0; 12881da177e4SLinus Torvalds 12897c666411SAlasdair G Kergon ci.map = dm_get_live_table(md); 1290f0b9a450SMikulas Patocka if (unlikely(!ci.map)) { 1291f0b9a450SMikulas Patocka bio_io_error(bio); 1292f0b9a450SMikulas Patocka return; 1293f0b9a450SMikulas Patocka } 1294692d0eb9SMikulas Patocka 12951da177e4SLinus Torvalds ci.md = md; 12961da177e4SLinus Torvalds ci.io = alloc_io(md); 12971da177e4SLinus Torvalds ci.io->error = 0; 12981da177e4SLinus Torvalds atomic_set(&ci.io->io_count, 1); 12991da177e4SLinus Torvalds ci.io->bio = bio; 13001da177e4SLinus Torvalds ci.io->md = md; 1301f88fb981SKiyoshi Ueda spin_lock_init(&ci.io->endio_lock); 13021da177e4SLinus Torvalds ci.sector = bio->bi_sector; 13036a8736d1STejun Heo ci.idx = bio->bi_idx; 13046a8736d1STejun Heo 13056a8736d1STejun Heo if (!is_flush) { 13066a8736d1STejun Heo ci.bio = bio; 13071da177e4SLinus Torvalds ci.sector_count = bio_sectors(bio); 13086a8736d1STejun Heo } else { 13096a8736d1STejun Heo ci.bio = &ci.md->flush_bio; 1310f9ab94ceSMikulas Patocka ci.sector_count = 1; 1311d87f4c14STejun Heo } 13121da177e4SLinus Torvalds 13133eaf840eSJun'ichi "Nick" Nomura start_io_acct(ci.io); 1314d87f4c14STejun Heo while (ci.sector_count && !error) { 13156a8736d1STejun Heo if (!is_flush) 1316512875bdSJun'ichi Nomura error = __clone_and_map(&ci); 1317d87f4c14STejun Heo else 1318d87f4c14STejun Heo error = __clone_and_map_flush(&ci); 1319d87f4c14STejun Heo } 13201da177e4SLinus Torvalds 13211da177e4SLinus Torvalds /* drop the extra reference count */ 1322512875bdSJun'ichi Nomura dec_pending(ci.io, error); 
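	/*
	 * Completion-accounting sketch (descriptive note added for
	 * orientation): ci.io->io_count starts at 1, set above; each
	 * clone submitted through __map_bio() accounts for itself in
	 * io_count and its endio drops that count again, so the
	 * dec_pending() above only releases the initial reference and
	 * the io completes once every clone has finished.
	 */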
13231da177e4SLinus Torvalds dm_table_put(ci.map); 13241da177e4SLinus Torvalds } 13251da177e4SLinus Torvalds /*----------------------------------------------------------------- 13261da177e4SLinus Torvalds * CRUD END 13271da177e4SLinus Torvalds *---------------------------------------------------------------*/ 13281da177e4SLinus Torvalds 1329f6fccb12SMilan Broz static int dm_merge_bvec(struct request_queue *q, 1330f6fccb12SMilan Broz struct bvec_merge_data *bvm, 1331f6fccb12SMilan Broz struct bio_vec *biovec) 1332f6fccb12SMilan Broz { 1333f6fccb12SMilan Broz struct mapped_device *md = q->queuedata; 13347c666411SAlasdair G Kergon struct dm_table *map = dm_get_live_table(md); 1335f6fccb12SMilan Broz struct dm_target *ti; 1336f6fccb12SMilan Broz sector_t max_sectors; 13375037108aSMikulas Patocka int max_size = 0; 1338f6fccb12SMilan Broz 1339f6fccb12SMilan Broz if (unlikely(!map)) 13405037108aSMikulas Patocka goto out; 1341f6fccb12SMilan Broz 1342f6fccb12SMilan Broz ti = dm_table_find_target(map, bvm->bi_sector); 1343b01cd5acSMikulas Patocka if (!dm_target_is_valid(ti)) 1344b01cd5acSMikulas Patocka goto out_table; 1345f6fccb12SMilan Broz 1346f6fccb12SMilan Broz /* 1347f6fccb12SMilan Broz * Find maximum amount of I/O that won't need splitting 1348f6fccb12SMilan Broz */ 134956a67df7SMike Snitzer max_sectors = min(max_io_len(bvm->bi_sector, ti), 1350f6fccb12SMilan Broz (sector_t) BIO_MAX_SECTORS); 1351f6fccb12SMilan Broz max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size; 1352f6fccb12SMilan Broz if (max_size < 0) 1353f6fccb12SMilan Broz max_size = 0; 1354f6fccb12SMilan Broz 1355f6fccb12SMilan Broz /* 1356f6fccb12SMilan Broz * merge_bvec_fn() returns number of bytes 1357f6fccb12SMilan Broz * it can accept at this offset 1358f6fccb12SMilan Broz * max is precomputed maximal io size 1359f6fccb12SMilan Broz */ 1360f6fccb12SMilan Broz if (max_size && ti->type->merge) 1361f6fccb12SMilan Broz max_size = ti->type->merge(ti, bvm, biovec, max_size); 13628cbeb67aSMikulas Patocka /* 13638cbeb67aSMikulas Patocka * If the target doesn't support merge method and some of the devices 13648cbeb67aSMikulas Patocka * provided their merge_bvec method (we know this by looking at 13658cbeb67aSMikulas Patocka * queue_max_hw_sectors), then we can't allow bios with multiple vector 13668cbeb67aSMikulas Patocka * entries. So always set max_size to 0, and the code below allows 13678cbeb67aSMikulas Patocka * just one page. 13688cbeb67aSMikulas Patocka */ 13698cbeb67aSMikulas Patocka else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9) 13708cbeb67aSMikulas Patocka 13718cbeb67aSMikulas Patocka max_size = 0; 1372f6fccb12SMilan Broz 1373b01cd5acSMikulas Patocka out_table: 13745037108aSMikulas Patocka dm_table_put(map); 13755037108aSMikulas Patocka 13765037108aSMikulas Patocka out: 1377f6fccb12SMilan Broz /* 1378f6fccb12SMilan Broz * Always allow an entire first page 1379f6fccb12SMilan Broz */ 1380f6fccb12SMilan Broz if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT)) 1381f6fccb12SMilan Broz max_size = biovec->bv_len; 1382f6fccb12SMilan Broz 1383f6fccb12SMilan Broz return max_size; 1384f6fccb12SMilan Broz } 1385f6fccb12SMilan Broz 13861da177e4SLinus Torvalds /* 13871da177e4SLinus Torvalds * The request function that just remaps the bio built up by 13881da177e4SLinus Torvalds * dm_merge_bvec. 
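 *
 * (Arithmetic example for dm_merge_bvec() above, for illustration: with
 *  max_io_len() returning 16 sectors and bvm->bi_size already at 4096
 *  bytes, max_size = (16 << SECTOR_SHIFT) - 4096 = 4096 bytes, i.e. one
 *  more 4 KiB page may be merged before the bio would need splitting.)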
13891da177e4SLinus Torvalds */ 1390cec47e3dSKiyoshi Ueda static int _dm_request(struct request_queue *q, struct bio *bio) 13911da177e4SLinus Torvalds { 139212f03a49SKevin Corry int rw = bio_data_dir(bio); 13931da177e4SLinus Torvalds struct mapped_device *md = q->queuedata; 1394c9959059STejun Heo int cpu; 13951da177e4SLinus Torvalds 13962ca3310eSAlasdair G Kergon down_read(&md->io_lock); 13971da177e4SLinus Torvalds 1398074a7acaSTejun Heo cpu = part_stat_lock(); 1399074a7acaSTejun Heo part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]); 1400074a7acaSTejun Heo part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio)); 1401074a7acaSTejun Heo part_stat_unlock(); 140212f03a49SKevin Corry 14036a8736d1STejun Heo /* if we're suspended, we have to queue this io for later */ 14046a8736d1STejun Heo if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { 14052ca3310eSAlasdair G Kergon up_read(&md->io_lock); 14061da177e4SLinus Torvalds 14076a8736d1STejun Heo if (bio_rw(bio) != READA) 140892c63902SMikulas Patocka queue_io(md, bio); 14096a8736d1STejun Heo else 14106a8736d1STejun Heo bio_io_error(bio); 141192c63902SMikulas Patocka return 0; 14121da177e4SLinus Torvalds } 14131da177e4SLinus Torvalds 1414f0b9a450SMikulas Patocka __split_and_process_bio(md, bio); 14152ca3310eSAlasdair G Kergon up_read(&md->io_lock); 1416f0b9a450SMikulas Patocka return 0; 14171da177e4SLinus Torvalds } 14181da177e4SLinus Torvalds 1419cec47e3dSKiyoshi Ueda static int dm_make_request(struct request_queue *q, struct bio *bio) 1420cec47e3dSKiyoshi Ueda { 1421cec47e3dSKiyoshi Ueda struct mapped_device *md = q->queuedata; 1422cec47e3dSKiyoshi Ueda 1423cec47e3dSKiyoshi Ueda return md->saved_make_request_fn(q, bio); /* call __make_request() */ 1424cec47e3dSKiyoshi Ueda } 1425cec47e3dSKiyoshi Ueda 1426cec47e3dSKiyoshi Ueda static int dm_request_based(struct mapped_device *md) 1427cec47e3dSKiyoshi Ueda { 1428cec47e3dSKiyoshi Ueda return blk_queue_stackable(md->queue); 1429cec47e3dSKiyoshi Ueda } 1430cec47e3dSKiyoshi Ueda 1431cec47e3dSKiyoshi Ueda static int dm_request(struct request_queue *q, struct bio *bio) 1432cec47e3dSKiyoshi Ueda { 1433cec47e3dSKiyoshi Ueda struct mapped_device *md = q->queuedata; 1434cec47e3dSKiyoshi Ueda 1435cec47e3dSKiyoshi Ueda if (dm_request_based(md)) 1436cec47e3dSKiyoshi Ueda return dm_make_request(q, bio); 1437cec47e3dSKiyoshi Ueda 1438cec47e3dSKiyoshi Ueda return _dm_request(q, bio); 1439cec47e3dSKiyoshi Ueda } 1440cec47e3dSKiyoshi Ueda 1441cec47e3dSKiyoshi Ueda void dm_dispatch_request(struct request *rq) 1442cec47e3dSKiyoshi Ueda { 1443cec47e3dSKiyoshi Ueda int r; 1444cec47e3dSKiyoshi Ueda 1445cec47e3dSKiyoshi Ueda if (blk_queue_io_stat(rq->q)) 1446cec47e3dSKiyoshi Ueda rq->cmd_flags |= REQ_IO_STAT; 1447cec47e3dSKiyoshi Ueda 1448cec47e3dSKiyoshi Ueda rq->start_time = jiffies; 1449cec47e3dSKiyoshi Ueda r = blk_insert_cloned_request(rq->q, rq); 1450cec47e3dSKiyoshi Ueda if (r) 1451cec47e3dSKiyoshi Ueda dm_complete_request(rq, r); 1452cec47e3dSKiyoshi Ueda } 1453cec47e3dSKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_dispatch_request); 1454cec47e3dSKiyoshi Ueda 1455cec47e3dSKiyoshi Ueda static void dm_rq_bio_destructor(struct bio *bio) 1456cec47e3dSKiyoshi Ueda { 1457cec47e3dSKiyoshi Ueda struct dm_rq_clone_bio_info *info = bio->bi_private; 1458cec47e3dSKiyoshi Ueda struct mapped_device *md = info->tio->md; 1459cec47e3dSKiyoshi Ueda 1460cec47e3dSKiyoshi Ueda free_bio_info(info); 1461cec47e3dSKiyoshi Ueda bio_free(bio, md->bs); 1462cec47e3dSKiyoshi Ueda } 1463cec47e3dSKiyoshi Ueda 1464cec47e3dSKiyoshi 
Ueda static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig, 1465cec47e3dSKiyoshi Ueda void *data) 1466cec47e3dSKiyoshi Ueda { 1467cec47e3dSKiyoshi Ueda struct dm_rq_target_io *tio = data; 1468cec47e3dSKiyoshi Ueda struct mapped_device *md = tio->md; 1469cec47e3dSKiyoshi Ueda struct dm_rq_clone_bio_info *info = alloc_bio_info(md); 1470cec47e3dSKiyoshi Ueda 1471cec47e3dSKiyoshi Ueda if (!info) 1472cec47e3dSKiyoshi Ueda return -ENOMEM; 1473cec47e3dSKiyoshi Ueda 1474cec47e3dSKiyoshi Ueda info->orig = bio_orig; 1475cec47e3dSKiyoshi Ueda info->tio = tio; 1476cec47e3dSKiyoshi Ueda bio->bi_end_io = end_clone_bio; 1477cec47e3dSKiyoshi Ueda bio->bi_private = info; 1478cec47e3dSKiyoshi Ueda bio->bi_destructor = dm_rq_bio_destructor; 1479cec47e3dSKiyoshi Ueda 1480cec47e3dSKiyoshi Ueda return 0; 1481cec47e3dSKiyoshi Ueda } 1482cec47e3dSKiyoshi Ueda 1483cec47e3dSKiyoshi Ueda static int setup_clone(struct request *clone, struct request *rq, 1484cec47e3dSKiyoshi Ueda struct dm_rq_target_io *tio) 1485cec47e3dSKiyoshi Ueda { 1486d0bcb878SKiyoshi Ueda int r; 1487cec47e3dSKiyoshi Ueda 1488d0bcb878SKiyoshi Ueda r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC, 1489d0bcb878SKiyoshi Ueda dm_rq_bio_constructor, tio); 1490cec47e3dSKiyoshi Ueda if (r) 1491cec47e3dSKiyoshi Ueda return r; 1492cec47e3dSKiyoshi Ueda 1493cec47e3dSKiyoshi Ueda clone->cmd = rq->cmd; 1494cec47e3dSKiyoshi Ueda clone->cmd_len = rq->cmd_len; 1495cec47e3dSKiyoshi Ueda clone->sense = rq->sense; 1496cec47e3dSKiyoshi Ueda clone->buffer = rq->buffer; 1497cec47e3dSKiyoshi Ueda clone->end_io = end_clone_request; 1498cec47e3dSKiyoshi Ueda clone->end_io_data = tio; 1499cec47e3dSKiyoshi Ueda 1500cec47e3dSKiyoshi Ueda return 0; 1501cec47e3dSKiyoshi Ueda } 1502cec47e3dSKiyoshi Ueda 15036facdaffSKiyoshi Ueda static struct request *clone_rq(struct request *rq, struct mapped_device *md, 15046facdaffSKiyoshi Ueda gfp_t gfp_mask) 15056facdaffSKiyoshi Ueda { 15066facdaffSKiyoshi Ueda struct request *clone; 15076facdaffSKiyoshi Ueda struct dm_rq_target_io *tio; 15086facdaffSKiyoshi Ueda 15096facdaffSKiyoshi Ueda tio = alloc_rq_tio(md, gfp_mask); 15106facdaffSKiyoshi Ueda if (!tio) 15116facdaffSKiyoshi Ueda return NULL; 15126facdaffSKiyoshi Ueda 15136facdaffSKiyoshi Ueda tio->md = md; 15146facdaffSKiyoshi Ueda tio->ti = NULL; 15156facdaffSKiyoshi Ueda tio->orig = rq; 15166facdaffSKiyoshi Ueda tio->error = 0; 15176facdaffSKiyoshi Ueda memset(&tio->info, 0, sizeof(tio->info)); 15186facdaffSKiyoshi Ueda 15196facdaffSKiyoshi Ueda clone = &tio->clone; 15206facdaffSKiyoshi Ueda if (setup_clone(clone, rq, tio)) { 15216facdaffSKiyoshi Ueda /* -ENOMEM */ 15226facdaffSKiyoshi Ueda free_rq_tio(tio); 15236facdaffSKiyoshi Ueda return NULL; 15246facdaffSKiyoshi Ueda } 15256facdaffSKiyoshi Ueda 15266facdaffSKiyoshi Ueda return clone; 15276facdaffSKiyoshi Ueda } 15286facdaffSKiyoshi Ueda 1529cec47e3dSKiyoshi Ueda /* 1530cec47e3dSKiyoshi Ueda * Called with the queue lock held. 
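 *
 * (Lifecycle sketch, added for orientation: dm_prep_fn() below stores
 *  the clone built by clone_rq() in rq->special and sets REQ_DONTPREP
 *  so prep runs only once; dm_request_fn() later picks the clone out
 *  of rq->special and hands it to map_request().)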
1531cec47e3dSKiyoshi Ueda */ 1532cec47e3dSKiyoshi Ueda static int dm_prep_fn(struct request_queue *q, struct request *rq) 1533cec47e3dSKiyoshi Ueda { 1534cec47e3dSKiyoshi Ueda struct mapped_device *md = q->queuedata; 1535cec47e3dSKiyoshi Ueda struct request *clone; 1536cec47e3dSKiyoshi Ueda 1537cec47e3dSKiyoshi Ueda if (unlikely(rq->special)) { 1538cec47e3dSKiyoshi Ueda DMWARN("Already has something in rq->special."); 1539cec47e3dSKiyoshi Ueda return BLKPREP_KILL; 1540cec47e3dSKiyoshi Ueda } 1541cec47e3dSKiyoshi Ueda 15426facdaffSKiyoshi Ueda clone = clone_rq(rq, md, GFP_ATOMIC); 15436facdaffSKiyoshi Ueda if (!clone) 1544cec47e3dSKiyoshi Ueda return BLKPREP_DEFER; 1545cec47e3dSKiyoshi Ueda 1546cec47e3dSKiyoshi Ueda rq->special = clone; 1547cec47e3dSKiyoshi Ueda rq->cmd_flags |= REQ_DONTPREP; 1548cec47e3dSKiyoshi Ueda 1549cec47e3dSKiyoshi Ueda return BLKPREP_OK; 1550cec47e3dSKiyoshi Ueda } 1551cec47e3dSKiyoshi Ueda 15529eef87daSKiyoshi Ueda /* 15539eef87daSKiyoshi Ueda * Returns: 15549eef87daSKiyoshi Ueda * 0 : the request has been processed (not requeued) 15559eef87daSKiyoshi Ueda * !0 : the request has been requeued 15569eef87daSKiyoshi Ueda */ 15579eef87daSKiyoshi Ueda static int map_request(struct dm_target *ti, struct request *clone, 1558cec47e3dSKiyoshi Ueda struct mapped_device *md) 1559cec47e3dSKiyoshi Ueda { 15609eef87daSKiyoshi Ueda int r, requeued = 0; 1561cec47e3dSKiyoshi Ueda struct dm_rq_target_io *tio = clone->end_io_data; 1562cec47e3dSKiyoshi Ueda 1563cec47e3dSKiyoshi Ueda /* 1564cec47e3dSKiyoshi Ueda * Hold the md reference here for the in-flight I/O. 1565cec47e3dSKiyoshi Ueda * We can't rely on the reference count taken by the device opener, 1566cec47e3dSKiyoshi Ueda * because the device may be closed during the request completion 1567cec47e3dSKiyoshi Ueda * when all bios are completed. 1568cec47e3dSKiyoshi Ueda * See the comment in rq_completed() too. 
1569cec47e3dSKiyoshi Ueda */ 1570cec47e3dSKiyoshi Ueda dm_get(md); 1571cec47e3dSKiyoshi Ueda 1572cec47e3dSKiyoshi Ueda tio->ti = ti; 1573cec47e3dSKiyoshi Ueda r = ti->type->map_rq(ti, clone, &tio->info); 1574cec47e3dSKiyoshi Ueda switch (r) { 1575cec47e3dSKiyoshi Ueda case DM_MAPIO_SUBMITTED: 1576cec47e3dSKiyoshi Ueda /* The target has taken the I/O to submit by itself later */ 1577cec47e3dSKiyoshi Ueda break; 1578cec47e3dSKiyoshi Ueda case DM_MAPIO_REMAPPED: 1579cec47e3dSKiyoshi Ueda /* The target has remapped the I/O so dispatch it */ 15806db4ccd6SJun'ichi Nomura trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)), 15816db4ccd6SJun'ichi Nomura blk_rq_pos(tio->orig)); 1582cec47e3dSKiyoshi Ueda dm_dispatch_request(clone); 1583cec47e3dSKiyoshi Ueda break; 1584cec47e3dSKiyoshi Ueda case DM_MAPIO_REQUEUE: 1585cec47e3dSKiyoshi Ueda /* The target wants to requeue the I/O */ 1586cec47e3dSKiyoshi Ueda dm_requeue_unmapped_request(clone); 15879eef87daSKiyoshi Ueda requeued = 1; 1588cec47e3dSKiyoshi Ueda break; 1589cec47e3dSKiyoshi Ueda default: 1590cec47e3dSKiyoshi Ueda if (r > 0) { 1591cec47e3dSKiyoshi Ueda DMWARN("unimplemented target map return value: %d", r); 1592cec47e3dSKiyoshi Ueda BUG(); 1593cec47e3dSKiyoshi Ueda } 1594cec47e3dSKiyoshi Ueda 1595cec47e3dSKiyoshi Ueda /* The target wants to complete the I/O */ 1596cec47e3dSKiyoshi Ueda dm_kill_unmapped_request(clone, r); 1597cec47e3dSKiyoshi Ueda break; 1598cec47e3dSKiyoshi Ueda } 15999eef87daSKiyoshi Ueda 16009eef87daSKiyoshi Ueda return requeued; 1601cec47e3dSKiyoshi Ueda } 1602cec47e3dSKiyoshi Ueda 1603cec47e3dSKiyoshi Ueda /* 1604cec47e3dSKiyoshi Ueda * q->request_fn for request-based dm. 1605cec47e3dSKiyoshi Ueda * Called with the queue lock held. 1606cec47e3dSKiyoshi Ueda */ 1607cec47e3dSKiyoshi Ueda static void dm_request_fn(struct request_queue *q) 1608cec47e3dSKiyoshi Ueda { 1609cec47e3dSKiyoshi Ueda struct mapped_device *md = q->queuedata; 16107c666411SAlasdair G Kergon struct dm_table *map = dm_get_live_table(md); 1611cec47e3dSKiyoshi Ueda struct dm_target *ti; 1612b4324feeSKiyoshi Ueda struct request *rq, *clone; 161329e4013dSTejun Heo sector_t pos; 1614cec47e3dSKiyoshi Ueda 1615cec47e3dSKiyoshi Ueda /* 1616b4324feeSKiyoshi Ueda * For suspend, check blk_queue_stopped() and increment 1617b4324feeSKiyoshi Ueda * ->pending within a single queue_lock not to increment the 1618b4324feeSKiyoshi Ueda * number of in-flight I/Os after the queue is stopped in 1619b4324feeSKiyoshi Ueda * dm_suspend(). 
1620cec47e3dSKiyoshi Ueda */ 1621cec47e3dSKiyoshi Ueda while (!blk_queue_plugged(q) && !blk_queue_stopped(q)) { 1622cec47e3dSKiyoshi Ueda rq = blk_peek_request(q); 1623cec47e3dSKiyoshi Ueda if (!rq) 1624cec47e3dSKiyoshi Ueda goto plug_and_out; 1625cec47e3dSKiyoshi Ueda 162629e4013dSTejun Heo /* always use block 0 to find the target for flushes for now */ 162729e4013dSTejun Heo pos = 0; 162829e4013dSTejun Heo if (!(rq->cmd_flags & REQ_FLUSH)) 162929e4013dSTejun Heo pos = blk_rq_pos(rq); 1630d0bcb878SKiyoshi Ueda 163129e4013dSTejun Heo ti = dm_table_find_target(map, pos); 163229e4013dSTejun Heo BUG_ON(!dm_target_is_valid(ti)); 163329e4013dSTejun Heo 1634cec47e3dSKiyoshi Ueda if (ti->type->busy && ti->type->busy(ti)) 1635cec47e3dSKiyoshi Ueda goto plug_and_out; 1636cec47e3dSKiyoshi Ueda 1637cec47e3dSKiyoshi Ueda blk_start_request(rq); 1638b4324feeSKiyoshi Ueda clone = rq->special; 1639b4324feeSKiyoshi Ueda atomic_inc(&md->pending[rq_data_dir(clone)]); 1640b4324feeSKiyoshi Ueda 1641cec47e3dSKiyoshi Ueda spin_unlock(q->queue_lock); 16429eef87daSKiyoshi Ueda if (map_request(ti, clone, md)) 16439eef87daSKiyoshi Ueda goto requeued; 16449eef87daSKiyoshi Ueda 1645cec47e3dSKiyoshi Ueda spin_lock_irq(q->queue_lock); 1646cec47e3dSKiyoshi Ueda } 1647cec47e3dSKiyoshi Ueda 1648cec47e3dSKiyoshi Ueda goto out; 1649cec47e3dSKiyoshi Ueda 16509eef87daSKiyoshi Ueda requeued: 16519eef87daSKiyoshi Ueda spin_lock_irq(q->queue_lock); 16529eef87daSKiyoshi Ueda 1653cec47e3dSKiyoshi Ueda plug_and_out: 1654cec47e3dSKiyoshi Ueda if (!elv_queue_empty(q)) 1655cec47e3dSKiyoshi Ueda /* Some requests still remain, retry later */ 1656cec47e3dSKiyoshi Ueda blk_plug_device(q); 1657cec47e3dSKiyoshi Ueda 1658cec47e3dSKiyoshi Ueda out: 1659cec47e3dSKiyoshi Ueda dm_table_put(map); 1660cec47e3dSKiyoshi Ueda 1661cec47e3dSKiyoshi Ueda return; 1662cec47e3dSKiyoshi Ueda } 1663cec47e3dSKiyoshi Ueda 1664cec47e3dSKiyoshi Ueda int dm_underlying_device_busy(struct request_queue *q) 1665cec47e3dSKiyoshi Ueda { 1666cec47e3dSKiyoshi Ueda return blk_lld_busy(q); 1667cec47e3dSKiyoshi Ueda } 1668cec47e3dSKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_underlying_device_busy); 1669cec47e3dSKiyoshi Ueda 1670cec47e3dSKiyoshi Ueda static int dm_lld_busy(struct request_queue *q) 1671cec47e3dSKiyoshi Ueda { 1672cec47e3dSKiyoshi Ueda int r; 1673cec47e3dSKiyoshi Ueda struct mapped_device *md = q->queuedata; 16747c666411SAlasdair G Kergon struct dm_table *map = dm_get_live_table(md); 1675cec47e3dSKiyoshi Ueda 1676cec47e3dSKiyoshi Ueda if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) 1677cec47e3dSKiyoshi Ueda r = 1; 1678cec47e3dSKiyoshi Ueda else 1679cec47e3dSKiyoshi Ueda r = dm_table_any_busy_target(map); 1680cec47e3dSKiyoshi Ueda 1681cec47e3dSKiyoshi Ueda dm_table_put(map); 1682cec47e3dSKiyoshi Ueda 1683cec47e3dSKiyoshi Ueda return r; 1684cec47e3dSKiyoshi Ueda } 1685cec47e3dSKiyoshi Ueda 1686165125e1SJens Axboe static void dm_unplug_all(struct request_queue *q) 16871da177e4SLinus Torvalds { 16881da177e4SLinus Torvalds struct mapped_device *md = q->queuedata; 16897c666411SAlasdair G Kergon struct dm_table *map = dm_get_live_table(md); 16901da177e4SLinus Torvalds 16911da177e4SLinus Torvalds if (map) { 1692cec47e3dSKiyoshi Ueda if (dm_request_based(md)) 1693cec47e3dSKiyoshi Ueda generic_unplug_device(q); 1694cec47e3dSKiyoshi Ueda 16951da177e4SLinus Torvalds dm_table_unplug_all(map); 16961da177e4SLinus Torvalds dm_table_put(map); 16971da177e4SLinus Torvalds } 16981da177e4SLinus Torvalds } 16991da177e4SLinus Torvalds 17001da177e4SLinus Torvalds static int 
dm_any_congested(void *congested_data, int bdi_bits) 17011da177e4SLinus Torvalds { 17028a57dfc6SChandra Seetharaman int r = bdi_bits; 17038a57dfc6SChandra Seetharaman struct mapped_device *md = congested_data; 17048a57dfc6SChandra Seetharaman struct dm_table *map; 17051da177e4SLinus Torvalds 17061eb787ecSAlasdair G Kergon if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 17077c666411SAlasdair G Kergon map = dm_get_live_table(md); 17088a57dfc6SChandra Seetharaman if (map) { 1709cec47e3dSKiyoshi Ueda /* 1710cec47e3dSKiyoshi Ueda * Request-based dm cares about only own queue for 1711cec47e3dSKiyoshi Ueda * the query about congestion status of request_queue 1712cec47e3dSKiyoshi Ueda */ 1713cec47e3dSKiyoshi Ueda if (dm_request_based(md)) 1714cec47e3dSKiyoshi Ueda r = md->queue->backing_dev_info.state & 1715cec47e3dSKiyoshi Ueda bdi_bits; 1716cec47e3dSKiyoshi Ueda else 17171da177e4SLinus Torvalds r = dm_table_any_congested(map, bdi_bits); 1718cec47e3dSKiyoshi Ueda 17191da177e4SLinus Torvalds dm_table_put(map); 17208a57dfc6SChandra Seetharaman } 17218a57dfc6SChandra Seetharaman } 17228a57dfc6SChandra Seetharaman 17231da177e4SLinus Torvalds return r; 17241da177e4SLinus Torvalds } 17251da177e4SLinus Torvalds 17261da177e4SLinus Torvalds /*----------------------------------------------------------------- 17271da177e4SLinus Torvalds * An IDR is used to keep track of allocated minor numbers. 17281da177e4SLinus Torvalds *---------------------------------------------------------------*/ 17291da177e4SLinus Torvalds static DEFINE_IDR(_minor_idr); 17301da177e4SLinus Torvalds 17312b06cfffSAlasdair G Kergon static void free_minor(int minor) 17321da177e4SLinus Torvalds { 1733f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 17341da177e4SLinus Torvalds idr_remove(&_minor_idr, minor); 1735f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 17361da177e4SLinus Torvalds } 17371da177e4SLinus Torvalds 17381da177e4SLinus Torvalds /* 17391da177e4SLinus Torvalds * See if the device with a specific minor # is free. 
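 *
 * (Illustrative note on the two-step IDR API used below: the canonical
 *  idiom elsewhere in the kernel retries when the preallocated node is
 *  consumed by a concurrent allocator between the two calls, roughly:
 *
 *	do {
 *		if (!idr_pre_get(&idr, GFP_KERNEL))
 *			return -ENOMEM;
 *		r = idr_get_new_above(&idr, ptr, start, &id);
 *	} while (r == -EAGAIN);
 *
 *  the helpers below instead simply return the error to the caller.)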
17401da177e4SLinus Torvalds */ 1741cf13ab8eSFrederik Deweerdt static int specific_minor(int minor) 17421da177e4SLinus Torvalds { 17431da177e4SLinus Torvalds int r, m; 17441da177e4SLinus Torvalds 17451da177e4SLinus Torvalds if (minor >= (1 << MINORBITS)) 17461da177e4SLinus Torvalds return -EINVAL; 17471da177e4SLinus Torvalds 174862f75c2fSJeff Mahoney r = idr_pre_get(&_minor_idr, GFP_KERNEL); 174962f75c2fSJeff Mahoney if (!r) 175062f75c2fSJeff Mahoney return -ENOMEM; 175162f75c2fSJeff Mahoney 1752f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 17531da177e4SLinus Torvalds 17541da177e4SLinus Torvalds if (idr_find(&_minor_idr, minor)) { 17551da177e4SLinus Torvalds r = -EBUSY; 17561da177e4SLinus Torvalds goto out; 17571da177e4SLinus Torvalds } 17581da177e4SLinus Torvalds 1759ba61fdd1SJeff Mahoney r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m); 176062f75c2fSJeff Mahoney if (r) 17611da177e4SLinus Torvalds goto out; 17621da177e4SLinus Torvalds 17631da177e4SLinus Torvalds if (m != minor) { 17641da177e4SLinus Torvalds idr_remove(&_minor_idr, m); 17651da177e4SLinus Torvalds r = -EBUSY; 17661da177e4SLinus Torvalds goto out; 17671da177e4SLinus Torvalds } 17681da177e4SLinus Torvalds 17691da177e4SLinus Torvalds out: 1770f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 17711da177e4SLinus Torvalds return r; 17721da177e4SLinus Torvalds } 17731da177e4SLinus Torvalds 1774cf13ab8eSFrederik Deweerdt static int next_free_minor(int *minor) 17751da177e4SLinus Torvalds { 17762b06cfffSAlasdair G Kergon int r, m; 17771da177e4SLinus Torvalds 17781da177e4SLinus Torvalds r = idr_pre_get(&_minor_idr, GFP_KERNEL); 177962f75c2fSJeff Mahoney if (!r) 178062f75c2fSJeff Mahoney return -ENOMEM; 178162f75c2fSJeff Mahoney 1782f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 17831da177e4SLinus Torvalds 1784ba61fdd1SJeff Mahoney r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m); 1785cf13ab8eSFrederik Deweerdt if (r) 17861da177e4SLinus Torvalds goto out; 17871da177e4SLinus Torvalds 17881da177e4SLinus Torvalds if (m >= (1 << MINORBITS)) { 17891da177e4SLinus Torvalds idr_remove(&_minor_idr, m); 17901da177e4SLinus Torvalds r = -ENOSPC; 17911da177e4SLinus Torvalds goto out; 17921da177e4SLinus Torvalds } 17931da177e4SLinus Torvalds 17941da177e4SLinus Torvalds *minor = m; 17951da177e4SLinus Torvalds 17961da177e4SLinus Torvalds out: 1797f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 17981da177e4SLinus Torvalds return r; 17991da177e4SLinus Torvalds } 18001da177e4SLinus Torvalds 180183d5cde4SAlexey Dobriyan static const struct block_device_operations dm_blk_dops; 18021da177e4SLinus Torvalds 180353d5914fSMikulas Patocka static void dm_wq_work(struct work_struct *work); 180453d5914fSMikulas Patocka 18054a0b4ddfSMike Snitzer static void dm_init_md_queue(struct mapped_device *md) 18064a0b4ddfSMike Snitzer { 18074a0b4ddfSMike Snitzer /* 18084a0b4ddfSMike Snitzer * Request-based dm devices cannot be stacked on top of bio-based dm 18094a0b4ddfSMike Snitzer * devices. The type of this dm device has not been decided yet. 18104a0b4ddfSMike Snitzer * The type is decided at the first table loading time. 18114a0b4ddfSMike Snitzer * To prevent problematic device stacking, clear the queue flag 18124a0b4ddfSMike Snitzer * for request stacking support until then. 18134a0b4ddfSMike Snitzer * 18144a0b4ddfSMike Snitzer * This queue is new, so no concurrency on the queue_flags. 
18154a0b4ddfSMike Snitzer */ 18164a0b4ddfSMike Snitzer queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue); 18174a0b4ddfSMike Snitzer 18184a0b4ddfSMike Snitzer md->queue->queuedata = md; 18194a0b4ddfSMike Snitzer md->queue->backing_dev_info.congested_fn = dm_any_congested; 18204a0b4ddfSMike Snitzer md->queue->backing_dev_info.congested_data = md; 18214a0b4ddfSMike Snitzer blk_queue_make_request(md->queue, dm_request); 18224a0b4ddfSMike Snitzer blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY); 18234a0b4ddfSMike Snitzer md->queue->unplug_fn = dm_unplug_all; 18244a0b4ddfSMike Snitzer blk_queue_merge_bvec(md->queue, dm_merge_bvec); 1825d87f4c14STejun Heo blk_queue_flush(md->queue, REQ_FLUSH | REQ_FUA); 18264a0b4ddfSMike Snitzer } 18274a0b4ddfSMike Snitzer 18281da177e4SLinus Torvalds /* 18291da177e4SLinus Torvalds * Allocate and initialise a blank device with a given minor. 18301da177e4SLinus Torvalds */ 18312b06cfffSAlasdair G Kergon static struct mapped_device *alloc_dev(int minor) 18321da177e4SLinus Torvalds { 18331da177e4SLinus Torvalds int r; 1834cf13ab8eSFrederik Deweerdt struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL); 1835ba61fdd1SJeff Mahoney void *old_md; 18361da177e4SLinus Torvalds 18371da177e4SLinus Torvalds if (!md) { 18381da177e4SLinus Torvalds DMWARN("unable to allocate device, out of memory."); 18391da177e4SLinus Torvalds return NULL; 18401da177e4SLinus Torvalds } 18411da177e4SLinus Torvalds 184210da4f79SJeff Mahoney if (!try_module_get(THIS_MODULE)) 18436ed7ade8SMilan Broz goto bad_module_get; 184410da4f79SJeff Mahoney 18451da177e4SLinus Torvalds /* get a minor number for the dev */ 18462b06cfffSAlasdair G Kergon if (minor == DM_ANY_MINOR) 1847cf13ab8eSFrederik Deweerdt r = next_free_minor(&minor); 18482b06cfffSAlasdair G Kergon else 1849cf13ab8eSFrederik Deweerdt r = specific_minor(minor); 18501da177e4SLinus Torvalds if (r < 0) 18516ed7ade8SMilan Broz goto bad_minor; 18521da177e4SLinus Torvalds 1853a5664dadSMike Snitzer md->type = DM_TYPE_NONE; 18542ca3310eSAlasdair G Kergon init_rwsem(&md->io_lock); 1855e61290a4SDaniel Walker mutex_init(&md->suspend_lock); 1856a5664dadSMike Snitzer mutex_init(&md->type_lock); 1857022c2611SMikulas Patocka spin_lock_init(&md->deferred_lock); 18581da177e4SLinus Torvalds rwlock_init(&md->map_lock); 18591da177e4SLinus Torvalds atomic_set(&md->holders, 1); 18605c6bd75dSAlasdair G Kergon atomic_set(&md->open_count, 0); 18611da177e4SLinus Torvalds atomic_set(&md->event_nr, 0); 18627a8c3d3bSMike Anderson atomic_set(&md->uevent_seq, 0); 18637a8c3d3bSMike Anderson INIT_LIST_HEAD(&md->uevent_list); 18647a8c3d3bSMike Anderson spin_lock_init(&md->uevent_lock); 18651da177e4SLinus Torvalds 18664a0b4ddfSMike Snitzer md->queue = blk_alloc_queue(GFP_KERNEL); 18671da177e4SLinus Torvalds if (!md->queue) 18686ed7ade8SMilan Broz goto bad_queue; 18691da177e4SLinus Torvalds 18704a0b4ddfSMike Snitzer dm_init_md_queue(md); 18719faf400fSStefan Bader 18721da177e4SLinus Torvalds md->disk = alloc_disk(1); 18731da177e4SLinus Torvalds if (!md->disk) 18746ed7ade8SMilan Broz goto bad_disk; 18751da177e4SLinus Torvalds 1876316d315bSNikanth Karthikesan atomic_set(&md->pending[0], 0); 1877316d315bSNikanth Karthikesan atomic_set(&md->pending[1], 0); 1878f0b04115SJeff Mahoney init_waitqueue_head(&md->wait); 187953d5914fSMikulas Patocka INIT_WORK(&md->work, dm_wq_work); 1880f0b04115SJeff Mahoney init_waitqueue_head(&md->eventq); 1881f0b04115SJeff Mahoney 18821da177e4SLinus Torvalds md->disk->major = _major; 18831da177e4SLinus Torvalds md->disk->first_minor = 
minor; 18841da177e4SLinus Torvalds md->disk->fops = &dm_blk_dops; 18851da177e4SLinus Torvalds md->disk->queue = md->queue; 18861da177e4SLinus Torvalds md->disk->private_data = md; 18871da177e4SLinus Torvalds sprintf(md->disk->disk_name, "dm-%d", minor); 18881da177e4SLinus Torvalds add_disk(md->disk); 18897e51f257SMike Anderson format_dev_t(md->name, MKDEV(_major, minor)); 18901da177e4SLinus Torvalds 1891304f3f6aSMilan Broz md->wq = create_singlethread_workqueue("kdmflush"); 1892304f3f6aSMilan Broz if (!md->wq) 1893304f3f6aSMilan Broz goto bad_thread; 1894304f3f6aSMilan Broz 189532a926daSMikulas Patocka md->bdev = bdget_disk(md->disk, 0); 189632a926daSMikulas Patocka if (!md->bdev) 189732a926daSMikulas Patocka goto bad_bdev; 189832a926daSMikulas Patocka 18996a8736d1STejun Heo bio_init(&md->flush_bio); 19006a8736d1STejun Heo md->flush_bio.bi_bdev = md->bdev; 19016a8736d1STejun Heo md->flush_bio.bi_rw = WRITE_FLUSH; 19026a8736d1STejun Heo 1903ba61fdd1SJeff Mahoney /* Populate the mapping, nobody knows we exist yet */ 1904f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 1905ba61fdd1SJeff Mahoney old_md = idr_replace(&_minor_idr, md, minor); 1906f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 1907ba61fdd1SJeff Mahoney 1908ba61fdd1SJeff Mahoney BUG_ON(old_md != MINOR_ALLOCED); 1909ba61fdd1SJeff Mahoney 19101da177e4SLinus Torvalds return md; 19111da177e4SLinus Torvalds 191232a926daSMikulas Patocka bad_bdev: 191332a926daSMikulas Patocka destroy_workqueue(md->wq); 1914304f3f6aSMilan Broz bad_thread: 191503022c54SZdenek Kabelac del_gendisk(md->disk); 1916304f3f6aSMilan Broz put_disk(md->disk); 19176ed7ade8SMilan Broz bad_disk: 19181312f40eSAl Viro blk_cleanup_queue(md->queue); 19196ed7ade8SMilan Broz bad_queue: 19201da177e4SLinus Torvalds free_minor(minor); 19216ed7ade8SMilan Broz bad_minor: 192210da4f79SJeff Mahoney module_put(THIS_MODULE); 19236ed7ade8SMilan Broz bad_module_get: 19241da177e4SLinus Torvalds kfree(md); 19251da177e4SLinus Torvalds return NULL; 19261da177e4SLinus Torvalds } 19271da177e4SLinus Torvalds 1928ae9da83fSJun'ichi Nomura static void unlock_fs(struct mapped_device *md); 1929ae9da83fSJun'ichi Nomura 19301da177e4SLinus Torvalds static void free_dev(struct mapped_device *md) 19311da177e4SLinus Torvalds { 1932f331c029STejun Heo int minor = MINOR(disk_devt(md->disk)); 193363d94e48SJun'ichi Nomura 1934ae9da83fSJun'ichi Nomura unlock_fs(md); 1935db8fef4fSMikulas Patocka bdput(md->bdev); 1936304f3f6aSMilan Broz destroy_workqueue(md->wq); 1937e6ee8c0bSKiyoshi Ueda if (md->tio_pool) 19381da177e4SLinus Torvalds mempool_destroy(md->tio_pool); 1939e6ee8c0bSKiyoshi Ueda if (md->io_pool) 19401da177e4SLinus Torvalds mempool_destroy(md->io_pool); 1941e6ee8c0bSKiyoshi Ueda if (md->bs) 19429faf400fSStefan Bader bioset_free(md->bs); 19439c47008dSMartin K. 
Petersen blk_integrity_unregister(md->disk); 19441da177e4SLinus Torvalds del_gendisk(md->disk); 194563d94e48SJun'ichi Nomura free_minor(minor); 1946fba9f90eSJeff Mahoney 1947fba9f90eSJeff Mahoney spin_lock(&_minor_lock); 1948fba9f90eSJeff Mahoney md->disk->private_data = NULL; 1949fba9f90eSJeff Mahoney spin_unlock(&_minor_lock); 1950fba9f90eSJeff Mahoney 19511da177e4SLinus Torvalds put_disk(md->disk); 19521312f40eSAl Viro blk_cleanup_queue(md->queue); 195310da4f79SJeff Mahoney module_put(THIS_MODULE); 19541da177e4SLinus Torvalds kfree(md); 19551da177e4SLinus Torvalds } 19561da177e4SLinus Torvalds 1957e6ee8c0bSKiyoshi Ueda static void __bind_mempools(struct mapped_device *md, struct dm_table *t) 1958e6ee8c0bSKiyoshi Ueda { 1959e6ee8c0bSKiyoshi Ueda struct dm_md_mempools *p; 1960e6ee8c0bSKiyoshi Ueda 1961e6ee8c0bSKiyoshi Ueda if (md->io_pool && md->tio_pool && md->bs) 1962e6ee8c0bSKiyoshi Ueda /* the md already has necessary mempools */ 1963e6ee8c0bSKiyoshi Ueda goto out; 1964e6ee8c0bSKiyoshi Ueda 1965e6ee8c0bSKiyoshi Ueda p = dm_table_get_md_mempools(t); 1966e6ee8c0bSKiyoshi Ueda BUG_ON(!p || md->io_pool || md->tio_pool || md->bs); 1967e6ee8c0bSKiyoshi Ueda 1968e6ee8c0bSKiyoshi Ueda md->io_pool = p->io_pool; 1969e6ee8c0bSKiyoshi Ueda p->io_pool = NULL; 1970e6ee8c0bSKiyoshi Ueda md->tio_pool = p->tio_pool; 1971e6ee8c0bSKiyoshi Ueda p->tio_pool = NULL; 1972e6ee8c0bSKiyoshi Ueda md->bs = p->bs; 1973e6ee8c0bSKiyoshi Ueda p->bs = NULL; 1974e6ee8c0bSKiyoshi Ueda 1975e6ee8c0bSKiyoshi Ueda out: 1976e6ee8c0bSKiyoshi Ueda /* mempool bind completed, now no need any mempools in the table */ 1977e6ee8c0bSKiyoshi Ueda dm_table_free_md_mempools(t); 1978e6ee8c0bSKiyoshi Ueda } 1979e6ee8c0bSKiyoshi Ueda 19801da177e4SLinus Torvalds /* 19811da177e4SLinus Torvalds * Bind a table to the device. 19821da177e4SLinus Torvalds */ 19831da177e4SLinus Torvalds static void event_callback(void *context) 19841da177e4SLinus Torvalds { 19857a8c3d3bSMike Anderson unsigned long flags; 19867a8c3d3bSMike Anderson LIST_HEAD(uevents); 19871da177e4SLinus Torvalds struct mapped_device *md = (struct mapped_device *) context; 19881da177e4SLinus Torvalds 19897a8c3d3bSMike Anderson spin_lock_irqsave(&md->uevent_lock, flags); 19907a8c3d3bSMike Anderson list_splice_init(&md->uevent_list, &uevents); 19917a8c3d3bSMike Anderson spin_unlock_irqrestore(&md->uevent_lock, flags); 19927a8c3d3bSMike Anderson 1993ed9e1982STejun Heo dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); 19947a8c3d3bSMike Anderson 19951da177e4SLinus Torvalds atomic_inc(&md->event_nr); 19961da177e4SLinus Torvalds wake_up(&md->eventq); 19971da177e4SLinus Torvalds } 19981da177e4SLinus Torvalds 19994e90188bSAlasdair G Kergon static void __set_size(struct mapped_device *md, sector_t size) 20001da177e4SLinus Torvalds { 20014e90188bSAlasdair G Kergon set_capacity(md->disk, size); 20021da177e4SLinus Torvalds 2003db8fef4fSMikulas Patocka mutex_lock(&md->bdev->bd_inode->i_mutex); 2004db8fef4fSMikulas Patocka i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT); 2005db8fef4fSMikulas Patocka mutex_unlock(&md->bdev->bd_inode->i_mutex); 20061da177e4SLinus Torvalds } 20071da177e4SLinus Torvalds 2008042d2a9bSAlasdair G Kergon /* 2009042d2a9bSAlasdair G Kergon * Returns old map, which caller must destroy. 
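 *
 * (For illustration, the expected caller pattern; dm_swap_table() below
 *  obtains the old map from __bind() and passes it on to its caller:
 *	old_map = dm_swap_table(md, new_table);
 *	if (!IS_ERR_OR_NULL(old_map))
 *		dm_table_destroy(old_map);
 *  )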
2010042d2a9bSAlasdair G Kergon */ 2011042d2a9bSAlasdair G Kergon static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, 2012754c5fc7SMike Snitzer struct queue_limits *limits) 20131da177e4SLinus Torvalds { 2014042d2a9bSAlasdair G Kergon struct dm_table *old_map; 2015165125e1SJens Axboe struct request_queue *q = md->queue; 20161da177e4SLinus Torvalds sector_t size; 2017523d9297SKiyoshi Ueda unsigned long flags; 20181da177e4SLinus Torvalds 20191da177e4SLinus Torvalds size = dm_table_get_size(t); 20203ac51e74SDarrick J. Wong 20213ac51e74SDarrick J. Wong /* 20223ac51e74SDarrick J. Wong * Wipe any geometry if the size of the table changed. 20233ac51e74SDarrick J. Wong */ 20243ac51e74SDarrick J. Wong if (size != get_capacity(md->disk)) 20253ac51e74SDarrick J. Wong memset(&md->geometry, 0, sizeof(md->geometry)); 20263ac51e74SDarrick J. Wong 20274e90188bSAlasdair G Kergon __set_size(md, size); 20281da177e4SLinus Torvalds 2029cf222b37SAlasdair G Kergon dm_table_event_callback(t, event_callback, md); 20302ca3310eSAlasdair G Kergon 2031e6ee8c0bSKiyoshi Ueda /* 2032e6ee8c0bSKiyoshi Ueda * The queue hasn't been stopped yet, if the old table type wasn't 2033e6ee8c0bSKiyoshi Ueda * for request-based during suspension. So stop it to prevent 2034e6ee8c0bSKiyoshi Ueda * I/O mapping before resume. 2035e6ee8c0bSKiyoshi Ueda * This must be done before setting the queue restrictions, 2036e6ee8c0bSKiyoshi Ueda * because request-based dm may be run just after the setting. 2037e6ee8c0bSKiyoshi Ueda */ 2038e6ee8c0bSKiyoshi Ueda if (dm_table_request_based(t) && !blk_queue_stopped(q)) 2039e6ee8c0bSKiyoshi Ueda stop_queue(q); 2040e6ee8c0bSKiyoshi Ueda 2041e6ee8c0bSKiyoshi Ueda __bind_mempools(md, t); 2042e6ee8c0bSKiyoshi Ueda 2043523d9297SKiyoshi Ueda write_lock_irqsave(&md->map_lock, flags); 2044042d2a9bSAlasdair G Kergon old_map = md->map; 20452ca3310eSAlasdair G Kergon md->map = t; 2046754c5fc7SMike Snitzer dm_table_set_restrictions(t, q, limits); 2047523d9297SKiyoshi Ueda write_unlock_irqrestore(&md->map_lock, flags); 20482ca3310eSAlasdair G Kergon 2049042d2a9bSAlasdair G Kergon return old_map; 20501da177e4SLinus Torvalds } 20511da177e4SLinus Torvalds 2052a7940155SAlasdair G Kergon /* 2053a7940155SAlasdair G Kergon * Returns unbound table for the caller to free. 2054a7940155SAlasdair G Kergon */ 2055a7940155SAlasdair G Kergon static struct dm_table *__unbind(struct mapped_device *md) 20561da177e4SLinus Torvalds { 20571da177e4SLinus Torvalds struct dm_table *map = md->map; 2058523d9297SKiyoshi Ueda unsigned long flags; 20591da177e4SLinus Torvalds 20601da177e4SLinus Torvalds if (!map) 2061a7940155SAlasdair G Kergon return NULL; 20621da177e4SLinus Torvalds 20631da177e4SLinus Torvalds dm_table_event_callback(map, NULL, NULL); 2064523d9297SKiyoshi Ueda write_lock_irqsave(&md->map_lock, flags); 20651da177e4SLinus Torvalds md->map = NULL; 2066523d9297SKiyoshi Ueda write_unlock_irqrestore(&md->map_lock, flags); 2067a7940155SAlasdair G Kergon 2068a7940155SAlasdair G Kergon return map; 20691da177e4SLinus Torvalds } 20701da177e4SLinus Torvalds 20711da177e4SLinus Torvalds /* 20721da177e4SLinus Torvalds * Constructor for a new device. 
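 *
 * (Illustrative usage, roughly what the ioctl layer does; error
 *  handling elided:
 *	struct mapped_device *md;
 *	int r = dm_create(DM_ANY_MINOR, &md);
 *	...
 *	dm_put(md);
 *  )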
20731da177e4SLinus Torvalds */ 20742b06cfffSAlasdair G Kergon int dm_create(int minor, struct mapped_device **result) 20751da177e4SLinus Torvalds { 20761da177e4SLinus Torvalds struct mapped_device *md; 20771da177e4SLinus Torvalds 20782b06cfffSAlasdair G Kergon md = alloc_dev(minor); 20791da177e4SLinus Torvalds if (!md) 20801da177e4SLinus Torvalds return -ENXIO; 20811da177e4SLinus Torvalds 2082784aae73SMilan Broz dm_sysfs_init(md); 2083784aae73SMilan Broz 20841da177e4SLinus Torvalds *result = md; 20851da177e4SLinus Torvalds return 0; 20861da177e4SLinus Torvalds } 20871da177e4SLinus Torvalds 2088a5664dadSMike Snitzer /* 2089a5664dadSMike Snitzer * Functions to manage md->type. 2090a5664dadSMike Snitzer * All are required to hold md->type_lock. 2091a5664dadSMike Snitzer */ 2092a5664dadSMike Snitzer void dm_lock_md_type(struct mapped_device *md) 2093a5664dadSMike Snitzer { 2094a5664dadSMike Snitzer mutex_lock(&md->type_lock); 2095a5664dadSMike Snitzer } 2096a5664dadSMike Snitzer 2097a5664dadSMike Snitzer void dm_unlock_md_type(struct mapped_device *md) 2098a5664dadSMike Snitzer { 2099a5664dadSMike Snitzer mutex_unlock(&md->type_lock); 2100a5664dadSMike Snitzer } 2101a5664dadSMike Snitzer 2102a5664dadSMike Snitzer void dm_set_md_type(struct mapped_device *md, unsigned type) 2103a5664dadSMike Snitzer { 2104a5664dadSMike Snitzer md->type = type; 2105a5664dadSMike Snitzer } 2106a5664dadSMike Snitzer 2107a5664dadSMike Snitzer unsigned dm_get_md_type(struct mapped_device *md) 2108a5664dadSMike Snitzer { 2109a5664dadSMike Snitzer return md->type; 2110a5664dadSMike Snitzer } 2111a5664dadSMike Snitzer 21124a0b4ddfSMike Snitzer /* 21134a0b4ddfSMike Snitzer * Fully initialize a request-based queue (->elevator, ->request_fn, etc). 21144a0b4ddfSMike Snitzer */ 21154a0b4ddfSMike Snitzer static int dm_init_request_based_queue(struct mapped_device *md) 21164a0b4ddfSMike Snitzer { 21174a0b4ddfSMike Snitzer struct request_queue *q = NULL; 21184a0b4ddfSMike Snitzer 21194a0b4ddfSMike Snitzer if (md->queue->elevator) 21204a0b4ddfSMike Snitzer return 1; 21214a0b4ddfSMike Snitzer 21224a0b4ddfSMike Snitzer /* Fully initialize the queue */ 21234a0b4ddfSMike Snitzer q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL); 21244a0b4ddfSMike Snitzer if (!q) 21254a0b4ddfSMike Snitzer return 0; 21264a0b4ddfSMike Snitzer 21274a0b4ddfSMike Snitzer md->queue = q; 21284a0b4ddfSMike Snitzer md->saved_make_request_fn = md->queue->make_request_fn; 21294a0b4ddfSMike Snitzer dm_init_md_queue(md); 21304a0b4ddfSMike Snitzer blk_queue_softirq_done(md->queue, dm_softirq_done); 21314a0b4ddfSMike Snitzer blk_queue_prep_rq(md->queue, dm_prep_fn); 21324a0b4ddfSMike Snitzer blk_queue_lld_busy(md->queue, dm_lld_busy); 21334a0b4ddfSMike Snitzer 21344a0b4ddfSMike Snitzer elv_register_queue(md->queue); 21354a0b4ddfSMike Snitzer 21364a0b4ddfSMike Snitzer return 1; 21374a0b4ddfSMike Snitzer } 21384a0b4ddfSMike Snitzer 21394a0b4ddfSMike Snitzer /* 21404a0b4ddfSMike Snitzer * Setup the DM device's queue based on md's type 21414a0b4ddfSMike Snitzer */ 21424a0b4ddfSMike Snitzer int dm_setup_md_queue(struct mapped_device *md) 21434a0b4ddfSMike Snitzer { 21444a0b4ddfSMike Snitzer if ((dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) && 21454a0b4ddfSMike Snitzer !dm_init_request_based_queue(md)) { 21464a0b4ddfSMike Snitzer DMWARN("Cannot initialize queue for request-based mapped device"); 21474a0b4ddfSMike Snitzer return -EINVAL; 21484a0b4ddfSMike Snitzer } 21494a0b4ddfSMike Snitzer 21504a0b4ddfSMike Snitzer return 0; 21514a0b4ddfSMike Snitzer } 
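/*
 * Illustrative sketch only, not called anywhere in this file: how a
 * caller is expected to combine the md->type handshake above with
 * dm_setup_md_queue().  The function name and the exact error policy
 * are assumptions made for illustration.
 */
static inline int dm_example_bind_type(struct mapped_device *md,
				       unsigned type)
{
	int r = 0;

	dm_lock_md_type(md);

	if (dm_get_md_type(md) == DM_TYPE_NONE)
		dm_set_md_type(md, type);	/* first table load decides */
	else if (dm_get_md_type(md) != type)
		r = -EINVAL;			/* type may never change */

	if (!r)
		r = dm_setup_md_queue(md);

	dm_unlock_md_type(md);

	return r;
}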
21524a0b4ddfSMike Snitzer 2153637842cfSDavid Teigland static struct mapped_device *dm_find_md(dev_t dev) 21541da177e4SLinus Torvalds { 21551da177e4SLinus Torvalds struct mapped_device *md; 21561da177e4SLinus Torvalds unsigned minor = MINOR(dev); 21571da177e4SLinus Torvalds 21581da177e4SLinus Torvalds if (MAJOR(dev) != _major || minor >= (1 << MINORBITS)) 21591da177e4SLinus Torvalds return NULL; 21601da177e4SLinus Torvalds 2161f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 21621da177e4SLinus Torvalds 21631da177e4SLinus Torvalds md = idr_find(&_minor_idr, minor); 2164fba9f90eSJeff Mahoney if (md && (md == MINOR_ALLOCED || 2165f331c029STejun Heo (MINOR(disk_devt(dm_disk(md))) != minor) || 2166abdc568bSKiyoshi Ueda dm_deleting_md(md) || 2167fba9f90eSJeff Mahoney test_bit(DMF_FREEING, &md->flags))) { 2168637842cfSDavid Teigland md = NULL; 2169fba9f90eSJeff Mahoney goto out; 2170fba9f90eSJeff Mahoney } 21711da177e4SLinus Torvalds 2172fba9f90eSJeff Mahoney out: 2173f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 21741da177e4SLinus Torvalds 2175637842cfSDavid Teigland return md; 2176637842cfSDavid Teigland } 2177637842cfSDavid Teigland 2178d229a958SDavid Teigland struct mapped_device *dm_get_md(dev_t dev) 2179d229a958SDavid Teigland { 2180d229a958SDavid Teigland struct mapped_device *md = dm_find_md(dev); 2181d229a958SDavid Teigland 2182d229a958SDavid Teigland if (md) 2183d229a958SDavid Teigland dm_get(md); 2184d229a958SDavid Teigland 2185d229a958SDavid Teigland return md; 2186d229a958SDavid Teigland } 2187d229a958SDavid Teigland 21889ade92a9SAlasdair G Kergon void *dm_get_mdptr(struct mapped_device *md) 2189637842cfSDavid Teigland { 21909ade92a9SAlasdair G Kergon return md->interface_ptr; 21911da177e4SLinus Torvalds } 21921da177e4SLinus Torvalds 21931da177e4SLinus Torvalds void dm_set_mdptr(struct mapped_device *md, void *ptr) 21941da177e4SLinus Torvalds { 21951da177e4SLinus Torvalds md->interface_ptr = ptr; 21961da177e4SLinus Torvalds } 21971da177e4SLinus Torvalds 21981da177e4SLinus Torvalds void dm_get(struct mapped_device *md) 21991da177e4SLinus Torvalds { 22001da177e4SLinus Torvalds atomic_inc(&md->holders); 22013f77316dSKiyoshi Ueda BUG_ON(test_bit(DMF_FREEING, &md->flags)); 22021da177e4SLinus Torvalds } 22031da177e4SLinus Torvalds 220472d94861SAlasdair G Kergon const char *dm_device_name(struct mapped_device *md) 220572d94861SAlasdair G Kergon { 220672d94861SAlasdair G Kergon return md->name; 220772d94861SAlasdair G Kergon } 220872d94861SAlasdair G Kergon EXPORT_SYMBOL_GPL(dm_device_name); 220972d94861SAlasdair G Kergon 22103f77316dSKiyoshi Ueda static void __dm_destroy(struct mapped_device *md, bool wait) 22111da177e4SLinus Torvalds { 22121134e5aeSMike Anderson struct dm_table *map; 22131da177e4SLinus Torvalds 22143f77316dSKiyoshi Ueda might_sleep(); 2215fba9f90eSJeff Mahoney 22163f77316dSKiyoshi Ueda spin_lock(&_minor_lock); 22177c666411SAlasdair G Kergon map = dm_get_live_table(md); 22183f77316dSKiyoshi Ueda idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); 2219fba9f90eSJeff Mahoney set_bit(DMF_FREEING, &md->flags); 2220f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 22213f77316dSKiyoshi Ueda 22224f186f8bSKiyoshi Ueda if (!dm_suspended_md(md)) { 22231da177e4SLinus Torvalds dm_table_presuspend_targets(map); 22241da177e4SLinus Torvalds dm_table_postsuspend_targets(map); 22251da177e4SLinus Torvalds } 22263f77316dSKiyoshi Ueda 22273f77316dSKiyoshi Ueda /* 22283f77316dSKiyoshi Ueda * Rare, but there may be I/O requests still going to complete, 22293f77316dSKiyoshi Ueda 
* for example. Wait for all references to disappear. 22303f77316dSKiyoshi Ueda * No one should increment the reference count of the mapped_device, 22313f77316dSKiyoshi Ueda * after the mapped_device state becomes DMF_FREEING. 22323f77316dSKiyoshi Ueda */ 22333f77316dSKiyoshi Ueda if (wait) 22343f77316dSKiyoshi Ueda while (atomic_read(&md->holders)) 22353f77316dSKiyoshi Ueda msleep(1); 22363f77316dSKiyoshi Ueda else if (atomic_read(&md->holders)) 22373f77316dSKiyoshi Ueda DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)", 22383f77316dSKiyoshi Ueda dm_device_name(md), atomic_read(&md->holders)); 22393f77316dSKiyoshi Ueda 2240784aae73SMilan Broz dm_sysfs_exit(md); 22411134e5aeSMike Anderson dm_table_put(map); 2242a7940155SAlasdair G Kergon dm_table_destroy(__unbind(md)); 22431da177e4SLinus Torvalds free_dev(md); 22441da177e4SLinus Torvalds } 22453f77316dSKiyoshi Ueda 22463f77316dSKiyoshi Ueda void dm_destroy(struct mapped_device *md) 22473f77316dSKiyoshi Ueda { 22483f77316dSKiyoshi Ueda __dm_destroy(md, true); 22493f77316dSKiyoshi Ueda } 22503f77316dSKiyoshi Ueda 22513f77316dSKiyoshi Ueda void dm_destroy_immediate(struct mapped_device *md) 22523f77316dSKiyoshi Ueda { 22533f77316dSKiyoshi Ueda __dm_destroy(md, false); 22543f77316dSKiyoshi Ueda } 22553f77316dSKiyoshi Ueda 22563f77316dSKiyoshi Ueda void dm_put(struct mapped_device *md) 22573f77316dSKiyoshi Ueda { 22583f77316dSKiyoshi Ueda atomic_dec(&md->holders); 22591da177e4SLinus Torvalds } 226079eb885cSEdward Goggin EXPORT_SYMBOL_GPL(dm_put); 22611da177e4SLinus Torvalds 2262401600dfSMikulas Patocka static int dm_wait_for_completion(struct mapped_device *md, int interruptible) 226346125c1cSMilan Broz { 226446125c1cSMilan Broz int r = 0; 2265b44ebeb0SMikulas Patocka DECLARE_WAITQUEUE(wait, current); 2266b44ebeb0SMikulas Patocka 2267b44ebeb0SMikulas Patocka dm_unplug_all(md->queue); 2268b44ebeb0SMikulas Patocka 2269b44ebeb0SMikulas Patocka add_wait_queue(&md->wait, &wait); 227046125c1cSMilan Broz 227146125c1cSMilan Broz while (1) { 2272401600dfSMikulas Patocka set_current_state(interruptible); 227346125c1cSMilan Broz 227446125c1cSMilan Broz smp_mb(); 2275b4324feeSKiyoshi Ueda if (!md_in_flight(md)) 227646125c1cSMilan Broz break; 227746125c1cSMilan Broz 2278401600dfSMikulas Patocka if (interruptible == TASK_INTERRUPTIBLE && 2279401600dfSMikulas Patocka signal_pending(current)) { 228046125c1cSMilan Broz r = -EINTR; 228146125c1cSMilan Broz break; 228246125c1cSMilan Broz } 228346125c1cSMilan Broz 228446125c1cSMilan Broz io_schedule(); 228546125c1cSMilan Broz } 228646125c1cSMilan Broz set_current_state(TASK_RUNNING); 228746125c1cSMilan Broz 2288b44ebeb0SMikulas Patocka remove_wait_queue(&md->wait, &wait); 2289b44ebeb0SMikulas Patocka 229046125c1cSMilan Broz return r; 229146125c1cSMilan Broz } 229246125c1cSMilan Broz 22931da177e4SLinus Torvalds /* 22941da177e4SLinus Torvalds * Process the deferred bios 22951da177e4SLinus Torvalds */ 2296ef208587SMikulas Patocka static void dm_wq_work(struct work_struct *work) 22971da177e4SLinus Torvalds { 2298ef208587SMikulas Patocka struct mapped_device *md = container_of(work, struct mapped_device, 2299ef208587SMikulas Patocka work); 23006d6f10dfSMilan Broz struct bio *c; 23011da177e4SLinus Torvalds 23026a8736d1STejun Heo down_read(&md->io_lock); 2303ef208587SMikulas Patocka 23043b00b203SMikulas Patocka while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 2305022c2611SMikulas Patocka spin_lock_irq(&md->deferred_lock); 2306022c2611SMikulas Patocka c = bio_list_pop(&md->deferred); 
2307022c2611SMikulas Patocka spin_unlock_irq(&md->deferred_lock); 2308022c2611SMikulas Patocka 23096a8736d1STejun Heo if (!c) 2310df12ee99SAlasdair G Kergon break; 231173d410c0SMilan Broz 23126a8736d1STejun Heo up_read(&md->io_lock); 23133b00b203SMikulas Patocka 2314e6ee8c0bSKiyoshi Ueda if (dm_request_based(md)) 2315e6ee8c0bSKiyoshi Ueda generic_make_request(c); 2316af7e466aSMikulas Patocka else 2317df12ee99SAlasdair G Kergon __split_and_process_bio(md, c); 23186a8736d1STejun Heo 23196a8736d1STejun Heo down_read(&md->io_lock); 2320e6ee8c0bSKiyoshi Ueda } 23213b00b203SMikulas Patocka 23226a8736d1STejun Heo up_read(&md->io_lock); 23231da177e4SLinus Torvalds } 23241da177e4SLinus Torvalds 23259a1fb464SMikulas Patocka static void dm_queue_flush(struct mapped_device *md) 2326304f3f6aSMilan Broz { 23273b00b203SMikulas Patocka clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 23283b00b203SMikulas Patocka smp_mb__after_clear_bit(); 232953d5914fSMikulas Patocka queue_work(md->wq, &md->work); 2330304f3f6aSMilan Broz } 2331304f3f6aSMilan Broz 23321da177e4SLinus Torvalds /* 2333042d2a9bSAlasdair G Kergon * Swap in a new table, returning the old one for the caller to destroy. 23341da177e4SLinus Torvalds */ 2335042d2a9bSAlasdair G Kergon struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) 23361da177e4SLinus Torvalds { 2337042d2a9bSAlasdair G Kergon struct dm_table *map = ERR_PTR(-EINVAL); 2338754c5fc7SMike Snitzer struct queue_limits limits; 2339042d2a9bSAlasdair G Kergon int r; 23401da177e4SLinus Torvalds 2341e61290a4SDaniel Walker mutex_lock(&md->suspend_lock); 23421da177e4SLinus Torvalds 23431da177e4SLinus Torvalds /* device must be suspended */ 23444f186f8bSKiyoshi Ueda if (!dm_suspended_md(md)) 234593c534aeSAlasdair G Kergon goto out; 23461da177e4SLinus Torvalds 2347754c5fc7SMike Snitzer r = dm_calculate_queue_limits(table, &limits); 2348042d2a9bSAlasdair G Kergon if (r) { 2349042d2a9bSAlasdair G Kergon map = ERR_PTR(r); 2350754c5fc7SMike Snitzer goto out; 2351042d2a9bSAlasdair G Kergon } 2352754c5fc7SMike Snitzer 2353042d2a9bSAlasdair G Kergon map = __bind(md, table, &limits); 23541da177e4SLinus Torvalds 235593c534aeSAlasdair G Kergon out: 2356e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock); 2357042d2a9bSAlasdair G Kergon return map; 23581da177e4SLinus Torvalds } 23591da177e4SLinus Torvalds 23601da177e4SLinus Torvalds /* 23611da177e4SLinus Torvalds * Functions to lock and unlock any filesystem running on the 23621da177e4SLinus Torvalds * device. 
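 *
 * (Pairing enforced below, summarised for illustration:
 *	lock_fs(md):   freeze_bdev(md->bdev), then set DMF_FROZEN
 *	unlock_fs(md): no-op unless DMF_FROZEN is set, then
 *	               thaw_bdev(md->bdev) and clear the flag.)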
23631da177e4SLinus Torvalds */ 23642ca3310eSAlasdair G Kergon static int lock_fs(struct mapped_device *md) 23651da177e4SLinus Torvalds { 2366e39e2e95SAlasdair G Kergon int r; 23671da177e4SLinus Torvalds 23681da177e4SLinus Torvalds WARN_ON(md->frozen_sb); 2369dfbe03f6SAlasdair G Kergon 2370db8fef4fSMikulas Patocka md->frozen_sb = freeze_bdev(md->bdev); 2371dfbe03f6SAlasdair G Kergon if (IS_ERR(md->frozen_sb)) { 2372cf222b37SAlasdair G Kergon r = PTR_ERR(md->frozen_sb); 2373e39e2e95SAlasdair G Kergon md->frozen_sb = NULL; 2374e39e2e95SAlasdair G Kergon return r; 2375dfbe03f6SAlasdair G Kergon } 2376dfbe03f6SAlasdair G Kergon 2377aa8d7c2fSAlasdair G Kergon set_bit(DMF_FROZEN, &md->flags); 2378aa8d7c2fSAlasdair G Kergon 23791da177e4SLinus Torvalds return 0; 23801da177e4SLinus Torvalds } 23811da177e4SLinus Torvalds 23822ca3310eSAlasdair G Kergon static void unlock_fs(struct mapped_device *md) 23831da177e4SLinus Torvalds { 2384aa8d7c2fSAlasdair G Kergon if (!test_bit(DMF_FROZEN, &md->flags)) 2385aa8d7c2fSAlasdair G Kergon return; 2386aa8d7c2fSAlasdair G Kergon 2387db8fef4fSMikulas Patocka thaw_bdev(md->bdev, md->frozen_sb); 23881da177e4SLinus Torvalds md->frozen_sb = NULL; 2389aa8d7c2fSAlasdair G Kergon clear_bit(DMF_FROZEN, &md->flags); 23901da177e4SLinus Torvalds } 23911da177e4SLinus Torvalds 23921da177e4SLinus Torvalds /* 23931da177e4SLinus Torvalds * We need to be able to change a mapping table under a mounted 23941da177e4SLinus Torvalds * filesystem. For example we might want to move some data in 23951da177e4SLinus Torvalds * the background. Before the table can be swapped with 23961da177e4SLinus Torvalds * dm_bind_table, dm_suspend must be called to flush any in 23971da177e4SLinus Torvalds * flight bios and ensure that any further io gets deferred. 23981da177e4SLinus Torvalds */ 2399cec47e3dSKiyoshi Ueda /* 2400cec47e3dSKiyoshi Ueda * Suspend mechanism in request-based dm. 2401cec47e3dSKiyoshi Ueda * 24029f518b27SKiyoshi Ueda * 1. Flush all I/Os by lock_fs() if needed. 24039f518b27SKiyoshi Ueda * 2. Stop dispatching any I/O by stopping the request_queue. 24049f518b27SKiyoshi Ueda * 3. Wait for all in-flight I/Os to be completed or requeued. 2405cec47e3dSKiyoshi Ueda * 24069f518b27SKiyoshi Ueda * To abort suspend, start the request_queue. 2407cec47e3dSKiyoshi Ueda */ 2408a3d77d35SKiyoshi Ueda int dm_suspend(struct mapped_device *md, unsigned suspend_flags) 24091da177e4SLinus Torvalds { 24102ca3310eSAlasdair G Kergon struct dm_table *map = NULL; 241146125c1cSMilan Broz int r = 0; 2412a3d77d35SKiyoshi Ueda int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0; 24132e93ccc1SKiyoshi Ueda int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0; 24141da177e4SLinus Torvalds 2415e61290a4SDaniel Walker mutex_lock(&md->suspend_lock); 24162ca3310eSAlasdair G Kergon 24174f186f8bSKiyoshi Ueda if (dm_suspended_md(md)) { 241873d410c0SMilan Broz r = -EINVAL; 2419d287483dSAlasdair G Kergon goto out_unlock; 242073d410c0SMilan Broz } 24211da177e4SLinus Torvalds 24227c666411SAlasdair G Kergon map = dm_get_live_table(md); 2423cf222b37SAlasdair G Kergon 24242e93ccc1SKiyoshi Ueda /* 24252e93ccc1SKiyoshi Ueda * DMF_NOFLUSH_SUSPENDING must be set before presuspend. 24262e93ccc1SKiyoshi Ueda * This flag is cleared before dm_suspend returns. 24272e93ccc1SKiyoshi Ueda */ 24282e93ccc1SKiyoshi Ueda if (noflush) 24292e93ccc1SKiyoshi Ueda set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 24302e93ccc1SKiyoshi Ueda 2431436d4108SAlasdair G Kergon /* This does not get reverted if there's an error later. 
*/ 24321da177e4SLinus Torvalds dm_table_presuspend_targets(map); 24331da177e4SLinus Torvalds 24342e93ccc1SKiyoshi Ueda /* 24359f518b27SKiyoshi Ueda * Flush I/O to the device. 24369f518b27SKiyoshi Ueda * Any I/O submitted after lock_fs() may not be flushed. 24379f518b27SKiyoshi Ueda * noflush takes precedence over do_lockfs. 24389f518b27SKiyoshi Ueda * (lock_fs() flushes I/Os and waits for them to complete.) 24392e93ccc1SKiyoshi Ueda */ 244032a926daSMikulas Patocka if (!noflush && do_lockfs) { 24412ca3310eSAlasdair G Kergon r = lock_fs(md); 24422ca3310eSAlasdair G Kergon if (r) 24432ca3310eSAlasdair G Kergon goto out; 2444aa8d7c2fSAlasdair G Kergon } 24451da177e4SLinus Torvalds 24461da177e4SLinus Torvalds /* 24473b00b203SMikulas Patocka * Here we must make sure that no processes are submitting requests 24483b00b203SMikulas Patocka * to target drivers i.e. no one may be executing 24493b00b203SMikulas Patocka * __split_and_process_bio. This is called from dm_request and 24503b00b203SMikulas Patocka * dm_wq_work. 24513b00b203SMikulas Patocka * 24523b00b203SMikulas Patocka * To get all processes out of __split_and_process_bio in dm_request, 24533b00b203SMikulas Patocka * we take the write lock. To prevent any process from reentering 24546a8736d1STejun Heo * __split_and_process_bio from dm_request and quiesce the thread 24556a8736d1STejun Heo * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call 24566a8736d1STejun Heo * flush_workqueue(md->wq). 24571da177e4SLinus Torvalds */ 24582ca3310eSAlasdair G Kergon down_write(&md->io_lock); 24591eb787ecSAlasdair G Kergon set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 24602ca3310eSAlasdair G Kergon up_write(&md->io_lock); 24611da177e4SLinus Torvalds 2462d0bcb878SKiyoshi Ueda /* 246329e4013dSTejun Heo * Stop md->queue before flushing md->wq in case request-based 246429e4013dSTejun Heo * dm defers requests to md->wq from md->queue. 2465d0bcb878SKiyoshi Ueda */ 2466cec47e3dSKiyoshi Ueda if (dm_request_based(md)) 24679f518b27SKiyoshi Ueda stop_queue(md->queue); 2468cec47e3dSKiyoshi Ueda 2469d0bcb878SKiyoshi Ueda flush_workqueue(md->wq); 2470d0bcb878SKiyoshi Ueda 24711da177e4SLinus Torvalds /* 24723b00b203SMikulas Patocka * At this point no more requests are entering target request routines. 24733b00b203SMikulas Patocka * We call dm_wait_for_completion to wait for all existing requests 24743b00b203SMikulas Patocka * to finish. 24751da177e4SLinus Torvalds */ 2476401600dfSMikulas Patocka r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE); 24771da177e4SLinus Torvalds 24782ca3310eSAlasdair G Kergon down_write(&md->io_lock); 24796d6f10dfSMilan Broz if (noflush) 2480022c2611SMikulas Patocka clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 248194d6351eSMilan Broz up_write(&md->io_lock); 24822e93ccc1SKiyoshi Ueda 24831da177e4SLinus Torvalds /* were we interrupted ? */ 248446125c1cSMilan Broz if (r < 0) { 24859a1fb464SMikulas Patocka dm_queue_flush(md); 248673d410c0SMilan Broz 2487cec47e3dSKiyoshi Ueda if (dm_request_based(md)) 24889f518b27SKiyoshi Ueda start_queue(md->queue); 2489cec47e3dSKiyoshi Ueda 24902ca3310eSAlasdair G Kergon unlock_fs(md); 24912e93ccc1SKiyoshi Ueda goto out; /* pushback list is already flushed, so skip flush */ 24922ca3310eSAlasdair G Kergon } 24932ca3310eSAlasdair G Kergon 24943b00b203SMikulas Patocka /* 24953b00b203SMikulas Patocka * If dm_wait_for_completion returned 0, the device is completely 24963b00b203SMikulas Patocka * quiescent now. There is no request-processing activity. 
25121da177e4SLinus Torvalds int dm_resume(struct mapped_device *md)
25131da177e4SLinus Torvalds {
2514cf222b37SAlasdair G Kergon 	int r = -EINVAL;
2515cf222b37SAlasdair G Kergon 	struct dm_table *map = NULL;
25161da177e4SLinus Torvalds 
2517e61290a4SDaniel Walker 	mutex_lock(&md->suspend_lock);
25184f186f8bSKiyoshi Ueda 	if (!dm_suspended_md(md))
2519cf222b37SAlasdair G Kergon 		goto out;
2520cf222b37SAlasdair G Kergon 
25217c666411SAlasdair G Kergon 	map = dm_get_live_table(md);
25222ca3310eSAlasdair G Kergon 	if (!map || !dm_table_get_size(map))
2523cf222b37SAlasdair G Kergon 		goto out;
25241da177e4SLinus Torvalds 
25258757b776SMilan Broz 	r = dm_table_resume_targets(map);
25268757b776SMilan Broz 	if (r)
25278757b776SMilan Broz 		goto out;
25282ca3310eSAlasdair G Kergon 
25299a1fb464SMikulas Patocka 	dm_queue_flush(md);
25302ca3310eSAlasdair G Kergon 
2531cec47e3dSKiyoshi Ueda 	/*
2532cec47e3dSKiyoshi Ueda 	 * Flushing deferred I/Os must be done after targets are resumed
2533cec47e3dSKiyoshi Ueda 	 * so that mapping of targets can work correctly.
2534cec47e3dSKiyoshi Ueda 	 * Request-based dm is queueing the deferred I/Os in its request_queue.
2535cec47e3dSKiyoshi Ueda 	 */
2536cec47e3dSKiyoshi Ueda 	if (dm_request_based(md))
2537cec47e3dSKiyoshi Ueda 		start_queue(md->queue);
2538cec47e3dSKiyoshi Ueda 
25392ca3310eSAlasdair G Kergon 	unlock_fs(md);
25402ca3310eSAlasdair G Kergon 
25412ca3310eSAlasdair G Kergon 	clear_bit(DMF_SUSPENDED, &md->flags);
25422ca3310eSAlasdair G Kergon 
25431da177e4SLinus Torvalds 	dm_table_unplug_all(map);
2544cf222b37SAlasdair G Kergon 	r = 0;
2545cf222b37SAlasdair G Kergon out:
2546cf222b37SAlasdair G Kergon 	dm_table_put(map);
2547e61290a4SDaniel Walker 	mutex_unlock(&md->suspend_lock);
25482ca3310eSAlasdair G Kergon 
2549cf222b37SAlasdair G Kergon 	return r;
25501da177e4SLinus Torvalds }
25511da177e4SLinus Torvalds 
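/*
 * Example (a minimal sketch, not part of the original file): the usual
 * reason for suspending is to swap in a new table while the device
 * remains open.  The ioctl layer does roughly the following;
 * dm_swap_table is defined earlier in this file and returns the old map
 * (or an ERR_PTR).  Error paths are omitted.
 *
 *	r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
 *	old_map = dm_swap_table(md, new_table);
 *	if (!IS_ERR(old_map)) {
 *		if (old_map)
 *			dm_table_destroy(old_map);
 *		r = dm_resume(md);
 *	}
 */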
25521da177e4SLinus Torvalds /*-----------------------------------------------------------------
25531da177e4SLinus Torvalds  * Event notification.
25541da177e4SLinus Torvalds  *---------------------------------------------------------------*/
25553abf85b5SPeter Rajnoha int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
255660935eb2SMilan Broz 		      unsigned cookie)
255769267a30SAlasdair G Kergon {
255860935eb2SMilan Broz 	char udev_cookie[DM_COOKIE_LENGTH];
255960935eb2SMilan Broz 	char *envp[] = { udev_cookie, NULL };
256060935eb2SMilan Broz 
256160935eb2SMilan Broz 	if (!cookie)
25623abf85b5SPeter Rajnoha 		return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
256360935eb2SMilan Broz 	else {
256460935eb2SMilan Broz 		snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
256560935eb2SMilan Broz 			 DM_COOKIE_ENV_VAR_NAME, cookie);
25663abf85b5SPeter Rajnoha 		return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
25673abf85b5SPeter Rajnoha 					  action, envp);
256860935eb2SMilan Broz 	}
256969267a30SAlasdair G Kergon }
257069267a30SAlasdair G Kergon 
25717a8c3d3bSMike Anderson uint32_t dm_next_uevent_seq(struct mapped_device *md)
25727a8c3d3bSMike Anderson {
25737a8c3d3bSMike Anderson 	return atomic_add_return(1, &md->uevent_seq);
25747a8c3d3bSMike Anderson }
25757a8c3d3bSMike Anderson 
25761da177e4SLinus Torvalds uint32_t dm_get_event_nr(struct mapped_device *md)
25771da177e4SLinus Torvalds {
25781da177e4SLinus Torvalds 	return atomic_read(&md->event_nr);
25791da177e4SLinus Torvalds }
25801da177e4SLinus Torvalds 
25811da177e4SLinus Torvalds int dm_wait_event(struct mapped_device *md, int event_nr)
25821da177e4SLinus Torvalds {
25831da177e4SLinus Torvalds 	return wait_event_interruptible(md->eventq,
25841da177e4SLinus Torvalds 			(event_nr != atomic_read(&md->event_nr)));
25851da177e4SLinus Torvalds }
25861da177e4SLinus Torvalds 
25877a8c3d3bSMike Anderson void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
25887a8c3d3bSMike Anderson {
25897a8c3d3bSMike Anderson 	unsigned long flags;
25907a8c3d3bSMike Anderson 
25917a8c3d3bSMike Anderson 	spin_lock_irqsave(&md->uevent_lock, flags);
25927a8c3d3bSMike Anderson 	list_add(elist, &md->uevent_list);
25937a8c3d3bSMike Anderson 	spin_unlock_irqrestore(&md->uevent_lock, flags);
25947a8c3d3bSMike Anderson }
25957a8c3d3bSMike Anderson 
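/*
 * Example (a minimal sketch, not part of the original file):
 * dm_get_event_nr and dm_wait_event above implement the "wait for the
 * next event" pattern used by the ioctl interface (DM_DEV_WAIT).  A
 * caller samples the counter, then sleeps until it changes:
 *
 *	uint32_t event_nr = dm_get_event_nr(md);
 *
 *	if (dm_wait_event(md, event_nr))
 *		return -ERESTARTSYS;	(interrupted by a signal)
 *	(the counter moved: an event such as a table change has occurred)
 */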
25961da177e4SLinus Torvalds /*
25971da177e4SLinus Torvalds  * The gendisk is only valid as long as you have a reference
25981da177e4SLinus Torvalds  * count on 'md'.
25991da177e4SLinus Torvalds  */
26001da177e4SLinus Torvalds struct gendisk *dm_disk(struct mapped_device *md)
26011da177e4SLinus Torvalds {
26021da177e4SLinus Torvalds 	return md->disk;
26031da177e4SLinus Torvalds }
26041da177e4SLinus Torvalds 
2605784aae73SMilan Broz struct kobject *dm_kobject(struct mapped_device *md)
2606784aae73SMilan Broz {
2607784aae73SMilan Broz 	return &md->kobj;
2608784aae73SMilan Broz }
2609784aae73SMilan Broz 
2610784aae73SMilan Broz /*
2611784aae73SMilan Broz  * struct mapped_device should not be exported outside of dm.c
2612784aae73SMilan Broz  * so use this check to verify that kobj is part of md structure
2613784aae73SMilan Broz  */
2614784aae73SMilan Broz struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
2615784aae73SMilan Broz {
2616784aae73SMilan Broz 	struct mapped_device *md;
2617784aae73SMilan Broz 
2618784aae73SMilan Broz 	md = container_of(kobj, struct mapped_device, kobj);
2619784aae73SMilan Broz 	if (&md->kobj != kobj)
2620784aae73SMilan Broz 		return NULL;
2621784aae73SMilan Broz 
26224d89b7b4SMilan Broz 	if (test_bit(DMF_FREEING, &md->flags) ||
2623432a212cSMike Anderson 	    dm_deleting_md(md))
26244d89b7b4SMilan Broz 		return NULL;
26254d89b7b4SMilan Broz 
2626784aae73SMilan Broz 	dm_get(md);
2627784aae73SMilan Broz 	return md;
2628784aae73SMilan Broz }
2629784aae73SMilan Broz 
26304f186f8bSKiyoshi Ueda int dm_suspended_md(struct mapped_device *md)
26311da177e4SLinus Torvalds {
26321da177e4SLinus Torvalds 	return test_bit(DMF_SUSPENDED, &md->flags);
26331da177e4SLinus Torvalds }
26341da177e4SLinus Torvalds 
263564dbce58SKiyoshi Ueda int dm_suspended(struct dm_target *ti)
263664dbce58SKiyoshi Ueda {
2637ecdb2e25SKiyoshi Ueda 	return dm_suspended_md(dm_table_get_md(ti->table));
263864dbce58SKiyoshi Ueda }
263964dbce58SKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_suspended);
264064dbce58SKiyoshi Ueda 
26412e93ccc1SKiyoshi Ueda int dm_noflush_suspending(struct dm_target *ti)
26422e93ccc1SKiyoshi Ueda {
2643ecdb2e25SKiyoshi Ueda 	return __noflush_suspending(dm_table_get_md(ti->table));
26442e93ccc1SKiyoshi Ueda }
26452e93ccc1SKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_noflush_suspending);
26462e93ccc1SKiyoshi Ueda 
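/*
 * Example (a minimal sketch, not part of the original file): a target's
 * completion path can consult dm_noflush_suspending() to requeue I/O
 * instead of failing it, in the style of dm-multipath when all paths
 * are gone.  "example_end_io" is a made-up name; DM_ENDIO_REQUEUE is
 * the real return code from device-mapper.h.
 *
 *	static int example_end_io(struct dm_target *ti, struct bio *bio,
 *				  int error, union map_info *map_context)
 *	{
 *		if (error == -EIO && dm_noflush_suspending(ti))
 *			return DM_ENDIO_REQUEUE;	(retry after resume)
 *		return error;
 *	}
 */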
2647e6ee8c0bSKiyoshi Ueda struct dm_md_mempools *dm_alloc_md_mempools(unsigned type)
2648e6ee8c0bSKiyoshi Ueda {
2649e6ee8c0bSKiyoshi Ueda 	struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL);
2650e6ee8c0bSKiyoshi Ueda 
2651e6ee8c0bSKiyoshi Ueda 	if (!pools)
2652e6ee8c0bSKiyoshi Ueda 		return NULL;
2653e6ee8c0bSKiyoshi Ueda 
2654e6ee8c0bSKiyoshi Ueda 	pools->io_pool = (type == DM_TYPE_BIO_BASED) ?
2655e6ee8c0bSKiyoshi Ueda 			 mempool_create_slab_pool(MIN_IOS, _io_cache) :
2656e6ee8c0bSKiyoshi Ueda 			 mempool_create_slab_pool(MIN_IOS, _rq_bio_info_cache);
2657e6ee8c0bSKiyoshi Ueda 	if (!pools->io_pool)
2658e6ee8c0bSKiyoshi Ueda 		goto free_pools_and_out;
2659e6ee8c0bSKiyoshi Ueda 
2660e6ee8c0bSKiyoshi Ueda 	pools->tio_pool = (type == DM_TYPE_BIO_BASED) ?
2661e6ee8c0bSKiyoshi Ueda 			  mempool_create_slab_pool(MIN_IOS, _tio_cache) :
2662e6ee8c0bSKiyoshi Ueda 			  mempool_create_slab_pool(MIN_IOS, _rq_tio_cache);
2663e6ee8c0bSKiyoshi Ueda 	if (!pools->tio_pool)
2664e6ee8c0bSKiyoshi Ueda 		goto free_io_pool_and_out;
2665e6ee8c0bSKiyoshi Ueda 
2666e6ee8c0bSKiyoshi Ueda 	pools->bs = (type == DM_TYPE_BIO_BASED) ?
2667e6ee8c0bSKiyoshi Ueda 		    bioset_create(16, 0) : bioset_create(MIN_IOS, 0);
2668e6ee8c0bSKiyoshi Ueda 	if (!pools->bs)
2669e6ee8c0bSKiyoshi Ueda 		goto free_tio_pool_and_out;
2670e6ee8c0bSKiyoshi Ueda 
2671e6ee8c0bSKiyoshi Ueda 	return pools;
2672e6ee8c0bSKiyoshi Ueda 
2673e6ee8c0bSKiyoshi Ueda free_tio_pool_and_out:
2674e6ee8c0bSKiyoshi Ueda 	mempool_destroy(pools->tio_pool);
2675e6ee8c0bSKiyoshi Ueda 
2676e6ee8c0bSKiyoshi Ueda free_io_pool_and_out:
2677e6ee8c0bSKiyoshi Ueda 	mempool_destroy(pools->io_pool);
2678e6ee8c0bSKiyoshi Ueda 
2679e6ee8c0bSKiyoshi Ueda free_pools_and_out:
2680e6ee8c0bSKiyoshi Ueda 	kfree(pools);
2681e6ee8c0bSKiyoshi Ueda 
2682e6ee8c0bSKiyoshi Ueda 	return NULL;
2683e6ee8c0bSKiyoshi Ueda }
2684e6ee8c0bSKiyoshi Ueda 
2685e6ee8c0bSKiyoshi Ueda void dm_free_md_mempools(struct dm_md_mempools *pools)
2686e6ee8c0bSKiyoshi Ueda {
2687e6ee8c0bSKiyoshi Ueda 	if (!pools)
2688e6ee8c0bSKiyoshi Ueda 		return;
2689e6ee8c0bSKiyoshi Ueda 
2690e6ee8c0bSKiyoshi Ueda 	if (pools->io_pool)
2691e6ee8c0bSKiyoshi Ueda 		mempool_destroy(pools->io_pool);
2692e6ee8c0bSKiyoshi Ueda 
2693e6ee8c0bSKiyoshi Ueda 	if (pools->tio_pool)
2694e6ee8c0bSKiyoshi Ueda 		mempool_destroy(pools->tio_pool);
2695e6ee8c0bSKiyoshi Ueda 
2696e6ee8c0bSKiyoshi Ueda 	if (pools->bs)
2697e6ee8c0bSKiyoshi Ueda 		bioset_free(pools->bs);
2698e6ee8c0bSKiyoshi Ueda 
2699e6ee8c0bSKiyoshi Ueda 	kfree(pools);
2700e6ee8c0bSKiyoshi Ueda }
2701e6ee8c0bSKiyoshi Ueda 
270283d5cde4SAlexey Dobriyan static const struct block_device_operations dm_blk_dops = {
27031da177e4SLinus Torvalds 	.open = dm_blk_open,
27041da177e4SLinus Torvalds 	.release = dm_blk_close,
2705aa129a22SMilan Broz 	.ioctl = dm_blk_ioctl,
27063ac51e74SDarrick J. Wong 	.getgeo = dm_blk_getgeo,
27071da177e4SLinus Torvalds 	.owner = THIS_MODULE
27081da177e4SLinus Torvalds };
27091da177e4SLinus Torvalds 
27101da177e4SLinus Torvalds EXPORT_SYMBOL(dm_get_mapinfo);
27111da177e4SLinus Torvalds 
27121da177e4SLinus Torvalds /*
27131da177e4SLinus Torvalds  * module hooks
27141da177e4SLinus Torvalds  */
27151da177e4SLinus Torvalds module_init(dm_init);
27161da177e4SLinus Torvalds module_exit(dm_exit);
27171da177e4SLinus Torvalds 
27181da177e4SLinus Torvalds module_param(major, uint, 0);
27191da177e4SLinus Torvalds MODULE_PARM_DESC(major, "The major number of the device mapper");
27201da177e4SLinus Torvalds MODULE_DESCRIPTION(DM_NAME " driver");
27211da177e4SLinus Torvalds MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
27221da177e4SLinus Torvalds MODULE_LICENSE("GPL");
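/*
 * Example (illustrative, not part of the original file): the "major"
 * module parameter above lets an administrator request a fixed major
 * number when loading the module, e.g. "modprobe dm_mod major=240";
 * with the default of 0 the kernel allocates a major dynamically.
 */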