/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>

#include <trace/events/block.h>

#define DM_MSG_PREFIX "core"

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_SPINLOCK(_minor_lock);

/*
 * For bio-based dm.
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	atomic_t io_count;
	struct bio *bio;
	unsigned long start_time;
};

/*
 * For bio-based dm.
 * One of these is allocated per target within a bio.  Hopefully
 * this will be simplified out one day.
 */
struct dm_target_io {
	struct dm_io *io;
	struct dm_target *ti;
	union map_info info;
};

/*
 * For request-based dm.
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
	struct mapped_device *md;
	struct dm_target *ti;
	struct request *orig, clone;
	int error;
	union map_info info;
};

/*
 * For request-based dm.
 * One of these is allocated per bio.
 */
struct dm_rq_clone_bio_info {
	struct bio *orig;
	struct dm_rq_target_io *tio;
};

union map_info *dm_get_mapinfo(struct bio *bio)
{
	if (bio && bio->bi_private)
		return &((struct dm_target_io *)bio->bi_private)->info;
	return NULL;
}

union map_info *dm_get_rq_mapinfo(struct request *rq)
{
	if (rq && rq->end_io_data)
		return &((struct dm_rq_target_io *)rq->end_io_data)->info;
	return NULL;
}
EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);

#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_QUEUE_IO_TO_THREAD 6

/*
 * Work processed by per-device workqueue.
 */
struct mapped_device {
	struct rw_semaphore io_lock;
	struct mutex suspend_lock;
	rwlock_t map_lock;
	atomic_t holders;
	atomic_t open_count;

	unsigned long flags;

	struct request_queue *queue;
	struct gendisk *disk;
	char name[16];

	void *interface_ptr;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	atomic_t pending;
	wait_queue_head_t wait;
	struct work_struct work;
	struct bio_list deferred;
	spinlock_t deferred_lock;

	/*
	 * An error from the barrier request currently being processed.
	 */
	int barrier_error;

	/*
	 * Processing queue (flush/barriers)
	 */
	struct workqueue_struct *wq;

	/*
	 * The current mapping.
	 */
	struct dm_table *map;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;
	mempool_t *tio_pool;

	struct bio_set *bs;

	/*
	 * Event handling.
	 */
	atomic_t event_nr;
	wait_queue_head_t eventq;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/*
	 * freeze/thaw support requires holding onto a super block
	 */
	struct super_block *frozen_sb;
	struct block_device *bdev;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/* marker of flush suspend for request-based dm */
	struct request suspend_rq;

	/* For saving the address of __make_request for request based dm */
	make_request_fn *saved_make_request_fn;

	/* sysfs handle */
	struct kobject kobj;

	/* zero-length barrier that will be cloned and submitted to targets */
	struct bio barrier_bio;
};

/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
	mempool_t *io_pool;
	mempool_t *tio_pool;
	struct bio_set *bs;
};

#define MIN_IOS 256
static struct kmem_cache *_io_cache;
static struct kmem_cache *_tio_cache;
static struct kmem_cache *_rq_tio_cache;
static struct kmem_cache *_rq_bio_info_cache;

static int __init local_init(void)
{
	int r = -ENOMEM;

	/* allocate a slab for the dm_ios */
	_io_cache = KMEM_CACHE(dm_io, 0);
	if (!_io_cache)
		return r;

	/* allocate a slab for the target ios */
	_tio_cache = KMEM_CACHE(dm_target_io, 0);
	if (!_tio_cache)
		goto out_free_io_cache;

	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
	if (!_rq_tio_cache)
		goto out_free_tio_cache;

	_rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0);
	if (!_rq_bio_info_cache)
		goto out_free_rq_tio_cache;

	r = dm_uevent_init();
	if (r)
		goto out_free_rq_bio_info_cache;

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_uevent_exit;

	if (!_major)
		_major = r;

	return 0;

out_uevent_exit:
	dm_uevent_exit();
out_free_rq_bio_info_cache:
	kmem_cache_destroy(_rq_bio_info_cache);
out_free_rq_tio_cache:
	kmem_cache_destroy(_rq_tio_cache);
out_free_tio_cache:
	kmem_cache_destroy(_tio_cache);
out_free_io_cache:
	kmem_cache_destroy(_io_cache);

	return r;
}

static void local_exit(void)
{
	kmem_cache_destroy(_rq_bio_info_cache);
	kmem_cache_destroy(_rq_tio_cache);
	kmem_cache_destroy(_tio_cache);
	kmem_cache_destroy(_io_cache);
	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_kcopyd_init,
	dm_interface_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();
}

/*
 * Block device functions
 */
static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    test_bit(DMF_DELETING, &md->flags)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);

out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}
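
/*
 * Illustrative sketch (not part of the original source): dm_blk_open()
 * above and dm_lock_for_deletion() below serialize on the same
 * _minor_lock, so an opener either sees DMF_DELETING and fails, or has
 * raised open_count before a deleter can observe it as zero.  A caller
 * wanting to tear the device down would do something like:
 *
 *	if (dm_lock_for_deletion(md))
 *		return -EBUSY;	// still open somewhere; refuse to delete
 *	// from here on, no new dm_blk_open() can succeed
 */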

static int dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md = disk->private_data;

	atomic_dec(&md->open_count);
	dm_put(md);

	return 0;
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md))
		r = -EBUSY;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	struct dm_table *map = dm_get_table(md);
	struct dm_target *tgt;
	int r = -ENOTTY;

	if (!map || !dm_table_get_size(map))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		goto out;

	tgt = dm_table_get_target(map, 0);

	if (dm_suspended(md)) {
		r = -EAGAIN;
		goto out;
	}

	if (tgt->type->ioctl)
		r = tgt->type->ioctl(tgt, cmd, arg);

out:
	dm_table_put(map);

	return r;
}

static struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
	mempool_free(tio, md->tio_pool);
}

static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md)
{
	return mempool_alloc(md->tio_pool, GFP_ATOMIC);
}

static void free_rq_tio(struct dm_rq_target_io *tio)
{
	mempool_free(tio, tio->md->tio_pool);
}

static struct dm_rq_clone_bio_info *alloc_bio_info(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_ATOMIC);
}

static void free_bio_info(struct dm_rq_clone_bio_info *info)
{
	mempool_free(info, info->tio->md->io_pool);
}

static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	int cpu;

	io->start_time = jiffies;

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_unlock();
	dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
}
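
/*
 * Descriptive note: md->pending counts ios between start_io_acct() above
 * and end_io_acct() below.  The gendisk's part0.in_flight mirrors that
 * count for iostat, and end_io_acct() wakes md->wait once it reaches
 * zero, which is what lets a suspend wait for the device to quiesce.
 */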

static void end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	unsigned long duration = jiffies - io->start_time;
	int pending, cpu;
	int rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
	part_stat_unlock();

	/*
	 * After this is decremented the bio must not be touched if it is
	 * a barrier.
	 */
	dm_disk(md)->part0.in_flight = pending =
		atomic_dec_return(&md->pending);

	/* nudge anyone waiting on suspend queue */
	if (!pending)
		wake_up(&md->wait);
}

/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	down_write(&md->io_lock);

	spin_lock_irq(&md->deferred_lock);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irq(&md->deferred_lock);

	if (!test_and_set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags))
		queue_work(md->wq, &md->work);

	up_write(&md->io_lock);
}

/*
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_table_put() when finished.
 */
struct dm_table *dm_get_table(struct mapped_device *md)
{
	struct dm_table *t;
	unsigned long flags;

	read_lock_irqsave(&md->map_lock, flags);
	t = md->map;
	if (t)
		dm_table_get(t);
	read_unlock_irqrestore(&md->map_lock, flags);

	return t;
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}
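
/*
 * Worked example (illustrative only) for the capacity check in
 * dm_set_geometry() below: a classic 1024/255/63 CHS geometry gives
 *
 *	sz = 1024 * 255 * 63 = 16450560 sectors (~7.8 GiB),
 *
 * so any geo->start beyond sector 16450560 is rejected with -EINVAL.
 */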

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

/*-----------------------------------------------------------------
 * CRUD START:
 *   A more elegant soln is in the works that uses the queue
 *   merge fn, unfortunately there are a couple of changes to
 *   the block layer that I want to make for this.  So in the
 *   interests of getting something for people to use I give
 *   you this clearly demarcated crap.
 *---------------------------------------------------------------*/

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, int error)
{
	unsigned long flags;
	int io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;

	/* Push-back supersedes any I/O errors */
	if (error && !(io->error > 0 && __noflush_suspending(md)))
		io->error = error;

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->error == DM_ENDIO_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md)) {
				if (!bio_barrier(io->bio))
					bio_list_add_head(&md->deferred,
							  io->bio);
			} else
				/* noflush suspend was interrupted. */
				io->error = -EIO;
			spin_unlock_irqrestore(&md->deferred_lock, flags);
		}

		io_error = io->error;
		bio = io->bio;

		if (bio_barrier(bio)) {
			/*
			 * There can be just one barrier request so we use
			 * a per-device variable for error reporting.
			 * Note that you can't touch the bio after end_io_acct
			 */
			if (!md->barrier_error && io_error != -EOPNOTSUPP)
				md->barrier_error = io_error;
			end_io_acct(io);
		} else {
			end_io_acct(io);

			if (io_error != DM_ENDIO_REQUEUE) {
				trace_block_bio_complete(md->queue, bio);

				bio_endio(bio, io_error);
			}
		}

		free_io(md, io);
	}
}

static void clone_endio(struct bio *bio, int error)
{
	int r = 0;
	struct dm_target_io *tio = bio->bi_private;
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		error = -EIO;

	if (endio) {
		r = endio(tio->ti, bio, error, &tio->info);
		if (r < 0 || r == DM_ENDIO_REQUEUE)
			/*
			 * error and requeue request are handled
			 * in dec_pending().
			 */
			error = r;
		else if (r == DM_ENDIO_INCOMPLETE)
			/* The target will handle the io */
			return;
		else if (r) {
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	/*
	 * Store md for cleanup instead of tio which is about to get freed.
	 */
	bio->bi_private = md->bs;

	free_tio(md, tio);
	bio_put(bio);
	dec_pending(io, error);
}

/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone, int error)
{
	struct dm_rq_clone_bio_info *info = clone->bi_private;
	struct dm_rq_target_io *tio = info->tio;
	struct bio *bio = info->orig;
	unsigned int nr_bytes = info->orig->bi_size;

	bio_put(clone);

	if (tio->error)
		/*
		 * An error has already been detected on the request.
		 * Once an error has occurred, just let clone->end_io()
		 * handle the remainder.
		 */
		return;
	else if (error) {
		/*
		 * Don't report the error to the upper layer yet.
		 * The error handling decision is made by the target driver,
		 * when the request is completed.
		 */
		tio->error = error;
		return;
	}

	/*
	 * I/O for the bio successfully completed.
	 * Notify the upper layer of the data completion.
	 */

	/*
	 * bios are processed from the head of the list.
	 * So the completing bio should always be rq->bio.
	 * If it's not, something wrong is happening.
	 */
	if (tio->orig->bio != bio)
		DMERR("bio completion is going in the middle of the request");

	/*
	 * Update the original request.
	 * Do not use blk_end_request() here, because it may complete
	 * the original request before the clone, and break the ordering.
	 */
	blk_update_request(tio->orig, 0, nr_bytes);
}
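
/*
 * Illustrative example of the partial-completion flow above: if an
 * original request was cloned into three 4 KiB bios, each successful
 * end_clone_bio() call advances the original by its nr_bytes via
 * blk_update_request(), so the upper layer sees 4 KiB, then 8 KiB, then
 * 12 KiB of the request completed, in submission order.  An error, by
 * contrast, is only parked in tio->error and reported when the whole
 * clone finishes.
 */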

/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Or do dm_get() before calling this function and dm_put() later.
 */
static void rq_completed(struct mapped_device *md, int run_queue)
{
	int wakeup_waiters = 0;
	struct request_queue *q = md->queue;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (!queue_in_flight(q))
		wakeup_waiters = 1;
	spin_unlock_irqrestore(q->queue_lock, flags);

	/* nudge anyone waiting on suspend queue */
	if (wakeup_waiters)
		wake_up(&md->wait);

	if (run_queue)
		blk_run_queue(q);

	/*
	 * dm_put() must be at the end of this function.  See the comment above.
	 */
	dm_put(md);
}

static void dm_unprep_request(struct request *rq)
{
	struct request *clone = rq->special;
	struct dm_rq_target_io *tio = clone->end_io_data;

	rq->special = NULL;
	rq->cmd_flags &= ~REQ_DONTPREP;

	blk_rq_unprep_clone(clone);
	free_rq_tio(tio);
}

/*
 * Requeue the original request of a clone.
 */
void dm_requeue_unmapped_request(struct request *clone)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	struct request_queue *q = rq->q;
	unsigned long flags;

	dm_unprep_request(rq);

	spin_lock_irqsave(q->queue_lock, flags);
	if (elv_queue_empty(q))
		blk_plug_device(q);
	blk_requeue_request(q, rq);
	spin_unlock_irqrestore(q->queue_lock, flags);

	rq_completed(md, 0);
}
EXPORT_SYMBOL_GPL(dm_requeue_unmapped_request);

static void __stop_queue(struct request_queue *q)
{
	blk_stop_queue(q);
}

static void stop_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__stop_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static void __start_queue(struct request_queue *q)
{
	if (blk_queue_stopped(q))
		blk_start_queue(q);
}

static void start_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

/*
 * Complete the clone and the original request.
 * Must be called without queue lock.
 */
static void dm_end_request(struct request *clone, int error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;

	if (blk_pc_request(rq)) {
		rq->errors = clone->errors;
		rq->resid_len = clone->resid_len;

		if (rq->sense)
			/*
			 * We are using the sense buffer of the original
			 * request.
			 * So setting the length of the sense data is enough.
			 */
			rq->sense_len = clone->sense_len;
	}

	BUG_ON(clone->bio);
	free_rq_tio(tio);

	blk_end_request_all(rq, error);

	rq_completed(md, 1);
}

/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
{
	struct request *clone = rq->completion_data;
	struct dm_rq_target_io *tio = clone->end_io_data;
	dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io;
	int error = tio->error;

	if (!(rq->cmd_flags & REQ_FAILED) && rq_end_io)
		error = rq_end_io(tio->ti, clone, error, &tio->info);

	if (error <= 0)
		/* The target wants to complete the I/O */
		dm_end_request(clone, error);
	else if (error == DM_ENDIO_INCOMPLETE)
		/* The target will handle the I/O */
		return;
	else if (error == DM_ENDIO_REQUEUE)
		/* The target wants to requeue the I/O */
		dm_requeue_unmapped_request(clone);
	else {
		DMWARN("unimplemented target endio return value: %d", error);
		BUG();
	}
}
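
/*
 * Illustrative sketch (not part of the original source) of the
 * rq_end_io contract that dm_softirq_done() above dispatches on: a
 * target's completion hook returns one of the DM_ENDIO_* codes or an
 * error.  path_is_retryable() below is a hypothetical helper.
 *
 *	static int example_rq_end_io(struct dm_target *ti,
 *				     struct request *clone, int error,
 *				     union map_info *map_context)
 *	{
 *		if (error && path_is_retryable(error))
 *			return DM_ENDIO_REQUEUE;  // resubmit the original
 *		return error;  // <= 0: complete the I/O with this status
 *	}
 */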

/*
 * Complete the clone and the original request with the error status
 * through softirq context.
 */
static void dm_complete_request(struct request *clone, int error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct request *rq = tio->orig;

	tio->error = error;
	rq->completion_data = clone;
	blk_complete_request(rq);
}

/*
 * Complete the not-mapped clone and the original request with the error status
 * through softirq context.
 * Target's rq_end_io() function isn't called.
 * This may be used when the target's map_rq() function fails.
 */
void dm_kill_unmapped_request(struct request *clone, int error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct request *rq = tio->orig;

	rq->cmd_flags |= REQ_FAILED;
	dm_complete_request(clone, error);
}
EXPORT_SYMBOL_GPL(dm_kill_unmapped_request);

/*
 * Called with the queue lock held
 */
static void end_clone_request(struct request *clone, int error)
{
	/*
	 * For just cleaning up the information of the queue in which
	 * the clone was dispatched.
	 * The clone is *NOT* actually freed here because it is allocated
	 * from dm's own mempool and REQ_ALLOCED isn't set in
	 * clone->cmd_flags.
	 */
	__blk_put_request(clone->q, clone);

	/*
	 * Actual request completion is done in a softirq context which doesn't
	 * hold the queue lock.  Otherwise, deadlock could occur because:
	 *   - another request may be submitted by the upper level driver
	 *     of the stacking during the completion
	 *   - the submission which requires queue lock may be done
	 *     against this queue
	 */
	dm_complete_request(clone, error);
}

static sector_t max_io_len(struct mapped_device *md,
			   sector_t sector, struct dm_target *ti)
{
	sector_t offset = sector - ti->begin;
	sector_t len = ti->len - offset;

	/*
	 * Does the target need to split even further ?
	 */
	if (ti->split_io) {
		sector_t boundary;
		boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
			   - offset;
		if (len > boundary)
			len = boundary;
	}

	return len;
}

static void __map_bio(struct dm_target *ti, struct bio *clone,
		      struct dm_target_io *tio)
{
	int r;
	sector_t sector;
	struct mapped_device *md;

	clone->bi_end_io = clone_endio;
	clone->bi_private = tio;

	/*
	 * Map the clone.  If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&tio->io->io_count);
	sector = clone->bi_sector;
	r = ti->type->map(ti, clone, &tio->info);
	if (r == DM_MAPIO_REMAPPED) {
		/* the bio has been remapped so dispatch it */

		trace_block_remap(bdev_get_queue(clone->bi_bdev), clone,
				  tio->io->bio->bi_bdev->bd_dev, sector);

		generic_make_request(clone);
	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
		/* error the io and bail out, or requeue it if needed */
		md = tio->io->md;
		dec_pending(tio->io, r);
		/*
		 * Store bio_set for cleanup.
		 */
		clone->bi_private = md->bs;
		bio_put(clone);
		free_tio(md, tio);
	} else if (r) {
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}
}

struct clone_info {
	struct mapped_device *md;
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	sector_t sector_count;
	unsigned short idx;
};

static void dm_bio_destructor(struct bio *bio)
{
	struct bio_set *bs = bio->bi_private;

	bio_free(bio, bs);
}
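
/*
 * Illustrative example (not from the original source) for split_bvec()
 * below: if max_io_len() leaves room for only 1 sector but the current
 * bvec holds 8, the first clone takes the bvec with offset 0 and len 1;
 * the splitting loop in __clone_and_map() then clones the same bvec
 * again with offset 512 bytes and len 7 for the next target.  Each such
 * clone carries exactly one bio_vec.
 */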

/*
 * Creates a little bio that just does part of a bvec.
 */
static struct bio *split_bvec(struct bio *bio, sector_t sector,
			      unsigned short idx, unsigned int offset,
			      unsigned int len, struct bio_set *bs)
{
	struct bio *clone;
	struct bio_vec *bv = bio->bi_io_vec + idx;

	clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
	clone->bi_destructor = dm_bio_destructor;
	*clone->bi_io_vec = *bv;

	clone->bi_sector = sector;
	clone->bi_bdev = bio->bi_bdev;
	clone->bi_rw = bio->bi_rw & ~(1 << BIO_RW_BARRIER);
	clone->bi_vcnt = 1;
	clone->bi_size = to_bytes(len);
	clone->bi_io_vec->bv_offset = offset;
	clone->bi_io_vec->bv_len = clone->bi_size;
	clone->bi_flags |= 1 << BIO_CLONED;

	if (bio_integrity(bio)) {
		bio_integrity_clone(clone, bio, GFP_NOIO, bs);
		bio_integrity_trim(clone,
				   bio_sector_offset(bio, idx, offset), len);
	}

	return clone;
}

/*
 * Creates a bio that consists of a range of complete bvecs.
 */
static struct bio *clone_bio(struct bio *bio, sector_t sector,
			     unsigned short idx, unsigned short bv_count,
			     unsigned int len, struct bio_set *bs)
{
	struct bio *clone;

	clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
	__bio_clone(clone, bio);
	clone->bi_rw &= ~(1 << BIO_RW_BARRIER);
	clone->bi_destructor = dm_bio_destructor;
	clone->bi_sector = sector;
	clone->bi_idx = idx;
	clone->bi_vcnt = idx + bv_count;
	clone->bi_size = to_bytes(len);
	clone->bi_flags &= ~(1 << BIO_SEG_VALID);

	if (bio_integrity(bio)) {
		bio_integrity_clone(clone, bio, GFP_NOIO, bs);

		if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
			bio_integrity_trim(clone,
					   bio_sector_offset(bio, idx, 0), len);
	}

	return clone;
}

static struct dm_target_io *alloc_tio(struct clone_info *ci,
				      struct dm_target *ti)
{
	struct dm_target_io *tio = mempool_alloc(ci->md->tio_pool, GFP_NOIO);

	tio->io = ci->io;
	tio->ti = ti;
	memset(&tio->info, 0, sizeof(tio->info));

	return tio;
}

static void __flush_target(struct clone_info *ci, struct dm_target *ti,
			   unsigned flush_nr)
{
	struct dm_target_io *tio = alloc_tio(ci, ti);
	struct bio *clone;

	tio->info.flush_request = flush_nr;

	clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs);
	__bio_clone(clone, ci->bio);
	clone->bi_destructor = dm_bio_destructor;

	__map_bio(ti, clone, tio);
}

static int __clone_and_map_empty_barrier(struct clone_info *ci)
{
	unsigned target_nr = 0, flush_nr;
	struct dm_target *ti;

	while ((ti = dm_table_get_target(ci->map, target_nr++)))
		for (flush_nr = 0; flush_nr < ti->num_flush_requests;
		     flush_nr++)
			__flush_target(ci, ti, flush_nr);

	ci->sector_count = 0;

	return 0;
}

static int __clone_and_map(struct clone_info *ci)
{
	struct bio *clone, *bio = ci->bio;
	struct dm_target *ti;
	sector_t len = 0, max;
	struct dm_target_io *tio;

	if (unlikely(bio_empty_barrier(bio)))
		return __clone_and_map_empty_barrier(ci);

	ti = dm_table_find_target(ci->map, ci->sector);
	if (!dm_target_is_valid(ti))
		return -EIO;

	max = max_io_len(ci->md, ci->sector, ti);

	/*
	 * Allocate a target io object.
	 */
	tio = alloc_tio(ci, ti);

	if (ci->sector_count <= max) {
		/*
		 * Optimise for the simple case where we can do all of
		 * the remaining io with a single clone.
		 */
		clone = clone_bio(bio, ci->sector, ci->idx,
				  bio->bi_vcnt - ci->idx, ci->sector_count,
				  ci->md->bs);
		__map_bio(ti, clone, tio);
		ci->sector_count = 0;

	} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
		/*
		 * There are some bvecs that don't span targets.
		 * Do as many of these as possible.
		 */
		int i;
		sector_t remaining = max;
		sector_t bv_len;

		for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
			bv_len = to_sector(bio->bi_io_vec[i].bv_len);

			if (bv_len > remaining)
				break;

			remaining -= bv_len;
			len += bv_len;
		}

		clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len,
				  ci->md->bs);
		__map_bio(ti, clone, tio);

		ci->sector += len;
		ci->sector_count -= len;
		ci->idx = i;

	} else {
		/*
		 * Handle a bvec that must be split between two or more targets.
		 */
		struct bio_vec *bv = bio->bi_io_vec + ci->idx;
		sector_t remaining = to_sector(bv->bv_len);
		unsigned int offset = 0;

		do {
			if (offset) {
				ti = dm_table_find_target(ci->map, ci->sector);
				if (!dm_target_is_valid(ti))
					return -EIO;

				max = max_io_len(ci->md, ci->sector, ti);

				tio = alloc_tio(ci, ti);
			}

			len = min(remaining, max);

			clone = split_bvec(bio, ci->sector, ci->idx,
					   bv->bv_offset + offset, len,
					   ci->md->bs);

			__map_bio(ti, clone, tio);

			ci->sector += len;
			ci->sector_count -= len;
			offset += to_bytes(len);
		} while (remaining -= len);

		ci->idx++;
	}

	return 0;
}
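
/*
 * Worked example (illustrative only) of the three cases above, for a
 * bio of 16 sectors whose target ends 8 sectors in (max = 8):
 *   1. If the bio were only 8 sectors, one clone_bio() would cover it.
 *   2. If the bio's bvecs are 4 sectors each, two whole bvecs are
 *      batched into one clone up to the 8-sector limit.
 *   3. If the first bvec is itself 16 sectors, split_bvec() carves an
 *      8-sector clone for this target and another for the next.
 */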

/*
 * Split the bio into several clones and submit it to targets.
 */
static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
{
	struct clone_info ci;
	int error = 0;

	ci.map = dm_get_table(md);
	if (unlikely(!ci.map)) {
		if (!bio_barrier(bio))
			bio_io_error(bio);
		else
			if (!md->barrier_error)
				md->barrier_error = -EIO;
		return;
	}

	ci.md = md;
	ci.bio = bio;
	ci.io = alloc_io(md);
	ci.io->error = 0;
	atomic_set(&ci.io->io_count, 1);
	ci.io->bio = bio;
	ci.io->md = md;
	ci.sector = bio->bi_sector;
	ci.sector_count = bio_sectors(bio);
	if (unlikely(bio_empty_barrier(bio)))
		ci.sector_count = 1;
	ci.idx = bio->bi_idx;

	start_io_acct(ci.io);
	while (ci.sector_count && !error)
		error = __clone_and_map(&ci);

	/* drop the extra reference count */
	dec_pending(ci.io, error);
	dm_table_put(ci.map);
}
/*-----------------------------------------------------------------
 * CRUD END
 *---------------------------------------------------------------*/

static int dm_merge_bvec(struct request_queue *q,
			 struct bvec_merge_data *bvm,
			 struct bio_vec *biovec)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);
	struct dm_target *ti;
	sector_t max_sectors;
	int max_size = 0;

	if (unlikely(!map))
		goto out;

	ti = dm_table_find_target(map, bvm->bi_sector);
	if (!dm_target_is_valid(ti))
		goto out_table;

	/*
	 * Find maximum amount of I/O that won't need splitting
	 */
	max_sectors = min(max_io_len(md, bvm->bi_sector, ti),
			  (sector_t) BIO_MAX_SECTORS);
	max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
	if (max_size < 0)
		max_size = 0;

	/*
	 * merge_bvec_fn() returns number of bytes
	 * it can accept at this offset
	 * max is precomputed maximal io size
	 */
Broz if (max_size && ti->type->merge) 1272f6fccb12SMilan Broz max_size = ti->type->merge(ti, bvm, biovec, max_size); 12738cbeb67aSMikulas Patocka /* 12748cbeb67aSMikulas Patocka * If the target doesn't support merge method and some of the devices 12758cbeb67aSMikulas Patocka * provided their merge_bvec method (we know this by looking at 12768cbeb67aSMikulas Patocka * queue_max_hw_sectors), then we can't allow bios with multiple vector 12778cbeb67aSMikulas Patocka * entries. So always set max_size to 0, and the code below allows 12788cbeb67aSMikulas Patocka * just one page. 12798cbeb67aSMikulas Patocka */ 12808cbeb67aSMikulas Patocka else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9) 12818cbeb67aSMikulas Patocka 12828cbeb67aSMikulas Patocka max_size = 0; 1283f6fccb12SMilan Broz 1284b01cd5acSMikulas Patocka out_table: 12855037108aSMikulas Patocka dm_table_put(map); 12865037108aSMikulas Patocka 12875037108aSMikulas Patocka out: 1288f6fccb12SMilan Broz /* 1289f6fccb12SMilan Broz * Always allow an entire first page 1290f6fccb12SMilan Broz */ 1291f6fccb12SMilan Broz if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT)) 1292f6fccb12SMilan Broz max_size = biovec->bv_len; 1293f6fccb12SMilan Broz 1294f6fccb12SMilan Broz return max_size; 1295f6fccb12SMilan Broz } 1296f6fccb12SMilan Broz 12971da177e4SLinus Torvalds /* 12981da177e4SLinus Torvalds * The request function that just remaps the bio built up by 12991da177e4SLinus Torvalds * dm_merge_bvec. 13001da177e4SLinus Torvalds */ 1301cec47e3dSKiyoshi Ueda static int _dm_request(struct request_queue *q, struct bio *bio) 13021da177e4SLinus Torvalds { 130312f03a49SKevin Corry int rw = bio_data_dir(bio); 13041da177e4SLinus Torvalds struct mapped_device *md = q->queuedata; 1305c9959059STejun Heo int cpu; 13061da177e4SLinus Torvalds 13072ca3310eSAlasdair G Kergon down_read(&md->io_lock); 13081da177e4SLinus Torvalds 1309074a7acaSTejun Heo cpu = part_stat_lock(); 1310074a7acaSTejun Heo part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]); 1311074a7acaSTejun Heo part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio)); 1312074a7acaSTejun Heo part_stat_unlock(); 131312f03a49SKevin Corry 13141da177e4SLinus Torvalds /* 13151eb787ecSAlasdair G Kergon * If we're suspended or the thread is processing barriers 13161eb787ecSAlasdair G Kergon * we have to queue this io for later. 
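 * (Readahead bios are an exception: while the device is blocked for
 * suspend they are failed immediately below rather than queued.)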
13171da177e4SLinus Torvalds */ 1318af7e466aSMikulas Patocka if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) || 1319af7e466aSMikulas Patocka unlikely(bio_barrier(bio))) { 13202ca3310eSAlasdair G Kergon up_read(&md->io_lock); 13211da177e4SLinus Torvalds 132254d9a1b4SAlasdair G Kergon if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) && 132354d9a1b4SAlasdair G Kergon bio_rw(bio) == READA) { 132454d9a1b4SAlasdair G Kergon bio_io_error(bio); 132554d9a1b4SAlasdair G Kergon return 0; 132654d9a1b4SAlasdair G Kergon } 13271da177e4SLinus Torvalds 132892c63902SMikulas Patocka queue_io(md, bio); 13291da177e4SLinus Torvalds 133092c63902SMikulas Patocka return 0; 13311da177e4SLinus Torvalds } 13321da177e4SLinus Torvalds 1333f0b9a450SMikulas Patocka __split_and_process_bio(md, bio); 13342ca3310eSAlasdair G Kergon up_read(&md->io_lock); 1335f0b9a450SMikulas Patocka return 0; 13361da177e4SLinus Torvalds } 13371da177e4SLinus Torvalds 1338cec47e3dSKiyoshi Ueda static int dm_make_request(struct request_queue *q, struct bio *bio) 1339cec47e3dSKiyoshi Ueda { 1340cec47e3dSKiyoshi Ueda struct mapped_device *md = q->queuedata; 1341cec47e3dSKiyoshi Ueda 1342cec47e3dSKiyoshi Ueda if (unlikely(bio_barrier(bio))) { 1343cec47e3dSKiyoshi Ueda bio_endio(bio, -EOPNOTSUPP); 1344cec47e3dSKiyoshi Ueda return 0; 1345cec47e3dSKiyoshi Ueda } 1346cec47e3dSKiyoshi Ueda 1347cec47e3dSKiyoshi Ueda return md->saved_make_request_fn(q, bio); /* call __make_request() */ 1348cec47e3dSKiyoshi Ueda } 1349cec47e3dSKiyoshi Ueda 1350cec47e3dSKiyoshi Ueda static int dm_request_based(struct mapped_device *md) 1351cec47e3dSKiyoshi Ueda { 1352cec47e3dSKiyoshi Ueda return blk_queue_stackable(md->queue); 1353cec47e3dSKiyoshi Ueda } 1354cec47e3dSKiyoshi Ueda 1355cec47e3dSKiyoshi Ueda static int dm_request(struct request_queue *q, struct bio *bio) 1356cec47e3dSKiyoshi Ueda { 1357cec47e3dSKiyoshi Ueda struct mapped_device *md = q->queuedata; 1358cec47e3dSKiyoshi Ueda 1359cec47e3dSKiyoshi Ueda if (dm_request_based(md)) 1360cec47e3dSKiyoshi Ueda return dm_make_request(q, bio); 1361cec47e3dSKiyoshi Ueda 1362cec47e3dSKiyoshi Ueda return _dm_request(q, bio); 1363cec47e3dSKiyoshi Ueda } 1364cec47e3dSKiyoshi Ueda 1365cec47e3dSKiyoshi Ueda void dm_dispatch_request(struct request *rq) 1366cec47e3dSKiyoshi Ueda { 1367cec47e3dSKiyoshi Ueda int r; 1368cec47e3dSKiyoshi Ueda 1369cec47e3dSKiyoshi Ueda if (blk_queue_io_stat(rq->q)) 1370cec47e3dSKiyoshi Ueda rq->cmd_flags |= REQ_IO_STAT; 1371cec47e3dSKiyoshi Ueda 1372cec47e3dSKiyoshi Ueda rq->start_time = jiffies; 1373cec47e3dSKiyoshi Ueda r = blk_insert_cloned_request(rq->q, rq); 1374cec47e3dSKiyoshi Ueda if (r) 1375cec47e3dSKiyoshi Ueda dm_complete_request(rq, r); 1376cec47e3dSKiyoshi Ueda } 1377cec47e3dSKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_dispatch_request); 1378cec47e3dSKiyoshi Ueda 1379cec47e3dSKiyoshi Ueda static void dm_rq_bio_destructor(struct bio *bio) 1380cec47e3dSKiyoshi Ueda { 1381cec47e3dSKiyoshi Ueda struct dm_rq_clone_bio_info *info = bio->bi_private; 1382cec47e3dSKiyoshi Ueda struct mapped_device *md = info->tio->md; 1383cec47e3dSKiyoshi Ueda 1384cec47e3dSKiyoshi Ueda free_bio_info(info); 1385cec47e3dSKiyoshi Ueda bio_free(bio, md->bs); 1386cec47e3dSKiyoshi Ueda } 1387cec47e3dSKiyoshi Ueda 1388cec47e3dSKiyoshi Ueda static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig, 1389cec47e3dSKiyoshi Ueda void *data) 1390cec47e3dSKiyoshi Ueda { 1391cec47e3dSKiyoshi Ueda struct dm_rq_target_io *tio = data; 1392cec47e3dSKiyoshi Ueda struct mapped_device *md = tio->md; 
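	/*
	 * One dm_rq_clone_bio_info is allocated from md's mempool per
	 * cloned bio, so that end_clone_bio() can find both the original
	 * bio and the owning tio.
	 */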
1393cec47e3dSKiyoshi Ueda struct dm_rq_clone_bio_info *info = alloc_bio_info(md); 1394cec47e3dSKiyoshi Ueda 1395cec47e3dSKiyoshi Ueda if (!info) 1396cec47e3dSKiyoshi Ueda return -ENOMEM; 1397cec47e3dSKiyoshi Ueda 1398cec47e3dSKiyoshi Ueda info->orig = bio_orig; 1399cec47e3dSKiyoshi Ueda info->tio = tio; 1400cec47e3dSKiyoshi Ueda bio->bi_end_io = end_clone_bio; 1401cec47e3dSKiyoshi Ueda bio->bi_private = info; 1402cec47e3dSKiyoshi Ueda bio->bi_destructor = dm_rq_bio_destructor; 1403cec47e3dSKiyoshi Ueda 1404cec47e3dSKiyoshi Ueda return 0; 1405cec47e3dSKiyoshi Ueda } 1406cec47e3dSKiyoshi Ueda 1407cec47e3dSKiyoshi Ueda static int setup_clone(struct request *clone, struct request *rq, 1408cec47e3dSKiyoshi Ueda struct dm_rq_target_io *tio) 1409cec47e3dSKiyoshi Ueda { 1410cec47e3dSKiyoshi Ueda int r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC, 1411cec47e3dSKiyoshi Ueda dm_rq_bio_constructor, tio); 1412cec47e3dSKiyoshi Ueda 1413cec47e3dSKiyoshi Ueda if (r) 1414cec47e3dSKiyoshi Ueda return r; 1415cec47e3dSKiyoshi Ueda 1416cec47e3dSKiyoshi Ueda clone->cmd = rq->cmd; 1417cec47e3dSKiyoshi Ueda clone->cmd_len = rq->cmd_len; 1418cec47e3dSKiyoshi Ueda clone->sense = rq->sense; 1419cec47e3dSKiyoshi Ueda clone->buffer = rq->buffer; 1420cec47e3dSKiyoshi Ueda clone->end_io = end_clone_request; 1421cec47e3dSKiyoshi Ueda clone->end_io_data = tio; 1422cec47e3dSKiyoshi Ueda 1423cec47e3dSKiyoshi Ueda return 0; 1424cec47e3dSKiyoshi Ueda } 1425cec47e3dSKiyoshi Ueda 1426cec47e3dSKiyoshi Ueda static int dm_rq_flush_suspending(struct mapped_device *md) 1427cec47e3dSKiyoshi Ueda { 1428cec47e3dSKiyoshi Ueda return !md->suspend_rq.special; 1429cec47e3dSKiyoshi Ueda } 1430cec47e3dSKiyoshi Ueda 1431cec47e3dSKiyoshi Ueda /* 1432cec47e3dSKiyoshi Ueda * Called with the queue lock held. 
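 * On success the clone is stashed in rq->special and REQ_DONTPREP is
 * set so a requeued request is not prepared a second time; mempool
 * exhaustion returns BLKPREP_DEFER so the block layer retries later.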
1433cec47e3dSKiyoshi Ueda */ 1434cec47e3dSKiyoshi Ueda static int dm_prep_fn(struct request_queue *q, struct request *rq) 1435cec47e3dSKiyoshi Ueda { 1436cec47e3dSKiyoshi Ueda struct mapped_device *md = q->queuedata; 1437cec47e3dSKiyoshi Ueda struct dm_rq_target_io *tio; 1438cec47e3dSKiyoshi Ueda struct request *clone; 1439cec47e3dSKiyoshi Ueda 1440cec47e3dSKiyoshi Ueda if (unlikely(rq == &md->suspend_rq)) { 1441cec47e3dSKiyoshi Ueda if (dm_rq_flush_suspending(md)) 1442cec47e3dSKiyoshi Ueda return BLKPREP_OK; 1443cec47e3dSKiyoshi Ueda else 1444cec47e3dSKiyoshi Ueda /* The flush suspend was interrupted */ 1445cec47e3dSKiyoshi Ueda return BLKPREP_KILL; 1446cec47e3dSKiyoshi Ueda } 1447cec47e3dSKiyoshi Ueda 1448cec47e3dSKiyoshi Ueda if (unlikely(rq->special)) { 1449cec47e3dSKiyoshi Ueda DMWARN("Already has something in rq->special."); 1450cec47e3dSKiyoshi Ueda return BLKPREP_KILL; 1451cec47e3dSKiyoshi Ueda } 1452cec47e3dSKiyoshi Ueda 1453cec47e3dSKiyoshi Ueda tio = alloc_rq_tio(md); /* Only one for each original request */ 1454cec47e3dSKiyoshi Ueda if (!tio) 1455cec47e3dSKiyoshi Ueda /* -ENOMEM */ 1456cec47e3dSKiyoshi Ueda return BLKPREP_DEFER; 1457cec47e3dSKiyoshi Ueda 1458cec47e3dSKiyoshi Ueda tio->md = md; 1459cec47e3dSKiyoshi Ueda tio->ti = NULL; 1460cec47e3dSKiyoshi Ueda tio->orig = rq; 1461cec47e3dSKiyoshi Ueda tio->error = 0; 1462cec47e3dSKiyoshi Ueda memset(&tio->info, 0, sizeof(tio->info)); 1463cec47e3dSKiyoshi Ueda 1464cec47e3dSKiyoshi Ueda clone = &tio->clone; 1465cec47e3dSKiyoshi Ueda if (setup_clone(clone, rq, tio)) { 1466cec47e3dSKiyoshi Ueda /* -ENOMEM */ 1467cec47e3dSKiyoshi Ueda free_rq_tio(tio); 1468cec47e3dSKiyoshi Ueda return BLKPREP_DEFER; 1469cec47e3dSKiyoshi Ueda } 1470cec47e3dSKiyoshi Ueda 1471cec47e3dSKiyoshi Ueda rq->special = clone; 1472cec47e3dSKiyoshi Ueda rq->cmd_flags |= REQ_DONTPREP; 1473cec47e3dSKiyoshi Ueda 1474cec47e3dSKiyoshi Ueda return BLKPREP_OK; 1475cec47e3dSKiyoshi Ueda } 1476cec47e3dSKiyoshi Ueda 1477cec47e3dSKiyoshi Ueda static void map_request(struct dm_target *ti, struct request *rq, 1478cec47e3dSKiyoshi Ueda struct mapped_device *md) 1479cec47e3dSKiyoshi Ueda { 1480cec47e3dSKiyoshi Ueda int r; 1481cec47e3dSKiyoshi Ueda struct request *clone = rq->special; 1482cec47e3dSKiyoshi Ueda struct dm_rq_target_io *tio = clone->end_io_data; 1483cec47e3dSKiyoshi Ueda 1484cec47e3dSKiyoshi Ueda /* 1485cec47e3dSKiyoshi Ueda * Hold the md reference here for the in-flight I/O. 1486cec47e3dSKiyoshi Ueda * We can't rely on the reference count by device opener, 1487cec47e3dSKiyoshi Ueda * because the device may be closed during the request completion 1488cec47e3dSKiyoshi Ueda * when all bios are completed. 1489cec47e3dSKiyoshi Ueda * See the comment in rq_completed() too. 
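 * (rq_completed(), not shown here, is expected to drop this reference
 * once the clone finishes.)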
1490cec47e3dSKiyoshi Ueda  */
1491cec47e3dSKiyoshi Ueda 	dm_get(md);
1492cec47e3dSKiyoshi Ueda 
1493cec47e3dSKiyoshi Ueda 	tio->ti = ti;
1494cec47e3dSKiyoshi Ueda 	r = ti->type->map_rq(ti, clone, &tio->info);
1495cec47e3dSKiyoshi Ueda 	switch (r) {
1496cec47e3dSKiyoshi Ueda 	case DM_MAPIO_SUBMITTED:
1497cec47e3dSKiyoshi Ueda 		/* The target has taken the I/O to submit by itself later */
1498cec47e3dSKiyoshi Ueda 		break;
1499cec47e3dSKiyoshi Ueda 	case DM_MAPIO_REMAPPED:
1500cec47e3dSKiyoshi Ueda 		/* The target has remapped the I/O so dispatch it */
1501cec47e3dSKiyoshi Ueda 		dm_dispatch_request(clone);
1502cec47e3dSKiyoshi Ueda 		break;
1503cec47e3dSKiyoshi Ueda 	case DM_MAPIO_REQUEUE:
1504cec47e3dSKiyoshi Ueda 		/* The target wants to requeue the I/O */
1505cec47e3dSKiyoshi Ueda 		dm_requeue_unmapped_request(clone);
1506cec47e3dSKiyoshi Ueda 		break;
1507cec47e3dSKiyoshi Ueda 	default:
1508cec47e3dSKiyoshi Ueda 		if (r > 0) {
1509cec47e3dSKiyoshi Ueda 			DMWARN("unimplemented target map return value: %d", r);
1510cec47e3dSKiyoshi Ueda 			BUG();
1511cec47e3dSKiyoshi Ueda 		}
1512cec47e3dSKiyoshi Ueda 
1513cec47e3dSKiyoshi Ueda 		/* The target wants to complete the I/O */
1514cec47e3dSKiyoshi Ueda 		dm_kill_unmapped_request(clone, r);
1515cec47e3dSKiyoshi Ueda 		break;
1516cec47e3dSKiyoshi Ueda 	}
1517cec47e3dSKiyoshi Ueda }
1518cec47e3dSKiyoshi Ueda 
1519cec47e3dSKiyoshi Ueda /*
1520cec47e3dSKiyoshi Ueda  * q->request_fn for request-based dm.
1521cec47e3dSKiyoshi Ueda  * Called with the queue lock held.
1522cec47e3dSKiyoshi Ueda  */
1523cec47e3dSKiyoshi Ueda static void dm_request_fn(struct request_queue *q)
1524cec47e3dSKiyoshi Ueda {
1525cec47e3dSKiyoshi Ueda 	struct mapped_device *md = q->queuedata;
1526cec47e3dSKiyoshi Ueda 	struct dm_table *map = dm_get_table(md);
1527cec47e3dSKiyoshi Ueda 	struct dm_target *ti;
1528cec47e3dSKiyoshi Ueda 	struct request *rq;
1529cec47e3dSKiyoshi Ueda 
1530cec47e3dSKiyoshi Ueda 	/*
1531cec47e3dSKiyoshi Ueda 	 * For noflush suspend, check blk_queue_stopped() to immediately
1532cec47e3dSKiyoshi Ueda 	 * quit I/O dispatching.
1533cec47e3dSKiyoshi Ueda 	 */
1534cec47e3dSKiyoshi Ueda 	while (!blk_queue_plugged(q) && !blk_queue_stopped(q)) {
1535cec47e3dSKiyoshi Ueda 		rq = blk_peek_request(q);
1536cec47e3dSKiyoshi Ueda 		if (!rq)
1537cec47e3dSKiyoshi Ueda 			goto plug_and_out;
1538cec47e3dSKiyoshi Ueda 
1539cec47e3dSKiyoshi Ueda 		if (unlikely(rq == &md->suspend_rq)) { /* Flush suspend marker */
1540cec47e3dSKiyoshi Ueda 			if (queue_in_flight(q))
1541cec47e3dSKiyoshi Ueda 				/* Not quiet yet.
Wait more */ 1542cec47e3dSKiyoshi Ueda goto plug_and_out; 1543cec47e3dSKiyoshi Ueda 1544cec47e3dSKiyoshi Ueda /* This device should be quiet now */ 1545cec47e3dSKiyoshi Ueda __stop_queue(q); 1546cec47e3dSKiyoshi Ueda blk_start_request(rq); 1547cec47e3dSKiyoshi Ueda __blk_end_request_all(rq, 0); 1548cec47e3dSKiyoshi Ueda wake_up(&md->wait); 1549cec47e3dSKiyoshi Ueda goto out; 1550cec47e3dSKiyoshi Ueda } 1551cec47e3dSKiyoshi Ueda 1552cec47e3dSKiyoshi Ueda ti = dm_table_find_target(map, blk_rq_pos(rq)); 1553cec47e3dSKiyoshi Ueda if (ti->type->busy && ti->type->busy(ti)) 1554cec47e3dSKiyoshi Ueda goto plug_and_out; 1555cec47e3dSKiyoshi Ueda 1556cec47e3dSKiyoshi Ueda blk_start_request(rq); 1557cec47e3dSKiyoshi Ueda spin_unlock(q->queue_lock); 1558cec47e3dSKiyoshi Ueda map_request(ti, rq, md); 1559cec47e3dSKiyoshi Ueda spin_lock_irq(q->queue_lock); 1560cec47e3dSKiyoshi Ueda } 1561cec47e3dSKiyoshi Ueda 1562cec47e3dSKiyoshi Ueda goto out; 1563cec47e3dSKiyoshi Ueda 1564cec47e3dSKiyoshi Ueda plug_and_out: 1565cec47e3dSKiyoshi Ueda if (!elv_queue_empty(q)) 1566cec47e3dSKiyoshi Ueda /* Some requests still remain, retry later */ 1567cec47e3dSKiyoshi Ueda blk_plug_device(q); 1568cec47e3dSKiyoshi Ueda 1569cec47e3dSKiyoshi Ueda out: 1570cec47e3dSKiyoshi Ueda dm_table_put(map); 1571cec47e3dSKiyoshi Ueda 1572cec47e3dSKiyoshi Ueda return; 1573cec47e3dSKiyoshi Ueda } 1574cec47e3dSKiyoshi Ueda 1575cec47e3dSKiyoshi Ueda int dm_underlying_device_busy(struct request_queue *q) 1576cec47e3dSKiyoshi Ueda { 1577cec47e3dSKiyoshi Ueda return blk_lld_busy(q); 1578cec47e3dSKiyoshi Ueda } 1579cec47e3dSKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_underlying_device_busy); 1580cec47e3dSKiyoshi Ueda 1581cec47e3dSKiyoshi Ueda static int dm_lld_busy(struct request_queue *q) 1582cec47e3dSKiyoshi Ueda { 1583cec47e3dSKiyoshi Ueda int r; 1584cec47e3dSKiyoshi Ueda struct mapped_device *md = q->queuedata; 1585cec47e3dSKiyoshi Ueda struct dm_table *map = dm_get_table(md); 1586cec47e3dSKiyoshi Ueda 1587cec47e3dSKiyoshi Ueda if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) 1588cec47e3dSKiyoshi Ueda r = 1; 1589cec47e3dSKiyoshi Ueda else 1590cec47e3dSKiyoshi Ueda r = dm_table_any_busy_target(map); 1591cec47e3dSKiyoshi Ueda 1592cec47e3dSKiyoshi Ueda dm_table_put(map); 1593cec47e3dSKiyoshi Ueda 1594cec47e3dSKiyoshi Ueda return r; 1595cec47e3dSKiyoshi Ueda } 1596cec47e3dSKiyoshi Ueda 1597165125e1SJens Axboe static void dm_unplug_all(struct request_queue *q) 15981da177e4SLinus Torvalds { 15991da177e4SLinus Torvalds struct mapped_device *md = q->queuedata; 16001da177e4SLinus Torvalds struct dm_table *map = dm_get_table(md); 16011da177e4SLinus Torvalds 16021da177e4SLinus Torvalds if (map) { 1603cec47e3dSKiyoshi Ueda if (dm_request_based(md)) 1604cec47e3dSKiyoshi Ueda generic_unplug_device(q); 1605cec47e3dSKiyoshi Ueda 16061da177e4SLinus Torvalds dm_table_unplug_all(map); 16071da177e4SLinus Torvalds dm_table_put(map); 16081da177e4SLinus Torvalds } 16091da177e4SLinus Torvalds } 16101da177e4SLinus Torvalds 16111da177e4SLinus Torvalds static int dm_any_congested(void *congested_data, int bdi_bits) 16121da177e4SLinus Torvalds { 16138a57dfc6SChandra Seetharaman int r = bdi_bits; 16148a57dfc6SChandra Seetharaman struct mapped_device *md = congested_data; 16158a57dfc6SChandra Seetharaman struct dm_table *map; 16161da177e4SLinus Torvalds 16171eb787ecSAlasdair G Kergon if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 16188a57dfc6SChandra Seetharaman map = dm_get_table(md); 16198a57dfc6SChandra Seetharaman if (map) { 1620cec47e3dSKiyoshi Ueda /* 
1621cec47e3dSKiyoshi Ueda * Request-based dm cares about only own queue for 1622cec47e3dSKiyoshi Ueda * the query about congestion status of request_queue 1623cec47e3dSKiyoshi Ueda */ 1624cec47e3dSKiyoshi Ueda if (dm_request_based(md)) 1625cec47e3dSKiyoshi Ueda r = md->queue->backing_dev_info.state & 1626cec47e3dSKiyoshi Ueda bdi_bits; 1627cec47e3dSKiyoshi Ueda else 16281da177e4SLinus Torvalds r = dm_table_any_congested(map, bdi_bits); 1629cec47e3dSKiyoshi Ueda 16301da177e4SLinus Torvalds dm_table_put(map); 16318a57dfc6SChandra Seetharaman } 16328a57dfc6SChandra Seetharaman } 16338a57dfc6SChandra Seetharaman 16341da177e4SLinus Torvalds return r; 16351da177e4SLinus Torvalds } 16361da177e4SLinus Torvalds 16371da177e4SLinus Torvalds /*----------------------------------------------------------------- 16381da177e4SLinus Torvalds * An IDR is used to keep track of allocated minor numbers. 16391da177e4SLinus Torvalds *---------------------------------------------------------------*/ 16401da177e4SLinus Torvalds static DEFINE_IDR(_minor_idr); 16411da177e4SLinus Torvalds 16422b06cfffSAlasdair G Kergon static void free_minor(int minor) 16431da177e4SLinus Torvalds { 1644f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 16451da177e4SLinus Torvalds idr_remove(&_minor_idr, minor); 1646f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 16471da177e4SLinus Torvalds } 16481da177e4SLinus Torvalds 16491da177e4SLinus Torvalds /* 16501da177e4SLinus Torvalds * See if the device with a specific minor # is free. 16511da177e4SLinus Torvalds */ 1652cf13ab8eSFrederik Deweerdt static int specific_minor(int minor) 16531da177e4SLinus Torvalds { 16541da177e4SLinus Torvalds int r, m; 16551da177e4SLinus Torvalds 16561da177e4SLinus Torvalds if (minor >= (1 << MINORBITS)) 16571da177e4SLinus Torvalds return -EINVAL; 16581da177e4SLinus Torvalds 165962f75c2fSJeff Mahoney r = idr_pre_get(&_minor_idr, GFP_KERNEL); 166062f75c2fSJeff Mahoney if (!r) 166162f75c2fSJeff Mahoney return -ENOMEM; 166262f75c2fSJeff Mahoney 1663f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 16641da177e4SLinus Torvalds 16651da177e4SLinus Torvalds if (idr_find(&_minor_idr, minor)) { 16661da177e4SLinus Torvalds r = -EBUSY; 16671da177e4SLinus Torvalds goto out; 16681da177e4SLinus Torvalds } 16691da177e4SLinus Torvalds 1670ba61fdd1SJeff Mahoney r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m); 167162f75c2fSJeff Mahoney if (r) 16721da177e4SLinus Torvalds goto out; 16731da177e4SLinus Torvalds 16741da177e4SLinus Torvalds if (m != minor) { 16751da177e4SLinus Torvalds idr_remove(&_minor_idr, m); 16761da177e4SLinus Torvalds r = -EBUSY; 16771da177e4SLinus Torvalds goto out; 16781da177e4SLinus Torvalds } 16791da177e4SLinus Torvalds 16801da177e4SLinus Torvalds out: 1681f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 16821da177e4SLinus Torvalds return r; 16831da177e4SLinus Torvalds } 16841da177e4SLinus Torvalds 1685cf13ab8eSFrederik Deweerdt static int next_free_minor(int *minor) 16861da177e4SLinus Torvalds { 16872b06cfffSAlasdair G Kergon int r, m; 16881da177e4SLinus Torvalds 16891da177e4SLinus Torvalds r = idr_pre_get(&_minor_idr, GFP_KERNEL); 169062f75c2fSJeff Mahoney if (!r) 169162f75c2fSJeff Mahoney return -ENOMEM; 169262f75c2fSJeff Mahoney 1693f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 16941da177e4SLinus Torvalds 1695ba61fdd1SJeff Mahoney r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m); 1696cf13ab8eSFrederik Deweerdt if (r) 16971da177e4SLinus Torvalds goto out; 16981da177e4SLinus Torvalds 16991da177e4SLinus Torvalds if (m >= (1 << MINORBITS)) { 
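		/* The allocated id is beyond the minor space: undo and fail. */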
17001da177e4SLinus Torvalds idr_remove(&_minor_idr, m); 17011da177e4SLinus Torvalds r = -ENOSPC; 17021da177e4SLinus Torvalds goto out; 17031da177e4SLinus Torvalds } 17041da177e4SLinus Torvalds 17051da177e4SLinus Torvalds *minor = m; 17061da177e4SLinus Torvalds 17071da177e4SLinus Torvalds out: 1708f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 17091da177e4SLinus Torvalds return r; 17101da177e4SLinus Torvalds } 17111da177e4SLinus Torvalds 17121da177e4SLinus Torvalds static struct block_device_operations dm_blk_dops; 17131da177e4SLinus Torvalds 171453d5914fSMikulas Patocka static void dm_wq_work(struct work_struct *work); 171553d5914fSMikulas Patocka 17161da177e4SLinus Torvalds /* 17171da177e4SLinus Torvalds * Allocate and initialise a blank device with a given minor. 17181da177e4SLinus Torvalds */ 17192b06cfffSAlasdair G Kergon static struct mapped_device *alloc_dev(int minor) 17201da177e4SLinus Torvalds { 17211da177e4SLinus Torvalds int r; 1722cf13ab8eSFrederik Deweerdt struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL); 1723ba61fdd1SJeff Mahoney void *old_md; 17241da177e4SLinus Torvalds 17251da177e4SLinus Torvalds if (!md) { 17261da177e4SLinus Torvalds DMWARN("unable to allocate device, out of memory."); 17271da177e4SLinus Torvalds return NULL; 17281da177e4SLinus Torvalds } 17291da177e4SLinus Torvalds 173010da4f79SJeff Mahoney if (!try_module_get(THIS_MODULE)) 17316ed7ade8SMilan Broz goto bad_module_get; 173210da4f79SJeff Mahoney 17331da177e4SLinus Torvalds /* get a minor number for the dev */ 17342b06cfffSAlasdair G Kergon if (minor == DM_ANY_MINOR) 1735cf13ab8eSFrederik Deweerdt r = next_free_minor(&minor); 17362b06cfffSAlasdair G Kergon else 1737cf13ab8eSFrederik Deweerdt r = specific_minor(minor); 17381da177e4SLinus Torvalds if (r < 0) 17396ed7ade8SMilan Broz goto bad_minor; 17401da177e4SLinus Torvalds 17412ca3310eSAlasdair G Kergon init_rwsem(&md->io_lock); 1742e61290a4SDaniel Walker mutex_init(&md->suspend_lock); 1743022c2611SMikulas Patocka spin_lock_init(&md->deferred_lock); 17441da177e4SLinus Torvalds rwlock_init(&md->map_lock); 17451da177e4SLinus Torvalds atomic_set(&md->holders, 1); 17465c6bd75dSAlasdair G Kergon atomic_set(&md->open_count, 0); 17471da177e4SLinus Torvalds atomic_set(&md->event_nr, 0); 17487a8c3d3bSMike Anderson atomic_set(&md->uevent_seq, 0); 17497a8c3d3bSMike Anderson INIT_LIST_HEAD(&md->uevent_list); 17507a8c3d3bSMike Anderson spin_lock_init(&md->uevent_lock); 17511da177e4SLinus Torvalds 1752e6ee8c0bSKiyoshi Ueda md->queue = blk_init_queue(dm_request_fn, NULL); 17531da177e4SLinus Torvalds if (!md->queue) 17546ed7ade8SMilan Broz goto bad_queue; 17551da177e4SLinus Torvalds 1756e6ee8c0bSKiyoshi Ueda /* 1757e6ee8c0bSKiyoshi Ueda * Request-based dm devices cannot be stacked on top of bio-based dm 1758e6ee8c0bSKiyoshi Ueda * devices. The type of this dm device has not been decided yet, 1759e6ee8c0bSKiyoshi Ueda * although we initialized the queue using blk_init_queue(). 1760e6ee8c0bSKiyoshi Ueda * The type is decided at the first table loading time. 1761e6ee8c0bSKiyoshi Ueda * To prevent problematic device stacking, clear the queue flag 1762e6ee8c0bSKiyoshi Ueda * for request stacking support until then. 1763e6ee8c0bSKiyoshi Ueda * 1764e6ee8c0bSKiyoshi Ueda * This queue is new, so no concurrency on the queue_flags. 
1765e6ee8c0bSKiyoshi Ueda */ 1766e6ee8c0bSKiyoshi Ueda queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue); 1767e6ee8c0bSKiyoshi Ueda md->saved_make_request_fn = md->queue->make_request_fn; 17681da177e4SLinus Torvalds md->queue->queuedata = md; 17691da177e4SLinus Torvalds md->queue->backing_dev_info.congested_fn = dm_any_congested; 17701da177e4SLinus Torvalds md->queue->backing_dev_info.congested_data = md; 17711da177e4SLinus Torvalds blk_queue_make_request(md->queue, dm_request); 1772daef265fSJens Axboe blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY); 17731da177e4SLinus Torvalds md->queue->unplug_fn = dm_unplug_all; 1774f6fccb12SMilan Broz blk_queue_merge_bvec(md->queue, dm_merge_bvec); 1775e6ee8c0bSKiyoshi Ueda blk_queue_softirq_done(md->queue, dm_softirq_done); 1776e6ee8c0bSKiyoshi Ueda blk_queue_prep_rq(md->queue, dm_prep_fn); 1777e6ee8c0bSKiyoshi Ueda blk_queue_lld_busy(md->queue, dm_lld_busy); 17789faf400fSStefan Bader 17791da177e4SLinus Torvalds md->disk = alloc_disk(1); 17801da177e4SLinus Torvalds if (!md->disk) 17816ed7ade8SMilan Broz goto bad_disk; 17821da177e4SLinus Torvalds 1783f0b04115SJeff Mahoney atomic_set(&md->pending, 0); 1784f0b04115SJeff Mahoney init_waitqueue_head(&md->wait); 178553d5914fSMikulas Patocka INIT_WORK(&md->work, dm_wq_work); 1786f0b04115SJeff Mahoney init_waitqueue_head(&md->eventq); 1787f0b04115SJeff Mahoney 17881da177e4SLinus Torvalds md->disk->major = _major; 17891da177e4SLinus Torvalds md->disk->first_minor = minor; 17901da177e4SLinus Torvalds md->disk->fops = &dm_blk_dops; 17911da177e4SLinus Torvalds md->disk->queue = md->queue; 17921da177e4SLinus Torvalds md->disk->private_data = md; 17931da177e4SLinus Torvalds sprintf(md->disk->disk_name, "dm-%d", minor); 17941da177e4SLinus Torvalds add_disk(md->disk); 17957e51f257SMike Anderson format_dev_t(md->name, MKDEV(_major, minor)); 17961da177e4SLinus Torvalds 1797304f3f6aSMilan Broz md->wq = create_singlethread_workqueue("kdmflush"); 1798304f3f6aSMilan Broz if (!md->wq) 1799304f3f6aSMilan Broz goto bad_thread; 1800304f3f6aSMilan Broz 180132a926daSMikulas Patocka md->bdev = bdget_disk(md->disk, 0); 180232a926daSMikulas Patocka if (!md->bdev) 180332a926daSMikulas Patocka goto bad_bdev; 180432a926daSMikulas Patocka 1805ba61fdd1SJeff Mahoney /* Populate the mapping, nobody knows we exist yet */ 1806f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 1807ba61fdd1SJeff Mahoney old_md = idr_replace(&_minor_idr, md, minor); 1808f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 1809ba61fdd1SJeff Mahoney 1810ba61fdd1SJeff Mahoney BUG_ON(old_md != MINOR_ALLOCED); 1811ba61fdd1SJeff Mahoney 18121da177e4SLinus Torvalds return md; 18131da177e4SLinus Torvalds 181432a926daSMikulas Patocka bad_bdev: 181532a926daSMikulas Patocka destroy_workqueue(md->wq); 1816304f3f6aSMilan Broz bad_thread: 1817304f3f6aSMilan Broz put_disk(md->disk); 18186ed7ade8SMilan Broz bad_disk: 18191312f40eSAl Viro blk_cleanup_queue(md->queue); 18206ed7ade8SMilan Broz bad_queue: 18211da177e4SLinus Torvalds free_minor(minor); 18226ed7ade8SMilan Broz bad_minor: 182310da4f79SJeff Mahoney module_put(THIS_MODULE); 18246ed7ade8SMilan Broz bad_module_get: 18251da177e4SLinus Torvalds kfree(md); 18261da177e4SLinus Torvalds return NULL; 18271da177e4SLinus Torvalds } 18281da177e4SLinus Torvalds 1829ae9da83fSJun'ichi Nomura static void unlock_fs(struct mapped_device *md); 1830ae9da83fSJun'ichi Nomura 18311da177e4SLinus Torvalds static void free_dev(struct mapped_device *md) 18321da177e4SLinus Torvalds { 1833f331c029STejun Heo int minor = 
MINOR(disk_devt(md->disk)); 183463d94e48SJun'ichi Nomura 1835ae9da83fSJun'ichi Nomura unlock_fs(md); 1836db8fef4fSMikulas Patocka bdput(md->bdev); 1837304f3f6aSMilan Broz destroy_workqueue(md->wq); 1838e6ee8c0bSKiyoshi Ueda if (md->tio_pool) 18391da177e4SLinus Torvalds mempool_destroy(md->tio_pool); 1840e6ee8c0bSKiyoshi Ueda if (md->io_pool) 18411da177e4SLinus Torvalds mempool_destroy(md->io_pool); 1842e6ee8c0bSKiyoshi Ueda if (md->bs) 18439faf400fSStefan Bader bioset_free(md->bs); 18449c47008dSMartin K. Petersen blk_integrity_unregister(md->disk); 18451da177e4SLinus Torvalds del_gendisk(md->disk); 184663d94e48SJun'ichi Nomura free_minor(minor); 1847fba9f90eSJeff Mahoney 1848fba9f90eSJeff Mahoney spin_lock(&_minor_lock); 1849fba9f90eSJeff Mahoney md->disk->private_data = NULL; 1850fba9f90eSJeff Mahoney spin_unlock(&_minor_lock); 1851fba9f90eSJeff Mahoney 18521da177e4SLinus Torvalds put_disk(md->disk); 18531312f40eSAl Viro blk_cleanup_queue(md->queue); 185410da4f79SJeff Mahoney module_put(THIS_MODULE); 18551da177e4SLinus Torvalds kfree(md); 18561da177e4SLinus Torvalds } 18571da177e4SLinus Torvalds 1858e6ee8c0bSKiyoshi Ueda static void __bind_mempools(struct mapped_device *md, struct dm_table *t) 1859e6ee8c0bSKiyoshi Ueda { 1860e6ee8c0bSKiyoshi Ueda struct dm_md_mempools *p; 1861e6ee8c0bSKiyoshi Ueda 1862e6ee8c0bSKiyoshi Ueda if (md->io_pool && md->tio_pool && md->bs) 1863e6ee8c0bSKiyoshi Ueda /* the md already has necessary mempools */ 1864e6ee8c0bSKiyoshi Ueda goto out; 1865e6ee8c0bSKiyoshi Ueda 1866e6ee8c0bSKiyoshi Ueda p = dm_table_get_md_mempools(t); 1867e6ee8c0bSKiyoshi Ueda BUG_ON(!p || md->io_pool || md->tio_pool || md->bs); 1868e6ee8c0bSKiyoshi Ueda 1869e6ee8c0bSKiyoshi Ueda md->io_pool = p->io_pool; 1870e6ee8c0bSKiyoshi Ueda p->io_pool = NULL; 1871e6ee8c0bSKiyoshi Ueda md->tio_pool = p->tio_pool; 1872e6ee8c0bSKiyoshi Ueda p->tio_pool = NULL; 1873e6ee8c0bSKiyoshi Ueda md->bs = p->bs; 1874e6ee8c0bSKiyoshi Ueda p->bs = NULL; 1875e6ee8c0bSKiyoshi Ueda 1876e6ee8c0bSKiyoshi Ueda out: 1877e6ee8c0bSKiyoshi Ueda /* mempool bind completed, now no need any mempools in the table */ 1878e6ee8c0bSKiyoshi Ueda dm_table_free_md_mempools(t); 1879e6ee8c0bSKiyoshi Ueda } 1880e6ee8c0bSKiyoshi Ueda 18811da177e4SLinus Torvalds /* 18821da177e4SLinus Torvalds * Bind a table to the device. 
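 * (The helpers that follow -- event_callback(), __set_size() and
 * __bind() -- implement the pieces of this operation.)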
18831da177e4SLinus Torvalds */ 18841da177e4SLinus Torvalds static void event_callback(void *context) 18851da177e4SLinus Torvalds { 18867a8c3d3bSMike Anderson unsigned long flags; 18877a8c3d3bSMike Anderson LIST_HEAD(uevents); 18881da177e4SLinus Torvalds struct mapped_device *md = (struct mapped_device *) context; 18891da177e4SLinus Torvalds 18907a8c3d3bSMike Anderson spin_lock_irqsave(&md->uevent_lock, flags); 18917a8c3d3bSMike Anderson list_splice_init(&md->uevent_list, &uevents); 18927a8c3d3bSMike Anderson spin_unlock_irqrestore(&md->uevent_lock, flags); 18937a8c3d3bSMike Anderson 1894ed9e1982STejun Heo dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); 18957a8c3d3bSMike Anderson 18961da177e4SLinus Torvalds atomic_inc(&md->event_nr); 18971da177e4SLinus Torvalds wake_up(&md->eventq); 18981da177e4SLinus Torvalds } 18991da177e4SLinus Torvalds 19004e90188bSAlasdair G Kergon static void __set_size(struct mapped_device *md, sector_t size) 19011da177e4SLinus Torvalds { 19024e90188bSAlasdair G Kergon set_capacity(md->disk, size); 19031da177e4SLinus Torvalds 1904db8fef4fSMikulas Patocka mutex_lock(&md->bdev->bd_inode->i_mutex); 1905db8fef4fSMikulas Patocka i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT); 1906db8fef4fSMikulas Patocka mutex_unlock(&md->bdev->bd_inode->i_mutex); 19071da177e4SLinus Torvalds } 19081da177e4SLinus Torvalds 1909754c5fc7SMike Snitzer static int __bind(struct mapped_device *md, struct dm_table *t, 1910754c5fc7SMike Snitzer struct queue_limits *limits) 19111da177e4SLinus Torvalds { 1912165125e1SJens Axboe struct request_queue *q = md->queue; 19131da177e4SLinus Torvalds sector_t size; 1914523d9297SKiyoshi Ueda unsigned long flags; 19151da177e4SLinus Torvalds 19161da177e4SLinus Torvalds size = dm_table_get_size(t); 19173ac51e74SDarrick J. Wong 19183ac51e74SDarrick J. Wong /* 19193ac51e74SDarrick J. Wong * Wipe any geometry if the size of the table changed. 19203ac51e74SDarrick J. Wong */ 19213ac51e74SDarrick J. Wong if (size != get_capacity(md->disk)) 19223ac51e74SDarrick J. Wong memset(&md->geometry, 0, sizeof(md->geometry)); 19233ac51e74SDarrick J. Wong 19244e90188bSAlasdair G Kergon __set_size(md, size); 19251da177e4SLinus Torvalds 1926d5816876SMikulas Patocka if (!size) { 1927d5816876SMikulas Patocka dm_table_destroy(t); 1928d5816876SMikulas Patocka return 0; 1929d5816876SMikulas Patocka } 1930d5816876SMikulas Patocka 1931cf222b37SAlasdair G Kergon dm_table_event_callback(t, event_callback, md); 19322ca3310eSAlasdair G Kergon 1933e6ee8c0bSKiyoshi Ueda /* 1934e6ee8c0bSKiyoshi Ueda * The queue hasn't been stopped yet, if the old table type wasn't 1935e6ee8c0bSKiyoshi Ueda * for request-based during suspension. So stop it to prevent 1936e6ee8c0bSKiyoshi Ueda * I/O mapping before resume. 1937e6ee8c0bSKiyoshi Ueda * This must be done before setting the queue restrictions, 1938e6ee8c0bSKiyoshi Ueda * because request-based dm may be run just after the setting. 
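 * I.e. if the old table was bio-based, the queue was left running
 * across the suspend, but a request-based table must not dispatch
 * before resume; the stopped queue is restarted from dm_resume() via
 * start_queue().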
1939e6ee8c0bSKiyoshi Ueda */ 1940e6ee8c0bSKiyoshi Ueda if (dm_table_request_based(t) && !blk_queue_stopped(q)) 1941e6ee8c0bSKiyoshi Ueda stop_queue(q); 1942e6ee8c0bSKiyoshi Ueda 1943e6ee8c0bSKiyoshi Ueda __bind_mempools(md, t); 1944e6ee8c0bSKiyoshi Ueda 1945523d9297SKiyoshi Ueda write_lock_irqsave(&md->map_lock, flags); 19462ca3310eSAlasdair G Kergon md->map = t; 1947754c5fc7SMike Snitzer dm_table_set_restrictions(t, q, limits); 1948523d9297SKiyoshi Ueda write_unlock_irqrestore(&md->map_lock, flags); 19492ca3310eSAlasdair G Kergon 19501da177e4SLinus Torvalds return 0; 19511da177e4SLinus Torvalds } 19521da177e4SLinus Torvalds 19531da177e4SLinus Torvalds static void __unbind(struct mapped_device *md) 19541da177e4SLinus Torvalds { 19551da177e4SLinus Torvalds struct dm_table *map = md->map; 1956523d9297SKiyoshi Ueda unsigned long flags; 19571da177e4SLinus Torvalds 19581da177e4SLinus Torvalds if (!map) 19591da177e4SLinus Torvalds return; 19601da177e4SLinus Torvalds 19611da177e4SLinus Torvalds dm_table_event_callback(map, NULL, NULL); 1962523d9297SKiyoshi Ueda write_lock_irqsave(&md->map_lock, flags); 19631da177e4SLinus Torvalds md->map = NULL; 1964523d9297SKiyoshi Ueda write_unlock_irqrestore(&md->map_lock, flags); 1965d5816876SMikulas Patocka dm_table_destroy(map); 19661da177e4SLinus Torvalds } 19671da177e4SLinus Torvalds 19681da177e4SLinus Torvalds /* 19691da177e4SLinus Torvalds * Constructor for a new device. 19701da177e4SLinus Torvalds */ 19712b06cfffSAlasdair G Kergon int dm_create(int minor, struct mapped_device **result) 19721da177e4SLinus Torvalds { 19731da177e4SLinus Torvalds struct mapped_device *md; 19741da177e4SLinus Torvalds 19752b06cfffSAlasdair G Kergon md = alloc_dev(minor); 19761da177e4SLinus Torvalds if (!md) 19771da177e4SLinus Torvalds return -ENXIO; 19781da177e4SLinus Torvalds 1979784aae73SMilan Broz dm_sysfs_init(md); 1980784aae73SMilan Broz 19811da177e4SLinus Torvalds *result = md; 19821da177e4SLinus Torvalds return 0; 19831da177e4SLinus Torvalds } 19841da177e4SLinus Torvalds 1985637842cfSDavid Teigland static struct mapped_device *dm_find_md(dev_t dev) 19861da177e4SLinus Torvalds { 19871da177e4SLinus Torvalds struct mapped_device *md; 19881da177e4SLinus Torvalds unsigned minor = MINOR(dev); 19891da177e4SLinus Torvalds 19901da177e4SLinus Torvalds if (MAJOR(dev) != _major || minor >= (1 << MINORBITS)) 19911da177e4SLinus Torvalds return NULL; 19921da177e4SLinus Torvalds 1993f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 19941da177e4SLinus Torvalds 19951da177e4SLinus Torvalds md = idr_find(&_minor_idr, minor); 1996fba9f90eSJeff Mahoney if (md && (md == MINOR_ALLOCED || 1997f331c029STejun Heo (MINOR(disk_devt(dm_disk(md))) != minor) || 1998fba9f90eSJeff Mahoney test_bit(DMF_FREEING, &md->flags))) { 1999637842cfSDavid Teigland md = NULL; 2000fba9f90eSJeff Mahoney goto out; 2001fba9f90eSJeff Mahoney } 20021da177e4SLinus Torvalds 2003fba9f90eSJeff Mahoney out: 2004f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 20051da177e4SLinus Torvalds 2006637842cfSDavid Teigland return md; 2007637842cfSDavid Teigland } 2008637842cfSDavid Teigland 2009d229a958SDavid Teigland struct mapped_device *dm_get_md(dev_t dev) 2010d229a958SDavid Teigland { 2011d229a958SDavid Teigland struct mapped_device *md = dm_find_md(dev); 2012d229a958SDavid Teigland 2013d229a958SDavid Teigland if (md) 2014d229a958SDavid Teigland dm_get(md); 2015d229a958SDavid Teigland 2016d229a958SDavid Teigland return md; 2017d229a958SDavid Teigland } 2018d229a958SDavid Teigland 20199ade92a9SAlasdair G Kergon void 
*dm_get_mdptr(struct mapped_device *md) 2020637842cfSDavid Teigland { 20219ade92a9SAlasdair G Kergon return md->interface_ptr; 20221da177e4SLinus Torvalds } 20231da177e4SLinus Torvalds 20241da177e4SLinus Torvalds void dm_set_mdptr(struct mapped_device *md, void *ptr) 20251da177e4SLinus Torvalds { 20261da177e4SLinus Torvalds md->interface_ptr = ptr; 20271da177e4SLinus Torvalds } 20281da177e4SLinus Torvalds 20291da177e4SLinus Torvalds void dm_get(struct mapped_device *md) 20301da177e4SLinus Torvalds { 20311da177e4SLinus Torvalds atomic_inc(&md->holders); 20321da177e4SLinus Torvalds } 20331da177e4SLinus Torvalds 203472d94861SAlasdair G Kergon const char *dm_device_name(struct mapped_device *md) 203572d94861SAlasdair G Kergon { 203672d94861SAlasdair G Kergon return md->name; 203772d94861SAlasdair G Kergon } 203872d94861SAlasdair G Kergon EXPORT_SYMBOL_GPL(dm_device_name); 203972d94861SAlasdair G Kergon 20401da177e4SLinus Torvalds void dm_put(struct mapped_device *md) 20411da177e4SLinus Torvalds { 20421134e5aeSMike Anderson struct dm_table *map; 20431da177e4SLinus Torvalds 2044fba9f90eSJeff Mahoney BUG_ON(test_bit(DMF_FREEING, &md->flags)); 2045fba9f90eSJeff Mahoney 2046f32c10b0SJeff Mahoney if (atomic_dec_and_lock(&md->holders, &_minor_lock)) { 20471134e5aeSMike Anderson map = dm_get_table(md); 2048f331c029STejun Heo idr_replace(&_minor_idr, MINOR_ALLOCED, 2049f331c029STejun Heo MINOR(disk_devt(dm_disk(md)))); 2050fba9f90eSJeff Mahoney set_bit(DMF_FREEING, &md->flags); 2051f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 2052cf222b37SAlasdair G Kergon if (!dm_suspended(md)) { 20531da177e4SLinus Torvalds dm_table_presuspend_targets(map); 20541da177e4SLinus Torvalds dm_table_postsuspend_targets(map); 20551da177e4SLinus Torvalds } 2056784aae73SMilan Broz dm_sysfs_exit(md); 20571134e5aeSMike Anderson dm_table_put(map); 2058a1b51e98SMikulas Patocka __unbind(md); 20591da177e4SLinus Torvalds free_dev(md); 20601da177e4SLinus Torvalds } 20611da177e4SLinus Torvalds } 206279eb885cSEdward Goggin EXPORT_SYMBOL_GPL(dm_put); 20631da177e4SLinus Torvalds 2064401600dfSMikulas Patocka static int dm_wait_for_completion(struct mapped_device *md, int interruptible) 206546125c1cSMilan Broz { 206646125c1cSMilan Broz int r = 0; 2067b44ebeb0SMikulas Patocka DECLARE_WAITQUEUE(wait, current); 2068cec47e3dSKiyoshi Ueda struct request_queue *q = md->queue; 2069cec47e3dSKiyoshi Ueda unsigned long flags; 2070b44ebeb0SMikulas Patocka 2071b44ebeb0SMikulas Patocka dm_unplug_all(md->queue); 2072b44ebeb0SMikulas Patocka 2073b44ebeb0SMikulas Patocka add_wait_queue(&md->wait, &wait); 207446125c1cSMilan Broz 207546125c1cSMilan Broz while (1) { 2076401600dfSMikulas Patocka set_current_state(interruptible); 207746125c1cSMilan Broz 207846125c1cSMilan Broz smp_mb(); 2079cec47e3dSKiyoshi Ueda if (dm_request_based(md)) { 2080cec47e3dSKiyoshi Ueda spin_lock_irqsave(q->queue_lock, flags); 2081cec47e3dSKiyoshi Ueda if (!queue_in_flight(q) && blk_queue_stopped(q)) { 2082cec47e3dSKiyoshi Ueda spin_unlock_irqrestore(q->queue_lock, flags); 2083cec47e3dSKiyoshi Ueda break; 2084cec47e3dSKiyoshi Ueda } 2085cec47e3dSKiyoshi Ueda spin_unlock_irqrestore(q->queue_lock, flags); 2086cec47e3dSKiyoshi Ueda } else if (!atomic_read(&md->pending)) 208746125c1cSMilan Broz break; 208846125c1cSMilan Broz 2089401600dfSMikulas Patocka if (interruptible == TASK_INTERRUPTIBLE && 2090401600dfSMikulas Patocka signal_pending(current)) { 209146125c1cSMilan Broz r = -EINTR; 209246125c1cSMilan Broz break; 209346125c1cSMilan Broz } 209446125c1cSMilan Broz 
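		/* I/O still in flight: sleep until a completion path wakes md->wait. */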
209546125c1cSMilan Broz io_schedule(); 209646125c1cSMilan Broz } 209746125c1cSMilan Broz set_current_state(TASK_RUNNING); 209846125c1cSMilan Broz 2099b44ebeb0SMikulas Patocka remove_wait_queue(&md->wait, &wait); 2100b44ebeb0SMikulas Patocka 210146125c1cSMilan Broz return r; 210246125c1cSMilan Broz } 210346125c1cSMilan Broz 2104531fe963SMikulas Patocka static void dm_flush(struct mapped_device *md) 2105af7e466aSMikulas Patocka { 2106af7e466aSMikulas Patocka dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); 210752b1fd5aSMikulas Patocka 210852b1fd5aSMikulas Patocka bio_init(&md->barrier_bio); 210952b1fd5aSMikulas Patocka md->barrier_bio.bi_bdev = md->bdev; 211052b1fd5aSMikulas Patocka md->barrier_bio.bi_rw = WRITE_BARRIER; 211152b1fd5aSMikulas Patocka __split_and_process_bio(md, &md->barrier_bio); 211252b1fd5aSMikulas Patocka 211352b1fd5aSMikulas Patocka dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); 2114af7e466aSMikulas Patocka } 2115af7e466aSMikulas Patocka 2116af7e466aSMikulas Patocka static void process_barrier(struct mapped_device *md, struct bio *bio) 2117af7e466aSMikulas Patocka { 21185aa2781dSMikulas Patocka md->barrier_error = 0; 21195aa2781dSMikulas Patocka 2120531fe963SMikulas Patocka dm_flush(md); 2121af7e466aSMikulas Patocka 21225aa2781dSMikulas Patocka if (!bio_empty_barrier(bio)) { 2123af7e466aSMikulas Patocka __split_and_process_bio(md, bio); 2124531fe963SMikulas Patocka dm_flush(md); 21255aa2781dSMikulas Patocka } 2126af7e466aSMikulas Patocka 2127af7e466aSMikulas Patocka if (md->barrier_error != DM_ENDIO_REQUEUE) 2128531fe963SMikulas Patocka bio_endio(bio, md->barrier_error); 21292761e95fSMikulas Patocka else { 21302761e95fSMikulas Patocka spin_lock_irq(&md->deferred_lock); 21312761e95fSMikulas Patocka bio_list_add_head(&md->deferred, bio); 21322761e95fSMikulas Patocka spin_unlock_irq(&md->deferred_lock); 21332761e95fSMikulas Patocka } 2134af7e466aSMikulas Patocka } 2135af7e466aSMikulas Patocka 21361da177e4SLinus Torvalds /* 21371da177e4SLinus Torvalds * Process the deferred bios 21381da177e4SLinus Torvalds */ 2139ef208587SMikulas Patocka static void dm_wq_work(struct work_struct *work) 21401da177e4SLinus Torvalds { 2141ef208587SMikulas Patocka struct mapped_device *md = container_of(work, struct mapped_device, 2142ef208587SMikulas Patocka work); 21436d6f10dfSMilan Broz struct bio *c; 21441da177e4SLinus Torvalds 2145ef208587SMikulas Patocka down_write(&md->io_lock); 2146ef208587SMikulas Patocka 21473b00b203SMikulas Patocka while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 2148022c2611SMikulas Patocka spin_lock_irq(&md->deferred_lock); 2149022c2611SMikulas Patocka c = bio_list_pop(&md->deferred); 2150022c2611SMikulas Patocka spin_unlock_irq(&md->deferred_lock); 2151022c2611SMikulas Patocka 2152df12ee99SAlasdair G Kergon if (!c) { 21531eb787ecSAlasdair G Kergon clear_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags); 2154df12ee99SAlasdair G Kergon break; 2155022c2611SMikulas Patocka } 215673d410c0SMilan Broz 21573b00b203SMikulas Patocka up_write(&md->io_lock); 21583b00b203SMikulas Patocka 2159e6ee8c0bSKiyoshi Ueda if (dm_request_based(md)) 2160e6ee8c0bSKiyoshi Ueda generic_make_request(c); 2161e6ee8c0bSKiyoshi Ueda else { 2162af7e466aSMikulas Patocka if (bio_barrier(c)) 2163af7e466aSMikulas Patocka process_barrier(md, c); 2164af7e466aSMikulas Patocka else 2165df12ee99SAlasdair G Kergon __split_and_process_bio(md, c); 2166e6ee8c0bSKiyoshi Ueda } 21673b00b203SMikulas Patocka 21683b00b203SMikulas Patocka down_write(&md->io_lock); 2169df12ee99SAlasdair G Kergon } 
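	/* The deferred list is drained, or a new suspend has begun. */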
2170ef208587SMikulas Patocka 
2171ef208587SMikulas Patocka 	up_write(&md->io_lock);
21721da177e4SLinus Torvalds }
21731da177e4SLinus Torvalds 
21749a1fb464SMikulas Patocka static void dm_queue_flush(struct mapped_device *md)
2175304f3f6aSMilan Broz {
21763b00b203SMikulas Patocka 	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
21773b00b203SMikulas Patocka 	smp_mb__after_clear_bit();
217853d5914fSMikulas Patocka 	queue_work(md->wq, &md->work);
2179304f3f6aSMilan Broz }
2180304f3f6aSMilan Broz 
21811da177e4SLinus Torvalds /*
21821da177e4SLinus Torvalds  * Swap in a new table (destroying old one).
21831da177e4SLinus Torvalds  */
21841da177e4SLinus Torvalds int dm_swap_table(struct mapped_device *md, struct dm_table *table)
21851da177e4SLinus Torvalds {
2186754c5fc7SMike Snitzer 	struct queue_limits limits;
218793c534aeSAlasdair G Kergon 	int r = -EINVAL;
21881da177e4SLinus Torvalds 
2189e61290a4SDaniel Walker 	mutex_lock(&md->suspend_lock);
21901da177e4SLinus Torvalds 
21911da177e4SLinus Torvalds 	/* device must be suspended */
2192cf222b37SAlasdair G Kergon 	if (!dm_suspended(md))
219393c534aeSAlasdair G Kergon 		goto out;
21941da177e4SLinus Torvalds 
2195754c5fc7SMike Snitzer 	r = dm_calculate_queue_limits(table, &limits);
2196754c5fc7SMike Snitzer 	if (r)
2197754c5fc7SMike Snitzer 		goto out;
2198754c5fc7SMike Snitzer 
2199e6ee8c0bSKiyoshi Ueda 	/* cannot change the device type once a table is bound */
2200e6ee8c0bSKiyoshi Ueda 	if (md->map &&
2201e6ee8c0bSKiyoshi Ueda 	    (dm_table_get_type(md->map) != dm_table_get_type(table))) {
2202e6ee8c0bSKiyoshi Ueda 		DMWARN("can't change the device type after a table is bound");
2203e6ee8c0bSKiyoshi Ueda 		goto out;
2204e6ee8c0bSKiyoshi Ueda 	}
2205e6ee8c0bSKiyoshi Ueda 
22065d67aa23SKiyoshi Ueda 	/*
22075d67aa23SKiyoshi Ueda 	 * It is enough that blk_queue_ordered() is called only once when
22085d67aa23SKiyoshi Ueda 	 * the first bio-based table is bound.
22095d67aa23SKiyoshi Ueda 	 *
22105d67aa23SKiyoshi Ueda 	 * This setting should be moved to alloc_dev() when request-based dm
22115d67aa23SKiyoshi Ueda 	 * supports barrier.
22125d67aa23SKiyoshi Ueda */ 22135d67aa23SKiyoshi Ueda if (!md->map && dm_table_bio_based(table)) 22145d67aa23SKiyoshi Ueda blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN, NULL); 22155d67aa23SKiyoshi Ueda 22161da177e4SLinus Torvalds __unbind(md); 2217754c5fc7SMike Snitzer r = __bind(md, table, &limits); 22181da177e4SLinus Torvalds 221993c534aeSAlasdair G Kergon out: 2220e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock); 222193c534aeSAlasdair G Kergon return r; 22221da177e4SLinus Torvalds } 22231da177e4SLinus Torvalds 2224cec47e3dSKiyoshi Ueda static void dm_rq_invalidate_suspend_marker(struct mapped_device *md) 2225cec47e3dSKiyoshi Ueda { 2226cec47e3dSKiyoshi Ueda md->suspend_rq.special = (void *)0x1; 2227cec47e3dSKiyoshi Ueda } 2228cec47e3dSKiyoshi Ueda 2229cec47e3dSKiyoshi Ueda static void dm_rq_abort_suspend(struct mapped_device *md, int noflush) 2230cec47e3dSKiyoshi Ueda { 2231cec47e3dSKiyoshi Ueda struct request_queue *q = md->queue; 2232cec47e3dSKiyoshi Ueda unsigned long flags; 2233cec47e3dSKiyoshi Ueda 2234cec47e3dSKiyoshi Ueda spin_lock_irqsave(q->queue_lock, flags); 2235cec47e3dSKiyoshi Ueda if (!noflush) 2236cec47e3dSKiyoshi Ueda dm_rq_invalidate_suspend_marker(md); 2237cec47e3dSKiyoshi Ueda __start_queue(q); 2238cec47e3dSKiyoshi Ueda spin_unlock_irqrestore(q->queue_lock, flags); 2239cec47e3dSKiyoshi Ueda } 2240cec47e3dSKiyoshi Ueda 2241cec47e3dSKiyoshi Ueda static void dm_rq_start_suspend(struct mapped_device *md, int noflush) 2242cec47e3dSKiyoshi Ueda { 2243cec47e3dSKiyoshi Ueda struct request *rq = &md->suspend_rq; 2244cec47e3dSKiyoshi Ueda struct request_queue *q = md->queue; 2245cec47e3dSKiyoshi Ueda 2246cec47e3dSKiyoshi Ueda if (noflush) 2247cec47e3dSKiyoshi Ueda stop_queue(q); 2248cec47e3dSKiyoshi Ueda else { 2249cec47e3dSKiyoshi Ueda blk_rq_init(q, rq); 2250cec47e3dSKiyoshi Ueda blk_insert_request(q, rq, 0, NULL); 2251cec47e3dSKiyoshi Ueda } 2252cec47e3dSKiyoshi Ueda } 2253cec47e3dSKiyoshi Ueda 2254cec47e3dSKiyoshi Ueda static int dm_rq_suspend_available(struct mapped_device *md, int noflush) 2255cec47e3dSKiyoshi Ueda { 2256cec47e3dSKiyoshi Ueda int r = 1; 2257cec47e3dSKiyoshi Ueda struct request *rq = &md->suspend_rq; 2258cec47e3dSKiyoshi Ueda struct request_queue *q = md->queue; 2259cec47e3dSKiyoshi Ueda unsigned long flags; 2260cec47e3dSKiyoshi Ueda 2261cec47e3dSKiyoshi Ueda if (noflush) 2262cec47e3dSKiyoshi Ueda return r; 2263cec47e3dSKiyoshi Ueda 2264cec47e3dSKiyoshi Ueda /* The marker must be protected by queue lock if it is in use */ 2265cec47e3dSKiyoshi Ueda spin_lock_irqsave(q->queue_lock, flags); 2266cec47e3dSKiyoshi Ueda if (unlikely(rq->ref_count)) { 2267cec47e3dSKiyoshi Ueda /* 2268cec47e3dSKiyoshi Ueda * This can happen, when the previous flush suspend was 2269cec47e3dSKiyoshi Ueda * interrupted, the marker is still in the queue and 2270cec47e3dSKiyoshi Ueda * this flush suspend has been invoked, because we don't 2271cec47e3dSKiyoshi Ueda * remove the marker at the time of suspend interruption. 2272cec47e3dSKiyoshi Ueda * We have only one marker per mapped_device, so we can't 2273cec47e3dSKiyoshi Ueda * start another flush suspend while it is in use. 2274cec47e3dSKiyoshi Ueda */ 2275cec47e3dSKiyoshi Ueda BUG_ON(!rq->special); /* The marker should be invalidated */ 2276cec47e3dSKiyoshi Ueda DMWARN("Invalidating the previous flush suspend is still in" 2277cec47e3dSKiyoshi Ueda " progress. 
Please retry later."); 2278cec47e3dSKiyoshi Ueda r = 0; 2279cec47e3dSKiyoshi Ueda } 2280cec47e3dSKiyoshi Ueda spin_unlock_irqrestore(q->queue_lock, flags); 2281cec47e3dSKiyoshi Ueda 2282cec47e3dSKiyoshi Ueda return r; 2283cec47e3dSKiyoshi Ueda } 2284cec47e3dSKiyoshi Ueda 22851da177e4SLinus Torvalds /* 22861da177e4SLinus Torvalds * Functions to lock and unlock any filesystem running on the 22871da177e4SLinus Torvalds * device. 22881da177e4SLinus Torvalds */ 22892ca3310eSAlasdair G Kergon static int lock_fs(struct mapped_device *md) 22901da177e4SLinus Torvalds { 2291e39e2e95SAlasdair G Kergon int r; 22921da177e4SLinus Torvalds 22931da177e4SLinus Torvalds WARN_ON(md->frozen_sb); 2294dfbe03f6SAlasdair G Kergon 2295db8fef4fSMikulas Patocka md->frozen_sb = freeze_bdev(md->bdev); 2296dfbe03f6SAlasdair G Kergon if (IS_ERR(md->frozen_sb)) { 2297cf222b37SAlasdair G Kergon r = PTR_ERR(md->frozen_sb); 2298e39e2e95SAlasdair G Kergon md->frozen_sb = NULL; 2299e39e2e95SAlasdair G Kergon return r; 2300dfbe03f6SAlasdair G Kergon } 2301dfbe03f6SAlasdair G Kergon 2302aa8d7c2fSAlasdair G Kergon set_bit(DMF_FROZEN, &md->flags); 2303aa8d7c2fSAlasdair G Kergon 23041da177e4SLinus Torvalds return 0; 23051da177e4SLinus Torvalds } 23061da177e4SLinus Torvalds 23072ca3310eSAlasdair G Kergon static void unlock_fs(struct mapped_device *md) 23081da177e4SLinus Torvalds { 2309aa8d7c2fSAlasdair G Kergon if (!test_bit(DMF_FROZEN, &md->flags)) 2310aa8d7c2fSAlasdair G Kergon return; 2311aa8d7c2fSAlasdair G Kergon 2312db8fef4fSMikulas Patocka thaw_bdev(md->bdev, md->frozen_sb); 23131da177e4SLinus Torvalds md->frozen_sb = NULL; 2314aa8d7c2fSAlasdair G Kergon clear_bit(DMF_FROZEN, &md->flags); 23151da177e4SLinus Torvalds } 23161da177e4SLinus Torvalds 23171da177e4SLinus Torvalds /* 23181da177e4SLinus Torvalds * We need to be able to change a mapping table under a mounted 23191da177e4SLinus Torvalds * filesystem. For example we might want to move some data in 23201da177e4SLinus Torvalds * the background. Before the table can be swapped with 23211da177e4SLinus Torvalds * dm_bind_table, dm_suspend must be called to flush any in 23221da177e4SLinus Torvalds * flight bios and ensure that any further io gets deferred. 23231da177e4SLinus Torvalds */ 2324cec47e3dSKiyoshi Ueda /* 2325cec47e3dSKiyoshi Ueda * Suspend mechanism in request-based dm. 2326cec47e3dSKiyoshi Ueda * 2327cec47e3dSKiyoshi Ueda * After the suspend starts, further incoming requests are kept in 2328cec47e3dSKiyoshi Ueda * the request_queue and deferred. 2329cec47e3dSKiyoshi Ueda * Remaining requests in the request_queue at the start of suspend are flushed 2330cec47e3dSKiyoshi Ueda * if it is flush suspend. 2331cec47e3dSKiyoshi Ueda * The suspend completes when the following conditions have been satisfied, 2332cec47e3dSKiyoshi Ueda * so wait for it: 2333cec47e3dSKiyoshi Ueda * 1. q->in_flight is 0 (which means no in_flight request) 2334cec47e3dSKiyoshi Ueda * 2. queue has been stopped (which means no request dispatching) 2335cec47e3dSKiyoshi Ueda * 2336cec47e3dSKiyoshi Ueda * 2337cec47e3dSKiyoshi Ueda * Noflush suspend 2338cec47e3dSKiyoshi Ueda * --------------- 2339cec47e3dSKiyoshi Ueda * Noflush suspend doesn't need to dispatch remaining requests. 2340cec47e3dSKiyoshi Ueda * So stop the queue immediately. Then, wait for all in_flight requests 2341cec47e3dSKiyoshi Ueda * to be completed or requeued. 2342cec47e3dSKiyoshi Ueda * 2343cec47e3dSKiyoshi Ueda * To abort noflush suspend, start the queue. 
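 *
 * In terms of the two completion conditions above, the wait loop in
 * dm_wait_for_completion() effectively polls
 *
 *	!queue_in_flight(q) && blk_queue_stopped(q)
 *
 * under the queue lock before declaring the suspend complete.
 *
 * A typical caller is expected to pair suspend and resume around a
 * table swap, roughly (a sketch only; new_table is a placeholder):
 *
 *	dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
 *	dm_swap_table(md, new_table);
 *	dm_resume(md);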
2344cec47e3dSKiyoshi Ueda  *
2345cec47e3dSKiyoshi Ueda  *
2346cec47e3dSKiyoshi Ueda  * Flush suspend
2347cec47e3dSKiyoshi Ueda  * -------------
2348cec47e3dSKiyoshi Ueda  * Flush suspend needs to dispatch remaining requests. So stop the queue
2349cec47e3dSKiyoshi Ueda  * after the remaining requests are completed. (Requeued requests must
2350cec47e3dSKiyoshi Ueda  * also be re-dispatched and completed. Until then, we can't stop the queue.)
2351cec47e3dSKiyoshi Ueda  *
2352cec47e3dSKiyoshi Ueda  * During flushing the remaining requests, further incoming requests are also
2353cec47e3dSKiyoshi Ueda  * inserted to the same queue. To distinguish which requests are to be
2354cec47e3dSKiyoshi Ueda  * flushed, we insert a marker request to the queue at the time of starting
2355cec47e3dSKiyoshi Ueda  * flush suspend, like a barrier.
2356cec47e3dSKiyoshi Ueda  * The dispatching is blocked when the marker is found on the top of the queue.
2357cec47e3dSKiyoshi Ueda  * And the queue is stopped when all in_flight requests are completed, since
2358cec47e3dSKiyoshi Ueda  * that means the remaining requests are completely flushed.
2359cec47e3dSKiyoshi Ueda  * Then, the marker is removed from the queue.
2360cec47e3dSKiyoshi Ueda  *
2361cec47e3dSKiyoshi Ueda  * To abort flush suspend, we also need to take care of the marker, not only
2362cec47e3dSKiyoshi Ueda  * starting the queue.
2363cec47e3dSKiyoshi Ueda  * We don't remove the marker forcibly from the queue since it's against
2364cec47e3dSKiyoshi Ueda  * the block-layer manner. Instead, we put an invalidated mark on the marker.
2365cec47e3dSKiyoshi Ueda  * When the invalidated marker is found on the top of the queue, it is
2366cec47e3dSKiyoshi Ueda  * immediately removed from the queue, so it doesn't block dispatching.
2367cec47e3dSKiyoshi Ueda  * Because we have only one marker per mapped_device, we can't start another
2368cec47e3dSKiyoshi Ueda  * flush suspend until the invalidated marker is removed from the queue.
2369cec47e3dSKiyoshi Ueda  * So fail and return with -EBUSY in such a case.
2370cec47e3dSKiyoshi Ueda  */
2371a3d77d35SKiyoshi Ueda int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
23721da177e4SLinus Torvalds {
23732ca3310eSAlasdair G Kergon 	struct dm_table *map = NULL;
237446125c1cSMilan Broz 	int r = 0;
2375a3d77d35SKiyoshi Ueda 	int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
23762e93ccc1SKiyoshi Ueda 	int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;
23771da177e4SLinus Torvalds 
2378e61290a4SDaniel Walker 	mutex_lock(&md->suspend_lock);
23792ca3310eSAlasdair G Kergon 
238073d410c0SMilan Broz 	if (dm_suspended(md)) {
238173d410c0SMilan Broz 		r = -EINVAL;
2382d287483dSAlasdair G Kergon 		goto out_unlock;
238373d410c0SMilan Broz 	}
23841da177e4SLinus Torvalds 
2385cec47e3dSKiyoshi Ueda 	if (dm_request_based(md) && !dm_rq_suspend_available(md, noflush)) {
2386cec47e3dSKiyoshi Ueda 		r = -EBUSY;
2387cec47e3dSKiyoshi Ueda 		goto out_unlock;
2388cec47e3dSKiyoshi Ueda 	}
2389cec47e3dSKiyoshi Ueda 
23901da177e4SLinus Torvalds 	map = dm_get_table(md);
2391cf222b37SAlasdair G Kergon 
23922e93ccc1SKiyoshi Ueda 	/*
23932e93ccc1SKiyoshi Ueda 	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
23942e93ccc1SKiyoshi Ueda 	 * This flag is cleared before dm_suspend returns.
23952e93ccc1SKiyoshi Ueda 	 */
23962e93ccc1SKiyoshi Ueda 	if (noflush)
23972e93ccc1SKiyoshi Ueda 		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
23982e93ccc1SKiyoshi Ueda 
2399436d4108SAlasdair G Kergon 	/* This does not get reverted if there's an error later.
*/ 24001da177e4SLinus Torvalds dm_table_presuspend_targets(map); 24011da177e4SLinus Torvalds 24022e93ccc1SKiyoshi Ueda /* 24036d6f10dfSMilan Broz * Flush I/O to the device. noflush supersedes do_lockfs, 24046d6f10dfSMilan Broz * because lock_fs() needs to flush I/Os. 24052e93ccc1SKiyoshi Ueda */ 240632a926daSMikulas Patocka if (!noflush && do_lockfs) { 24072ca3310eSAlasdair G Kergon r = lock_fs(md); 24082ca3310eSAlasdair G Kergon if (r) 24092ca3310eSAlasdair G Kergon goto out; 2410aa8d7c2fSAlasdair G Kergon } 24111da177e4SLinus Torvalds 24121da177e4SLinus Torvalds /* 24133b00b203SMikulas Patocka * Here we must make sure that no processes are submitting requests 24143b00b203SMikulas Patocka * to target drivers i.e. no one may be executing 24153b00b203SMikulas Patocka * __split_and_process_bio. This is called from dm_request and 24163b00b203SMikulas Patocka * dm_wq_work. 24173b00b203SMikulas Patocka * 24183b00b203SMikulas Patocka * To get all processes out of __split_and_process_bio in dm_request, 24193b00b203SMikulas Patocka * we take the write lock. To prevent any process from reentering 24203b00b203SMikulas Patocka * __split_and_process_bio from dm_request, we set 24213b00b203SMikulas Patocka * DMF_QUEUE_IO_TO_THREAD. 24223b00b203SMikulas Patocka * 24233b00b203SMikulas Patocka * To quiesce the thread (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND 24243b00b203SMikulas Patocka * and call flush_workqueue(md->wq). flush_workqueue will wait until 24253b00b203SMikulas Patocka * dm_wq_work exits and DMF_BLOCK_IO_FOR_SUSPEND will prevent any 24263b00b203SMikulas Patocka * further calls to __split_and_process_bio from dm_wq_work. 24271da177e4SLinus Torvalds */ 24282ca3310eSAlasdair G Kergon down_write(&md->io_lock); 24291eb787ecSAlasdair G Kergon set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 24301eb787ecSAlasdair G Kergon set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags); 24312ca3310eSAlasdair G Kergon up_write(&md->io_lock); 24321da177e4SLinus Torvalds 24333b00b203SMikulas Patocka flush_workqueue(md->wq); 24343b00b203SMikulas Patocka 2435cec47e3dSKiyoshi Ueda if (dm_request_based(md)) 2436cec47e3dSKiyoshi Ueda dm_rq_start_suspend(md, noflush); 2437cec47e3dSKiyoshi Ueda 24381da177e4SLinus Torvalds /* 24393b00b203SMikulas Patocka * At this point no more requests are entering target request routines. 24403b00b203SMikulas Patocka * We call dm_wait_for_completion to wait for all existing requests 24413b00b203SMikulas Patocka * to finish. 24421da177e4SLinus Torvalds */ 2443401600dfSMikulas Patocka r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE); 24441da177e4SLinus Torvalds 24452ca3310eSAlasdair G Kergon down_write(&md->io_lock); 24466d6f10dfSMilan Broz if (noflush) 2447022c2611SMikulas Patocka clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 244894d6351eSMilan Broz up_write(&md->io_lock); 24492e93ccc1SKiyoshi Ueda 24501da177e4SLinus Torvalds /* were we interrupted ? */ 245146125c1cSMilan Broz if (r < 0) { 24529a1fb464SMikulas Patocka dm_queue_flush(md); 245373d410c0SMilan Broz 2454cec47e3dSKiyoshi Ueda if (dm_request_based(md)) 2455cec47e3dSKiyoshi Ueda dm_rq_abort_suspend(md, noflush); 2456cec47e3dSKiyoshi Ueda 24572ca3310eSAlasdair G Kergon unlock_fs(md); 24582e93ccc1SKiyoshi Ueda goto out; /* pushback list is already flushed, so skip flush */ 24592ca3310eSAlasdair G Kergon } 24602ca3310eSAlasdair G Kergon 24613b00b203SMikulas Patocka /* 24623b00b203SMikulas Patocka * If dm_wait_for_completion returned 0, the device is completely 24633b00b203SMikulas Patocka * quiescent now. 
24791da177e4SLinus Torvalds int dm_resume(struct mapped_device *md)
24801da177e4SLinus Torvalds {
2481cf222b37SAlasdair G Kergon 	int r = -EINVAL;
2482cf222b37SAlasdair G Kergon 	struct dm_table *map = NULL;
24831da177e4SLinus Torvalds 
2484e61290a4SDaniel Walker 	mutex_lock(&md->suspend_lock);
24852ca3310eSAlasdair G Kergon 	if (!dm_suspended(md))
2486cf222b37SAlasdair G Kergon 		goto out;
2487cf222b37SAlasdair G Kergon 
2488cf222b37SAlasdair G Kergon 	map = dm_get_table(md);
24892ca3310eSAlasdair G Kergon 	if (!map || !dm_table_get_size(map))
2490cf222b37SAlasdair G Kergon 		goto out;
24911da177e4SLinus Torvalds 
24928757b776SMilan Broz 	r = dm_table_resume_targets(map);
24938757b776SMilan Broz 	if (r)
24948757b776SMilan Broz 		goto out;
24952ca3310eSAlasdair G Kergon 
24969a1fb464SMikulas Patocka 	dm_queue_flush(md);
24972ca3310eSAlasdair G Kergon 
2498cec47e3dSKiyoshi Ueda 	/*
2499cec47e3dSKiyoshi Ueda 	 * Flushing deferred I/Os must be done after targets are resumed
2500cec47e3dSKiyoshi Ueda 	 * so that the mapping of targets can work correctly.
2501cec47e3dSKiyoshi Ueda 	 * Request-based dm queues the deferred I/Os in its request_queue.
2502cec47e3dSKiyoshi Ueda 	 */
2503cec47e3dSKiyoshi Ueda 	if (dm_request_based(md))
2504cec47e3dSKiyoshi Ueda 		start_queue(md->queue);
2505cec47e3dSKiyoshi Ueda 
25062ca3310eSAlasdair G Kergon 	unlock_fs(md);
25072ca3310eSAlasdair G Kergon 
25082ca3310eSAlasdair G Kergon 	clear_bit(DMF_SUSPENDED, &md->flags);
25092ca3310eSAlasdair G Kergon 
25101da177e4SLinus Torvalds 	dm_table_unplug_all(map);
2511cf222b37SAlasdair G Kergon 	r = 0;
2512cf222b37SAlasdair G Kergon out:
2513cf222b37SAlasdair G Kergon 	dm_table_put(map);
2514e61290a4SDaniel Walker 	mutex_unlock(&md->suspend_lock);
25152ca3310eSAlasdair G Kergon 
2516cf222b37SAlasdair G Kergon 	return r;
25171da177e4SLinus Torvalds }
25181da177e4SLinus Torvalds 
25191da177e4SLinus Torvalds /*-----------------------------------------------------------------
25201da177e4SLinus Torvalds  * Event notification.
25211da177e4SLinus Torvalds  *---------------------------------------------------------------*/
252260935eb2SMilan Broz void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
252360935eb2SMilan Broz 		       unsigned cookie)
252469267a30SAlasdair G Kergon {
252560935eb2SMilan Broz 	char udev_cookie[DM_COOKIE_LENGTH];
252660935eb2SMilan Broz 	char *envp[] = { udev_cookie, NULL };
252760935eb2SMilan Broz 
252860935eb2SMilan Broz 	if (!cookie)
252960935eb2SMilan Broz 		kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
253060935eb2SMilan Broz 	else {
253160935eb2SMilan Broz 		snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
253260935eb2SMilan Broz 			 DM_COOKIE_ENV_VAR_NAME, cookie);
253360935eb2SMilan Broz 		kobject_uevent_env(&disk_to_dev(md->disk)->kobj, action, envp);
253460935eb2SMilan Broz 	}
253569267a30SAlasdair G Kergon }
253669267a30SAlasdair G Kergon 
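/*
 * Example of the resulting event (illustrative): dm_kobject_uevent(md,
 * KOBJ_CHANGE, 12345) emits a CHANGE uevent whose environment carries
 * "DM_COOKIE=12345", letting udev pair the event with the ioctl that set
 * the cookie. A zero cookie falls back to a plain uevent without the
 * extra variable.
 */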
25377a8c3d3bSMike Anderson uint32_t dm_next_uevent_seq(struct mapped_device *md)
25387a8c3d3bSMike Anderson {
25397a8c3d3bSMike Anderson 	return atomic_add_return(1, &md->uevent_seq);
25407a8c3d3bSMike Anderson }
25417a8c3d3bSMike Anderson 
25421da177e4SLinus Torvalds uint32_t dm_get_event_nr(struct mapped_device *md)
25431da177e4SLinus Torvalds {
25441da177e4SLinus Torvalds 	return atomic_read(&md->event_nr);
25451da177e4SLinus Torvalds }
25461da177e4SLinus Torvalds 
25471da177e4SLinus Torvalds int dm_wait_event(struct mapped_device *md, int event_nr)
25481da177e4SLinus Torvalds {
25491da177e4SLinus Torvalds 	return wait_event_interruptible(md->eventq,
25501da177e4SLinus Torvalds 			(event_nr != atomic_read(&md->event_nr)));
25511da177e4SLinus Torvalds }
25521da177e4SLinus Torvalds 
25537a8c3d3bSMike Anderson void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
25547a8c3d3bSMike Anderson {
25557a8c3d3bSMike Anderson 	unsigned long flags;
25567a8c3d3bSMike Anderson 
25577a8c3d3bSMike Anderson 	spin_lock_irqsave(&md->uevent_lock, flags);
25587a8c3d3bSMike Anderson 	list_add(elist, &md->uevent_list);
25597a8c3d3bSMike Anderson 	spin_unlock_irqrestore(&md->uevent_lock, flags);
25607a8c3d3bSMike Anderson }
25617a8c3d3bSMike Anderson 
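/*
 * Sketch of the intended wait pattern (illustrative; the ioctl layer
 * does roughly this when userspace waits for events):
 *
 *	uint32_t nr = dm_get_event_nr(md);	<-- snapshot the counter
 *	... report nr to userspace ...
 *	r = dm_wait_event(md, nr);		<-- sleep until it changes
 *
 * Since dm_wait_event() is built on wait_event_interruptible(), it
 * returns -ERESTARTSYS if a signal arrives before the counter moves.
 */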
25621da177e4SLinus Torvalds /*
25631da177e4SLinus Torvalds  * The gendisk is only valid as long as you have a reference
25641da177e4SLinus Torvalds  * count on 'md'.
25651da177e4SLinus Torvalds  */
25661da177e4SLinus Torvalds struct gendisk *dm_disk(struct mapped_device *md)
25671da177e4SLinus Torvalds {
25681da177e4SLinus Torvalds 	return md->disk;
25691da177e4SLinus Torvalds }
25701da177e4SLinus Torvalds 
2571784aae73SMilan Broz struct kobject *dm_kobject(struct mapped_device *md)
2572784aae73SMilan Broz {
2573784aae73SMilan Broz 	return &md->kobj;
2574784aae73SMilan Broz }
2575784aae73SMilan Broz 
2576784aae73SMilan Broz /*
2577784aae73SMilan Broz  * struct mapped_device should not be exported outside of dm.c,
2578784aae73SMilan Broz  * so use this check to verify that kobj is part of the md structure.
2579784aae73SMilan Broz  */
2580784aae73SMilan Broz struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
2581784aae73SMilan Broz {
2582784aae73SMilan Broz 	struct mapped_device *md;
2583784aae73SMilan Broz 
2584784aae73SMilan Broz 	md = container_of(kobj, struct mapped_device, kobj);
2585784aae73SMilan Broz 	if (&md->kobj != kobj)
2586784aae73SMilan Broz 		return NULL;
2587784aae73SMilan Broz 
25884d89b7b4SMilan Broz 	if (test_bit(DMF_FREEING, &md->flags) ||
25894d89b7b4SMilan Broz 	    test_bit(DMF_DELETING, &md->flags))
25904d89b7b4SMilan Broz 		return NULL;
25914d89b7b4SMilan Broz 
2592784aae73SMilan Broz 	dm_get(md);
2593784aae73SMilan Broz 	return md;
2594784aae73SMilan Broz }
2595784aae73SMilan Broz 
25961da177e4SLinus Torvalds int dm_suspended(struct mapped_device *md)
25971da177e4SLinus Torvalds {
25981da177e4SLinus Torvalds 	return test_bit(DMF_SUSPENDED, &md->flags);
25991da177e4SLinus Torvalds }
26001da177e4SLinus Torvalds 
26012e93ccc1SKiyoshi Ueda int dm_noflush_suspending(struct dm_target *ti)
26022e93ccc1SKiyoshi Ueda {
26032e93ccc1SKiyoshi Ueda 	struct mapped_device *md = dm_table_get_md(ti->table);
26042e93ccc1SKiyoshi Ueda 	int r = __noflush_suspending(md);
26052e93ccc1SKiyoshi Ueda 
26062e93ccc1SKiyoshi Ueda 	dm_put(md);
26072e93ccc1SKiyoshi Ueda 
26082e93ccc1SKiyoshi Ueda 	return r;
26092e93ccc1SKiyoshi Ueda }
26102e93ccc1SKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_noflush_suspending);
26112e93ccc1SKiyoshi Ueda 
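/*
 * Illustrative target-side use of dm_noflush_suspending() (compare
 * dm-mpath): while a noflush suspend is in progress, a target may ask
 * for an I/O to be pushed back instead of failed. no_valid_path below is
 * a hypothetical condition, not a real field:
 *
 *	if (no_valid_path && dm_noflush_suspending(ti))
 *		return DM_ENDIO_REQUEUE;
 */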
2612e6ee8c0bSKiyoshi Ueda struct dm_md_mempools *dm_alloc_md_mempools(unsigned type)
2613e6ee8c0bSKiyoshi Ueda {
2614e6ee8c0bSKiyoshi Ueda 	struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL);
2615e6ee8c0bSKiyoshi Ueda 
2616e6ee8c0bSKiyoshi Ueda 	if (!pools)
2617e6ee8c0bSKiyoshi Ueda 		return NULL;
2618e6ee8c0bSKiyoshi Ueda 
2619e6ee8c0bSKiyoshi Ueda 	pools->io_pool = (type == DM_TYPE_BIO_BASED) ?
2620e6ee8c0bSKiyoshi Ueda 			 mempool_create_slab_pool(MIN_IOS, _io_cache) :
2621e6ee8c0bSKiyoshi Ueda 			 mempool_create_slab_pool(MIN_IOS, _rq_bio_info_cache);
2622e6ee8c0bSKiyoshi Ueda 	if (!pools->io_pool)
2623e6ee8c0bSKiyoshi Ueda 		goto free_pools_and_out;
2624e6ee8c0bSKiyoshi Ueda 
2625e6ee8c0bSKiyoshi Ueda 	pools->tio_pool = (type == DM_TYPE_BIO_BASED) ?
2626e6ee8c0bSKiyoshi Ueda 			  mempool_create_slab_pool(MIN_IOS, _tio_cache) :
2627e6ee8c0bSKiyoshi Ueda 			  mempool_create_slab_pool(MIN_IOS, _rq_tio_cache);
2628e6ee8c0bSKiyoshi Ueda 	if (!pools->tio_pool)
2629e6ee8c0bSKiyoshi Ueda 		goto free_io_pool_and_out;
2630e6ee8c0bSKiyoshi Ueda 
2631e6ee8c0bSKiyoshi Ueda 	pools->bs = (type == DM_TYPE_BIO_BASED) ?
2632e6ee8c0bSKiyoshi Ueda 		    bioset_create(16, 0) : bioset_create(MIN_IOS, 0);
2633e6ee8c0bSKiyoshi Ueda 	if (!pools->bs)
2634e6ee8c0bSKiyoshi Ueda 		goto free_tio_pool_and_out;
2635e6ee8c0bSKiyoshi Ueda 
2636e6ee8c0bSKiyoshi Ueda 	return pools;
2637e6ee8c0bSKiyoshi Ueda 
2638e6ee8c0bSKiyoshi Ueda free_tio_pool_and_out:
2639e6ee8c0bSKiyoshi Ueda 	mempool_destroy(pools->tio_pool);
2640e6ee8c0bSKiyoshi Ueda 
2641e6ee8c0bSKiyoshi Ueda free_io_pool_and_out:
2642e6ee8c0bSKiyoshi Ueda 	mempool_destroy(pools->io_pool);
2643e6ee8c0bSKiyoshi Ueda 
2644e6ee8c0bSKiyoshi Ueda free_pools_and_out:
2645e6ee8c0bSKiyoshi Ueda 	kfree(pools);
2646e6ee8c0bSKiyoshi Ueda 
2647e6ee8c0bSKiyoshi Ueda 	return NULL;
2648e6ee8c0bSKiyoshi Ueda }
2649e6ee8c0bSKiyoshi Ueda 
2650e6ee8c0bSKiyoshi Ueda void dm_free_md_mempools(struct dm_md_mempools *pools)
2651e6ee8c0bSKiyoshi Ueda {
2652e6ee8c0bSKiyoshi Ueda 	if (!pools)
2653e6ee8c0bSKiyoshi Ueda 		return;
2654e6ee8c0bSKiyoshi Ueda 
2655e6ee8c0bSKiyoshi Ueda 	if (pools->io_pool)
2656e6ee8c0bSKiyoshi Ueda 		mempool_destroy(pools->io_pool);
2657e6ee8c0bSKiyoshi Ueda 
2658e6ee8c0bSKiyoshi Ueda 	if (pools->tio_pool)
2659e6ee8c0bSKiyoshi Ueda 		mempool_destroy(pools->tio_pool);
2660e6ee8c0bSKiyoshi Ueda 
2661e6ee8c0bSKiyoshi Ueda 	if (pools->bs)
2662e6ee8c0bSKiyoshi Ueda 		bioset_free(pools->bs);
2663e6ee8c0bSKiyoshi Ueda 
2664e6ee8c0bSKiyoshi Ueda 	kfree(pools);
2665e6ee8c0bSKiyoshi Ueda }
2666e6ee8c0bSKiyoshi Ueda 
26671da177e4SLinus Torvalds static struct block_device_operations dm_blk_dops = {
26681da177e4SLinus Torvalds 	.open = dm_blk_open,
26691da177e4SLinus Torvalds 	.release = dm_blk_close,
2670aa129a22SMilan Broz 	.ioctl = dm_blk_ioctl,
26713ac51e74SDarrick J. Wong 	.getgeo = dm_blk_getgeo,
26721da177e4SLinus Torvalds 	.owner = THIS_MODULE
26731da177e4SLinus Torvalds };
26741da177e4SLinus Torvalds 
26751da177e4SLinus Torvalds EXPORT_SYMBOL(dm_get_mapinfo);
26761da177e4SLinus Torvalds 
26771da177e4SLinus Torvalds /*
26781da177e4SLinus Torvalds  * module hooks
26791da177e4SLinus Torvalds  */
26801da177e4SLinus Torvalds module_init(dm_init);
26811da177e4SLinus Torvalds module_exit(dm_exit);
26821da177e4SLinus Torvalds 
26831da177e4SLinus Torvalds module_param(major, uint, 0);
26841da177e4SLinus Torvalds MODULE_PARM_DESC(major, "The major number of the device mapper");
26851da177e4SLinus Torvalds MODULE_DESCRIPTION(DM_NAME " driver");
26861da177e4SLinus Torvalds MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
26871da177e4SLinus Torvalds MODULE_LICENSE("GPL");
2688
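/*
 * Note on the "major" parameter above (illustrative usage):
 *
 *	modprobe dm_mod major=240
 *
 * pins the device-mapper block major at load time, while the default of
 * 0 lets register_blkdev() allocate a free major dynamically.
 */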