/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched/signal.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/uio.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/pr.h>
#include <linux/refcount.h>

#define DM_MSG_PREFIX "core"

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_IDR(_minor_idr);

static DEFINE_SPINLOCK(_minor_lock);

static void do_deferred_remove(struct work_struct *w);

static DECLARE_WORK(deferred_remove_work, do_deferred_remove);

static struct workqueue_struct *deferred_remove_workqueue;

atomic_t dm_global_event_nr = ATOMIC_INIT(0);
DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);

void dm_issue_global_event(void)
{
	atomic_inc(&dm_global_event_nr);
	wake_up(&dm_global_eventq);
}

/*
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	blk_status_t status;
	atomic_t io_count;
	struct bio *bio;
	unsigned long start_time;
	spinlock_t endio_lock;
	struct dm_stats_aux stats_aux;
};

#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7

#define DM_NUMA_NODE NUMA_NO_NODE
static int dm_numa_node = DM_NUMA_NODE;

/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
	mempool_t *io_pool;
	struct bio_set *bs;
};

struct table_device {
	struct list_head list;
	refcount_t count;
	struct dm_dev dm_dev;
};

static struct kmem_cache *_io_cache;
static struct kmem_cache *_rq_tio_cache;
static struct kmem_cache *_rq_cache;

/*
 * Bio-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_BIO_BASED_IOS		16
static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;

static int __dm_get_module_param_int(int *module_param, int min, int max)
{
	int param = ACCESS_ONCE(*module_param);
	int modified_param = 0;
	bool modified = true;

	if (param < min)
		modified_param = min;
	else if (param > max)
		modified_param = max;
	else
		modified = false;

	if (modified) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned __dm_get_module_param(unsigned *module_param,
			       unsigned def, unsigned max)
{
	unsigned param = ACCESS_ONCE(*module_param);
	unsigned modified_param = 0;

	if (!param)
		modified_param = def;
	else if (param > max)
		modified_param = max;

	if (modified_param) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned dm_get_reserved_bio_based_ios(void)
{
	return __dm_get_module_param(&reserved_bio_based_ios,
				     RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);

static unsigned dm_get_numa_node(void)
{
	return __dm_get_module_param_int(&dm_numa_node,
					 DM_NUMA_NODE, num_online_nodes() - 1);
}

static int __init local_init(void)
{
	int r = -ENOMEM;

	/* allocate a slab for the dm_ios */
	_io_cache = KMEM_CACHE(dm_io, 0);
	if (!_io_cache)
		return r;

	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
	if (!_rq_tio_cache)
		goto out_free_io_cache;

	_rq_cache = kmem_cache_create("dm_old_clone_request", sizeof(struct request),
				      __alignof__(struct request), 0, NULL);
	if (!_rq_cache)
		goto out_free_rq_tio_cache;

	r = dm_uevent_init();
	if (r)
		goto out_free_rq_cache;

	deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
	if (!deferred_remove_workqueue) {
		r = -ENOMEM;
		goto out_uevent_exit;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_free_workqueue;

	if (!_major)
		_major = r;

	return 0;

out_free_workqueue:
	destroy_workqueue(deferred_remove_workqueue);
out_uevent_exit:
	dm_uevent_exit();
out_free_rq_cache:
	kmem_cache_destroy(_rq_cache);
out_free_rq_tio_cache:
	kmem_cache_destroy(_rq_tio_cache);
out_free_io_cache:
	kmem_cache_destroy(_io_cache);

	return r;
}

static void local_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(deferred_remove_workqueue);

	kmem_cache_destroy(_rq_cache);
	kmem_cache_destroy(_rq_tio_cache);
	kmem_cache_destroy(_io_cache);
	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_io_init,
	dm_kcopyd_init,
	dm_interface_init,
	dm_statistics_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_io_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
	dm_statistics_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();

	/*
	 * Should be empty by this point.
	 */
	idr_destroy(&_minor_idr);
}

/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{
	return test_bit(DMF_DELETING, &md->flags);
}

static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);
out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}

static void dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = disk->private_data;
	if (WARN_ON(!md))
		goto out;

	if (atomic_dec_and_test(&md->open_count) &&
	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
		queue_work(deferred_remove_workqueue, &deferred_remove_work);

	dm_put(md);
out:
	spin_unlock(&_minor_lock);
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md)) {
		r = -EBUSY;
		if (mark_deferred)
			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
		r = -EEXIST;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

int dm_cancel_deferred_remove(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (test_bit(DMF_DELETING, &md->flags))
		r = -EBUSY;
	else
		clear_bit(DMF_DEFERRED_REMOVE, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static void do_deferred_remove(struct work_struct *w)
{
	dm_deferred_remove();
}

sector_t dm_get_size(struct mapped_device *md)
{
	return get_capacity(md->disk);
}

struct request_queue *dm_get_md_queue(struct mapped_device *md)
{
	return md->queue;
}

struct dm_stats *dm_get_stats(struct mapped_device *md)
{
	return &md->stats;
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

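/*
 * Look up the sole target's block device for an ioctl and take a
 * reference on it with bdgrab().  A target that is temporarily unable
 * to service the ioctl (prepare_ioctl returning -ENOTCONN, e.g. a
 * multipath device with no usable path) is retried until a fatal
 * signal is pending.
 */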
static int dm_grab_bdev_for_ioctl(struct mapped_device *md,
				  struct block_device **bdev,
				  fmode_t *mode)
{
	struct dm_target *tgt;
	struct dm_table *map;
	int srcu_idx, r;

retry:
	r = -ENOTTY;
	map = dm_get_live_table(md, &srcu_idx);
	if (!map || !dm_table_get_size(map))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		goto out;

	tgt = dm_table_get_target(map, 0);
	if (!tgt->type->prepare_ioctl)
		goto out;

	if (dm_suspended_md(md)) {
		r = -EAGAIN;
		goto out;
	}

	r = tgt->type->prepare_ioctl(tgt, bdev, mode);
	if (r < 0)
		goto out;

	bdgrab(*bdev);
	dm_put_live_table(md, srcu_idx);
	return r;

out:
	dm_put_live_table(md, srcu_idx);
	if (r == -ENOTCONN && !fatal_signal_pending(current)) {
		msleep(10);
		goto retry;
	}
	return r;
}

static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	int r;

	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
	if (r < 0)
		return r;

	if (r > 0) {
		/*
		 * Target determined this ioctl is being issued against a
		 * subset of the parent bdev; require extra privileges.
		 */
		if (!capable(CAP_SYS_RAWIO)) {
			DMWARN_LIMIT(
				"%s: sending ioctl %x to DM device without required privilege.",
				current->comm, cmd);
			r = -ENOIOCTLCMD;
			goto out;
		}
	}

	r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
out:
	bdput(bdev);
	return r;
}

static struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static void free_tio(struct dm_target_io *tio)
{
	bio_put(&tio->clone);
}

int md_in_flight(struct mapped_device *md)
{
	return atomic_read(&md->pending[READ]) +
	       atomic_read(&md->pending[WRITE]);
}

static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	int cpu;
	int rw = bio_data_dir(bio);

	io->start_time = jiffies;

	cpu = part_stat_lock();
	part_round_stats(md->queue, cpu, &dm_disk(md)->part0);
	part_stat_unlock();
	atomic_set(&dm_disk(md)->part0.in_flight[rw],
		   atomic_inc_return(&md->pending[rw]));

	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio_data_dir(bio),
				    bio->bi_iter.bi_sector, bio_sectors(bio),
				    false, 0, &io->stats_aux);
}

static void end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	unsigned long duration = jiffies - io->start_time;
	int pending;
	int rw = bio_data_dir(bio);

	generic_end_io_acct(md->queue, rw, &dm_disk(md)->part0, io->start_time);

	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio_data_dir(bio),
				    bio->bi_iter.bi_sector, bio_sectors(bio),
				    true, duration, &io->stats_aux);

	/*
	 * After this is decremented the bio must not be touched if it is
	 * a flush.
	 */
	pending = atomic_dec_return(&md->pending[rw]);
	atomic_set(&dm_disk(md)->part0.in_flight[rw], pending);
	pending += atomic_read(&md->pending[rw^0x1]);

	/* nudge anyone waiting on suspend queue */
	if (!pending)
		wake_up(&md->wait);
}

/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&md->deferred_lock, flags);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irqrestore(&md->deferred_lock, flags);
	queue_work(md->wq, &md->work);
}

/*
 * Everyone (including functions in this file), should use this
 * function to access the md->map field, and make sure they call
 * dm_put_live_table() when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
{
	*srcu_idx = srcu_read_lock(&md->io_barrier);

	return srcu_dereference(md->map, &md->io_barrier);
}

void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
{
	srcu_read_unlock(&md->io_barrier, srcu_idx);
}

void dm_sync_table(struct mapped_device *md)
{
	synchronize_srcu(&md->io_barrier);
	synchronize_rcu_expedited();
}

/*
 * A fast alternative to dm_get_live_table/dm_put_live_table.
 * The caller must not block between these two functions.
 */
static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
{
	rcu_read_lock();
	return rcu_dereference(md->map);
}

static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
{
	rcu_read_unlock();
}

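/*
 * Illustrative use of the fast variant (a sketch; the caller and the
 * work done between the calls are hypothetical):
 *
 *	struct dm_table *map = dm_get_live_table_fast(md);
 *
 *	... inspect map without sleeping ...
 *
 *	dm_put_live_table_fast(md);
 */
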
/*
 * Open a table device so we can use it as a map destination.
 */
static int open_table_device(struct table_device *td, dev_t dev,
			     struct mapped_device *md)
{
	static char *_claim_ptr = "I belong to device-mapper";
	struct block_device *bdev;

	int r;

	BUG_ON(td->dm_dev.bdev);

	bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _claim_ptr);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	r = bd_link_disk_holder(bdev, dm_disk(md));
	if (r) {
		blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
		return r;
	}

	td->dm_dev.bdev = bdev;
	td->dm_dev.dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	return 0;
}

/*
 * Close a table device that we've been using.
 */
static void close_table_device(struct table_device *td, struct mapped_device *md)
{
	if (!td->dm_dev.bdev)
		return;

	bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
	blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
	put_dax(td->dm_dev.dax_dev);
	td->dm_dev.bdev = NULL;
	td->dm_dev.dax_dev = NULL;
}

static struct table_device *find_table_device(struct list_head *l, dev_t dev,
					      fmode_t mode) {
	struct table_device *td;

	list_for_each_entry(td, l, list)
		if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
			return td;

	return NULL;
}

int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
			struct dm_dev **result) {
	int r;
	struct table_device *td;

	mutex_lock(&md->table_devices_lock);
	td = find_table_device(&md->table_devices, dev, mode);
	if (!td) {
		td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
		if (!td) {
			mutex_unlock(&md->table_devices_lock);
			return -ENOMEM;
		}

		td->dm_dev.mode = mode;
		td->dm_dev.bdev = NULL;

		if ((r = open_table_device(td, dev, md))) {
			mutex_unlock(&md->table_devices_lock);
			kfree(td);
			return r;
		}

		format_dev_t(td->dm_dev.name, dev);

		refcount_set(&td->count, 1);
		list_add(&td->list, &md->table_devices);
	} else {
		refcount_inc(&td->count);
	}
	mutex_unlock(&md->table_devices_lock);

	*result = &td->dm_dev;
	return 0;
}
EXPORT_SYMBOL_GPL(dm_get_table_device);

void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
{
	struct table_device *td = container_of(d, struct table_device, dm_dev);

	mutex_lock(&md->table_devices_lock);
	if (refcount_dec_and_test(&td->count)) {
		close_table_device(td, md);
		list_del(&td->list);
		kfree(td);
	}
	mutex_unlock(&md->table_devices_lock);
}
EXPORT_SYMBOL(dm_put_table_device);

static void free_table_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct table_device *td = list_entry(tmp, struct table_device, list);

		DMWARN("dm_destroy: %s still exists with %d references",
		       td->dm_dev.name, refcount_read(&td->count));
		kfree(td);
	}
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

/*-----------------------------------------------------------------
 * CRUD START:
 *   A more elegant solution that uses the queue merge fn is in the
 *   works; unfortunately there are a couple of changes to the block
 *   layer that I want to make for this.  So in the interests of
 *   getting something for people to use I give you this clearly
 *   demarcated crap.
 *---------------------------------------------------------------*/

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, blk_status_t error)
{
	unsigned long flags;
	blk_status_t io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;

	/* Push-back supersedes any I/O errors */
	if (unlikely(error)) {
		spin_lock_irqsave(&io->endio_lock, flags);
		if (!(io->status == BLK_STS_DM_REQUEUE &&
		      __noflush_suspending(md)))
			io->status = error;
		spin_unlock_irqrestore(&io->endio_lock, flags);
	}

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->status == BLK_STS_DM_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md))
				bio_list_add_head(&md->deferred, io->bio);
			else
				/* noflush suspend was interrupted. */
				io->status = BLK_STS_IOERR;
			spin_unlock_irqrestore(&md->deferred_lock, flags);
		}

		io_error = io->status;
		bio = io->bio;
		end_io_acct(io);
		free_io(md, io);

		if (io_error == BLK_STS_DM_REQUEUE)
			return;

		if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
			/*
			 * Preflush done for flush with data, reissue
			 * without REQ_PREFLUSH.
			 */
			bio->bi_opf &= ~REQ_PREFLUSH;
			queue_io(md, bio);
		} else {
			/* done with normal IO or empty flush */
			bio->bi_status = io_error;
			bio_endio(bio);
		}
	}
}

void disable_write_same(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE SAME, disable it */
	limits->max_write_same_sectors = 0;
}

void disable_write_zeroes(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE ZEROES, disable it */
	limits->max_write_zeroes_sectors = 0;
}

static void clone_endio(struct bio *bio)
{
	blk_status_t error = bio->bi_status;
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (unlikely(error == BLK_STS_TARGET)) {
		if (bio_op(bio) == REQ_OP_WRITE_SAME &&
		    !bio->bi_disk->queue->limits.max_write_same_sectors)
			disable_write_same(md);
		if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
		    !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
			disable_write_zeroes(md);
	}

	if (endio) {
		int r = endio(tio->ti, bio, &error);
		switch (r) {
		case DM_ENDIO_REQUEUE:
			error = BLK_STS_DM_REQUEUE;
			/*FALLTHRU*/
		case DM_ENDIO_DONE:
			break;
		case DM_ENDIO_INCOMPLETE:
			/* The target will handle the io */
			return;
		default:
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	free_tio(tio);
	dec_pending(io, error);
}

/*
 * Return maximum size of I/O possible at the supplied sector up to the current
 * target boundary.
 */
static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
{
	sector_t target_offset = dm_target_offset(ti, sector);

	return ti->len - target_offset;
}

static sector_t max_io_len(sector_t sector, struct dm_target *ti)
{
	sector_t len = max_io_len_target_boundary(sector, ti);
	sector_t offset, max_len;

	/*
	 * Does the target need to split even further?
	 */
	if (ti->max_io_len) {
		offset = dm_target_offset(ti, sector);
		if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
			max_len = sector_div(offset, ti->max_io_len);
		else
			max_len = offset & (ti->max_io_len - 1);
		max_len = ti->max_io_len - max_len;

		if (len > max_len)
			len = max_len;
	}

	return len;
}

int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
{
	if (len > UINT_MAX) {
		DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
		      (unsigned long long)len, UINT_MAX);
		ti->error = "Maximum size of target IO is too large";
		return -EINVAL;
	}

	ti->max_io_len = (uint32_t) len;

	return 0;
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);

static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
						sector_t sector, int *srcu_idx)
{
	struct dm_table *map;
	struct dm_target *ti;

	map = dm_get_live_table(md, srcu_idx);
	if (!map)
		return NULL;

	ti = dm_table_find_target(map, sector);
	if (!dm_target_is_valid(ti))
		return NULL;

	return ti;
}
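
/*
 * Both DAX entry points below follow the same pattern: convert the
 * page offset to a sector, look up the live target under SRCU, forward
 * to the target's hook (if any), then drop the table reference.
 */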
static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
				 long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long len, ret = -EIO;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->direct_access)
		goto out;
	len = max_io_len(sector, ti) / PAGE_SECTORS;
	if (len < 1)
		goto out;
	nr_pages = min(len, nr_pages);
	if (ti->type->direct_access)
		ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);

out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
				    void *addr, size_t bytes, struct iov_iter *i)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long ret = 0;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->dax_copy_from_iter) {
		ret = copy_from_iter(addr, bytes, i);
		goto out;
	}
	ret = ti->type->dax_copy_from_iter(ti, pgoff, addr, bytes, i);
out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

/*
 * A target may call dm_accept_partial_bio only from the map routine.  It is
 * allowed for all bio types except REQ_PREFLUSH.
 *
 * dm_accept_partial_bio informs the dm that the target only wants to process
 * additional n_sectors sectors of the bio and the rest of the data should be
 * sent in a next bio.
 *
 * A diagram that explains the arithmetic:
 * +--------------------+---------------+-------+
 * |         1          |       2       |   3   |
 * +--------------------+---------------+-------+
 *
 * <-------------- *tio->len_ptr --------------->
 *                      <------- bi_size ------->
 *                      <-- n_sectors -->
 *
 * Region 1 was already iterated over with bio_advance or similar function.
 *	(it may be empty if the target doesn't use bio_advance)
 * Region 2 is the remaining bio size that the target wants to process.
 *	(it may be empty if region 1 is non-empty, although there is no reason
 *	 to make it empty)
 * The target requires that region 3 is to be sent in the next bio.
 *
 * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
 * the partially processed part (the sum of regions 1+2) must be the same for all
 * copies of the bio.
 */
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
	BUG_ON(bio->bi_opf & REQ_PREFLUSH);
	BUG_ON(bi_size > *tio->len_ptr);
	BUG_ON(n_sectors > bi_size);
	*tio->len_ptr -= bi_size - n_sectors;
	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
}
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);

/*
 * The zone descriptors obtained with a zone report indicate
 * zone positions within the target device. The zone descriptors
 * must be remapped to match their position within the dm device.
 * A target may call dm_remap_zone_report after completion of a
 * REQ_OP_ZONE_REPORT bio to remap the zone descriptors obtained
 * from the target device mapping to the dm device.
 */
void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
{
#ifdef CONFIG_BLK_DEV_ZONED
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	struct bio *report_bio = tio->io->bio;
	struct blk_zone_report_hdr *hdr = NULL;
	struct blk_zone *zone;
	unsigned int nr_rep = 0;
	unsigned int ofst;
	struct bio_vec bvec;
	struct bvec_iter iter;
	void *addr;

	if (bio->bi_status)
		return;

	/*
	 * Remap the start sector of the reported zones. For sequential zones,
	 * also remap the write pointer position.
	 */
	bio_for_each_segment(bvec, report_bio, iter) {
		addr = kmap_atomic(bvec.bv_page);

		/* Remember the report header in the first page */
		if (!hdr) {
			hdr = addr;
			ofst = sizeof(struct blk_zone_report_hdr);
		} else
			ofst = 0;

		/* Set zones start sector */
		while (hdr->nr_zones && ofst < bvec.bv_len) {
			zone = addr + ofst;
			if (zone->start >= start + ti->len) {
				hdr->nr_zones = 0;
				break;
			}
			zone->start = zone->start + ti->begin - start;
			if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL) {
				if (zone->cond == BLK_ZONE_COND_FULL)
					zone->wp = zone->start + zone->len;
				else if (zone->cond == BLK_ZONE_COND_EMPTY)
					zone->wp = zone->start;
				else
					zone->wp = zone->wp + ti->begin - start;
			}
			ofst += sizeof(struct blk_zone);
			hdr->nr_zones--;
			nr_rep++;
		}

		if (addr != hdr)
			kunmap_atomic(addr);

		if (!hdr->nr_zones)
			break;
	}

	if (hdr) {
		hdr->nr_zones = nr_rep;
		kunmap_atomic(hdr);
	}

	bio_advance(report_bio, report_bio->bi_iter.bi_size);

#else /* !CONFIG_BLK_DEV_ZONED */
	bio->bi_status = BLK_STS_NOTSUPP;
#endif
}
EXPORT_SYMBOL_GPL(dm_remap_zone_report);
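
/*
 * Sketch of the expected call site in a target's end_io hook (the
 * function and context names here are hypothetical; "mc->start" stands
 * for wherever the target records its start offset on the underlying
 * device):
 *
 *	static int my_end_io(struct dm_target *ti, struct bio *bio,
 *			     blk_status_t *error)
 *	{
 *		struct my_ctx *mc = ti->private;
 *
 *		if (bio_op(bio) == REQ_OP_ZONE_REPORT && !*error)
 *			dm_remap_zone_report(ti, bio, mc->start);
 *		return DM_ENDIO_DONE;
 *	}
 */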

/*
 * Flush current->bio_list when the target map method blocks.
 * This fixes deadlocks in snapshot and possibly in other targets.
 */
struct dm_offload {
	struct blk_plug plug;
	struct blk_plug_cb cb;
};

static void flush_current_bio_list(struct blk_plug_cb *cb, bool from_schedule)
{
	struct dm_offload *o = container_of(cb, struct dm_offload, cb);
	struct bio_list list;
	struct bio *bio;
	int i;

	INIT_LIST_HEAD(&o->cb.list);

	if (unlikely(!current->bio_list))
		return;

	for (i = 0; i < 2; i++) {
		list = current->bio_list[i];
		bio_list_init(&current->bio_list[i]);

		while ((bio = bio_list_pop(&list))) {
			struct bio_set *bs = bio->bi_pool;
			if (unlikely(!bs) || bs == fs_bio_set ||
			    !bs->rescue_workqueue) {
				bio_list_add(&current->bio_list[i], bio);
				continue;
			}

			spin_lock(&bs->rescue_lock);
			bio_list_add(&bs->rescue_list, bio);
			queue_work(bs->rescue_workqueue, &bs->rescue_work);
			spin_unlock(&bs->rescue_lock);
		}
	}
}

static void dm_offload_start(struct dm_offload *o)
{
	blk_start_plug(&o->plug);
	o->cb.callback = flush_current_bio_list;
	list_add(&o->cb.list, &current->plug->cb_list);
}

static void dm_offload_end(struct dm_offload *o)
{
	list_del(&o->cb.list);
	blk_finish_plug(&o->plug);
}

static void __map_bio(struct dm_target_io *tio)
{
	int r;
	sector_t sector;
	struct dm_offload o;
	struct bio *clone = &tio->clone;
	struct dm_target *ti = tio->ti;

	clone->bi_end_io = clone_endio;

	/*
	 * Map the clone.  If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
11851da177e4SLinus Torvalds */ 11861da177e4SLinus Torvalds atomic_inc(&tio->io->io_count); 11874f024f37SKent Overstreet sector = clone->bi_iter.bi_sector; 1188d67a5f4bSMikulas Patocka 1189d67a5f4bSMikulas Patocka dm_offload_start(&o); 11907de3ee57SMikulas Patocka r = ti->type->map(ti, clone); 1191d67a5f4bSMikulas Patocka dm_offload_end(&o); 1192d67a5f4bSMikulas Patocka 1193846785e6SChristoph Hellwig switch (r) { 1194846785e6SChristoph Hellwig case DM_MAPIO_SUBMITTED: 1195846785e6SChristoph Hellwig break; 1196846785e6SChristoph Hellwig case DM_MAPIO_REMAPPED: 11971da177e4SLinus Torvalds /* the bio has been remapped so dispatch it */ 119874d46992SChristoph Hellwig trace_block_bio_remap(clone->bi_disk->queue, clone, 119974d46992SChristoph Hellwig bio_dev(tio->io->bio), sector); 12001da177e4SLinus Torvalds generic_make_request(clone); 1201846785e6SChristoph Hellwig break; 1202846785e6SChristoph Hellwig case DM_MAPIO_KILL: 12034e4cbee9SChristoph Hellwig dec_pending(tio->io, BLK_STS_IOERR); 12044e4cbee9SChristoph Hellwig free_tio(tio); 12054e4cbee9SChristoph Hellwig break; 1206846785e6SChristoph Hellwig case DM_MAPIO_REQUEUE: 12074e4cbee9SChristoph Hellwig dec_pending(tio->io, BLK_STS_DM_REQUEUE); 1208cfae7529SMike Snitzer free_tio(tio); 1209846785e6SChristoph Hellwig break; 1210846785e6SChristoph Hellwig default: 121145cbcd79SKiyoshi Ueda DMWARN("unimplemented target map return value: %d", r); 121245cbcd79SKiyoshi Ueda BUG(); 12131da177e4SLinus Torvalds } 12141da177e4SLinus Torvalds } 12151da177e4SLinus Torvalds 12161da177e4SLinus Torvalds struct clone_info { 12171da177e4SLinus Torvalds struct mapped_device *md; 12181da177e4SLinus Torvalds struct dm_table *map; 12191da177e4SLinus Torvalds struct bio *bio; 12201da177e4SLinus Torvalds struct dm_io *io; 12211da177e4SLinus Torvalds sector_t sector; 1222e0d6609aSMikulas Patocka unsigned sector_count; 12231da177e4SLinus Torvalds }; 12241da177e4SLinus Torvalds 1225e0d6609aSMikulas Patocka static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len) 1226bd2a49b8SAlasdair G Kergon { 12274f024f37SKent Overstreet bio->bi_iter.bi_sector = sector; 12284f024f37SKent Overstreet bio->bi_iter.bi_size = to_bytes(len); 12291da177e4SLinus Torvalds } 12301da177e4SLinus Torvalds 12311da177e4SLinus Torvalds /* 12321da177e4SLinus Torvalds * Creates a bio that consists of range of complete bvecs. 12331da177e4SLinus Torvalds */ 1234c80914e8SMike Snitzer static int clone_bio(struct dm_target_io *tio, struct bio *bio, 12351c3b13e6SKent Overstreet sector_t sector, unsigned len) 12361da177e4SLinus Torvalds { 1237dba14160SMikulas Patocka struct bio *clone = &tio->clone; 12381da177e4SLinus Torvalds 12391c3b13e6SKent Overstreet __bio_clone_fast(clone, bio); 12409c47008dSMartin K. 
Petersen 1241e2460f2aSMikulas Patocka if (unlikely(bio_integrity(bio) != NULL)) { 1242e2460f2aSMikulas Patocka int r; 1243e2460f2aSMikulas Patocka 1244e2460f2aSMikulas Patocka if (unlikely(!dm_target_has_integrity(tio->ti->type) && 1245e2460f2aSMikulas Patocka !dm_target_passes_integrity(tio->ti->type))) { 1246e2460f2aSMikulas Patocka DMWARN("%s: the target %s doesn't support integrity data.", 1247e2460f2aSMikulas Patocka dm_device_name(tio->io->md), 1248e2460f2aSMikulas Patocka tio->ti->type->name); 1249e2460f2aSMikulas Patocka return -EIO; 1250e2460f2aSMikulas Patocka } 1251e2460f2aSMikulas Patocka 1252e2460f2aSMikulas Patocka r = bio_integrity_clone(clone, bio, GFP_NOIO); 1253c80914e8SMike Snitzer if (r < 0) 1254c80914e8SMike Snitzer return r; 1255c80914e8SMike Snitzer } 12561c3b13e6SKent Overstreet 1257264c869dSDamien Le Moal if (bio_op(bio) != REQ_OP_ZONE_REPORT) 12581c3b13e6SKent Overstreet bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector)); 12591c3b13e6SKent Overstreet clone->bi_iter.bi_size = to_bytes(len); 12601c3b13e6SKent Overstreet 1261e2460f2aSMikulas Patocka if (unlikely(bio_integrity(bio) != NULL)) 1262fbd08e76SDmitry Monakhov bio_integrity_trim(clone); 1263c80914e8SMike Snitzer 1264c80914e8SMike Snitzer return 0; 12651da177e4SLinus Torvalds } 12661da177e4SLinus Torvalds 12679015df24SAlasdair G Kergon static struct dm_target_io *alloc_tio(struct clone_info *ci, 126899778273SJunichi Nomura struct dm_target *ti, 126955a62eefSAlasdair G Kergon unsigned target_bio_nr) 1270f9ab94ceSMikulas Patocka { 1271dba14160SMikulas Patocka struct dm_target_io *tio; 1272dba14160SMikulas Patocka struct bio *clone; 1273dba14160SMikulas Patocka 127499778273SJunichi Nomura clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs); 1275dba14160SMikulas Patocka tio = container_of(clone, struct dm_target_io, clone); 1276f9ab94ceSMikulas Patocka 1277f9ab94ceSMikulas Patocka tio->io = ci->io; 1278f9ab94ceSMikulas Patocka tio->ti = ti; 127955a62eefSAlasdair G Kergon tio->target_bio_nr = target_bio_nr; 12809015df24SAlasdair G Kergon 12819015df24SAlasdair G Kergon return tio; 12829015df24SAlasdair G Kergon } 12839015df24SAlasdair G Kergon 128414fe594dSAlasdair G Kergon static void __clone_and_map_simple_bio(struct clone_info *ci, 128514fe594dSAlasdair G Kergon struct dm_target *ti, 12861dd40c3eSMikulas Patocka unsigned target_bio_nr, unsigned *len) 12879015df24SAlasdair G Kergon { 128899778273SJunichi Nomura struct dm_target_io *tio = alloc_tio(ci, ti, target_bio_nr); 1289dba14160SMikulas Patocka struct bio *clone = &tio->clone; 12909015df24SAlasdair G Kergon 12911dd40c3eSMikulas Patocka tio->len_ptr = len; 12921dd40c3eSMikulas Patocka 12931c3b13e6SKent Overstreet __bio_clone_fast(clone, ci->bio); 1294bd2a49b8SAlasdair G Kergon if (len) 12951dd40c3eSMikulas Patocka bio_setup_sector(clone, ci->sector, *len); 1296f9ab94ceSMikulas Patocka 1297bd2a49b8SAlasdair G Kergon __map_bio(tio); 1298f9ab94ceSMikulas Patocka } 1299f9ab94ceSMikulas Patocka 130014fe594dSAlasdair G Kergon static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti, 13011dd40c3eSMikulas Patocka unsigned num_bios, unsigned *len) 130206a426ceSMike Snitzer { 130355a62eefSAlasdair G Kergon unsigned target_bio_nr; 130406a426ceSMike Snitzer 130555a62eefSAlasdair G Kergon for (target_bio_nr = 0; target_bio_nr < num_bios; target_bio_nr++) 130614fe594dSAlasdair G Kergon __clone_and_map_simple_bio(ci, ti, target_bio_nr, len); 130706a426ceSMike Snitzer } 130806a426ceSMike Snitzer 130914fe594dSAlasdair G Kergon static int 
__send_empty_flush(struct clone_info *ci) 1310f9ab94ceSMikulas Patocka { 131106a426ceSMike Snitzer unsigned target_nr = 0; 1312f9ab94ceSMikulas Patocka struct dm_target *ti; 1313f9ab94ceSMikulas Patocka 1314b372d360SMike Snitzer BUG_ON(bio_has_data(ci->bio)); 1315f9ab94ceSMikulas Patocka while ((ti = dm_table_get_target(ci->map, target_nr++))) 13161dd40c3eSMikulas Patocka __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL); 1317f9ab94ceSMikulas Patocka 1318f9ab94ceSMikulas Patocka return 0; 1319f9ab94ceSMikulas Patocka } 1320f9ab94ceSMikulas Patocka 1321c80914e8SMike Snitzer static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti, 13221dd40c3eSMikulas Patocka sector_t sector, unsigned *len) 13235ae89a87SMike Snitzer { 1324dba14160SMikulas Patocka struct bio *bio = ci->bio; 13255ae89a87SMike Snitzer struct dm_target_io *tio; 1326b0d8ed4dSAlasdair G Kergon unsigned target_bio_nr; 1327b0d8ed4dSAlasdair G Kergon unsigned num_target_bios = 1; 1328c80914e8SMike Snitzer int r = 0; 13295ae89a87SMike Snitzer 1330b0d8ed4dSAlasdair G Kergon /* 1331b0d8ed4dSAlasdair G Kergon * Does the target want to receive duplicate copies of the bio? 1332b0d8ed4dSAlasdair G Kergon */ 1333b0d8ed4dSAlasdair G Kergon if (bio_data_dir(bio) == WRITE && ti->num_write_bios) 1334b0d8ed4dSAlasdair G Kergon num_target_bios = ti->num_write_bios(ti, bio); 1335e4c93811SAlasdair G Kergon 1336b0d8ed4dSAlasdair G Kergon for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) { 133799778273SJunichi Nomura tio = alloc_tio(ci, ti, target_bio_nr); 13381dd40c3eSMikulas Patocka tio->len_ptr = len; 1339c80914e8SMike Snitzer r = clone_bio(tio, bio, sector, *len); 1340072623deSMikulas Patocka if (r < 0) { 1341cfae7529SMike Snitzer free_tio(tio); 1342c80914e8SMike Snitzer break; 1343072623deSMikulas Patocka } 1344bd2a49b8SAlasdair G Kergon __map_bio(tio); 13455ae89a87SMike Snitzer } 1346c80914e8SMike Snitzer 1347c80914e8SMike Snitzer return r; 1348b0d8ed4dSAlasdair G Kergon } 13495ae89a87SMike Snitzer 135055a62eefSAlasdair G Kergon typedef unsigned (*get_num_bios_fn)(struct dm_target *ti); 135123508a96SMike Snitzer 135255a62eefSAlasdair G Kergon static unsigned get_num_discard_bios(struct dm_target *ti) 135323508a96SMike Snitzer { 135455a62eefSAlasdair G Kergon return ti->num_discard_bios; 135523508a96SMike Snitzer } 135623508a96SMike Snitzer 135755a62eefSAlasdair G Kergon static unsigned get_num_write_same_bios(struct dm_target *ti) 135823508a96SMike Snitzer { 135955a62eefSAlasdair G Kergon return ti->num_write_same_bios; 136023508a96SMike Snitzer } 136123508a96SMike Snitzer 1362ac62d620SChristoph Hellwig static unsigned get_num_write_zeroes_bios(struct dm_target *ti) 1363ac62d620SChristoph Hellwig { 1364ac62d620SChristoph Hellwig return ti->num_write_zeroes_bios; 1365ac62d620SChristoph Hellwig } 1366ac62d620SChristoph Hellwig 136723508a96SMike Snitzer typedef bool (*is_split_required_fn)(struct dm_target *ti); 136823508a96SMike Snitzer 136923508a96SMike Snitzer static bool is_split_required_for_discard(struct dm_target *ti) 137023508a96SMike Snitzer { 137155a62eefSAlasdair G Kergon return ti->split_discard_bios; 137223508a96SMike Snitzer } 137323508a96SMike Snitzer 137414fe594dSAlasdair G Kergon static int __send_changing_extent_only(struct clone_info *ci, 137555a62eefSAlasdair G Kergon get_num_bios_fn get_num_bios, 137623508a96SMike Snitzer is_split_required_fn is_split_required) 13775ae89a87SMike Snitzer { 13785ae89a87SMike Snitzer struct dm_target *ti; 1379e0d6609aSMikulas Patocka unsigned 
len; 138055a62eefSAlasdair G Kergon unsigned num_bios; 13815ae89a87SMike Snitzer 1382a79245b3SMike Snitzer do { 13835ae89a87SMike Snitzer ti = dm_table_find_target(ci->map, ci->sector); 13845ae89a87SMike Snitzer if (!dm_target_is_valid(ti)) 13855ae89a87SMike Snitzer return -EIO; 13865ae89a87SMike Snitzer 13875ae89a87SMike Snitzer /* 138823508a96SMike Snitzer * Even though the device advertised support for this type of 138923508a96SMike Snitzer * request, that does not mean every target supports it, and 1390936688d7SMike Snitzer * reconfiguration might also have changed that since the 13915ae89a87SMike Snitzer * check was performed. 13925ae89a87SMike Snitzer */ 139355a62eefSAlasdair G Kergon num_bios = get_num_bios ? get_num_bios(ti) : 0; 139455a62eefSAlasdair G Kergon if (!num_bios) 13955ae89a87SMike Snitzer return -EOPNOTSUPP; 13965ae89a87SMike Snitzer 139723508a96SMike Snitzer if (is_split_required && !is_split_required(ti)) 1398e0d6609aSMikulas Patocka len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti)); 13997acf0277SMikulas Patocka else 1400e0d6609aSMikulas Patocka len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti)); 14015ae89a87SMike Snitzer 14021dd40c3eSMikulas Patocka __send_duplicate_bios(ci, ti, num_bios, &len); 14035ae89a87SMike Snitzer 1404a79245b3SMike Snitzer ci->sector += len; 1405a79245b3SMike Snitzer } while (ci->sector_count -= len); 14065ae89a87SMike Snitzer 14075ae89a87SMike Snitzer return 0; 14085ae89a87SMike Snitzer } 14095ae89a87SMike Snitzer 141014fe594dSAlasdair G Kergon static int __send_discard(struct clone_info *ci) 141123508a96SMike Snitzer { 141214fe594dSAlasdair G Kergon return __send_changing_extent_only(ci, get_num_discard_bios, 141323508a96SMike Snitzer is_split_required_for_discard); 141423508a96SMike Snitzer } 141523508a96SMike Snitzer 141614fe594dSAlasdair G Kergon static int __send_write_same(struct clone_info *ci) 141723508a96SMike Snitzer { 141814fe594dSAlasdair G Kergon return __send_changing_extent_only(ci, get_num_write_same_bios, NULL); 141923508a96SMike Snitzer } 142023508a96SMike Snitzer 1421ac62d620SChristoph Hellwig static int __send_write_zeroes(struct clone_info *ci) 1422ac62d620SChristoph Hellwig { 1423ac62d620SChristoph Hellwig return __send_changing_extent_only(ci, get_num_write_zeroes_bios, NULL); 1424ac62d620SChristoph Hellwig } 1425ac62d620SChristoph Hellwig 1426e4c93811SAlasdair G Kergon /* 1427e4c93811SAlasdair G Kergon * Select the correct strategy for processing a non-flush bio. 
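 *
 * For example (illustrative): a REQ_OP_DISCARD bio fans out through
 * __send_discard() across every target region it spans, while a plain
 * read/write falls through to __clone_and_map_data_bio() for the single
 * target that owns ci->sector.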
1428e4c93811SAlasdair G Kergon */ 1429e4c93811SAlasdair G Kergon static int __split_and_process_non_flush(struct clone_info *ci) 1430e4c93811SAlasdair G Kergon { 1431e4c93811SAlasdair G Kergon struct bio *bio = ci->bio; 1432e4c93811SAlasdair G Kergon struct dm_target *ti; 14331c3b13e6SKent Overstreet unsigned len; 1434c80914e8SMike Snitzer int r; 1435e4c93811SAlasdair G Kergon 1436e6047149SMike Christie if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) 1437e4c93811SAlasdair G Kergon return __send_discard(ci); 1438e6047149SMike Christie else if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME)) 1439e4c93811SAlasdair G Kergon return __send_write_same(ci); 1440ac62d620SChristoph Hellwig else if (unlikely(bio_op(bio) == REQ_OP_WRITE_ZEROES)) 1441ac62d620SChristoph Hellwig return __send_write_zeroes(ci); 1442e4c93811SAlasdair G Kergon 1443e4c93811SAlasdair G Kergon ti = dm_table_find_target(ci->map, ci->sector); 1444e4c93811SAlasdair G Kergon if (!dm_target_is_valid(ti)) 1445e4c93811SAlasdair G Kergon return -EIO; 1446e4c93811SAlasdair G Kergon 1447264c869dSDamien Le Moal if (bio_op(bio) == REQ_OP_ZONE_REPORT) 1448264c869dSDamien Le Moal len = ci->sector_count; 1449264c869dSDamien Le Moal else 1450264c869dSDamien Le Moal len = min_t(sector_t, max_io_len(ci->sector, ti), 1451264c869dSDamien Le Moal ci->sector_count); 1452e4c93811SAlasdair G Kergon 1453c80914e8SMike Snitzer r = __clone_and_map_data_bio(ci, ti, ci->sector, &len); 1454c80914e8SMike Snitzer if (r < 0) 1455c80914e8SMike Snitzer return r; 1456e4c93811SAlasdair G Kergon 1457e4c93811SAlasdair G Kergon ci->sector += len; 1458e4c93811SAlasdair G Kergon ci->sector_count -= len; 1459e4c93811SAlasdair G Kergon 1460e4c93811SAlasdair G Kergon return 0; 1461e4c93811SAlasdair G Kergon } 1462e4c93811SAlasdair G Kergon 1463e4c93811SAlasdair G Kergon /* 146414fe594dSAlasdair G Kergon * Entry point to split a bio into clones and submit them to the targets. 
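 *
 * Worked example (illustrative): a 2048-sector write that begins 1024
 * sectors before a target boundary is handled in two passes of
 * __split_and_process_non_flush() -- the first pass clones and maps the
 * 1024 sectors that fit the first target (len capped by max_io_len()),
 * the second maps the remaining 1024 sectors to the next target.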
14651da177e4SLinus Torvalds */ 146683d5e5b0SMikulas Patocka static void __split_and_process_bio(struct mapped_device *md, 146783d5e5b0SMikulas Patocka struct dm_table *map, struct bio *bio) 14681da177e4SLinus Torvalds { 14691da177e4SLinus Torvalds struct clone_info ci; 1470512875bdSJun'ichi Nomura int error = 0; 14711da177e4SLinus Torvalds 147283d5e5b0SMikulas Patocka if (unlikely(!map)) { 1473f0b9a450SMikulas Patocka bio_io_error(bio); 1474f0b9a450SMikulas Patocka return; 1475f0b9a450SMikulas Patocka } 1476692d0eb9SMikulas Patocka 147783d5e5b0SMikulas Patocka ci.map = map; 14781da177e4SLinus Torvalds ci.md = md; 14791da177e4SLinus Torvalds ci.io = alloc_io(md); 14804e4cbee9SChristoph Hellwig ci.io->status = 0; 14811da177e4SLinus Torvalds atomic_set(&ci.io->io_count, 1); 14821da177e4SLinus Torvalds ci.io->bio = bio; 14831da177e4SLinus Torvalds ci.io->md = md; 1484f88fb981SKiyoshi Ueda spin_lock_init(&ci.io->endio_lock); 14854f024f37SKent Overstreet ci.sector = bio->bi_iter.bi_sector; 14861da177e4SLinus Torvalds 14873eaf840eSJun'ichi "Nick" Nomura start_io_acct(ci.io); 1488bd2a49b8SAlasdair G Kergon 14891eff9d32SJens Axboe if (bio->bi_opf & REQ_PREFLUSH) { 1490b372d360SMike Snitzer ci.bio = &ci.md->flush_bio; 1491b372d360SMike Snitzer ci.sector_count = 0; 149214fe594dSAlasdair G Kergon error = __send_empty_flush(&ci); 1493b372d360SMike Snitzer /* dec_pending submits any data associated with flush */ 1494a4aa5e56SDamien Le Moal } else if (bio_op(bio) == REQ_OP_ZONE_RESET) { 1495a4aa5e56SDamien Le Moal ci.bio = bio; 1496a4aa5e56SDamien Le Moal ci.sector_count = 0; 1497a4aa5e56SDamien Le Moal error = __split_and_process_non_flush(&ci); 1498b372d360SMike Snitzer } else { 14996a8736d1STejun Heo ci.bio = bio; 1500f6fccb12SMilan Broz ci.sector_count = bio_sectors(bio); 1501512875bdSJun'ichi Nomura while (ci.sector_count && !error) 150214fe594dSAlasdair G Kergon error = __split_and_process_non_flush(&ci); 1503d87f4c14STejun Heo } 15041da177e4SLinus Torvalds 15051da177e4SLinus Torvalds /* drop the extra reference count */ 150654385bf7SBart Van Assche dec_pending(ci.io, errno_to_blk_status(error)); 15079e4e5f87SMilan Broz } 15089e4e5f87SMilan Broz /*----------------------------------------------------------------- 15091da177e4SLinus Torvalds * CRUD END 15101da177e4SLinus Torvalds *---------------------------------------------------------------*/ 15111da177e4SLinus Torvalds 15121da177e4SLinus Torvalds /* 15131da177e4SLinus Torvalds * The request function that takes an incoming bio, splits it as needed 15141da177e4SLinus Torvalds * and remaps the clones to the targets of the live table.
15151da177e4SLinus Torvalds */ 1516dece1635SJens Axboe static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio) 15171da177e4SLinus Torvalds { 151812f03a49SKevin Corry int rw = bio_data_dir(bio); 15191da177e4SLinus Torvalds struct mapped_device *md = q->queuedata; 152083d5e5b0SMikulas Patocka int srcu_idx; 152183d5e5b0SMikulas Patocka struct dm_table *map; 15221da177e4SLinus Torvalds 152383d5e5b0SMikulas Patocka map = dm_get_live_table(md, &srcu_idx); 15241da177e4SLinus Torvalds 1525d62e26b3SJens Axboe generic_start_io_acct(q, rw, bio_sectors(bio), &dm_disk(md)->part0); 152612f03a49SKevin Corry 15276a8736d1STejun Heo /* if we're suspended, we have to queue this io for later */ 15286a8736d1STejun Heo if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { 152983d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 15301da177e4SLinus Torvalds 15311eff9d32SJens Axboe if (!(bio->bi_opf & REQ_RAHEAD)) 153292c63902SMikulas Patocka queue_io(md, bio); 15336a8736d1STejun Heo else 15346a8736d1STejun Heo bio_io_error(bio); 1535dece1635SJens Axboe return BLK_QC_T_NONE; 15361da177e4SLinus Torvalds } 15371da177e4SLinus Torvalds 153883d5e5b0SMikulas Patocka __split_and_process_bio(md, map, bio); 153983d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 1540dece1635SJens Axboe return BLK_QC_T_NONE; 1541cec47e3dSKiyoshi Ueda } 1542cec47e3dSKiyoshi Ueda 15431da177e4SLinus Torvalds static int dm_any_congested(void *congested_data, int bdi_bits) 15441da177e4SLinus Torvalds { 15458a57dfc6SChandra Seetharaman int r = bdi_bits; 15468a57dfc6SChandra Seetharaman struct mapped_device *md = congested_data; 15478a57dfc6SChandra Seetharaman struct dm_table *map; 15481da177e4SLinus Torvalds 15491eb787ecSAlasdair G Kergon if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 1550e522c039SMike Snitzer if (dm_request_based(md)) { 1551cec47e3dSKiyoshi Ueda /* 1552e522c039SMike Snitzer * With request-based DM we only need to check the 1553e522c039SMike Snitzer * top-level queue for congestion. 1554cec47e3dSKiyoshi Ueda */ 1555dc3b17ccSJan Kara r = md->queue->backing_dev_info->wb.state & bdi_bits; 1556e522c039SMike Snitzer } else { 1557e522c039SMike Snitzer map = dm_get_live_table_fast(md); 1558e522c039SMike Snitzer if (map) 15591da177e4SLinus Torvalds r = dm_table_any_congested(map, bdi_bits); 156083d5e5b0SMikulas Patocka dm_put_live_table_fast(md); 15618a57dfc6SChandra Seetharaman } 1562e522c039SMike Snitzer } 15638a57dfc6SChandra Seetharaman 15641da177e4SLinus Torvalds return r; 15651da177e4SLinus Torvalds } 15661da177e4SLinus Torvalds 15671da177e4SLinus Torvalds /*----------------------------------------------------------------- 15681da177e4SLinus Torvalds * An IDR is used to keep track of allocated minor numbers. 15691da177e4SLinus Torvalds *---------------------------------------------------------------*/ 15702b06cfffSAlasdair G Kergon static void free_minor(int minor) 15711da177e4SLinus Torvalds { 1572f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 15731da177e4SLinus Torvalds idr_remove(&_minor_idr, minor); 1574f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 15751da177e4SLinus Torvalds } 15761da177e4SLinus Torvalds 15771da177e4SLinus Torvalds /* 15781da177e4SLinus Torvalds * See if the device with a specific minor # is free. 
15791da177e4SLinus Torvalds */ 1580cf13ab8eSFrederik Deweerdt static int specific_minor(int minor) 15811da177e4SLinus Torvalds { 1582c9d76be6STejun Heo int r; 15831da177e4SLinus Torvalds 15841da177e4SLinus Torvalds if (minor >= (1 << MINORBITS)) 15851da177e4SLinus Torvalds return -EINVAL; 15861da177e4SLinus Torvalds 1587c9d76be6STejun Heo idr_preload(GFP_KERNEL); 1588f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 15891da177e4SLinus Torvalds 1590c9d76be6STejun Heo r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT); 15911da177e4SLinus Torvalds 1592f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 1593c9d76be6STejun Heo idr_preload_end(); 1594c9d76be6STejun Heo if (r < 0) 1595c9d76be6STejun Heo return r == -ENOSPC ? -EBUSY : r; 1596c9d76be6STejun Heo return 0; 15971da177e4SLinus Torvalds } 15981da177e4SLinus Torvalds 1599cf13ab8eSFrederik Deweerdt static int next_free_minor(int *minor) 16001da177e4SLinus Torvalds { 1601c9d76be6STejun Heo int r; 16021da177e4SLinus Torvalds 1603c9d76be6STejun Heo idr_preload(GFP_KERNEL); 1604f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 16051da177e4SLinus Torvalds 1606c9d76be6STejun Heo r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT); 16071da177e4SLinus Torvalds 1608f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 1609c9d76be6STejun Heo idr_preload_end(); 1610c9d76be6STejun Heo if (r < 0) 16111da177e4SLinus Torvalds return r; 1612c9d76be6STejun Heo *minor = r; 1613c9d76be6STejun Heo return 0; 16141da177e4SLinus Torvalds } 16151da177e4SLinus Torvalds 161683d5cde4SAlexey Dobriyan static const struct block_device_operations dm_blk_dops; 1617f26c5719SDan Williams static const struct dax_operations dm_dax_ops; 16181da177e4SLinus Torvalds 161953d5914fSMikulas Patocka static void dm_wq_work(struct work_struct *work); 162053d5914fSMikulas Patocka 16214cc96131SMike Snitzer void dm_init_md_queue(struct mapped_device *md) 16224a0b4ddfSMike Snitzer { 16234a0b4ddfSMike Snitzer /* 16244a0b4ddfSMike Snitzer * Request-based dm devices cannot be stacked on top of bio-based dm 1625bfebd1cdSMike Snitzer * devices. The type of this dm device may not have been decided yet. 16264a0b4ddfSMike Snitzer * The type is decided at the first table loading time. 16274a0b4ddfSMike Snitzer * To prevent problematic device stacking, clear the queue flag 16284a0b4ddfSMike Snitzer * for request stacking support until then. 16294a0b4ddfSMike Snitzer * 16304a0b4ddfSMike Snitzer * This queue is new, so no concurrency on the queue_flags. 
16314a0b4ddfSMike Snitzer */ 16324a0b4ddfSMike Snitzer queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue); 1633ad5f498fSMikulas Patocka 1634ad5f498fSMikulas Patocka /* 1635ad5f498fSMikulas Patocka * Initialize data that will only be used by a non-blk-mq DM queue 1636ad5f498fSMikulas Patocka * - must do so here (in alloc_dev callchain) before queue is used 1637ad5f498fSMikulas Patocka */ 1638ad5f498fSMikulas Patocka md->queue->queuedata = md; 1639dc3b17ccSJan Kara md->queue->backing_dev_info->congested_data = md; 1640bfebd1cdSMike Snitzer } 16414a0b4ddfSMike Snitzer 16424cc96131SMike Snitzer void dm_init_normal_md_queue(struct mapped_device *md) 1643bfebd1cdSMike Snitzer { 164417e149b8SMike Snitzer md->use_blk_mq = false; 1645bfebd1cdSMike Snitzer dm_init_md_queue(md); 1646bfebd1cdSMike Snitzer 1647bfebd1cdSMike Snitzer /* 1648bfebd1cdSMike Snitzer * Initialize aspects of queue that aren't relevant for blk-mq 1649bfebd1cdSMike Snitzer */ 1650dc3b17ccSJan Kara md->queue->backing_dev_info->congested_fn = dm_any_congested; 16514a0b4ddfSMike Snitzer } 16524a0b4ddfSMike Snitzer 16530f20972fSMike Snitzer static void cleanup_mapped_device(struct mapped_device *md) 16540f20972fSMike Snitzer { 16550f20972fSMike Snitzer if (md->wq) 16560f20972fSMike Snitzer destroy_workqueue(md->wq); 16570f20972fSMike Snitzer if (md->kworker_task) 16580f20972fSMike Snitzer kthread_stop(md->kworker_task); 16590f20972fSMike Snitzer mempool_destroy(md->io_pool); 16600f20972fSMike Snitzer if (md->bs) 16610f20972fSMike Snitzer bioset_free(md->bs); 16620f20972fSMike Snitzer 1663f26c5719SDan Williams if (md->dax_dev) { 1664f26c5719SDan Williams kill_dax(md->dax_dev); 1665f26c5719SDan Williams put_dax(md->dax_dev); 1666f26c5719SDan Williams md->dax_dev = NULL; 1667f26c5719SDan Williams } 1668f26c5719SDan Williams 16690f20972fSMike Snitzer if (md->disk) { 16700f20972fSMike Snitzer spin_lock(&_minor_lock); 16710f20972fSMike Snitzer md->disk->private_data = NULL; 16720f20972fSMike Snitzer spin_unlock(&_minor_lock); 16730f20972fSMike Snitzer del_gendisk(md->disk); 16740f20972fSMike Snitzer put_disk(md->disk); 16750f20972fSMike Snitzer } 16760f20972fSMike Snitzer 16770f20972fSMike Snitzer if (md->queue) 16780f20972fSMike Snitzer blk_cleanup_queue(md->queue); 16790f20972fSMike Snitzer 1680d09960b0STahsin Erdogan cleanup_srcu_struct(&md->io_barrier); 1681d09960b0STahsin Erdogan 16820f20972fSMike Snitzer if (md->bdev) { 16830f20972fSMike Snitzer bdput(md->bdev); 16840f20972fSMike Snitzer md->bdev = NULL; 16850f20972fSMike Snitzer } 16864cc96131SMike Snitzer 16874cc96131SMike Snitzer dm_mq_cleanup_mapped_device(md); 16880f20972fSMike Snitzer } 16890f20972fSMike Snitzer 16901da177e4SLinus Torvalds /* 16911da177e4SLinus Torvalds * Allocate and initialise a blank device with a given minor. 
16921da177e4SLinus Torvalds */ 16932b06cfffSAlasdair G Kergon static struct mapped_device *alloc_dev(int minor) 16941da177e4SLinus Torvalds { 1695115485e8SMike Snitzer int r, numa_node_id = dm_get_numa_node(); 1696f26c5719SDan Williams struct dax_device *dax_dev; 1697115485e8SMike Snitzer struct mapped_device *md; 1698ba61fdd1SJeff Mahoney void *old_md; 16991da177e4SLinus Torvalds 1700856eb091SMikulas Patocka md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id); 17011da177e4SLinus Torvalds if (!md) { 17021da177e4SLinus Torvalds DMWARN("unable to allocate device, out of memory."); 17031da177e4SLinus Torvalds return NULL; 17041da177e4SLinus Torvalds } 17051da177e4SLinus Torvalds 170610da4f79SJeff Mahoney if (!try_module_get(THIS_MODULE)) 17076ed7ade8SMilan Broz goto bad_module_get; 170810da4f79SJeff Mahoney 17091da177e4SLinus Torvalds /* get a minor number for the dev */ 17102b06cfffSAlasdair G Kergon if (minor == DM_ANY_MINOR) 1711cf13ab8eSFrederik Deweerdt r = next_free_minor(&minor); 17122b06cfffSAlasdair G Kergon else 1713cf13ab8eSFrederik Deweerdt r = specific_minor(minor); 17141da177e4SLinus Torvalds if (r < 0) 17156ed7ade8SMilan Broz goto bad_minor; 17161da177e4SLinus Torvalds 171783d5e5b0SMikulas Patocka r = init_srcu_struct(&md->io_barrier); 171883d5e5b0SMikulas Patocka if (r < 0) 171983d5e5b0SMikulas Patocka goto bad_io_barrier; 172083d5e5b0SMikulas Patocka 1721115485e8SMike Snitzer md->numa_node_id = numa_node_id; 17224cc96131SMike Snitzer md->use_blk_mq = dm_use_blk_mq_default(); 1723591ddcfcSMike Snitzer md->init_tio_pdu = false; 1724a5664dadSMike Snitzer md->type = DM_TYPE_NONE; 1725e61290a4SDaniel Walker mutex_init(&md->suspend_lock); 1726a5664dadSMike Snitzer mutex_init(&md->type_lock); 172786f1152bSBenjamin Marzinski mutex_init(&md->table_devices_lock); 1728022c2611SMikulas Patocka spin_lock_init(&md->deferred_lock); 17291da177e4SLinus Torvalds atomic_set(&md->holders, 1); 17305c6bd75dSAlasdair G Kergon atomic_set(&md->open_count, 0); 17311da177e4SLinus Torvalds atomic_set(&md->event_nr, 0); 17327a8c3d3bSMike Anderson atomic_set(&md->uevent_seq, 0); 17337a8c3d3bSMike Anderson INIT_LIST_HEAD(&md->uevent_list); 173486f1152bSBenjamin Marzinski INIT_LIST_HEAD(&md->table_devices); 17357a8c3d3bSMike Anderson spin_lock_init(&md->uevent_lock); 17361da177e4SLinus Torvalds 1737115485e8SMike Snitzer md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id); 17381da177e4SLinus Torvalds if (!md->queue) 17390f20972fSMike Snitzer goto bad; 17401da177e4SLinus Torvalds 17414a0b4ddfSMike Snitzer dm_init_md_queue(md); 17429faf400fSStefan Bader 1743115485e8SMike Snitzer md->disk = alloc_disk_node(1, numa_node_id); 17441da177e4SLinus Torvalds if (!md->disk) 17450f20972fSMike Snitzer goto bad; 17461da177e4SLinus Torvalds 1747316d315bSNikanth Karthikesan atomic_set(&md->pending[0], 0); 1748316d315bSNikanth Karthikesan atomic_set(&md->pending[1], 0); 1749f0b04115SJeff Mahoney init_waitqueue_head(&md->wait); 175053d5914fSMikulas Patocka INIT_WORK(&md->work, dm_wq_work); 1751f0b04115SJeff Mahoney init_waitqueue_head(&md->eventq); 17522995fa78SMikulas Patocka init_completion(&md->kobj_holder.completion); 17532eb6e1e3SKeith Busch md->kworker_task = NULL; 1754f0b04115SJeff Mahoney 17551da177e4SLinus Torvalds md->disk->major = _major; 17561da177e4SLinus Torvalds md->disk->first_minor = minor; 17571da177e4SLinus Torvalds md->disk->fops = &dm_blk_dops; 17581da177e4SLinus Torvalds md->disk->queue = md->queue; 17591da177e4SLinus Torvalds md->disk->private_data = md; 17601da177e4SLinus Torvalds 
sprintf(md->disk->disk_name, "dm-%d", minor); 1761f26c5719SDan Williams 1762f26c5719SDan Williams dax_dev = alloc_dax(md, md->disk->disk_name, &dm_dax_ops); 1763f26c5719SDan Williams if (!dax_dev) 1764f26c5719SDan Williams goto bad; 1765f26c5719SDan Williams md->dax_dev = dax_dev; 1766f26c5719SDan Williams 17671da177e4SLinus Torvalds add_disk(md->disk); 17687e51f257SMike Anderson format_dev_t(md->name, MKDEV(_major, minor)); 17691da177e4SLinus Torvalds 1770670368a8STejun Heo md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0); 1771304f3f6aSMilan Broz if (!md->wq) 17720f20972fSMike Snitzer goto bad; 1773304f3f6aSMilan Broz 177432a926daSMikulas Patocka md->bdev = bdget_disk(md->disk, 0); 177532a926daSMikulas Patocka if (!md->bdev) 17760f20972fSMike Snitzer goto bad; 177732a926daSMikulas Patocka 17783a83f467SMing Lei bio_init(&md->flush_bio, NULL, 0); 177974d46992SChristoph Hellwig bio_set_dev(&md->flush_bio, md->bdev); 1780ff0361b3SJan Kara md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC; 17816a8736d1STejun Heo 1782fd2ed4d2SMikulas Patocka dm_stats_init(&md->stats); 1783fd2ed4d2SMikulas Patocka 1784ba61fdd1SJeff Mahoney /* Populate the mapping, nobody knows we exist yet */ 1785f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 1786ba61fdd1SJeff Mahoney old_md = idr_replace(&_minor_idr, md, minor); 1787f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 1788ba61fdd1SJeff Mahoney 1789ba61fdd1SJeff Mahoney BUG_ON(old_md != MINOR_ALLOCED); 1790ba61fdd1SJeff Mahoney 17911da177e4SLinus Torvalds return md; 17921da177e4SLinus Torvalds 17930f20972fSMike Snitzer bad: 17940f20972fSMike Snitzer cleanup_mapped_device(md); 179583d5e5b0SMikulas Patocka bad_io_barrier: 17961da177e4SLinus Torvalds free_minor(minor); 17976ed7ade8SMilan Broz bad_minor: 179810da4f79SJeff Mahoney module_put(THIS_MODULE); 17996ed7ade8SMilan Broz bad_module_get: 1800856eb091SMikulas Patocka kvfree(md); 18011da177e4SLinus Torvalds return NULL; 18021da177e4SLinus Torvalds } 18031da177e4SLinus Torvalds 1804ae9da83fSJun'ichi Nomura static void unlock_fs(struct mapped_device *md); 1805ae9da83fSJun'ichi Nomura 18061da177e4SLinus Torvalds static void free_dev(struct mapped_device *md) 18071da177e4SLinus Torvalds { 1808f331c029STejun Heo int minor = MINOR(disk_devt(md->disk)); 180963d94e48SJun'ichi Nomura 1810ae9da83fSJun'ichi Nomura unlock_fs(md); 18112eb6e1e3SKeith Busch 18120f20972fSMike Snitzer cleanup_mapped_device(md); 18130f20972fSMike Snitzer 18140f20972fSMike Snitzer free_table_devices(&md->table_devices); 18150f20972fSMike Snitzer dm_stats_cleanup(&md->stats); 181663a4f065SMike Snitzer free_minor(minor); 181763a4f065SMike Snitzer 181810da4f79SJeff Mahoney module_put(THIS_MODULE); 1819856eb091SMikulas Patocka kvfree(md); 18201da177e4SLinus Torvalds } 18211da177e4SLinus Torvalds 1822e6ee8c0bSKiyoshi Ueda static void __bind_mempools(struct mapped_device *md, struct dm_table *t) 1823e6ee8c0bSKiyoshi Ueda { 1824c0820cf5SMikulas Patocka struct dm_md_mempools *p = dm_table_get_md_mempools(t); 1825e6ee8c0bSKiyoshi Ueda 18264e6e36c3SMike Snitzer if (md->bs) { 18274e6e36c3SMike Snitzer /* The md already has necessary mempools. */ 1828545ed20eSToshi Kani if (dm_table_bio_based(t)) { 1829c0820cf5SMikulas Patocka /* 183016245bdcSJun'ichi Nomura * Reload bioset because front_pad may have changed 183116245bdcSJun'ichi Nomura * because a different table was loaded. 
1832c0820cf5SMikulas Patocka */ 1833c0820cf5SMikulas Patocka bioset_free(md->bs); 1834c0820cf5SMikulas Patocka md->bs = p->bs; 1835c0820cf5SMikulas Patocka p->bs = NULL; 1836c0820cf5SMikulas Patocka } 1837cbc4e3c1SMike Snitzer /* 18384e6e36c3SMike Snitzer * There's no need to reload with request-based dm 18394e6e36c3SMike Snitzer * because the size of front_pad doesn't change. 18404e6e36c3SMike Snitzer * Note for future: If you are to reload bioset, 18414e6e36c3SMike Snitzer * prep-ed requests in the queue may refer 18424e6e36c3SMike Snitzer * to bio from the old bioset, so you must walk 18434e6e36c3SMike Snitzer * through the queue to unprep. 1844cbc4e3c1SMike Snitzer */ 1845cbc4e3c1SMike Snitzer goto out; 1846cbc4e3c1SMike Snitzer } 1847cbc4e3c1SMike Snitzer 1848eb8db831SChristoph Hellwig BUG_ON(!p || md->io_pool || md->bs); 1849e6ee8c0bSKiyoshi Ueda 1850e6ee8c0bSKiyoshi Ueda md->io_pool = p->io_pool; 1851e6ee8c0bSKiyoshi Ueda p->io_pool = NULL; 1852e6ee8c0bSKiyoshi Ueda md->bs = p->bs; 1853e6ee8c0bSKiyoshi Ueda p->bs = NULL; 18544e6e36c3SMike Snitzer 1855e6ee8c0bSKiyoshi Ueda out: 185602233342SMike Snitzer /* mempool bind completed, no longer need any mempools in the table */ 1857e6ee8c0bSKiyoshi Ueda dm_table_free_md_mempools(t); 1858e6ee8c0bSKiyoshi Ueda } 1859e6ee8c0bSKiyoshi Ueda 18601da177e4SLinus Torvalds /* 18611da177e4SLinus Torvalds * Bind a table to the device. 18621da177e4SLinus Torvalds */ 18631da177e4SLinus Torvalds static void event_callback(void *context) 18641da177e4SLinus Torvalds { 18657a8c3d3bSMike Anderson unsigned long flags; 18667a8c3d3bSMike Anderson LIST_HEAD(uevents); 18671da177e4SLinus Torvalds struct mapped_device *md = (struct mapped_device *) context; 18681da177e4SLinus Torvalds 18697a8c3d3bSMike Anderson spin_lock_irqsave(&md->uevent_lock, flags); 18707a8c3d3bSMike Anderson list_splice_init(&md->uevent_list, &uevents); 18717a8c3d3bSMike Anderson spin_unlock_irqrestore(&md->uevent_lock, flags); 18727a8c3d3bSMike Anderson 1873ed9e1982STejun Heo dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); 18747a8c3d3bSMike Anderson 18751da177e4SLinus Torvalds atomic_inc(&md->event_nr); 18761da177e4SLinus Torvalds wake_up(&md->eventq); 187762e08243SMikulas Patocka dm_issue_global_event(); 18781da177e4SLinus Torvalds } 18791da177e4SLinus Torvalds 1880c217649bSMike Snitzer /* 1881c217649bSMike Snitzer * Protected by md->suspend_lock obtained by dm_swap_table(). 1882c217649bSMike Snitzer */ 18834e90188bSAlasdair G Kergon static void __set_size(struct mapped_device *md, sector_t size) 18841da177e4SLinus Torvalds { 18851ea0654eSBart Van Assche lockdep_assert_held(&md->suspend_lock); 18861ea0654eSBart Van Assche 18874e90188bSAlasdair G Kergon set_capacity(md->disk, size); 18881da177e4SLinus Torvalds 1889db8fef4fSMikulas Patocka i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT); 18901da177e4SLinus Torvalds } 18911da177e4SLinus Torvalds 1892042d2a9bSAlasdair G Kergon /* 1893042d2a9bSAlasdair G Kergon * Returns old map, which caller must destroy. 
1894042d2a9bSAlasdair G Kergon */ 1895042d2a9bSAlasdair G Kergon static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, 1896754c5fc7SMike Snitzer struct queue_limits *limits) 18971da177e4SLinus Torvalds { 1898042d2a9bSAlasdair G Kergon struct dm_table *old_map; 1899165125e1SJens Axboe struct request_queue *q = md->queue; 19001da177e4SLinus Torvalds sector_t size; 19011da177e4SLinus Torvalds 19025a8f1f80SBart Van Assche lockdep_assert_held(&md->suspend_lock); 19035a8f1f80SBart Van Assche 19041da177e4SLinus Torvalds size = dm_table_get_size(t); 19053ac51e74SDarrick J. Wong 19063ac51e74SDarrick J. Wong /* 19073ac51e74SDarrick J. Wong * Wipe any geometry if the size of the table changed. 19083ac51e74SDarrick J. Wong */ 1909fd2ed4d2SMikulas Patocka if (size != dm_get_size(md)) 19103ac51e74SDarrick J. Wong memset(&md->geometry, 0, sizeof(md->geometry)); 19113ac51e74SDarrick J. Wong 19124e90188bSAlasdair G Kergon __set_size(md, size); 19131da177e4SLinus Torvalds 1914cf222b37SAlasdair G Kergon dm_table_event_callback(t, event_callback, md); 19152ca3310eSAlasdair G Kergon 1916e6ee8c0bSKiyoshi Ueda /* 1917e6ee8c0bSKiyoshi Ueda * The queue hasn't been stopped yet, if the old table type wasn't 1918e6ee8c0bSKiyoshi Ueda * for request-based during suspension. So stop it to prevent 1919e6ee8c0bSKiyoshi Ueda * I/O mapping before resume. 1920e6ee8c0bSKiyoshi Ueda * This must be done before setting the queue restrictions, 1921e6ee8c0bSKiyoshi Ueda * because request-based dm may be run just after the setting. 1922e6ee8c0bSKiyoshi Ueda */ 192316f12266SMike Snitzer if (dm_table_request_based(t)) { 1924eca7ee6dSMike Snitzer dm_stop_queue(q); 192516f12266SMike Snitzer /* 192616f12266SMike Snitzer * Leverage the fact that request-based DM targets are 192716f12266SMike Snitzer * immutable singletons and establish md->immutable_target 192816f12266SMike Snitzer * - used to optimize both dm_request_fn and dm_mq_queue_rq 192916f12266SMike Snitzer */ 193016f12266SMike Snitzer md->immutable_target = dm_table_get_immutable_target(t); 193116f12266SMike Snitzer } 1932e6ee8c0bSKiyoshi Ueda 1933e6ee8c0bSKiyoshi Ueda __bind_mempools(md, t); 1934e6ee8c0bSKiyoshi Ueda 1935a12f5d48SEric Dumazet old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 19361d3aa6f6SMike Snitzer rcu_assign_pointer(md->map, (void *)t); 193736a0456fSAlasdair G Kergon md->immutable_target_type = dm_table_get_immutable_target_type(t); 193836a0456fSAlasdair G Kergon 1939754c5fc7SMike Snitzer dm_table_set_restrictions(t, q, limits); 194041abc4e1SHannes Reinecke if (old_map) 194183d5e5b0SMikulas Patocka dm_sync_table(md); 19422ca3310eSAlasdair G Kergon 1943042d2a9bSAlasdair G Kergon return old_map; 19441da177e4SLinus Torvalds } 19451da177e4SLinus Torvalds 1946a7940155SAlasdair G Kergon /* 1947a7940155SAlasdair G Kergon * Returns unbound table for the caller to free. 
1948a7940155SAlasdair G Kergon */ 1949a7940155SAlasdair G Kergon static struct dm_table *__unbind(struct mapped_device *md) 19501da177e4SLinus Torvalds { 1951a12f5d48SEric Dumazet struct dm_table *map = rcu_dereference_protected(md->map, 1); 19521da177e4SLinus Torvalds 19531da177e4SLinus Torvalds if (!map) 1954a7940155SAlasdair G Kergon return NULL; 19551da177e4SLinus Torvalds 19561da177e4SLinus Torvalds dm_table_event_callback(map, NULL, NULL); 19579cdb8520SMonam Agarwal RCU_INIT_POINTER(md->map, NULL); 195883d5e5b0SMikulas Patocka dm_sync_table(md); 1959a7940155SAlasdair G Kergon 1960a7940155SAlasdair G Kergon return map; 19611da177e4SLinus Torvalds } 19621da177e4SLinus Torvalds 19631da177e4SLinus Torvalds /* 19641da177e4SLinus Torvalds * Constructor for a new device. 19651da177e4SLinus Torvalds */ 19662b06cfffSAlasdair G Kergon int dm_create(int minor, struct mapped_device **result) 19671da177e4SLinus Torvalds { 19681da177e4SLinus Torvalds struct mapped_device *md; 19691da177e4SLinus Torvalds 19702b06cfffSAlasdair G Kergon md = alloc_dev(minor); 19711da177e4SLinus Torvalds if (!md) 19721da177e4SLinus Torvalds return -ENXIO; 19731da177e4SLinus Torvalds 1974784aae73SMilan Broz dm_sysfs_init(md); 1975784aae73SMilan Broz 19761da177e4SLinus Torvalds *result = md; 19771da177e4SLinus Torvalds return 0; 19781da177e4SLinus Torvalds } 19791da177e4SLinus Torvalds 1980a5664dadSMike Snitzer /* 1981a5664dadSMike Snitzer * Functions to manage md->type. 1982a5664dadSMike Snitzer * All are required to hold md->type_lock. 1983a5664dadSMike Snitzer */ 1984a5664dadSMike Snitzer void dm_lock_md_type(struct mapped_device *md) 1985a5664dadSMike Snitzer { 1986a5664dadSMike Snitzer mutex_lock(&md->type_lock); 1987a5664dadSMike Snitzer } 1988a5664dadSMike Snitzer 1989a5664dadSMike Snitzer void dm_unlock_md_type(struct mapped_device *md) 1990a5664dadSMike Snitzer { 1991a5664dadSMike Snitzer mutex_unlock(&md->type_lock); 1992a5664dadSMike Snitzer } 1993a5664dadSMike Snitzer 19947e0d574fSBart Van Assche void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type) 1995a5664dadSMike Snitzer { 199600c4fc3bSMike Snitzer BUG_ON(!mutex_is_locked(&md->type_lock)); 1997a5664dadSMike Snitzer md->type = type; 1998a5664dadSMike Snitzer } 1999a5664dadSMike Snitzer 20007e0d574fSBart Van Assche enum dm_queue_mode dm_get_md_type(struct mapped_device *md) 2001a5664dadSMike Snitzer { 2002a5664dadSMike Snitzer return md->type; 2003a5664dadSMike Snitzer } 2004a5664dadSMike Snitzer 200536a0456fSAlasdair G Kergon struct target_type *dm_get_immutable_target_type(struct mapped_device *md) 200636a0456fSAlasdair G Kergon { 200736a0456fSAlasdair G Kergon return md->immutable_target_type; 200836a0456fSAlasdair G Kergon } 200936a0456fSAlasdair G Kergon 20104a0b4ddfSMike Snitzer /* 2011f84cb8a4SMike Snitzer * The queue_limits are only valid as long as you have a reference 2012f84cb8a4SMike Snitzer * count on 'md'. 
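 *
 * Hypothetical usage sketch (not from this file):
 *
 *	dm_get(md);				// pin md first
 *	limits = dm_get_queue_limits(md);
 *	max_sectors = limits->max_sectors;	// valid while md is pinned
 *	dm_put(md);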
2013f84cb8a4SMike Snitzer */ 2014f84cb8a4SMike Snitzer struct queue_limits *dm_get_queue_limits(struct mapped_device *md) 2015f84cb8a4SMike Snitzer { 2016f84cb8a4SMike Snitzer BUG_ON(!atomic_read(&md->holders)); 2017f84cb8a4SMike Snitzer return &md->queue->limits; 2018f84cb8a4SMike Snitzer } 2019f84cb8a4SMike Snitzer EXPORT_SYMBOL_GPL(dm_get_queue_limits); 2020f84cb8a4SMike Snitzer 20214a0b4ddfSMike Snitzer /* 20224a0b4ddfSMike Snitzer * Setup the DM device's queue based on md's type 20234a0b4ddfSMike Snitzer */ 2024591ddcfcSMike Snitzer int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) 20254a0b4ddfSMike Snitzer { 2026bfebd1cdSMike Snitzer int r; 20277e0d574fSBart Van Assche enum dm_queue_mode type = dm_get_md_type(md); 2028bfebd1cdSMike Snitzer 2029545ed20eSToshi Kani switch (type) { 2030bfebd1cdSMike Snitzer case DM_TYPE_REQUEST_BASED: 2031eb8db831SChristoph Hellwig r = dm_old_init_request_queue(md, t); 2032bfebd1cdSMike Snitzer if (r) { 2033eca7ee6dSMike Snitzer DMERR("Cannot initialize queue for request-based mapped device"); 2034bfebd1cdSMike Snitzer return r; 20354a0b4ddfSMike Snitzer } 2036bfebd1cdSMike Snitzer break; 2037bfebd1cdSMike Snitzer case DM_TYPE_MQ_REQUEST_BASED: 2038e83068a5SMike Snitzer r = dm_mq_init_request_queue(md, t); 2039bfebd1cdSMike Snitzer if (r) { 2040eca7ee6dSMike Snitzer DMERR("Cannot initialize queue for request-based dm-mq mapped device"); 2041bfebd1cdSMike Snitzer return r; 2042bfebd1cdSMike Snitzer } 2043bfebd1cdSMike Snitzer break; 2044bfebd1cdSMike Snitzer case DM_TYPE_BIO_BASED: 2045545ed20eSToshi Kani case DM_TYPE_DAX_BIO_BASED: 2046eca7ee6dSMike Snitzer dm_init_normal_md_queue(md); 2047ff36ab34SMike Snitzer blk_queue_make_request(md->queue, dm_make_request); 2048dbba42d8SMikulas Patocka /* 2049dbba42d8SMikulas Patocka * DM handles splitting bios as needed. Free the bio_split bioset 2050dbba42d8SMikulas Patocka * since it won't be used (saves 1 process per bio-based DM device). 
2051dbba42d8SMikulas Patocka */ 2052dbba42d8SMikulas Patocka bioset_free(md->queue->bio_split); 2053dbba42d8SMikulas Patocka md->queue->bio_split = NULL; 2054545ed20eSToshi Kani 2055545ed20eSToshi Kani if (type == DM_TYPE_DAX_BIO_BASED) 2056545ed20eSToshi Kani queue_flag_set_unlocked(QUEUE_FLAG_DAX, md->queue); 2057bfebd1cdSMike Snitzer break; 20587e0d574fSBart Van Assche case DM_TYPE_NONE: 20597e0d574fSBart Van Assche WARN_ON_ONCE(true); 20607e0d574fSBart Van Assche break; 2061ff36ab34SMike Snitzer } 20624a0b4ddfSMike Snitzer 20634a0b4ddfSMike Snitzer return 0; 20644a0b4ddfSMike Snitzer } 20654a0b4ddfSMike Snitzer 20662bec1f4aSMikulas Patocka struct mapped_device *dm_get_md(dev_t dev) 20671da177e4SLinus Torvalds { 20681da177e4SLinus Torvalds struct mapped_device *md; 20691da177e4SLinus Torvalds unsigned minor = MINOR(dev); 20701da177e4SLinus Torvalds 20711da177e4SLinus Torvalds if (MAJOR(dev) != _major || minor >= (1 << MINORBITS)) 20721da177e4SLinus Torvalds return NULL; 20731da177e4SLinus Torvalds 2074f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 20751da177e4SLinus Torvalds 20761da177e4SLinus Torvalds md = idr_find(&_minor_idr, minor); 2077*49de5769SMike Snitzer if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) || 2078*49de5769SMike Snitzer test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { 2079637842cfSDavid Teigland md = NULL; 2080fba9f90eSJeff Mahoney goto out; 2081fba9f90eSJeff Mahoney } 20822bec1f4aSMikulas Patocka dm_get(md); 2083fba9f90eSJeff Mahoney out: 2084f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 20851da177e4SLinus Torvalds 2086637842cfSDavid Teigland return md; 2087637842cfSDavid Teigland } 20883cf2e4baSAlasdair G Kergon EXPORT_SYMBOL_GPL(dm_get_md); 2089d229a958SDavid Teigland 20909ade92a9SAlasdair G Kergon void *dm_get_mdptr(struct mapped_device *md) 2091637842cfSDavid Teigland { 20929ade92a9SAlasdair G Kergon return md->interface_ptr; 20931da177e4SLinus Torvalds } 20941da177e4SLinus Torvalds 20951da177e4SLinus Torvalds void dm_set_mdptr(struct mapped_device *md, void *ptr) 20961da177e4SLinus Torvalds { 20971da177e4SLinus Torvalds md->interface_ptr = ptr; 20981da177e4SLinus Torvalds } 20991da177e4SLinus Torvalds 21001da177e4SLinus Torvalds void dm_get(struct mapped_device *md) 21011da177e4SLinus Torvalds { 21021da177e4SLinus Torvalds atomic_inc(&md->holders); 21033f77316dSKiyoshi Ueda BUG_ON(test_bit(DMF_FREEING, &md->flags)); 21041da177e4SLinus Torvalds } 21051da177e4SLinus Torvalds 210609ee96b2SMikulas Patocka int dm_hold(struct mapped_device *md) 210709ee96b2SMikulas Patocka { 210809ee96b2SMikulas Patocka spin_lock(&_minor_lock); 210909ee96b2SMikulas Patocka if (test_bit(DMF_FREEING, &md->flags)) { 211009ee96b2SMikulas Patocka spin_unlock(&_minor_lock); 211109ee96b2SMikulas Patocka return -EBUSY; 211209ee96b2SMikulas Patocka } 211309ee96b2SMikulas Patocka dm_get(md); 211409ee96b2SMikulas Patocka spin_unlock(&_minor_lock); 211509ee96b2SMikulas Patocka return 0; 211609ee96b2SMikulas Patocka } 211709ee96b2SMikulas Patocka EXPORT_SYMBOL_GPL(dm_hold); 211809ee96b2SMikulas Patocka 211972d94861SAlasdair G Kergon const char *dm_device_name(struct mapped_device *md) 212072d94861SAlasdair G Kergon { 212172d94861SAlasdair G Kergon return md->name; 212272d94861SAlasdair G Kergon } 212372d94861SAlasdair G Kergon EXPORT_SYMBOL_GPL(dm_device_name); 212472d94861SAlasdair G Kergon 21253f77316dSKiyoshi Ueda static void __dm_destroy(struct mapped_device *md, bool wait) 21261da177e4SLinus Torvalds { 21273b785fbcSBart Van Assche struct request_queue 
*q = dm_get_md_queue(md); 21281134e5aeSMike Anderson struct dm_table *map; 212983d5e5b0SMikulas Patocka int srcu_idx; 21301da177e4SLinus Torvalds 21313f77316dSKiyoshi Ueda might_sleep(); 2132fba9f90eSJeff Mahoney 213363a4f065SMike Snitzer spin_lock(&_minor_lock); 21343f77316dSKiyoshi Ueda idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); 2135fba9f90eSJeff Mahoney set_bit(DMF_FREEING, &md->flags); 2136f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 21373f77316dSKiyoshi Ueda 21382e91c369SBart Van Assche blk_set_queue_dying(q); 21393b785fbcSBart Van Assche 214002233342SMike Snitzer if (dm_request_based(md) && md->kworker_task) 21413989144fSPetr Mladek kthread_flush_worker(&md->kworker); 21422eb6e1e3SKeith Busch 2143ab7c7bb6SMikulas Patocka /* 2144ab7c7bb6SMikulas Patocka * Take suspend_lock so that presuspend and postsuspend methods 2145ab7c7bb6SMikulas Patocka * do not race with internal suspend. 2146ab7c7bb6SMikulas Patocka */ 2147ab7c7bb6SMikulas Patocka mutex_lock(&md->suspend_lock); 21482a708cffSJunichi Nomura map = dm_get_live_table(md, &srcu_idx); 21494f186f8bSKiyoshi Ueda if (!dm_suspended_md(md)) { 21501da177e4SLinus Torvalds dm_table_presuspend_targets(map); 21511da177e4SLinus Torvalds dm_table_postsuspend_targets(map); 21521da177e4SLinus Torvalds } 215383d5e5b0SMikulas Patocka /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ 215483d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 21552a708cffSJunichi Nomura mutex_unlock(&md->suspend_lock); 215683d5e5b0SMikulas Patocka 21573f77316dSKiyoshi Ueda /* 21583f77316dSKiyoshi Ueda * Rare, but there may be I/O requests still going to complete, 21593f77316dSKiyoshi Ueda * for example. Wait for all references to disappear. 21603f77316dSKiyoshi Ueda * No one should increment the reference count of the mapped_device, 21613f77316dSKiyoshi Ueda * after the mapped_device state becomes DMF_FREEING. 21623f77316dSKiyoshi Ueda */ 21633f77316dSKiyoshi Ueda if (wait) 21643f77316dSKiyoshi Ueda while (atomic_read(&md->holders)) 21653f77316dSKiyoshi Ueda msleep(1); 21663f77316dSKiyoshi Ueda else if (atomic_read(&md->holders)) 21673f77316dSKiyoshi Ueda DMWARN("%s: Forcibly removing mapped_device still in use! 
(%d users)", 21683f77316dSKiyoshi Ueda dm_device_name(md), atomic_read(&md->holders)); 21693f77316dSKiyoshi Ueda 2170784aae73SMilan Broz dm_sysfs_exit(md); 2171a7940155SAlasdair G Kergon dm_table_destroy(__unbind(md)); 21721da177e4SLinus Torvalds free_dev(md); 21731da177e4SLinus Torvalds } 21743f77316dSKiyoshi Ueda 21753f77316dSKiyoshi Ueda void dm_destroy(struct mapped_device *md) 21763f77316dSKiyoshi Ueda { 21773f77316dSKiyoshi Ueda __dm_destroy(md, true); 21783f77316dSKiyoshi Ueda } 21793f77316dSKiyoshi Ueda 21803f77316dSKiyoshi Ueda void dm_destroy_immediate(struct mapped_device *md) 21813f77316dSKiyoshi Ueda { 21823f77316dSKiyoshi Ueda __dm_destroy(md, false); 21833f77316dSKiyoshi Ueda } 21843f77316dSKiyoshi Ueda 21853f77316dSKiyoshi Ueda void dm_put(struct mapped_device *md) 21863f77316dSKiyoshi Ueda { 21873f77316dSKiyoshi Ueda atomic_dec(&md->holders); 21881da177e4SLinus Torvalds } 218979eb885cSEdward Goggin EXPORT_SYMBOL_GPL(dm_put); 21901da177e4SLinus Torvalds 2191b48633f8SBart Van Assche static int dm_wait_for_completion(struct mapped_device *md, long task_state) 219246125c1cSMilan Broz { 219346125c1cSMilan Broz int r = 0; 21949f4c3f87SBart Van Assche DEFINE_WAIT(wait); 219546125c1cSMilan Broz 219646125c1cSMilan Broz while (1) { 21979f4c3f87SBart Van Assche prepare_to_wait(&md->wait, &wait, task_state); 219846125c1cSMilan Broz 2199b4324feeSKiyoshi Ueda if (!md_in_flight(md)) 220046125c1cSMilan Broz break; 220146125c1cSMilan Broz 2202e3fabdfdSBart Van Assche if (signal_pending_state(task_state, current)) { 220346125c1cSMilan Broz r = -EINTR; 220446125c1cSMilan Broz break; 220546125c1cSMilan Broz } 220646125c1cSMilan Broz 220746125c1cSMilan Broz io_schedule(); 220846125c1cSMilan Broz } 22099f4c3f87SBart Van Assche finish_wait(&md->wait, &wait); 2210b44ebeb0SMikulas Patocka 221146125c1cSMilan Broz return r; 221246125c1cSMilan Broz } 221346125c1cSMilan Broz 22141da177e4SLinus Torvalds /* 22151da177e4SLinus Torvalds * Process the deferred bios 22161da177e4SLinus Torvalds */ 2217ef208587SMikulas Patocka static void dm_wq_work(struct work_struct *work) 22181da177e4SLinus Torvalds { 2219ef208587SMikulas Patocka struct mapped_device *md = container_of(work, struct mapped_device, 2220ef208587SMikulas Patocka work); 22216d6f10dfSMilan Broz struct bio *c; 222283d5e5b0SMikulas Patocka int srcu_idx; 222383d5e5b0SMikulas Patocka struct dm_table *map; 22241da177e4SLinus Torvalds 222583d5e5b0SMikulas Patocka map = dm_get_live_table(md, &srcu_idx); 2226ef208587SMikulas Patocka 22273b00b203SMikulas Patocka while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 2228022c2611SMikulas Patocka spin_lock_irq(&md->deferred_lock); 2229022c2611SMikulas Patocka c = bio_list_pop(&md->deferred); 2230022c2611SMikulas Patocka spin_unlock_irq(&md->deferred_lock); 2231022c2611SMikulas Patocka 22326a8736d1STejun Heo if (!c) 2233df12ee99SAlasdair G Kergon break; 223473d410c0SMilan Broz 2235e6ee8c0bSKiyoshi Ueda if (dm_request_based(md)) 2236e6ee8c0bSKiyoshi Ueda generic_make_request(c); 2237af7e466aSMikulas Patocka else 223883d5e5b0SMikulas Patocka __split_and_process_bio(md, map, c); 2239e6ee8c0bSKiyoshi Ueda } 22403b00b203SMikulas Patocka 224183d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 22421da177e4SLinus Torvalds } 22431da177e4SLinus Torvalds 22449a1fb464SMikulas Patocka static void dm_queue_flush(struct mapped_device *md) 2245304f3f6aSMilan Broz { 22463b00b203SMikulas Patocka clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 22474e857c58SPeter Zijlstra smp_mb__after_atomic(); 224853d5914fSMikulas 
Patocka queue_work(md->wq, &md->work); 2249304f3f6aSMilan Broz } 2250304f3f6aSMilan Broz 22511da177e4SLinus Torvalds /* 2252042d2a9bSAlasdair G Kergon * Swap in a new table, returning the old one for the caller to destroy. 22531da177e4SLinus Torvalds */ 2254042d2a9bSAlasdair G Kergon struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) 22551da177e4SLinus Torvalds { 225687eb5b21SMike Christie struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL); 2257754c5fc7SMike Snitzer struct queue_limits limits; 2258042d2a9bSAlasdair G Kergon int r; 22591da177e4SLinus Torvalds 2260e61290a4SDaniel Walker mutex_lock(&md->suspend_lock); 22611da177e4SLinus Torvalds 22621da177e4SLinus Torvalds /* device must be suspended */ 22634f186f8bSKiyoshi Ueda if (!dm_suspended_md(md)) 226493c534aeSAlasdair G Kergon goto out; 22651da177e4SLinus Torvalds 22663ae70656SMike Snitzer /* 22673ae70656SMike Snitzer * If the new table has no data devices, retain the existing limits. 22683ae70656SMike Snitzer * This helps multipath with queue_if_no_path if all paths disappear, 22693ae70656SMike Snitzer * then new I/O is queued based on these limits, and then some paths 22703ae70656SMike Snitzer * reappear. 22713ae70656SMike Snitzer */ 22723ae70656SMike Snitzer if (dm_table_has_no_data_devices(table)) { 227383d5e5b0SMikulas Patocka live_map = dm_get_live_table_fast(md); 22743ae70656SMike Snitzer if (live_map) 22753ae70656SMike Snitzer limits = md->queue->limits; 227683d5e5b0SMikulas Patocka dm_put_live_table_fast(md); 22773ae70656SMike Snitzer } 22783ae70656SMike Snitzer 227987eb5b21SMike Christie if (!live_map) { 2280754c5fc7SMike Snitzer r = dm_calculate_queue_limits(table, &limits); 2281042d2a9bSAlasdair G Kergon if (r) { 2282042d2a9bSAlasdair G Kergon map = ERR_PTR(r); 2283754c5fc7SMike Snitzer goto out; 2284042d2a9bSAlasdair G Kergon } 228587eb5b21SMike Christie } 2286754c5fc7SMike Snitzer 2287042d2a9bSAlasdair G Kergon map = __bind(md, table, &limits); 228862e08243SMikulas Patocka dm_issue_global_event(); 22891da177e4SLinus Torvalds 229093c534aeSAlasdair G Kergon out: 2291e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock); 2292042d2a9bSAlasdair G Kergon return map; 22931da177e4SLinus Torvalds } 22941da177e4SLinus Torvalds 22951da177e4SLinus Torvalds /* 22961da177e4SLinus Torvalds * Functions to lock and unlock any filesystem running on the 22971da177e4SLinus Torvalds * device. 
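 *
 * Illustrative pairing (a sketch, not a verbatim caller):
 *
 *	r = lock_fs(md);	// freeze_bdev() flushes and freezes the fs
 *	if (!r) {
 *		...quiesce and swap tables...
 *		unlock_fs(md);	// thaw_bdev() lets filesystem I/O resume
 *	}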
22981da177e4SLinus Torvalds */ 22992ca3310eSAlasdair G Kergon static int lock_fs(struct mapped_device *md) 23001da177e4SLinus Torvalds { 2301e39e2e95SAlasdair G Kergon int r; 23021da177e4SLinus Torvalds 23031da177e4SLinus Torvalds WARN_ON(md->frozen_sb); 2304dfbe03f6SAlasdair G Kergon 2305db8fef4fSMikulas Patocka md->frozen_sb = freeze_bdev(md->bdev); 2306dfbe03f6SAlasdair G Kergon if (IS_ERR(md->frozen_sb)) { 2307cf222b37SAlasdair G Kergon r = PTR_ERR(md->frozen_sb); 2308e39e2e95SAlasdair G Kergon md->frozen_sb = NULL; 2309e39e2e95SAlasdair G Kergon return r; 2310dfbe03f6SAlasdair G Kergon } 2311dfbe03f6SAlasdair G Kergon 2312aa8d7c2fSAlasdair G Kergon set_bit(DMF_FROZEN, &md->flags); 2313aa8d7c2fSAlasdair G Kergon 23141da177e4SLinus Torvalds return 0; 23151da177e4SLinus Torvalds } 23161da177e4SLinus Torvalds 23172ca3310eSAlasdair G Kergon static void unlock_fs(struct mapped_device *md) 23181da177e4SLinus Torvalds { 2319aa8d7c2fSAlasdair G Kergon if (!test_bit(DMF_FROZEN, &md->flags)) 2320aa8d7c2fSAlasdair G Kergon return; 2321aa8d7c2fSAlasdair G Kergon 2322db8fef4fSMikulas Patocka thaw_bdev(md->bdev, md->frozen_sb); 23231da177e4SLinus Torvalds md->frozen_sb = NULL; 2324aa8d7c2fSAlasdair G Kergon clear_bit(DMF_FROZEN, &md->flags); 23251da177e4SLinus Torvalds } 23261da177e4SLinus Torvalds 23271da177e4SLinus Torvalds /* 2328b48633f8SBart Van Assche * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG 2329b48633f8SBart Van Assche * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE 2330b48633f8SBart Van Assche * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY 2331b48633f8SBart Van Assche * 2332ffcc3936SMike Snitzer * If __dm_suspend returns 0, the device is completely quiescent 2333ffcc3936SMike Snitzer * now. There is no request-processing activity. All new requests 2334ffcc3936SMike Snitzer * are being added to md->deferred list. 2335cec47e3dSKiyoshi Ueda */ 2336ffcc3936SMike Snitzer static int __dm_suspend(struct mapped_device *md, struct dm_table *map, 2337b48633f8SBart Van Assche unsigned suspend_flags, long task_state, 2338eaf9a736SMike Snitzer int dmf_suspended_flag) 23391da177e4SLinus Torvalds { 2340ffcc3936SMike Snitzer bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG; 2341ffcc3936SMike Snitzer bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG; 2342ffcc3936SMike Snitzer int r; 2343cf222b37SAlasdair G Kergon 23445a8f1f80SBart Van Assche lockdep_assert_held(&md->suspend_lock); 23455a8f1f80SBart Van Assche 23462e93ccc1SKiyoshi Ueda /* 23472e93ccc1SKiyoshi Ueda * DMF_NOFLUSH_SUSPENDING must be set before presuspend. 23482e93ccc1SKiyoshi Ueda * This flag is cleared before dm_suspend returns. 23492e93ccc1SKiyoshi Ueda */ 23502e93ccc1SKiyoshi Ueda if (noflush) 23512e93ccc1SKiyoshi Ueda set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 235286331f39SBart Van Assche else 235386331f39SBart Van Assche pr_debug("%s: suspending with flush\n", dm_device_name(md)); 23542e93ccc1SKiyoshi Ueda 2355d67ee213SMike Snitzer /* 2356d67ee213SMike Snitzer * This gets reverted if there's an error later and the targets 2357d67ee213SMike Snitzer * provide the .presuspend_undo hook. 2358d67ee213SMike Snitzer */ 23591da177e4SLinus Torvalds dm_table_presuspend_targets(map); 23601da177e4SLinus Torvalds 23612e93ccc1SKiyoshi Ueda /* 23629f518b27SKiyoshi Ueda * Flush I/O to the device. 23639f518b27SKiyoshi Ueda * Any I/O submitted after lock_fs() may not be flushed. 23649f518b27SKiyoshi Ueda * noflush takes precedence over do_lockfs. 
23659f518b27SKiyoshi Ueda * (lock_fs() flushes I/Os and waits for them to complete.) 23662e93ccc1SKiyoshi Ueda */ 236732a926daSMikulas Patocka if (!noflush && do_lockfs) { 23682ca3310eSAlasdair G Kergon r = lock_fs(md); 2369d67ee213SMike Snitzer if (r) { 2370d67ee213SMike Snitzer dm_table_presuspend_undo_targets(map); 2371ffcc3936SMike Snitzer return r; 2372aa8d7c2fSAlasdair G Kergon } 2373d67ee213SMike Snitzer } 23741da177e4SLinus Torvalds 23751da177e4SLinus Torvalds /* 23763b00b203SMikulas Patocka * Here we must make sure that no processes are submitting requests 23773b00b203SMikulas Patocka * to target drivers, i.e., no one may be executing 23783b00b203SMikulas Patocka * __split_and_process_bio. This is called from dm_request and 23793b00b203SMikulas Patocka * dm_wq_work. 23803b00b203SMikulas Patocka * 23813b00b203SMikulas Patocka * To get all processes out of __split_and_process_bio in dm_request, 23823b00b203SMikulas Patocka * we take the write lock. To prevent any process from reentering 23833b00b203SMikulas Patocka * __split_and_process_bio from dm_request, and to quiesce the thread 23846a8736d1STejun Heo * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call 23856a8736d1STejun Heo * flush_workqueue(md->wq). 23861da177e4SLinus Torvalds */ 23871eb787ecSAlasdair G Kergon set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 238841abc4e1SHannes Reinecke if (map) 238983d5e5b0SMikulas Patocka synchronize_srcu(&md->io_barrier); 23901da177e4SLinus Torvalds 2391d0bcb878SKiyoshi Ueda /* 239229e4013dSTejun Heo * Stop md->queue before flushing md->wq in case request-based 239329e4013dSTejun Heo * dm defers requests to md->wq from md->queue. 2394d0bcb878SKiyoshi Ueda */ 23952eb6e1e3SKeith Busch if (dm_request_based(md)) { 2396eca7ee6dSMike Snitzer dm_stop_queue(md->queue); 239702233342SMike Snitzer if (md->kworker_task) 23983989144fSPetr Mladek kthread_flush_worker(&md->kworker); 23992eb6e1e3SKeith Busch } 2400cec47e3dSKiyoshi Ueda 2401d0bcb878SKiyoshi Ueda flush_workqueue(md->wq); 2402d0bcb878SKiyoshi Ueda 24031da177e4SLinus Torvalds /* 24043b00b203SMikulas Patocka * At this point no more requests are entering target request routines. 24053b00b203SMikulas Patocka * We call dm_wait_for_completion to wait for all existing requests 24063b00b203SMikulas Patocka * to finish. 24071da177e4SLinus Torvalds */ 2408b48633f8SBart Van Assche r = dm_wait_for_completion(md, task_state); 2409eaf9a736SMike Snitzer if (!r) 2410eaf9a736SMike Snitzer set_bit(dmf_suspended_flag, &md->flags); 24111da177e4SLinus Torvalds 24126d6f10dfSMilan Broz if (noflush) 2413022c2611SMikulas Patocka clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 241441abc4e1SHannes Reinecke if (map) 241583d5e5b0SMikulas Patocka synchronize_srcu(&md->io_barrier); 24162e93ccc1SKiyoshi Ueda 24171da177e4SLinus Torvalds /* were we interrupted?
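 * (i.e. did dm_wait_for_completion() return an error, typically -EINTR
 * raised by a signal during a TASK_INTERRUPTIBLE wait?)  If so, roll the
 * partial suspend back: requeue the deferred bios, restart the request
 * queue and thaw the filesystem.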
*/ 241846125c1cSMilan Broz if (r < 0) { 24199a1fb464SMikulas Patocka dm_queue_flush(md); 242073d410c0SMilan Broz 2421cec47e3dSKiyoshi Ueda if (dm_request_based(md)) 2422eca7ee6dSMike Snitzer dm_start_queue(md->queue); 2423cec47e3dSKiyoshi Ueda 24242ca3310eSAlasdair G Kergon unlock_fs(md); 2425d67ee213SMike Snitzer dm_table_presuspend_undo_targets(map); 2426ffcc3936SMike Snitzer /* pushback list is already flushed, so skip flush */ 2427ffcc3936SMike Snitzer } 2428ffcc3936SMike Snitzer 2429ffcc3936SMike Snitzer return r; 24302ca3310eSAlasdair G Kergon } 24312ca3310eSAlasdair G Kergon 24323b00b203SMikulas Patocka /* 2433ffcc3936SMike Snitzer * We need to be able to change a mapping table under a mounted 2434ffcc3936SMike Snitzer * filesystem. For example, we might want to move some data in 2435ffcc3936SMike Snitzer * the background. Before the table can be swapped with 2436ffcc3936SMike Snitzer * dm_bind_table, dm_suspend must be called to flush any 2437ffcc3936SMike Snitzer * in-flight bios and ensure that any further I/O gets deferred. 24383b00b203SMikulas Patocka */ 2439ffcc3936SMike Snitzer /* 2440ffcc3936SMike Snitzer * Suspend mechanism in request-based dm. 2441ffcc3936SMike Snitzer * 2442ffcc3936SMike Snitzer * 1. Flush all I/Os by lock_fs() if needed. 2443ffcc3936SMike Snitzer * 2. Stop dispatching any I/O by stopping the request_queue. 2444ffcc3936SMike Snitzer * 3. Wait for all in-flight I/Os to be completed or requeued. 2445ffcc3936SMike Snitzer * 2446ffcc3936SMike Snitzer * To abort suspend, start the request_queue. 2447ffcc3936SMike Snitzer */ 2448ffcc3936SMike Snitzer int dm_suspend(struct mapped_device *md, unsigned suspend_flags) 2449ffcc3936SMike Snitzer { 2450ffcc3936SMike Snitzer struct dm_table *map = NULL; 2451ffcc3936SMike Snitzer int r = 0; 2452ffcc3936SMike Snitzer 2453ffcc3936SMike Snitzer retry: 2454ffcc3936SMike Snitzer mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); 2455ffcc3936SMike Snitzer 2456ffcc3936SMike Snitzer if (dm_suspended_md(md)) { 2457ffcc3936SMike Snitzer r = -EINVAL; 2458ffcc3936SMike Snitzer goto out_unlock; 2459ffcc3936SMike Snitzer } 2460ffcc3936SMike Snitzer 2461ffcc3936SMike Snitzer if (dm_suspended_internally_md(md)) { 2462ffcc3936SMike Snitzer /* already internally suspended, wait for internal resume */ 2463ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 2464ffcc3936SMike Snitzer r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); 2465ffcc3936SMike Snitzer if (r) 2466ffcc3936SMike Snitzer return r; 2467ffcc3936SMike Snitzer goto retry; 2468ffcc3936SMike Snitzer } 2469ffcc3936SMike Snitzer 2470a12f5d48SEric Dumazet map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2471ffcc3936SMike Snitzer 2472eaf9a736SMike Snitzer r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED); 2473ffcc3936SMike Snitzer if (r) 2474ffcc3936SMike Snitzer goto out_unlock; 24753b00b203SMikulas Patocka 24764d4471cbSKiyoshi Ueda dm_table_postsuspend_targets(map); 24774d4471cbSKiyoshi Ueda 2478d287483dSAlasdair G Kergon out_unlock: 2479e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock); 2480cf222b37SAlasdair G Kergon return r; 24811da177e4SLinus Torvalds } 24821da177e4SLinus Torvalds 2483ffcc3936SMike Snitzer static int __dm_resume(struct mapped_device *md, struct dm_table *map) 24841da177e4SLinus Torvalds { 2485ffcc3936SMike Snitzer if (map) { 2486ffcc3936SMike Snitzer int r = dm_table_resume_targets(map); 24878757b776SMilan Broz if (r) 2488ffcc3936SMike Snitzer return r;
2489ffcc3936SMike Snitzer } 24902ca3310eSAlasdair G Kergon 24919a1fb464SMikulas Patocka dm_queue_flush(md); 24922ca3310eSAlasdair G Kergon 2493cec47e3dSKiyoshi Ueda /* 2494cec47e3dSKiyoshi Ueda * Flushing deferred I/Os must be done after targets are resumed 2495cec47e3dSKiyoshi Ueda * so that mapping of targets can work correctly. 2496cec47e3dSKiyoshi Ueda * Request-based dm is queueing the deferred I/Os in its request_queue. 2497cec47e3dSKiyoshi Ueda */ 2498cec47e3dSKiyoshi Ueda if (dm_request_based(md)) 2499eca7ee6dSMike Snitzer dm_start_queue(md->queue); 2500cec47e3dSKiyoshi Ueda 25012ca3310eSAlasdair G Kergon unlock_fs(md); 25022ca3310eSAlasdair G Kergon 2503ffcc3936SMike Snitzer return 0; 2504ffcc3936SMike Snitzer } 2505ffcc3936SMike Snitzer 2506ffcc3936SMike Snitzer int dm_resume(struct mapped_device *md) 2507ffcc3936SMike Snitzer { 25088dc23658SMinfei Huang int r; 2509ffcc3936SMike Snitzer struct dm_table *map = NULL; 2510ffcc3936SMike Snitzer 2511ffcc3936SMike Snitzer retry: 25128dc23658SMinfei Huang r = -EINVAL; 2513ffcc3936SMike Snitzer mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); 2514ffcc3936SMike Snitzer 2515ffcc3936SMike Snitzer if (!dm_suspended_md(md)) 2516ffcc3936SMike Snitzer goto out; 2517ffcc3936SMike Snitzer 2518ffcc3936SMike Snitzer if (dm_suspended_internally_md(md)) { 2519ffcc3936SMike Snitzer /* already internally suspended, wait for internal resume */ 2520ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 2521ffcc3936SMike Snitzer r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); 2522ffcc3936SMike Snitzer if (r) 2523ffcc3936SMike Snitzer return r; 2524ffcc3936SMike Snitzer goto retry; 2525ffcc3936SMike Snitzer } 2526ffcc3936SMike Snitzer 2527a12f5d48SEric Dumazet map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2528ffcc3936SMike Snitzer if (!map || !dm_table_get_size(map)) 2529ffcc3936SMike Snitzer goto out; 2530ffcc3936SMike Snitzer 2531ffcc3936SMike Snitzer r = __dm_resume(md, map); 2532ffcc3936SMike Snitzer if (r) 2533ffcc3936SMike Snitzer goto out; 2534ffcc3936SMike Snitzer 25352ca3310eSAlasdair G Kergon clear_bit(DMF_SUSPENDED, &md->flags); 2536cf222b37SAlasdair G Kergon out: 2537e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock); 25382ca3310eSAlasdair G Kergon 2539cf222b37SAlasdair G Kergon return r; 25401da177e4SLinus Torvalds } 25411da177e4SLinus Torvalds 2542fd2ed4d2SMikulas Patocka /* 2543fd2ed4d2SMikulas Patocka * Internal suspend/resume works like userspace-driven suspend. It waits 2544fd2ed4d2SMikulas Patocka * until all bios finish and prevents issuing new bios to the target drivers. 2545fd2ed4d2SMikulas Patocka * It may be used only from the kernel. 
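 *
 * A hedged usage sketch (in-kernel callers only, using the functions
 * exported below):
 *
 *	dm_internal_suspend_noflush(md);
 *	... touch state that must not race with in-flight bios ...
 *	dm_internal_resume(md);
 *
 * The calls nest: md->internal_suspend_count ensures only the outermost
 * dm_internal_resume() actually restarts I/O.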
2546fd2ed4d2SMikulas Patocka */ 2547fd2ed4d2SMikulas Patocka 2548ffcc3936SMike Snitzer static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags) 2549ffcc3936SMike Snitzer { 2550ffcc3936SMike Snitzer struct dm_table *map = NULL; 2551ffcc3936SMike Snitzer 25521ea0654eSBart Van Assche lockdep_assert_held(&md->suspend_lock); 25531ea0654eSBart Van Assche 255496b26c8cSMikulas Patocka if (md->internal_suspend_count++) 2555ffcc3936SMike Snitzer return; /* nested internal suspend */ 2556ffcc3936SMike Snitzer 2557ffcc3936SMike Snitzer if (dm_suspended_md(md)) { 2558ffcc3936SMike Snitzer set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 2559ffcc3936SMike Snitzer return; /* nest suspend */ 2560ffcc3936SMike Snitzer } 2561ffcc3936SMike Snitzer 2562a12f5d48SEric Dumazet map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2563ffcc3936SMike Snitzer 2564ffcc3936SMike Snitzer /* 2565ffcc3936SMike Snitzer * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is 2566ffcc3936SMike Snitzer * supported. Properly supporting a TASK_INTERRUPTIBLE internal suspend 2567ffcc3936SMike Snitzer * would require changing .presuspend to return an error -- avoid this 2568ffcc3936SMike Snitzer * until there is a need for more elaborate variants of internal suspend. 2569ffcc3936SMike Snitzer */ 2570eaf9a736SMike Snitzer (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE, 2571eaf9a736SMike Snitzer DMF_SUSPENDED_INTERNALLY); 2572ffcc3936SMike Snitzer 2573ffcc3936SMike Snitzer dm_table_postsuspend_targets(map); 2574ffcc3936SMike Snitzer } 2575ffcc3936SMike Snitzer 2576ffcc3936SMike Snitzer static void __dm_internal_resume(struct mapped_device *md) 2577ffcc3936SMike Snitzer { 257896b26c8cSMikulas Patocka BUG_ON(!md->internal_suspend_count); 257996b26c8cSMikulas Patocka 258096b26c8cSMikulas Patocka if (--md->internal_suspend_count) 2581ffcc3936SMike Snitzer return; /* resume from nested internal suspend */ 2582ffcc3936SMike Snitzer 2583ffcc3936SMike Snitzer if (dm_suspended_md(md)) 2584ffcc3936SMike Snitzer goto done; /* resume from nested suspend */ 2585ffcc3936SMike Snitzer 2586ffcc3936SMike Snitzer /* 2587ffcc3936SMike Snitzer * NOTE: existing callers don't need to call dm_table_resume_targets 2588ffcc3936SMike Snitzer * (which may fail -- so best to avoid it for now by passing NULL map) 2589ffcc3936SMike Snitzer */ 2590ffcc3936SMike Snitzer (void) __dm_resume(md, NULL); 2591ffcc3936SMike Snitzer 2592ffcc3936SMike Snitzer done: 2593ffcc3936SMike Snitzer clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 2594ffcc3936SMike Snitzer smp_mb__after_atomic(); 2595ffcc3936SMike Snitzer wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY); 2596ffcc3936SMike Snitzer } 2597ffcc3936SMike Snitzer 2598ffcc3936SMike Snitzer void dm_internal_suspend_noflush(struct mapped_device *md) 2599fd2ed4d2SMikulas Patocka { 2600fd2ed4d2SMikulas Patocka mutex_lock(&md->suspend_lock); 2601ffcc3936SMike Snitzer __dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG); 2602ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 2603ffcc3936SMike Snitzer } 2604ffcc3936SMike Snitzer EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush); 2605ffcc3936SMike Snitzer 2606ffcc3936SMike Snitzer void dm_internal_resume(struct mapped_device *md) 2607ffcc3936SMike Snitzer { 2608ffcc3936SMike Snitzer mutex_lock(&md->suspend_lock); 2609ffcc3936SMike Snitzer __dm_internal_resume(md); 2610ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 2611ffcc3936SMike Snitzer } 2612ffcc3936SMike Snitzer 
EXPORT_SYMBOL_GPL(dm_internal_resume); 2613ffcc3936SMike Snitzer 2614ffcc3936SMike Snitzer /* 2615ffcc3936SMike Snitzer * Fast variants of internal suspend/resume hold md->suspend_lock, 2616ffcc3936SMike Snitzer * which prevents interaction with userspace-driven suspend. 2617ffcc3936SMike Snitzer */ 2618ffcc3936SMike Snitzer 2619ffcc3936SMike Snitzer void dm_internal_suspend_fast(struct mapped_device *md) 2620ffcc3936SMike Snitzer { 2621ffcc3936SMike Snitzer mutex_lock(&md->suspend_lock); 2622ffcc3936SMike Snitzer if (dm_suspended_md(md) || dm_suspended_internally_md(md)) 2623fd2ed4d2SMikulas Patocka return; 2624fd2ed4d2SMikulas Patocka 2625fd2ed4d2SMikulas Patocka set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 2626fd2ed4d2SMikulas Patocka synchronize_srcu(&md->io_barrier); 2627fd2ed4d2SMikulas Patocka flush_workqueue(md->wq); 2628fd2ed4d2SMikulas Patocka dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); 2629fd2ed4d2SMikulas Patocka } 2630b735fedeSMikulas Patocka EXPORT_SYMBOL_GPL(dm_internal_suspend_fast); 2631fd2ed4d2SMikulas Patocka 2632ffcc3936SMike Snitzer void dm_internal_resume_fast(struct mapped_device *md) 2633fd2ed4d2SMikulas Patocka { 2634ffcc3936SMike Snitzer if (dm_suspended_md(md) || dm_suspended_internally_md(md)) 2635fd2ed4d2SMikulas Patocka goto done; 2636fd2ed4d2SMikulas Patocka 2637fd2ed4d2SMikulas Patocka dm_queue_flush(md); 2638fd2ed4d2SMikulas Patocka 2639fd2ed4d2SMikulas Patocka done: 2640fd2ed4d2SMikulas Patocka mutex_unlock(&md->suspend_lock); 2641fd2ed4d2SMikulas Patocka } 2642b735fedeSMikulas Patocka EXPORT_SYMBOL_GPL(dm_internal_resume_fast); 2643fd2ed4d2SMikulas Patocka 26441da177e4SLinus Torvalds /*----------------------------------------------------------------- 26451da177e4SLinus Torvalds * Event notification. 26461da177e4SLinus Torvalds *---------------------------------------------------------------*/ 26473abf85b5SPeter Rajnoha int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, 264860935eb2SMilan Broz unsigned cookie) 264969267a30SAlasdair G Kergon { 265060935eb2SMilan Broz char udev_cookie[DM_COOKIE_LENGTH]; 265160935eb2SMilan Broz char *envp[] = { udev_cookie, NULL }; 265260935eb2SMilan Broz 265360935eb2SMilan Broz if (!cookie) 26543abf85b5SPeter Rajnoha return kobject_uevent(&disk_to_dev(md->disk)->kobj, action); 265560935eb2SMilan Broz else { 265660935eb2SMilan Broz snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u", 265760935eb2SMilan Broz DM_COOKIE_ENV_VAR_NAME, cookie); 26583abf85b5SPeter Rajnoha return kobject_uevent_env(&disk_to_dev(md->disk)->kobj, 26593abf85b5SPeter Rajnoha action, envp); 266060935eb2SMilan Broz } 266169267a30SAlasdair G Kergon } 266269267a30SAlasdair G Kergon 26637a8c3d3bSMike Anderson uint32_t dm_next_uevent_seq(struct mapped_device *md) 26647a8c3d3bSMike Anderson { 26657a8c3d3bSMike Anderson return atomic_add_return(1, &md->uevent_seq); 26667a8c3d3bSMike Anderson } 26677a8c3d3bSMike Anderson 26681da177e4SLinus Torvalds uint32_t dm_get_event_nr(struct mapped_device *md) 26691da177e4SLinus Torvalds { 26701da177e4SLinus Torvalds return atomic_read(&md->event_nr); 26711da177e4SLinus Torvalds } 26721da177e4SLinus Torvalds 26731da177e4SLinus Torvalds int dm_wait_event(struct mapped_device *md, int event_nr) 26741da177e4SLinus Torvalds { 26751da177e4SLinus Torvalds return wait_event_interruptible(md->eventq, 26761da177e4SLinus Torvalds (event_nr != atomic_read(&md->event_nr))); 26771da177e4SLinus Torvalds } 26781da177e4SLinus Torvalds 26797a8c3d3bSMike Anderson void dm_uevent_add(struct mapped_device *md, 
struct list_head *elist) 26807a8c3d3bSMike Anderson { 26817a8c3d3bSMike Anderson unsigned long flags; 26827a8c3d3bSMike Anderson 26837a8c3d3bSMike Anderson spin_lock_irqsave(&md->uevent_lock, flags); 26847a8c3d3bSMike Anderson list_add(elist, &md->uevent_list); 26857a8c3d3bSMike Anderson spin_unlock_irqrestore(&md->uevent_lock, flags); 26867a8c3d3bSMike Anderson } 26877a8c3d3bSMike Anderson 26881da177e4SLinus Torvalds /* 26891da177e4SLinus Torvalds * The gendisk is only valid as long as you have a reference 26901da177e4SLinus Torvalds * count on 'md'. 26911da177e4SLinus Torvalds */ 26921da177e4SLinus Torvalds struct gendisk *dm_disk(struct mapped_device *md) 26931da177e4SLinus Torvalds { 26941da177e4SLinus Torvalds return md->disk; 26951da177e4SLinus Torvalds } 269665ff5b7dSSami Tolvanen EXPORT_SYMBOL_GPL(dm_disk); 26971da177e4SLinus Torvalds 2698784aae73SMilan Broz struct kobject *dm_kobject(struct mapped_device *md) 2699784aae73SMilan Broz { 27002995fa78SMikulas Patocka return &md->kobj_holder.kobj; 2701784aae73SMilan Broz } 2702784aae73SMilan Broz 2703784aae73SMilan Broz struct mapped_device *dm_get_from_kobject(struct kobject *kobj) 2704784aae73SMilan Broz { 2705784aae73SMilan Broz struct mapped_device *md; 2706784aae73SMilan Broz 27072995fa78SMikulas Patocka md = container_of(kobj, struct mapped_device, kobj_holder.kobj); 2708784aae73SMilan Broz 2709b9a41d21SHou Tao spin_lock(&_minor_lock); 2710b9a41d21SHou Tao if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { 2711b9a41d21SHou Tao md = NULL; 2712b9a41d21SHou Tao goto out; 2713b9a41d21SHou Tao } 2714784aae73SMilan Broz dm_get(md); 2715b9a41d21SHou Tao out: 2716b9a41d21SHou Tao spin_unlock(&_minor_lock); 2717b9a41d21SHou Tao 2718784aae73SMilan Broz return md; 2719784aae73SMilan Broz } 2720784aae73SMilan Broz 27214f186f8bSKiyoshi Ueda int dm_suspended_md(struct mapped_device *md) 27221da177e4SLinus Torvalds { 27231da177e4SLinus Torvalds return test_bit(DMF_SUSPENDED, &md->flags); 27241da177e4SLinus Torvalds } 27251da177e4SLinus Torvalds 2726ffcc3936SMike Snitzer int dm_suspended_internally_md(struct mapped_device *md) 2727ffcc3936SMike Snitzer { 2728ffcc3936SMike Snitzer return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 2729ffcc3936SMike Snitzer } 2730ffcc3936SMike Snitzer 27312c140a24SMikulas Patocka int dm_test_deferred_remove_flag(struct mapped_device *md) 27322c140a24SMikulas Patocka { 27332c140a24SMikulas Patocka return test_bit(DMF_DEFERRED_REMOVE, &md->flags); 27342c140a24SMikulas Patocka } 27352c140a24SMikulas Patocka 273664dbce58SKiyoshi Ueda int dm_suspended(struct dm_target *ti) 273764dbce58SKiyoshi Ueda { 2738ecdb2e25SKiyoshi Ueda return dm_suspended_md(dm_table_get_md(ti->table)); 273964dbce58SKiyoshi Ueda } 274064dbce58SKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_suspended); 274164dbce58SKiyoshi Ueda 27422e93ccc1SKiyoshi Ueda int dm_noflush_suspending(struct dm_target *ti) 27432e93ccc1SKiyoshi Ueda { 2744ecdb2e25SKiyoshi Ueda return __noflush_suspending(dm_table_get_md(ti->table)); 27452e93ccc1SKiyoshi Ueda } 27462e93ccc1SKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_noflush_suspending); 27472e93ccc1SKiyoshi Ueda 27487e0d574fSBart Van Assche struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type, 274930187e1dSMike Snitzer unsigned integrity, unsigned per_io_data_size) 2750e6ee8c0bSKiyoshi Ueda { 2751115485e8SMike Snitzer struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id); 275278d8e58aSMike Snitzer unsigned int pool_size = 0; 27535f015204SJun'ichi 
Nomura unsigned int front_pad; 2754e6ee8c0bSKiyoshi Ueda 2755e6ee8c0bSKiyoshi Ueda if (!pools) 27564e6e36c3SMike Snitzer return NULL; 2757e6ee8c0bSKiyoshi Ueda 275878d8e58aSMike Snitzer switch (type) { 275978d8e58aSMike Snitzer case DM_TYPE_BIO_BASED: 2760545ed20eSToshi Kani case DM_TYPE_DAX_BIO_BASED: 276178d8e58aSMike Snitzer pool_size = dm_get_reserved_bio_based_ios(); 276230187e1dSMike Snitzer front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone); 2763eb8db831SChristoph Hellwig 2764eb8db831SChristoph Hellwig pools->io_pool = mempool_create_slab_pool(pool_size, _io_cache); 2765eb8db831SChristoph Hellwig if (!pools->io_pool) 2766eb8db831SChristoph Hellwig goto out; 276778d8e58aSMike Snitzer break; 276878d8e58aSMike Snitzer case DM_TYPE_REQUEST_BASED: 276978d8e58aSMike Snitzer case DM_TYPE_MQ_REQUEST_BASED: 277078d8e58aSMike Snitzer pool_size = dm_get_reserved_rq_based_ios(); 277178d8e58aSMike Snitzer front_pad = offsetof(struct dm_rq_clone_bio_info, clone); 2772591ddcfcSMike Snitzer /* per_io_data_size is used for blk-mq pdu at queue allocation */ 277378d8e58aSMike Snitzer break; 277478d8e58aSMike Snitzer default: 277578d8e58aSMike Snitzer BUG(); 277678d8e58aSMike Snitzer } 277778d8e58aSMike Snitzer 277847e0fb46SNeilBrown pools->bs = bioset_create(pool_size, front_pad, BIOSET_NEED_RESCUER); 2779e6ee8c0bSKiyoshi Ueda if (!pools->bs) 27805f015204SJun'ichi Nomura goto out; 2781e6ee8c0bSKiyoshi Ueda 2782a91a2785SMartin K. Petersen if (integrity && bioset_integrity_create(pools->bs, pool_size)) 27835f015204SJun'ichi Nomura goto out; 2784a91a2785SMartin K. Petersen 2785e6ee8c0bSKiyoshi Ueda return pools; 278678d8e58aSMike Snitzer 27875f015204SJun'ichi Nomura out: 27885f015204SJun'ichi Nomura dm_free_md_mempools(pools); 2789e6ee8c0bSKiyoshi Ueda 27904e6e36c3SMike Snitzer return NULL; 2791e6ee8c0bSKiyoshi Ueda } 2792e6ee8c0bSKiyoshi Ueda 2793e6ee8c0bSKiyoshi Ueda void dm_free_md_mempools(struct dm_md_mempools *pools) 2794e6ee8c0bSKiyoshi Ueda { 2795e6ee8c0bSKiyoshi Ueda if (!pools) 2796e6ee8c0bSKiyoshi Ueda return; 2797e6ee8c0bSKiyoshi Ueda 2798e6ee8c0bSKiyoshi Ueda mempool_destroy(pools->io_pool); 27991ae49ea2SMike Snitzer 2800e6ee8c0bSKiyoshi Ueda if (pools->bs) 2801e6ee8c0bSKiyoshi Ueda bioset_free(pools->bs); 2802e6ee8c0bSKiyoshi Ueda 2803e6ee8c0bSKiyoshi Ueda kfree(pools); 2804e6ee8c0bSKiyoshi Ueda } 2805e6ee8c0bSKiyoshi Ueda 28069c72bad1SChristoph Hellwig struct dm_pr { 28079c72bad1SChristoph Hellwig u64 old_key; 28089c72bad1SChristoph Hellwig u64 new_key; 28099c72bad1SChristoph Hellwig u32 flags; 28109c72bad1SChristoph Hellwig bool fail_early; 28119c72bad1SChristoph Hellwig }; 28129c72bad1SChristoph Hellwig 28139c72bad1SChristoph Hellwig static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn, 28149c72bad1SChristoph Hellwig void *data) 28159c72bad1SChristoph Hellwig { 28169c72bad1SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data; 28179c72bad1SChristoph Hellwig struct dm_table *table; 28189c72bad1SChristoph Hellwig struct dm_target *ti; 28199c72bad1SChristoph Hellwig int ret = -ENOTTY, srcu_idx; 28209c72bad1SChristoph Hellwig 28219c72bad1SChristoph Hellwig table = dm_get_live_table(md, &srcu_idx); 28229c72bad1SChristoph Hellwig if (!table || !dm_table_get_size(table)) 28239c72bad1SChristoph Hellwig goto out; 28249c72bad1SChristoph Hellwig 28259c72bad1SChristoph Hellwig /* We only support devices that have a single target */ 28269c72bad1SChristoph Hellwig if 
(dm_table_get_num_targets(table) != 1) 28279c72bad1SChristoph Hellwig goto out; 28289c72bad1SChristoph Hellwig ti = dm_table_get_target(table, 0); 28299c72bad1SChristoph Hellwig 28309c72bad1SChristoph Hellwig ret = -EINVAL; 28319c72bad1SChristoph Hellwig if (!ti->type->iterate_devices) 28329c72bad1SChristoph Hellwig goto out; 28339c72bad1SChristoph Hellwig 28349c72bad1SChristoph Hellwig ret = ti->type->iterate_devices(ti, fn, data); 28359c72bad1SChristoph Hellwig out: 28369c72bad1SChristoph Hellwig dm_put_live_table(md, srcu_idx); 28379c72bad1SChristoph Hellwig return ret; 28389c72bad1SChristoph Hellwig } 28399c72bad1SChristoph Hellwig 28409c72bad1SChristoph Hellwig /* 28419c72bad1SChristoph Hellwig * For register / unregister we need to manually call out to every path. 28429c72bad1SChristoph Hellwig */ 28439c72bad1SChristoph Hellwig static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev, 28449c72bad1SChristoph Hellwig sector_t start, sector_t len, void *data) 28459c72bad1SChristoph Hellwig { 28469c72bad1SChristoph Hellwig struct dm_pr *pr = data; 28479c72bad1SChristoph Hellwig const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; 28489c72bad1SChristoph Hellwig 28499c72bad1SChristoph Hellwig if (!ops || !ops->pr_register) 28509c72bad1SChristoph Hellwig return -EOPNOTSUPP; 28519c72bad1SChristoph Hellwig return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags); 28529c72bad1SChristoph Hellwig } 28539c72bad1SChristoph Hellwig 285471cdb697SChristoph Hellwig static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key, 285571cdb697SChristoph Hellwig u32 flags) 285671cdb697SChristoph Hellwig { 28579c72bad1SChristoph Hellwig struct dm_pr pr = { 28589c72bad1SChristoph Hellwig .old_key = old_key, 28599c72bad1SChristoph Hellwig .new_key = new_key, 28609c72bad1SChristoph Hellwig .flags = flags, 28619c72bad1SChristoph Hellwig .fail_early = true, 28629c72bad1SChristoph Hellwig }; 28639c72bad1SChristoph Hellwig int ret; 286471cdb697SChristoph Hellwig 28659c72bad1SChristoph Hellwig ret = dm_call_pr(bdev, __dm_pr_register, &pr); 28669c72bad1SChristoph Hellwig if (ret && new_key) { 28679c72bad1SChristoph Hellwig /* unregister all paths if we failed to register any path */ 28689c72bad1SChristoph Hellwig pr.old_key = new_key; 28699c72bad1SChristoph Hellwig pr.new_key = 0; 28709c72bad1SChristoph Hellwig pr.flags = 0; 28719c72bad1SChristoph Hellwig pr.fail_early = false; 28729c72bad1SChristoph Hellwig dm_call_pr(bdev, __dm_pr_register, &pr); 28739c72bad1SChristoph Hellwig } 287471cdb697SChristoph Hellwig 28759c72bad1SChristoph Hellwig return ret; 287671cdb697SChristoph Hellwig } 287771cdb697SChristoph Hellwig 287871cdb697SChristoph Hellwig static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type, 287971cdb697SChristoph Hellwig u32 flags) 288071cdb697SChristoph Hellwig { 288171cdb697SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data; 288271cdb697SChristoph Hellwig const struct pr_ops *ops; 288371cdb697SChristoph Hellwig fmode_t mode; 2884956a4025SMike Snitzer int r; 288571cdb697SChristoph Hellwig 2886956a4025SMike Snitzer r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); 288771cdb697SChristoph Hellwig if (r < 0) 288871cdb697SChristoph Hellwig return r; 288971cdb697SChristoph Hellwig 289071cdb697SChristoph Hellwig ops = bdev->bd_disk->fops->pr_ops; 289171cdb697SChristoph Hellwig if (ops && ops->pr_reserve) 289271cdb697SChristoph Hellwig r = ops->pr_reserve(bdev, key, type, flags); 289371cdb697SChristoph Hellwig else 
289471cdb697SChristoph Hellwig r = -EOPNOTSUPP; 289571cdb697SChristoph Hellwig 2896956a4025SMike Snitzer bdput(bdev); 289771cdb697SChristoph Hellwig return r; 289871cdb697SChristoph Hellwig } 289971cdb697SChristoph Hellwig 290071cdb697SChristoph Hellwig static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type) 290171cdb697SChristoph Hellwig { 290271cdb697SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data; 290371cdb697SChristoph Hellwig const struct pr_ops *ops; 290471cdb697SChristoph Hellwig fmode_t mode; 2905956a4025SMike Snitzer int r; 290671cdb697SChristoph Hellwig 2907956a4025SMike Snitzer r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); 290871cdb697SChristoph Hellwig if (r < 0) 290971cdb697SChristoph Hellwig return r; 291071cdb697SChristoph Hellwig 291171cdb697SChristoph Hellwig ops = bdev->bd_disk->fops->pr_ops; 291271cdb697SChristoph Hellwig if (ops && ops->pr_release) 291371cdb697SChristoph Hellwig r = ops->pr_release(bdev, key, type); 291471cdb697SChristoph Hellwig else 291571cdb697SChristoph Hellwig r = -EOPNOTSUPP; 291671cdb697SChristoph Hellwig 2917956a4025SMike Snitzer bdput(bdev); 291871cdb697SChristoph Hellwig return r; 291971cdb697SChristoph Hellwig } 292071cdb697SChristoph Hellwig 292171cdb697SChristoph Hellwig static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key, 292271cdb697SChristoph Hellwig enum pr_type type, bool abort) 292371cdb697SChristoph Hellwig { 292471cdb697SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data; 292571cdb697SChristoph Hellwig const struct pr_ops *ops; 292671cdb697SChristoph Hellwig fmode_t mode; 2927956a4025SMike Snitzer int r; 292871cdb697SChristoph Hellwig 2929956a4025SMike Snitzer r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); 293071cdb697SChristoph Hellwig if (r < 0) 293171cdb697SChristoph Hellwig return r; 293271cdb697SChristoph Hellwig 293371cdb697SChristoph Hellwig ops = bdev->bd_disk->fops->pr_ops; 293471cdb697SChristoph Hellwig if (ops && ops->pr_preempt) 293571cdb697SChristoph Hellwig r = ops->pr_preempt(bdev, old_key, new_key, type, abort); 293671cdb697SChristoph Hellwig else 293771cdb697SChristoph Hellwig r = -EOPNOTSUPP; 293871cdb697SChristoph Hellwig 2939956a4025SMike Snitzer bdput(bdev); 294071cdb697SChristoph Hellwig return r; 294171cdb697SChristoph Hellwig } 294271cdb697SChristoph Hellwig 294371cdb697SChristoph Hellwig static int dm_pr_clear(struct block_device *bdev, u64 key) 294471cdb697SChristoph Hellwig { 294571cdb697SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data; 294671cdb697SChristoph Hellwig const struct pr_ops *ops; 294771cdb697SChristoph Hellwig fmode_t mode; 2948956a4025SMike Snitzer int r; 294971cdb697SChristoph Hellwig 2950956a4025SMike Snitzer r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); 295171cdb697SChristoph Hellwig if (r < 0) 295271cdb697SChristoph Hellwig return r; 295371cdb697SChristoph Hellwig 295471cdb697SChristoph Hellwig ops = bdev->bd_disk->fops->pr_ops; 295571cdb697SChristoph Hellwig if (ops && ops->pr_clear) 295671cdb697SChristoph Hellwig r = ops->pr_clear(bdev, key); 295771cdb697SChristoph Hellwig else 295871cdb697SChristoph Hellwig r = -EOPNOTSUPP; 295971cdb697SChristoph Hellwig 2960956a4025SMike Snitzer bdput(bdev); 296171cdb697SChristoph Hellwig return r; 296271cdb697SChristoph Hellwig } 296371cdb697SChristoph Hellwig 296471cdb697SChristoph Hellwig static const struct pr_ops dm_pr_ops = { 296571cdb697SChristoph Hellwig .pr_register = dm_pr_register, 296671cdb697SChristoph Hellwig 
.pr_reserve = dm_pr_reserve, 296771cdb697SChristoph Hellwig .pr_release = dm_pr_release, 296871cdb697SChristoph Hellwig .pr_preempt = dm_pr_preempt, 296971cdb697SChristoph Hellwig .pr_clear = dm_pr_clear, 297071cdb697SChristoph Hellwig }; 297171cdb697SChristoph Hellwig 297283d5cde4SAlexey Dobriyan static const struct block_device_operations dm_blk_dops = { 29731da177e4SLinus Torvalds .open = dm_blk_open, 29741da177e4SLinus Torvalds .release = dm_blk_close, 2975aa129a22SMilan Broz .ioctl = dm_blk_ioctl, 29763ac51e74SDarrick J. Wong .getgeo = dm_blk_getgeo, 297771cdb697SChristoph Hellwig .pr_ops = &dm_pr_ops, 29781da177e4SLinus Torvalds .owner = THIS_MODULE 29791da177e4SLinus Torvalds }; 29801da177e4SLinus Torvalds 2981f26c5719SDan Williams static const struct dax_operations dm_dax_ops = { 2982f26c5719SDan Williams .direct_access = dm_dax_direct_access, 29837e026c8cSDan Williams .copy_from_iter = dm_dax_copy_from_iter, 2984f26c5719SDan Williams }; 2985f26c5719SDan Williams 29861da177e4SLinus Torvalds /* 29871da177e4SLinus Torvalds * module hooks 29881da177e4SLinus Torvalds */ 29891da177e4SLinus Torvalds module_init(dm_init); 29901da177e4SLinus Torvalds module_exit(dm_exit); 29911da177e4SLinus Torvalds 29921da177e4SLinus Torvalds module_param(major, uint, 0); 29931da177e4SLinus Torvalds MODULE_PARM_DESC(major, "The major number of the device mapper"); 2994f4790826SMike Snitzer 2995e8603136SMike Snitzer module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR); 2996e8603136SMike Snitzer MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools"); 2997e8603136SMike Snitzer 2998115485e8SMike Snitzer module_param(dm_numa_node, int, S_IRUGO | S_IWUSR); 2999115485e8SMike Snitzer MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations"); 3000115485e8SMike Snitzer 30011da177e4SLinus Torvalds MODULE_DESCRIPTION(DM_NAME " driver"); 30021da177e4SLinus Torvalds MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); 30031da177e4SLinus Torvalds MODULE_LICENSE("GPL"); 3004
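/*
 * Illustrative load-time tuning; the parameter names are the
 * module_param()s declared above, and the values are arbitrary examples:
 *
 *	modprobe dm_mod major=240 reserved_bio_based_ios=32 dm_numa_node=0
 */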