/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched/signal.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/uio.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/pr.h>
#include <linux/refcount.h>

#define DM_MSG_PREFIX "core"

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_IDR(_minor_idr);

static DEFINE_SPINLOCK(_minor_lock);

static void do_deferred_remove(struct work_struct *w);

static DECLARE_WORK(deferred_remove_work, do_deferred_remove);

static struct workqueue_struct *deferred_remove_workqueue;

atomic_t dm_global_event_nr = ATOMIC_INIT(0);
DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);

void dm_issue_global_event(void)
{
	atomic_inc(&dm_global_event_nr);
	wake_up(&dm_global_eventq);
}

/*
 * One of these is allocated (on-stack) per original bio.
 */
struct clone_info {
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	unsigned sector_count;
};
/*
 * One of these is allocated per clone bio.
 */
#define DM_TIO_MAGIC 7282014
struct dm_target_io {
	unsigned magic;
	struct dm_io *io;
	struct dm_target *ti;
	unsigned target_bio_nr;
	unsigned *len_ptr;
	bool inside_dm_io;
	struct bio clone;
};

/*
 * One of these is allocated per original bio.
 * It contains the first clone used for that original.
 */
#define DM_IO_MAGIC 5191977
struct dm_io {
	unsigned magic;
	struct mapped_device *md;
	blk_status_t status;
	atomic_t io_count;
	struct bio *orig_bio;
	unsigned long start_time;
	spinlock_t endio_lock;
	struct dm_stats_aux stats_aux;
	/* last member of dm_target_io is 'struct bio' */
	struct dm_target_io tio;
};

void *dm_per_bio_data(struct bio *bio, size_t data_size)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	if (!tio->inside_dm_io)
		return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
	return (char *)bio - offsetof(struct dm_target_io, clone) - offsetof(struct dm_io, tio) - data_size;
}
EXPORT_SYMBOL_GPL(dm_per_bio_data);

struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
{
	struct dm_io *io = (struct dm_io *)((char *)data + data_size);
	if (io->magic == DM_IO_MAGIC)
		return (struct bio *)((char *)io + offsetof(struct dm_io, tio) + offsetof(struct dm_target_io, clone));
	BUG_ON(io->magic != DM_TIO_MAGIC);
	return (struct bio *)((char *)io + offsetof(struct dm_target_io, clone));
}
EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data);

unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
{
	return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
}
EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);

#define MINOR_ALLOCED ((void *)-1)
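/*
 * Illustrative memory-layout sketch for the per-bio-data arithmetic above.
 * This is not authoritative documentation; it simply follows from the
 * offsetof() walks in dm_per_bio_data() and dm_bio_from_per_bio_data():
 *
 *   clone front-padded from md->io_bs (tio->inside_dm_io == true):
 *     [ per-bio data ][ struct dm_io { ... struct dm_target_io tio { ... struct bio clone } } ]
 *
 *   clone front-padded from md->bs (tio->inside_dm_io == false):
 *     [ per-bio data ][ struct dm_target_io { ... struct bio clone } ]
 *
 * dm_per_bio_data() steps backwards from &clone to the start of the
 * enclosing structure and then over the target's per-bio data area;
 * dm_bio_from_per_bio_data() performs the inverse walk, using the two
 * magic numbers to tell the layouts apart.
 */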
/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7

#define DM_NUMA_NODE NUMA_NO_NODE
static int dm_numa_node = DM_NUMA_NODE;

/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
	struct bio_set bs;
	struct bio_set io_bs;
};

struct table_device {
	struct list_head list;
	refcount_t count;
	struct dm_dev dm_dev;
};

/*
 * Bio-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_BIO_BASED_IOS		16
static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;

static int __dm_get_module_param_int(int *module_param, int min, int max)
{
	int param = READ_ONCE(*module_param);
	int modified_param = 0;
	bool modified = true;

	if (param < min)
		modified_param = min;
	else if (param > max)
		modified_param = max;
	else
		modified = false;

	if (modified) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned __dm_get_module_param(unsigned *module_param,
			       unsigned def, unsigned max)
{
	unsigned param = READ_ONCE(*module_param);
	unsigned modified_param = 0;

	if (!param)
		modified_param = def;
	else if (param > max)
		modified_param = max;

	if (modified_param) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned dm_get_reserved_bio_based_ios(void)
{
	return __dm_get_module_param(&reserved_bio_based_ios,
				     RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);
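/*
 * A sketch of the clamping behaviour above, for illustration only: if the
 * user sets reserved_bio_based_ios to 0 via the module parameter, the
 * __dm_get_module_param() call substitutes the default of 16; a value above
 * DM_RESERVED_MAX_IOS is capped to that maximum. The lockless cmpxchg()
 * opportunistically writes the corrected value back so later readers (and
 * sysfs) observe the value actually in use, without serializing readers.
 */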

static unsigned dm_get_numa_node(void)
{
	return __dm_get_module_param_int(&dm_numa_node,
					 DM_NUMA_NODE, num_online_nodes() - 1);
}

static int __init local_init(void)
{
	int r;

	r = dm_uevent_init();
	if (r)
		return r;

	deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
	if (!deferred_remove_workqueue) {
		r = -ENOMEM;
		goto out_uevent_exit;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_free_workqueue;

	if (!_major)
		_major = r;

	return 0;

out_free_workqueue:
	destroy_workqueue(deferred_remove_workqueue);
out_uevent_exit:
	dm_uevent_exit();

	return r;
}

static void local_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(deferred_remove_workqueue);

	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_io_init,
	dm_kcopyd_init,
	dm_interface_init,
	dm_statistics_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_io_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
	dm_statistics_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);
	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();

	/*
	 * Should be empty by this point.
	 */
	idr_destroy(&_minor_idr);
}

/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{
	return test_bit(DMF_DELETING, &md->flags);
}

static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);
out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}

static void dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = disk->private_data;
	if (WARN_ON(!md))
		goto out;

	if (atomic_dec_and_test(&md->open_count) &&
	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
		queue_work(deferred_remove_workqueue, &deferred_remove_work);

	dm_put(md);
out:
	spin_unlock(&_minor_lock);
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md)) {
		r = -EBUSY;
		if (mark_deferred)
			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
		r = -EEXIST;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

int dm_cancel_deferred_remove(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (test_bit(DMF_DELETING, &md->flags))
		r = -EBUSY;
	else
		clear_bit(DMF_DEFERRED_REMOVE, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static void do_deferred_remove(struct work_struct *w)
{
	dm_deferred_remove();
}

sector_t dm_get_size(struct mapped_device *md)
{
	return get_capacity(md->disk);
}

struct request_queue *dm_get_md_queue(struct mapped_device *md)
{
	return md->queue;
}

struct dm_stats *dm_get_stats(struct mapped_device *md)
{
	return &md->stats;
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

static int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
			       struct blk_zone *zones, unsigned int *nr_zones)
{
#ifdef CONFIG_BLK_DEV_ZONED
	struct mapped_device *md = disk->private_data;
	struct dm_target *tgt;
	struct dm_table *map;
	int srcu_idx, ret;

	if (dm_suspended_md(md))
		return -EAGAIN;

	map = dm_get_live_table(md, &srcu_idx);
	if (!map)
		return -EIO;

	tgt = dm_table_find_target(map, sector);
	if (!tgt) {
		ret = -EIO;
		goto out;
	}

	/*
	 * If we are executing this, we already know that the block device
	 * is a zoned device and so each target should have support for that
	 * type of drive. A missing report_zones method means that the target
	 * driver has a problem.
	 */
	if (WARN_ON(!tgt->type->report_zones)) {
		ret = -EIO;
		goto out;
	}

	/*
	 * blkdev_report_zones() will loop and call this again to cover all the
	 * zones of the target, eventually moving on to the next target.
	 * So there is no need to loop here trying to fill the entire array
	 * of zones.
	 */
	ret = tgt->type->report_zones(tgt, sector, zones, nr_zones);

out:
	dm_put_live_table(md, srcu_idx);
	return ret;
#else
	return -ENOTSUPP;
#endif
}

static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
			    struct block_device **bdev)
	__acquires(md->io_barrier)
{
	struct dm_target *tgt;
	struct dm_table *map;
	int r;

retry:
	r = -ENOTTY;
	map = dm_get_live_table(md, srcu_idx);
	if (!map || !dm_table_get_size(map))
		return r;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		return r;

	tgt = dm_table_get_target(map, 0);
	if (!tgt->type->prepare_ioctl)
		return r;

	if (dm_suspended_md(md))
		return -EAGAIN;

	r = tgt->type->prepare_ioctl(tgt, bdev);
	if (r == -ENOTCONN && !fatal_signal_pending(current)) {
		dm_put_live_table(md, *srcu_idx);
		msleep(10);
		goto retry;
	}

	return r;
}

static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
	__releases(md->io_barrier)
{
	dm_put_live_table(md, srcu_idx);
}

static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	if (r > 0) {
		/*
		 * Target determined this ioctl is being issued against a
		 * subset of the parent bdev; require extra privileges.
		 */
		if (!capable(CAP_SYS_RAWIO)) {
			DMWARN_LIMIT("%s: sending ioctl %x to DM device without required privilege.",
				     current->comm, cmd);
			r = -ENOIOCTLCMD;
			goto out;
		}
	}

	r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}

static void start_io_acct(struct dm_io *io);

static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
{
	struct dm_io *io;
	struct dm_target_io *tio;
	struct bio *clone;

	clone = bio_alloc_bioset(GFP_NOIO, 0, &md->io_bs);
	if (!clone)
		return NULL;

	tio = container_of(clone, struct dm_target_io, clone);
	tio->inside_dm_io = true;
	tio->io = NULL;

	io = container_of(tio, struct dm_io, tio);
	io->magic = DM_IO_MAGIC;
	io->status = 0;
	atomic_set(&io->io_count, 1);
	io->orig_bio = bio;
	io->md = md;
	spin_lock_init(&io->endio_lock);

	start_io_acct(io);

	return io;
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	bio_put(&io->tio.clone);
}

static struct dm_target_io *alloc_tio(struct clone_info *ci, struct dm_target *ti,
				      unsigned target_bio_nr, gfp_t gfp_mask)
{
	struct dm_target_io *tio;

	if (!ci->io->tio.io) {
		/* the dm_target_io embedded in ci->io is available */
		tio = &ci->io->tio;
	} else {
		struct bio *clone = bio_alloc_bioset(gfp_mask, 0, &ci->io->md->bs);
		if (!clone)
			return NULL;

		tio = container_of(clone, struct dm_target_io, clone);
		tio->inside_dm_io = false;
	}

	tio->magic = DM_TIO_MAGIC;
	tio->io = ci->io;
	tio->ti = ti;
	tio->target_bio_nr = target_bio_nr;

	return tio;
}

static void free_tio(struct dm_target_io *tio)
{
	if (tio->inside_dm_io)
		return;
	bio_put(&tio->clone);
}
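
/*
 * Sketch of how a target consumes the per-bio data carved out of the
 * bioset front pad above (a hypothetical example target, not part of
 * this file; the struct and function names are illustrative):
 *
 *	struct example_per_bio_data { int tries; };
 *
 *	// in the target's ctr:
 *	ti->per_io_data_size = sizeof(struct example_per_bio_data);
 *
 *	// in the target's map or end_io, given the clone bio:
 *	struct example_per_bio_data *pb =
 *		dm_per_bio_data(bio, sizeof(struct example_per_bio_data));
 *
 * Because the data lives in the front pad sized at table load time, no
 * additional allocation is needed per I/O.
 */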

static bool md_in_flight_bios(struct mapped_device *md)
{
	int cpu;
	struct hd_struct *part = &dm_disk(md)->part0;
	long sum = 0;

	for_each_possible_cpu(cpu) {
		sum += part_stat_local_read_cpu(part, in_flight[0], cpu);
		sum += part_stat_local_read_cpu(part, in_flight[1], cpu);
	}

	return sum != 0;
}

static bool md_in_flight(struct mapped_device *md)
{
	if (queue_is_mq(md->queue))
		return blk_mq_queue_inflight(md->queue);
	else
		return md_in_flight_bios(md);
}

static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->orig_bio;

	io->start_time = jiffies;

	generic_start_io_acct(md->queue, bio_op(bio), bio_sectors(bio),
			      &dm_disk(md)->part0);

	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio_data_dir(bio),
				    bio->bi_iter.bi_sector, bio_sectors(bio),
				    false, 0, &io->stats_aux);
}

static void end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->orig_bio;
	unsigned long duration = jiffies - io->start_time;

	generic_end_io_acct(md->queue, bio_op(bio), &dm_disk(md)->part0,
			    io->start_time);

	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio_data_dir(bio),
				    bio->bi_iter.bi_sector, bio_sectors(bio),
				    true, duration, &io->stats_aux);

	/* nudge anyone waiting on suspend queue */
	if (unlikely(wq_has_sleeper(&md->wait)))
		wake_up(&md->wait);
}

/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&md->deferred_lock, flags);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irqrestore(&md->deferred_lock, flags);
	queue_work(md->wq, &md->work);
}

/*
 * Everyone (including functions in this file), should use this
 * function to access the md->map field, and make sure they call
 * dm_put_live_table() when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
{
	*srcu_idx = srcu_read_lock(&md->io_barrier);

	return srcu_dereference(md->map, &md->io_barrier);
}

void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
{
	srcu_read_unlock(&md->io_barrier, srcu_idx);
}

void dm_sync_table(struct mapped_device *md)
{
	synchronize_srcu(&md->io_barrier);
	synchronize_rcu_expedited();
}

/*
 * A fast alternative to dm_get_live_table/dm_put_live_table.
 * The caller must not block between these two functions.
 */
static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
{
	rcu_read_lock();
	return rcu_dereference(md->map);
}

static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
{
	rcu_read_unlock();
}
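
/*
 * Typical usage of the live-table helpers above (a sketch of the pattern
 * used throughout this file, e.g. by dm_blk_report_zones()):
 *
 *	int srcu_idx;
 *	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 *
 *	if (map) {
 *		// look up targets, issue I/O; sleeping is allowed here
 *	}
 *	dm_put_live_table(md, srcu_idx);
 *
 * The SRCU read side lets callers sleep while holding a table reference;
 * writers publish a new md->map and then use dm_sync_table() to wait for
 * all such readers to drain before tearing down the old table.
 */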

static char *_dm_claim_ptr = "I belong to device-mapper";

/*
 * Open a table device so we can use it as a map destination.
 */
static int open_table_device(struct table_device *td, dev_t dev,
			     struct mapped_device *md)
{
	struct block_device *bdev;

	int r;

	BUG_ON(td->dm_dev.bdev);

	bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _dm_claim_ptr);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	r = bd_link_disk_holder(bdev, dm_disk(md));
	if (r) {
		blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
		return r;
	}

	td->dm_dev.bdev = bdev;
	td->dm_dev.dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	return 0;
}

/*
 * Close a table device that we've been using.
 */
static void close_table_device(struct table_device *td, struct mapped_device *md)
{
	if (!td->dm_dev.bdev)
		return;

	bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
	blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
	put_dax(td->dm_dev.dax_dev);
	td->dm_dev.bdev = NULL;
	td->dm_dev.dax_dev = NULL;
}

static struct table_device *find_table_device(struct list_head *l, dev_t dev,
					      fmode_t mode)
{
	struct table_device *td;

	list_for_each_entry(td, l, list)
		if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
			return td;

	return NULL;
}
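
/*
 * The exported pair below is the entry point for table references to
 * underlying devices. A hedged usage sketch (error handling elided):
 *
 *	struct dm_dev *d;
 *
 *	if (!dm_get_table_device(md, dev, mode, &d)) {
 *		// use d->bdev / d->dax_dev
 *		dm_put_table_device(md, d);
 *	}
 *
 * Repeated gets of the same (dev, mode) pair share one table_device and
 * only bump its refcount; the bdev is opened once and closed on the
 * final put.
 */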

int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
			struct dm_dev **result)
{
	int r;
	struct table_device *td;

	mutex_lock(&md->table_devices_lock);
	td = find_table_device(&md->table_devices, dev, mode);
	if (!td) {
		td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
		if (!td) {
			mutex_unlock(&md->table_devices_lock);
			return -ENOMEM;
		}

		td->dm_dev.mode = mode;
		td->dm_dev.bdev = NULL;

		if ((r = open_table_device(td, dev, md))) {
			mutex_unlock(&md->table_devices_lock);
			kfree(td);
			return r;
		}

		format_dev_t(td->dm_dev.name, dev);

		refcount_set(&td->count, 1);
		list_add(&td->list, &md->table_devices);
	} else {
		refcount_inc(&td->count);
	}
	mutex_unlock(&md->table_devices_lock);

	*result = &td->dm_dev;
	return 0;
}
EXPORT_SYMBOL_GPL(dm_get_table_device);

void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
{
	struct table_device *td = container_of(d, struct table_device, dm_dev);

	mutex_lock(&md->table_devices_lock);
	if (refcount_dec_and_test(&td->count)) {
		close_table_device(td, md);
		list_del(&td->list);
		kfree(td);
	}
	mutex_unlock(&md->table_devices_lock);
}
EXPORT_SYMBOL(dm_put_table_device);

static void free_table_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct table_device *td = list_entry(tmp, struct table_device, list);

		DMWARN("dm_destroy: %s still exists with %d references",
		       td->dm_dev.name, refcount_read(&td->count));
		kfree(td);
	}
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, blk_status_t error)
{
	unsigned long flags;
	blk_status_t io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;

	/* Push-back supersedes any I/O errors */
	if (unlikely(error)) {
		spin_lock_irqsave(&io->endio_lock, flags);
		if (!(io->status == BLK_STS_DM_REQUEUE && __noflush_suspending(md)))
			io->status = error;
		spin_unlock_irqrestore(&io->endio_lock, flags);
	}

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->status == BLK_STS_DM_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md))
				/* NOTE early return due to BLK_STS_DM_REQUEUE below */
				bio_list_add_head(&md->deferred, io->orig_bio);
			else
				/* noflush suspend was interrupted. */
				io->status = BLK_STS_IOERR;
			spin_unlock_irqrestore(&md->deferred_lock, flags);
		}

		io_error = io->status;
		bio = io->orig_bio;
		end_io_acct(io);
		free_io(md, io);

		if (io_error == BLK_STS_DM_REQUEUE)
			return;

		if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
			/*
			 * Preflush done for flush with data, reissue
			 * without REQ_PREFLUSH.
			 */
			bio->bi_opf &= ~REQ_PREFLUSH;
			queue_io(md, bio);
		} else {
			/* done with normal IO or empty flush */
			if (io_error)
				bio->bi_status = io_error;
			bio_endio(bio);
		}
	}
}

void disable_discard(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support DISCARD, disable it */
	limits->max_discard_sectors = 0;
	blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue);
}

void disable_write_same(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE SAME, disable it */
	limits->max_write_same_sectors = 0;
}

void disable_write_zeroes(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE ZEROES, disable it */
	limits->max_write_zeroes_sectors = 0;
}

static void clone_endio(struct bio *bio)
{
	blk_status_t error = bio->bi_status;
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
		if (bio_op(bio) == REQ_OP_DISCARD &&
		    !bio->bi_disk->queue->limits.max_discard_sectors)
			disable_discard(md);
		else if (bio_op(bio) == REQ_OP_WRITE_SAME &&
			 !bio->bi_disk->queue->limits.max_write_same_sectors)
			disable_write_same(md);
		else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
			 !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
			disable_write_zeroes(md);
	}

	if (endio) {
		int r = endio(tio->ti, bio, &error);
		switch (r) {
		case DM_ENDIO_REQUEUE:
			error = BLK_STS_DM_REQUEUE;
			/*FALLTHRU*/
		case DM_ENDIO_DONE:
			break;
		case DM_ENDIO_INCOMPLETE:
			/* The target will handle the io */
			return;
		default:
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	free_tio(tio);
	dec_pending(io, error);
}

/*
 * Return maximum size of I/O possible at the supplied sector up to the current
 * target boundary.
 */
static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
{
	sector_t target_offset = dm_target_offset(ti, sector);

	return ti->len - target_offset;
}

static sector_t max_io_len(sector_t sector, struct dm_target *ti)
{
	sector_t len = max_io_len_target_boundary(sector, ti);
	sector_t offset, max_len;

	/*
	 * Does the target need to split even further?
	 */
	if (ti->max_io_len) {
		offset = dm_target_offset(ti, sector);
		if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
			max_len = sector_div(offset, ti->max_io_len);
		else
			max_len = offset & (ti->max_io_len - 1);
		max_len = ti->max_io_len - max_len;

		if (len > max_len)
			len = max_len;
	}

	return len;
}

int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
{
	if (len > UINT_MAX) {
		DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
		      (unsigned long long)len, UINT_MAX);
		ti->error = "Maximum size of target IO is too large";
		return -EINVAL;
	}

	ti->max_io_len = (uint32_t) len;

	return 0;
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);

static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
						sector_t sector, int *srcu_idx)
	__acquires(md->io_barrier)
{
	struct dm_table *map;
	struct dm_target *ti;

	map = dm_get_live_table(md, srcu_idx);
	if (!map)
		return NULL;

	ti = dm_table_find_target(map, sector);
	if (!ti)
		return NULL;

	return ti;
}

static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
				 long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long len, ret = -EIO;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->direct_access)
		goto out;
	len = max_io_len(sector, ti) / PAGE_SECTORS;
	if (len < 1)
		goto out;
	nr_pages = min(len, nr_pages);
	ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);

 out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

static bool dm_dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
			     int blocksize, sector_t start, sector_t len)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	struct dm_table *map;
	int srcu_idx;
	bool ret;

	map = dm_get_live_table(md, &srcu_idx);
	if (!map)
		return false;

	ret = dm_table_supports_dax(map, device_supports_dax, &blocksize);

	dm_put_live_table(md, srcu_idx);

	return ret;
}

static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
				    void *addr, size_t bytes, struct iov_iter *i)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long ret = 0;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->dax_copy_from_iter) {
		ret = copy_from_iter(addr, bytes, i);
		goto out;
	}
	ret = ti->type->dax_copy_from_iter(ti, pgoff, addr, bytes, i);
 out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

static size_t dm_dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
				  void *addr, size_t bytes, struct iov_iter *i)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long ret = 0;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->dax_copy_to_iter) {
		ret = copy_to_iter(addr, bytes, i);
		goto out;
	}
	ret = ti->type->dax_copy_to_iter(ti, pgoff, addr, bytes, i);
 out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

/*
 * A target may call dm_accept_partial_bio only from the map routine. It is
 * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_RESET,
 * REQ_OP_ZONE_OPEN, REQ_OP_ZONE_CLOSE and REQ_OP_ZONE_FINISH.
 *
 * dm_accept_partial_bio informs the dm that the target only wants to process
 * additional n_sectors sectors of the bio and the rest of the data should be
 * sent in a next bio.
 *
 * A diagram that explains the arithmetic:
 * +--------------------+---------------+-------+
 * | 1                  | 2             | 3     |
 * +--------------------+---------------+-------+
 *
 * <-------------- *tio->len_ptr --------------->
 *                      <------- bi_size ------->
 *                      <-- n_sectors -->
 *
 * Region 1 was already iterated over with bio_advance or similar function.
 *	(it may be empty if the target doesn't use bio_advance)
 * Region 2 is the remaining bio size that the target wants to process.
 *	(it may be empty if region 1 is non-empty, although there is no reason
 *	 to make it empty)
 * The target requires that region 3 is to be sent in the next bio.
 *
 * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
 * the partially processed part (the sum of regions 1+2) must be the same for all
 * copies of the bio.
12031dd40c3eSMikulas Patocka */ 12041dd40c3eSMikulas Patocka void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors) 12051dd40c3eSMikulas Patocka { 12061dd40c3eSMikulas Patocka struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); 12071dd40c3eSMikulas Patocka unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT; 12081eff9d32SJens Axboe BUG_ON(bio->bi_opf & REQ_PREFLUSH); 12091dd40c3eSMikulas Patocka BUG_ON(bi_size > *tio->len_ptr); 12101dd40c3eSMikulas Patocka BUG_ON(n_sectors > bi_size); 12111dd40c3eSMikulas Patocka *tio->len_ptr -= bi_size - n_sectors; 12121dd40c3eSMikulas Patocka bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT; 12131dd40c3eSMikulas Patocka } 12141dd40c3eSMikulas Patocka EXPORT_SYMBOL_GPL(dm_accept_partial_bio); 12151dd40c3eSMikulas Patocka 1216d67a5f4bSMikulas Patocka /* 1217e76239a3SChristoph Hellwig * The zone descriptors obtained with a zone report indicate 1218e76239a3SChristoph Hellwig * zone positions within the underlying device of the target. The zone 1219e76239a3SChristoph Hellwig * descriptors must be remapped to match their position within the dm device. 1220e76239a3SChristoph Hellwig * The calling target should obtain the zone information using 1221e76239a3SChristoph Hellwig * blkdev_report_zones() to ensure that remapping for partition offset is 1222e76239a3SChristoph Hellwig * already handled. 122310999307SDamien Le Moal */ 1224e76239a3SChristoph Hellwig void dm_remap_zone_report(struct dm_target *ti, sector_t start, 1225e76239a3SChristoph Hellwig struct blk_zone *zones, unsigned int *nr_zones) 122610999307SDamien Le Moal { 122710999307SDamien Le Moal #ifdef CONFIG_BLK_DEV_ZONED 122810999307SDamien Le Moal struct blk_zone *zone; 1229e76239a3SChristoph Hellwig unsigned int nrz = *nr_zones; 1230e76239a3SChristoph Hellwig int i; 123110999307SDamien Le Moal 123210999307SDamien Le Moal /* 1233e76239a3SChristoph Hellwig * Remap the start sector and write pointer position of the zones in 1234e76239a3SChristoph Hellwig * the array. Since we may have obtained more zones from the target's 1235e76239a3SChristoph Hellwig * underlying device than fit in the target's size, also adjust the number 1236e76239a3SChristoph Hellwig * of zones.
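	 * For example, a zone that starts at sector z of the underlying
	 * device is reported to start at sector z + ti->begin - start on
	 * the dm device; the write pointer of sequential zones is shifted
	 * the same way.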
12379864cd5dSDamien Le Moal */ 1238e76239a3SChristoph Hellwig for (i = 0; i < nrz; i++) { 1239e76239a3SChristoph Hellwig zone = zones + i; 124010999307SDamien Le Moal if (zone->start >= start + ti->len) { 1241e76239a3SChristoph Hellwig memset(zone, 0, sizeof(struct blk_zone) * (nrz - i)); 124210999307SDamien Le Moal break; 124310999307SDamien Le Moal } 1244e76239a3SChristoph Hellwig 124510999307SDamien Le Moal zone->start = zone->start + ti->begin - start; 1246e76239a3SChristoph Hellwig if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) 1247e76239a3SChristoph Hellwig continue; 1248e76239a3SChristoph Hellwig 124910999307SDamien Le Moal if (zone->cond == BLK_ZONE_COND_FULL) 125010999307SDamien Le Moal zone->wp = zone->start + zone->len; 125110999307SDamien Le Moal else if (zone->cond == BLK_ZONE_COND_EMPTY) 125210999307SDamien Le Moal zone->wp = zone->start; 125310999307SDamien Le Moal else 1254e76239a3SChristoph Hellwig zone->wp = zone->wp + ti->begin - start; 125510999307SDamien Le Moal } 125610999307SDamien Le Moal 1257e76239a3SChristoph Hellwig *nr_zones = i; 125810999307SDamien Le Moal #else /* !CONFIG_BLK_DEV_ZONED */ 1259e76239a3SChristoph Hellwig *nr_zones = 0; 126010999307SDamien Le Moal #endif 126110999307SDamien Le Moal } 126210999307SDamien Le Moal EXPORT_SYMBOL_GPL(dm_remap_zone_report); 126310999307SDamien Le Moal 1264978e51baSMike Snitzer static blk_qc_t __map_bio(struct dm_target_io *tio) 12651da177e4SLinus Torvalds { 12661da177e4SLinus Torvalds int r; 12672056a782SJens Axboe sector_t sector; 1268dba14160SMikulas Patocka struct bio *clone = &tio->clone; 126964f52b0eSMike Snitzer struct dm_io *io = tio->io; 1270978e51baSMike Snitzer struct mapped_device *md = io->md; 1271bd2a49b8SAlasdair G Kergon struct dm_target *ti = tio->ti; 1272978e51baSMike Snitzer blk_qc_t ret = BLK_QC_T_NONE; 12731da177e4SLinus Torvalds 12741da177e4SLinus Torvalds clone->bi_end_io = clone_endio; 12751da177e4SLinus Torvalds 12761da177e4SLinus Torvalds /* 12771da177e4SLinus Torvalds * Map the clone. If r == 0 we don't need to do 12781da177e4SLinus Torvalds * anything; the target has assumed ownership of 12791da177e4SLinus Torvalds * this io.
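	 * The other return values are handled below: DM_MAPIO_REMAPPED means
	 * the clone must be dispatched by dm core, while DM_MAPIO_KILL and
	 * DM_MAPIO_REQUEUE free the clone and complete the original io with
	 * BLK_STS_IOERR or BLK_STS_DM_REQUEUE respectively.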
12801da177e4SLinus Torvalds */ 128164f52b0eSMike Snitzer atomic_inc(&io->io_count); 12824f024f37SKent Overstreet sector = clone->bi_iter.bi_sector; 1283d67a5f4bSMikulas Patocka 12847de3ee57SMikulas Patocka r = ti->type->map(ti, clone); 1285846785e6SChristoph Hellwig switch (r) { 1286846785e6SChristoph Hellwig case DM_MAPIO_SUBMITTED: 1287846785e6SChristoph Hellwig break; 1288846785e6SChristoph Hellwig case DM_MAPIO_REMAPPED: 12891da177e4SLinus Torvalds /* the bio has been remapped so dispatch it */ 129074d46992SChristoph Hellwig trace_block_bio_remap(clone->bi_disk->queue, clone, 129164f52b0eSMike Snitzer bio_dev(io->orig_bio), sector); 1292978e51baSMike Snitzer if (md->type == DM_TYPE_NVME_BIO_BASED) 1293978e51baSMike Snitzer ret = direct_make_request(clone); 1294978e51baSMike Snitzer else 1295978e51baSMike Snitzer ret = generic_make_request(clone); 1296846785e6SChristoph Hellwig break; 1297846785e6SChristoph Hellwig case DM_MAPIO_KILL: 12984e4cbee9SChristoph Hellwig free_tio(tio); 129964f52b0eSMike Snitzer dec_pending(io, BLK_STS_IOERR); 13004e4cbee9SChristoph Hellwig break; 1301846785e6SChristoph Hellwig case DM_MAPIO_REQUEUE: 1302cfae7529SMike Snitzer free_tio(tio); 130364f52b0eSMike Snitzer dec_pending(io, BLK_STS_DM_REQUEUE); 1304846785e6SChristoph Hellwig break; 1305846785e6SChristoph Hellwig default: 130645cbcd79SKiyoshi Ueda DMWARN("unimplemented target map return value: %d", r); 130745cbcd79SKiyoshi Ueda BUG(); 13081da177e4SLinus Torvalds } 13091da177e4SLinus Torvalds 1310978e51baSMike Snitzer return ret; 13111da177e4SLinus Torvalds } 13121da177e4SLinus Torvalds 1313e0d6609aSMikulas Patocka static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len) 1314bd2a49b8SAlasdair G Kergon { 13154f024f37SKent Overstreet bio->bi_iter.bi_sector = sector; 13164f024f37SKent Overstreet bio->bi_iter.bi_size = to_bytes(len); 13171da177e4SLinus Torvalds } 13181da177e4SLinus Torvalds 13191da177e4SLinus Torvalds /* 13201da177e4SLinus Torvalds * Creates a bio that consists of a range of complete bvecs. 13211da177e4SLinus Torvalds */ 1322c80914e8SMike Snitzer static int clone_bio(struct dm_target_io *tio, struct bio *bio, 13231c3b13e6SKent Overstreet sector_t sector, unsigned len) 13241da177e4SLinus Torvalds { 1325dba14160SMikulas Patocka struct bio *clone = &tio->clone; 13261da177e4SLinus Torvalds 13271c3b13e6SKent Overstreet __bio_clone_fast(clone, bio); 13289c47008dSMartin K.
Petersen 132957c36519SMike Snitzer if (bio_integrity(bio)) { 1330e2460f2aSMikulas Patocka int r; 1331e2460f2aSMikulas Patocka 1332e2460f2aSMikulas Patocka if (unlikely(!dm_target_has_integrity(tio->ti->type) && 1333e2460f2aSMikulas Patocka !dm_target_passes_integrity(tio->ti->type))) { 1334e2460f2aSMikulas Patocka DMWARN("%s: the target %s doesn't support integrity data.", 1335e2460f2aSMikulas Patocka dm_device_name(tio->io->md), 1336e2460f2aSMikulas Patocka tio->ti->type->name); 1337e2460f2aSMikulas Patocka return -EIO; 1338e2460f2aSMikulas Patocka } 1339e2460f2aSMikulas Patocka 1340e2460f2aSMikulas Patocka r = bio_integrity_clone(clone, bio, GFP_NOIO); 1341c80914e8SMike Snitzer if (r < 0) 1342c80914e8SMike Snitzer return r; 1343c80914e8SMike Snitzer } 13441c3b13e6SKent Overstreet 1345fa8db494SMike Snitzer bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector)); 1346fa8db494SMike Snitzer clone->bi_iter.bi_size = to_bytes(len); 1347fa8db494SMike Snitzer 1348fa8db494SMike Snitzer if (bio_integrity(bio)) 1349fa8db494SMike Snitzer bio_integrity_trim(clone); 1350c80914e8SMike Snitzer 1351c80914e8SMike Snitzer return 0; 13521da177e4SLinus Torvalds } 13531da177e4SLinus Torvalds 1354318716ddSMike Snitzer static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci, 1355318716ddSMike Snitzer struct dm_target *ti, unsigned num_bios) 1356f9ab94ceSMikulas Patocka { 1357dba14160SMikulas Patocka struct dm_target_io *tio; 1358318716ddSMike Snitzer int try; 1359dba14160SMikulas Patocka 1360318716ddSMike Snitzer if (!num_bios) 1361318716ddSMike Snitzer return; 1362f9ab94ceSMikulas Patocka 1363318716ddSMike Snitzer if (num_bios == 1) { 1364318716ddSMike Snitzer tio = alloc_tio(ci, ti, 0, GFP_NOIO); 1365318716ddSMike Snitzer bio_list_add(blist, &tio->clone); 1366318716ddSMike Snitzer return; 13679015df24SAlasdair G Kergon } 13689015df24SAlasdair G Kergon 1369318716ddSMike Snitzer for (try = 0; try < 2; try++) { 1370318716ddSMike Snitzer int bio_nr; 1371318716ddSMike Snitzer struct bio *bio; 1372318716ddSMike Snitzer 1373318716ddSMike Snitzer if (try) 1374bc02cdbeSMike Snitzer mutex_lock(&ci->io->md->table_devices_lock); 1375318716ddSMike Snitzer for (bio_nr = 0; bio_nr < num_bios; bio_nr++) { 1376318716ddSMike Snitzer tio = alloc_tio(ci, ti, bio_nr, try ? 
GFP_NOIO : GFP_NOWAIT); 1377318716ddSMike Snitzer if (!tio) 1378318716ddSMike Snitzer break; 1379318716ddSMike Snitzer 1380318716ddSMike Snitzer bio_list_add(blist, &tio->clone); 1381318716ddSMike Snitzer } 1382318716ddSMike Snitzer if (try) 1383bc02cdbeSMike Snitzer mutex_unlock(&ci->io->md->table_devices_lock); 1384318716ddSMike Snitzer if (bio_nr == num_bios) 1385318716ddSMike Snitzer return; 1386318716ddSMike Snitzer 1387318716ddSMike Snitzer while ((bio = bio_list_pop(blist))) { 1388318716ddSMike Snitzer tio = container_of(bio, struct dm_target_io, clone); 1389318716ddSMike Snitzer free_tio(tio); 1390318716ddSMike Snitzer } 1391318716ddSMike Snitzer } 1392318716ddSMike Snitzer } 1393318716ddSMike Snitzer 1394978e51baSMike Snitzer static blk_qc_t __clone_and_map_simple_bio(struct clone_info *ci, 1395318716ddSMike Snitzer struct dm_target_io *tio, unsigned *len) 13969015df24SAlasdair G Kergon { 1397dba14160SMikulas Patocka struct bio *clone = &tio->clone; 13989015df24SAlasdair G Kergon 13991dd40c3eSMikulas Patocka tio->len_ptr = len; 14001dd40c3eSMikulas Patocka 14011c3b13e6SKent Overstreet __bio_clone_fast(clone, ci->bio); 1402bd2a49b8SAlasdair G Kergon if (len) 14031dd40c3eSMikulas Patocka bio_setup_sector(clone, ci->sector, *len); 1404f9ab94ceSMikulas Patocka 1405978e51baSMike Snitzer return __map_bio(tio); 1406f9ab94ceSMikulas Patocka } 1407f9ab94ceSMikulas Patocka 140814fe594dSAlasdair G Kergon static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti, 14091dd40c3eSMikulas Patocka unsigned num_bios, unsigned *len) 141006a426ceSMike Snitzer { 1411318716ddSMike Snitzer struct bio_list blist = BIO_EMPTY_LIST; 1412318716ddSMike Snitzer struct bio *bio; 1413318716ddSMike Snitzer struct dm_target_io *tio; 141406a426ceSMike Snitzer 1415318716ddSMike Snitzer alloc_multiple_bios(&blist, ci, ti, num_bios); 1416318716ddSMike Snitzer 1417318716ddSMike Snitzer while ((bio = bio_list_pop(&blist))) { 1418318716ddSMike Snitzer tio = container_of(bio, struct dm_target_io, clone); 1419978e51baSMike Snitzer (void) __clone_and_map_simple_bio(ci, tio, len); 1420318716ddSMike Snitzer } 142106a426ceSMike Snitzer } 142206a426ceSMike Snitzer 142314fe594dSAlasdair G Kergon static int __send_empty_flush(struct clone_info *ci) 1424f9ab94ceSMikulas Patocka { 142506a426ceSMike Snitzer unsigned target_nr = 0; 1426f9ab94ceSMikulas Patocka struct dm_target *ti; 1427f9ab94ceSMikulas Patocka 1428892ad71fSDennis Zhou /* 1429dbe3ece1SJens Axboe * Empty flush uses a statically initialized bio, as the base for 1430dbe3ece1SJens Axboe * cloning. However, blkg association requires that a bdev is 1431dbe3ece1SJens Axboe * associated with a gendisk, which doesn't happen until the bdev is 1432dbe3ece1SJens Axboe * opened. So, blkg association is done at issue time of the flush 1433dbe3ece1SJens Axboe * rather than when the device is created in alloc_dev(). 
1434892ad71fSDennis Zhou */ 1435892ad71fSDennis Zhou bio_set_dev(ci->bio, ci->io->md->bdev); 1436892ad71fSDennis Zhou 1437b372d360SMike Snitzer BUG_ON(bio_has_data(ci->bio)); 1438f9ab94ceSMikulas Patocka while ((ti = dm_table_get_target(ci->map, target_nr++))) 14391dd40c3eSMikulas Patocka __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL); 1440f9ab94ceSMikulas Patocka 1441892ad71fSDennis Zhou bio_disassociate_blkg(ci->bio); 1442892ad71fSDennis Zhou 1443f9ab94ceSMikulas Patocka return 0; 1444f9ab94ceSMikulas Patocka } 1445f9ab94ceSMikulas Patocka 1446c80914e8SMike Snitzer static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti, 14471dd40c3eSMikulas Patocka sector_t sector, unsigned *len) 14485ae89a87SMike Snitzer { 1449dba14160SMikulas Patocka struct bio *bio = ci->bio; 14505ae89a87SMike Snitzer struct dm_target_io *tio; 1451f31c21e4SNeilBrown int r; 14525ae89a87SMike Snitzer 1453318716ddSMike Snitzer tio = alloc_tio(ci, ti, 0, GFP_NOIO); 14541dd40c3eSMikulas Patocka tio->len_ptr = len; 1455c80914e8SMike Snitzer r = clone_bio(tio, bio, sector, *len); 1456072623deSMikulas Patocka if (r < 0) { 1457cfae7529SMike Snitzer free_tio(tio); 1458c80914e8SMike Snitzer return r; 1459b0d8ed4dSAlasdair G Kergon } 1460978e51baSMike Snitzer (void) __map_bio(tio); 146155a62eefSAlasdair G Kergon 1462f31c21e4SNeilBrown return 0; 146323508a96SMike Snitzer } 146455a62eefSAlasdair G Kergon 146523508a96SMike Snitzer typedef unsigned (*get_num_bios_fn)(struct dm_target *ti); 146655a62eefSAlasdair G Kergon 146723508a96SMike Snitzer static unsigned get_num_discard_bios(struct dm_target *ti) 146823508a96SMike Snitzer { 146923508a96SMike Snitzer return ti->num_discard_bios; 147023508a96SMike Snitzer } 147123508a96SMike Snitzer 147200716545SDenis Semakin static unsigned get_num_secure_erase_bios(struct dm_target *ti) 147300716545SDenis Semakin { 147400716545SDenis Semakin return ti->num_secure_erase_bios; 147500716545SDenis Semakin } 147600716545SDenis Semakin 147723508a96SMike Snitzer static unsigned get_num_write_same_bios(struct dm_target *ti) 147823508a96SMike Snitzer { 147923508a96SMike Snitzer return ti->num_write_same_bios; 148023508a96SMike Snitzer } 148123508a96SMike Snitzer 1482ac62d620SChristoph Hellwig static unsigned get_num_write_zeroes_bios(struct dm_target *ti) 1483ac62d620SChristoph Hellwig { 1484ac62d620SChristoph Hellwig return ti->num_write_zeroes_bios; 1485ac62d620SChristoph Hellwig } 1486ac62d620SChristoph Hellwig 14873d7f4562SMike Snitzer static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti, 148861697a6aSMike Snitzer unsigned num_bios) 14895ae89a87SMike Snitzer { 149051b86f9aSMichael Lass unsigned len; 14915ae89a87SMike Snitzer 14925ae89a87SMike Snitzer /* 149323508a96SMike Snitzer * Even though the device advertised support for this type of 149423508a96SMike Snitzer * request, that does not mean every target supports it, and 1495936688d7SMike Snitzer * reconfiguration might also have changed that since the 14965ae89a87SMike Snitzer * check was performed. 
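 * (For example, a table reload may have swapped in a target whose
 * num_discard_bios is zero, in which case -EOPNOTSUPP is returned below.)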
14975ae89a87SMike Snitzer */ 149855a62eefSAlasdair G Kergon if (!num_bios) 14995ae89a87SMike Snitzer return -EOPNOTSUPP; 15005ae89a87SMike Snitzer 150151b86f9aSMichael Lass len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti)); 150251b86f9aSMichael Lass 15031dd40c3eSMikulas Patocka __send_duplicate_bios(ci, ti, num_bios, &len); 15045ae89a87SMike Snitzer 1505a79245b3SMike Snitzer ci->sector += len; 15063d7f4562SMike Snitzer ci->sector_count -= len; 15075ae89a87SMike Snitzer 15085ae89a87SMike Snitzer return 0; 15095ae89a87SMike Snitzer } 15105ae89a87SMike Snitzer 15113d7f4562SMike Snitzer static int __send_discard(struct clone_info *ci, struct dm_target *ti) 151223508a96SMike Snitzer { 151361697a6aSMike Snitzer return __send_changing_extent_only(ci, ti, get_num_discard_bios(ti)); 151423508a96SMike Snitzer } 151523508a96SMike Snitzer 151600716545SDenis Semakin static int __send_secure_erase(struct clone_info *ci, struct dm_target *ti) 151700716545SDenis Semakin { 151861697a6aSMike Snitzer return __send_changing_extent_only(ci, ti, get_num_secure_erase_bios(ti)); 151900716545SDenis Semakin } 152000716545SDenis Semakin 15213d7f4562SMike Snitzer static int __send_write_same(struct clone_info *ci, struct dm_target *ti) 152223508a96SMike Snitzer { 152361697a6aSMike Snitzer return __send_changing_extent_only(ci, ti, get_num_write_same_bios(ti)); 152423508a96SMike Snitzer } 152523508a96SMike Snitzer 15263d7f4562SMike Snitzer static int __send_write_zeroes(struct clone_info *ci, struct dm_target *ti) 1527ac62d620SChristoph Hellwig { 152861697a6aSMike Snitzer return __send_changing_extent_only(ci, ti, get_num_write_zeroes_bios(ti)); 1529ac62d620SChristoph Hellwig } 1530ac62d620SChristoph Hellwig 1531568c73a3SMike Snitzer static bool is_abnormal_io(struct bio *bio) 1532568c73a3SMike Snitzer { 1533568c73a3SMike Snitzer bool r = false; 1534568c73a3SMike Snitzer 1535568c73a3SMike Snitzer switch (bio_op(bio)) { 1536568c73a3SMike Snitzer case REQ_OP_DISCARD: 1537568c73a3SMike Snitzer case REQ_OP_SECURE_ERASE: 1538568c73a3SMike Snitzer case REQ_OP_WRITE_SAME: 1539568c73a3SMike Snitzer case REQ_OP_WRITE_ZEROES: 1540568c73a3SMike Snitzer r = true; 1541568c73a3SMike Snitzer break; 1542568c73a3SMike Snitzer } 1543568c73a3SMike Snitzer 1544568c73a3SMike Snitzer return r; 1545568c73a3SMike Snitzer } 1546568c73a3SMike Snitzer 15470519c71eSMike Snitzer static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti, 15480519c71eSMike Snitzer int *result) 15490519c71eSMike Snitzer { 15500519c71eSMike Snitzer struct bio *bio = ci->bio; 15510519c71eSMike Snitzer 15520519c71eSMike Snitzer if (bio_op(bio) == REQ_OP_DISCARD) 15530519c71eSMike Snitzer *result = __send_discard(ci, ti); 155400716545SDenis Semakin else if (bio_op(bio) == REQ_OP_SECURE_ERASE) 155500716545SDenis Semakin *result = __send_secure_erase(ci, ti); 15560519c71eSMike Snitzer else if (bio_op(bio) == REQ_OP_WRITE_SAME) 15570519c71eSMike Snitzer *result = __send_write_same(ci, ti); 15580519c71eSMike Snitzer else if (bio_op(bio) == REQ_OP_WRITE_ZEROES) 15590519c71eSMike Snitzer *result = __send_write_zeroes(ci, ti); 15600519c71eSMike Snitzer else 15610519c71eSMike Snitzer return false; 15620519c71eSMike Snitzer 15630519c71eSMike Snitzer return true; 15640519c71eSMike Snitzer } 15650519c71eSMike Snitzer 1566e4c93811SAlasdair G Kergon /* 1567e4c93811SAlasdair G Kergon * Select the correct strategy for processing a non-flush bio. 
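 * Abnormal bios (discard, secure erase, write same and write zeroes) are
 * handed to __process_abnormal_io(); anything else is cloned and mapped
 * via __clone_and_map_data_bio().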
1568e4c93811SAlasdair G Kergon */ 1569e4c93811SAlasdair G Kergon static int __split_and_process_non_flush(struct clone_info *ci) 1570e4c93811SAlasdair G Kergon { 1571e4c93811SAlasdair G Kergon struct dm_target *ti; 15721c3b13e6SKent Overstreet unsigned len; 1573c80914e8SMike Snitzer int r; 1574e4c93811SAlasdair G Kergon 1575e4c93811SAlasdair G Kergon ti = dm_table_find_target(ci->map, ci->sector); 1576123d87d5SMikulas Patocka if (!ti) 1577e4c93811SAlasdair G Kergon return -EIO; 1578e4c93811SAlasdair G Kergon 1579568c73a3SMike Snitzer if (__process_abnormal_io(ci, ti, &r)) 15800519c71eSMike Snitzer return r; 15813d7f4562SMike Snitzer 1582e76239a3SChristoph Hellwig len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count); 1583e4c93811SAlasdair G Kergon 1584c80914e8SMike Snitzer r = __clone_and_map_data_bio(ci, ti, ci->sector, &len); 1585c80914e8SMike Snitzer if (r < 0) 1586c80914e8SMike Snitzer return r; 1587e4c93811SAlasdair G Kergon 1588e4c93811SAlasdair G Kergon ci->sector += len; 1589e4c93811SAlasdair G Kergon ci->sector_count -= len; 1590e4c93811SAlasdair G Kergon 1591e4c93811SAlasdair G Kergon return 0; 1592e4c93811SAlasdair G Kergon } 1593e4c93811SAlasdair G Kergon 1594978e51baSMike Snitzer static void init_clone_info(struct clone_info *ci, struct mapped_device *md, 1595978e51baSMike Snitzer struct dm_table *map, struct bio *bio) 1596978e51baSMike Snitzer { 1597978e51baSMike Snitzer ci->map = map; 1598978e51baSMike Snitzer ci->io = alloc_io(md, bio); 1599978e51baSMike Snitzer ci->sector = bio->bi_iter.bi_sector; 1600978e51baSMike Snitzer } 1601978e51baSMike Snitzer 1602a1e1cb72SMike Snitzer #define __dm_part_stat_sub(part, field, subnd) \ 1603a1e1cb72SMike Snitzer (part_stat_get(part, field) -= (subnd)) 1604a1e1cb72SMike Snitzer 1605e4c93811SAlasdair G Kergon /* 160614fe594dSAlasdair G Kergon * Entry point to split a bio into clones and submit them to the targets. 16071da177e4SLinus Torvalds */ 1608978e51baSMike Snitzer static blk_qc_t __split_and_process_bio(struct mapped_device *md, 160983d5e5b0SMikulas Patocka struct dm_table *map, struct bio *bio) 16101da177e4SLinus Torvalds { 16111da177e4SLinus Torvalds struct clone_info ci; 1612978e51baSMike Snitzer blk_qc_t ret = BLK_QC_T_NONE; 1613512875bdSJun'ichi Nomura int error = 0; 16141da177e4SLinus Torvalds 1615978e51baSMike Snitzer init_clone_info(&ci, md, map, bio); 1616bd2a49b8SAlasdair G Kergon 16171eff9d32SJens Axboe if (bio->bi_opf & REQ_PREFLUSH) { 1618dbe3ece1SJens Axboe struct bio flush_bio; 1619dbe3ece1SJens Axboe 1620dbe3ece1SJens Axboe /* 1621dbe3ece1SJens Axboe * Use an on-stack bio for this, it's safe since we don't 1622dbe3ece1SJens Axboe * need to reference it after submit. It's just used as 1623dbe3ece1SJens Axboe * the basis for the clone(s). 
1624dbe3ece1SJens Axboe */ 1625dbe3ece1SJens Axboe bio_init(&flush_bio, NULL, 0); 1626dbe3ece1SJens Axboe flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC; 1627dbe3ece1SJens Axboe ci.bio = &flush_bio; 1628b372d360SMike Snitzer ci.sector_count = 0; 162914fe594dSAlasdair G Kergon error = __send_empty_flush(&ci); 1630b372d360SMike Snitzer /* dec_pending submits any data associated with flush */ 1631*2e2d6f7eSAjay Joshi } else if (op_is_zone_mgmt(bio_op(bio))) { 1632a4aa5e56SDamien Le Moal ci.bio = bio; 1633a4aa5e56SDamien Le Moal ci.sector_count = 0; 1634a4aa5e56SDamien Le Moal error = __split_and_process_non_flush(&ci); 1635b372d360SMike Snitzer } else { 16366a8736d1STejun Heo ci.bio = bio; 16371da177e4SLinus Torvalds ci.sector_count = bio_sectors(bio); 163818a25da8SNeilBrown while (ci.sector_count && !error) { 163914fe594dSAlasdair G Kergon error = __split_and_process_non_flush(&ci); 164018a25da8SNeilBrown if (current->bio_list && ci.sector_count && !error) { 164118a25da8SNeilBrown /* 164218a25da8SNeilBrown * Remainder must be passed to generic_make_request() 164318a25da8SNeilBrown * so that it gets handled *after* bios already submitted 164418a25da8SNeilBrown * have been completely processed. 164518a25da8SNeilBrown * We take a clone of the original to store in 1646745dc570SMike Snitzer * ci.io->orig_bio to be used by end_io_acct() and 164718a25da8SNeilBrown * for dec_pending to use for completion handling. 164818a25da8SNeilBrown */ 1649f21c601aSMike Snitzer struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count, 1650f21c601aSMike Snitzer GFP_NOIO, &md->queue->bio_split); 1651745dc570SMike Snitzer ci.io->orig_bio = b; 1652a1e1cb72SMike Snitzer 1653a1e1cb72SMike Snitzer /* 1654a1e1cb72SMike Snitzer * Adjust IO stats for each split, otherwise upon queue 1655a1e1cb72SMike Snitzer * reentry there will be redundant IO accounting. 1656a1e1cb72SMike Snitzer * NOTE: this is a stop-gap fix, a proper fix involves 1657a1e1cb72SMike Snitzer * significant refactoring of DM core's bio splitting 1658a1e1cb72SMike Snitzer * (by eliminating DM's splitting and just using bio_split) 1659a1e1cb72SMike Snitzer */ 1660a1e1cb72SMike Snitzer part_stat_lock(); 1661a1e1cb72SMike Snitzer __dm_part_stat_sub(&dm_disk(md)->part0, 1662a1e1cb72SMike Snitzer sectors[op_stat_group(bio_op(bio))], ci.sector_count); 1663a1e1cb72SMike Snitzer part_stat_unlock(); 1664a1e1cb72SMike Snitzer 166518a25da8SNeilBrown bio_chain(b, bio); 1666075c18c3SMike Snitzer trace_block_split(md->queue, b, bio->bi_iter.bi_sector); 1667978e51baSMike Snitzer ret = generic_make_request(bio); 166818a25da8SNeilBrown break; 166918a25da8SNeilBrown } 167018a25da8SNeilBrown } 1671d87f4c14STejun Heo } 16721da177e4SLinus Torvalds 16731da177e4SLinus Torvalds /* drop the extra reference count */ 167454385bf7SBart Van Assche dec_pending(ci.io, errno_to_blk_status(error)); 1675978e51baSMike Snitzer return ret; 16761da177e4SLinus Torvalds } 16771da177e4SLinus Torvalds 16781da177e4SLinus Torvalds /* 1679978e51baSMike Snitzer * Optimized variant of __split_and_process_bio that leverages the 1680978e51baSMike Snitzer * fact that targets that use it do _not_ have a need to split bios. 
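 * (As of this code that means DM_TYPE_NVME_BIO_BASED devices; see the
 * dispatch in dm_process_bio() below.)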
16811da177e4SLinus Torvalds */ 1682568c73a3SMike Snitzer static blk_qc_t __process_bio(struct mapped_device *md, struct dm_table *map, 1683568c73a3SMike Snitzer struct bio *bio, struct dm_target *ti) 16841da177e4SLinus Torvalds { 1685978e51baSMike Snitzer struct clone_info ci; 1686978e51baSMike Snitzer blk_qc_t ret = BLK_QC_T_NONE; 1687978e51baSMike Snitzer int error = 0; 1688978e51baSMike Snitzer 1689978e51baSMike Snitzer init_clone_info(&ci, md, map, bio); 1690978e51baSMike Snitzer 1691978e51baSMike Snitzer if (bio->bi_opf & REQ_PREFLUSH) { 1692dbe3ece1SJens Axboe struct bio flush_bio; 1693dbe3ece1SJens Axboe 1694dbe3ece1SJens Axboe /* 1695dbe3ece1SJens Axboe * Use an on-stack bio for this, it's safe since we don't 1696dbe3ece1SJens Axboe * need to reference it after submit. It's just used as 1697dbe3ece1SJens Axboe * the basis for the clone(s). 1698dbe3ece1SJens Axboe */ 1699dbe3ece1SJens Axboe bio_init(&flush_bio, NULL, 0); 1700dbe3ece1SJens Axboe flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC; 1701dbe3ece1SJens Axboe ci.bio = &flush_bio; 1702978e51baSMike Snitzer ci.sector_count = 0; 1703978e51baSMike Snitzer error = __send_empty_flush(&ci); 1704978e51baSMike Snitzer /* dec_pending submits any data associated with flush */ 1705978e51baSMike Snitzer } else { 1706978e51baSMike Snitzer struct dm_target_io *tio; 1707978e51baSMike Snitzer 1708978e51baSMike Snitzer ci.bio = bio; 1709978e51baSMike Snitzer ci.sector_count = bio_sectors(bio); 1710568c73a3SMike Snitzer if (__process_abnormal_io(&ci, ti, &error)) 17110519c71eSMike Snitzer goto out; 17120519c71eSMike Snitzer 17130519c71eSMike Snitzer tio = alloc_tio(&ci, ti, 0, GFP_NOIO); 1714978e51baSMike Snitzer ret = __clone_and_map_simple_bio(&ci, tio, NULL); 1715978e51baSMike Snitzer } 1716978e51baSMike Snitzer out: 1717978e51baSMike Snitzer /* drop the extra reference count */ 1718978e51baSMike Snitzer dec_pending(ci.io, errno_to_blk_status(error)); 1719978e51baSMike Snitzer return ret; 1720978e51baSMike Snitzer } 1721978e51baSMike Snitzer 1722568c73a3SMike Snitzer static void dm_queue_split(struct mapped_device *md, struct dm_target *ti, struct bio **bio) 1723568c73a3SMike Snitzer { 1724568c73a3SMike Snitzer unsigned len, sector_count; 1725568c73a3SMike Snitzer 1726568c73a3SMike Snitzer sector_count = bio_sectors(*bio); 1727568c73a3SMike Snitzer len = min_t(sector_t, max_io_len((*bio)->bi_iter.bi_sector, ti), sector_count); 1728568c73a3SMike Snitzer 1729568c73a3SMike Snitzer if (sector_count > len) { 1730568c73a3SMike Snitzer struct bio *split = bio_split(*bio, len, GFP_NOIO, &md->queue->bio_split); 1731568c73a3SMike Snitzer 1732568c73a3SMike Snitzer bio_chain(split, *bio); 1733568c73a3SMike Snitzer trace_block_split(md->queue, split, (*bio)->bi_iter.bi_sector); 1734568c73a3SMike Snitzer generic_make_request(*bio); 1735568c73a3SMike Snitzer *bio = split; 1736568c73a3SMike Snitzer } 1737568c73a3SMike Snitzer } 1738568c73a3SMike Snitzer 17396548c7c5SMike Snitzer static blk_qc_t dm_process_bio(struct mapped_device *md, 17406548c7c5SMike Snitzer struct dm_table *map, struct bio *bio) 17416548c7c5SMike Snitzer { 1742568c73a3SMike Snitzer blk_qc_t ret = BLK_QC_T_NONE; 1743568c73a3SMike Snitzer struct dm_target *ti = md->immutable_target; 1744568c73a3SMike Snitzer 1745568c73a3SMike Snitzer if (unlikely(!map)) { 1746568c73a3SMike Snitzer bio_io_error(bio); 1747568c73a3SMike Snitzer return ret; 1748568c73a3SMike Snitzer } 1749568c73a3SMike Snitzer 1750568c73a3SMike Snitzer if (!ti) { 1751568c73a3SMike Snitzer ti = 
dm_table_find_target(map, bio->bi_iter.bi_sector); 1752123d87d5SMikulas Patocka if (unlikely(!ti)) { 1753568c73a3SMike Snitzer bio_io_error(bio); 1754568c73a3SMike Snitzer return ret; 1755568c73a3SMike Snitzer } 1756568c73a3SMike Snitzer } 1757568c73a3SMike Snitzer 1758568c73a3SMike Snitzer /* 1759568c73a3SMike Snitzer * If called from ->make_request_fn, we must use blk_queue_split(); 1760568c73a3SMike Snitzer * otherwise queue_limits for abnormal requests (e.g. discard, writesame) 1761568c73a3SMike Snitzer * won't be imposed. 1762568c73a3SMike Snitzer */ 1763568c73a3SMike Snitzer if (current->bio_list) { 1764568c73a3SMike Snitzer blk_queue_split(md->queue, &bio); 1765effd58c9SMike Snitzer if (!is_abnormal_io(bio)) 1766568c73a3SMike Snitzer dm_queue_split(md, ti, &bio); 1767568c73a3SMike Snitzer } 1768568c73a3SMike Snitzer 17696548c7c5SMike Snitzer if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED) 1770568c73a3SMike Snitzer return __process_bio(md, map, bio, ti); 17716548c7c5SMike Snitzer else 17726548c7c5SMike Snitzer return __split_and_process_bio(md, map, bio); 17736548c7c5SMike Snitzer } 17746548c7c5SMike Snitzer 177524113d48SMikulas Patocka static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio) 17761da177e4SLinus Torvalds { 17771da177e4SLinus Torvalds struct mapped_device *md = q->queuedata; 1778978e51baSMike Snitzer blk_qc_t ret = BLK_QC_T_NONE; 177983d5e5b0SMikulas Patocka int srcu_idx; 178083d5e5b0SMikulas Patocka struct dm_table *map; 17811da177e4SLinus Torvalds 178283d5e5b0SMikulas Patocka map = dm_get_live_table(md, &srcu_idx); 17831da177e4SLinus Torvalds 17846a8736d1STejun Heo /* if we're suspended, we have to queue this io for later */ 17856a8736d1STejun Heo if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { 178683d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 17871da177e4SLinus Torvalds 17881eff9d32SJens Axboe if (!(bio->bi_opf & REQ_RAHEAD)) 178992c63902SMikulas Patocka queue_io(md, bio); 17906a8736d1STejun Heo else 17916a8736d1STejun Heo bio_io_error(bio); 1792978e51baSMike Snitzer return ret; 17931da177e4SLinus Torvalds } 17941da177e4SLinus Torvalds 17956548c7c5SMike Snitzer ret = dm_process_bio(md, map, bio); 1796978e51baSMike Snitzer 179783d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 1798978e51baSMike Snitzer return ret; 1799978e51baSMike Snitzer } 1800978e51baSMike Snitzer 18011da177e4SLinus Torvalds static int dm_any_congested(void *congested_data, int bdi_bits) 18021da177e4SLinus Torvalds { 18038a57dfc6SChandra Seetharaman int r = bdi_bits; 18048a57dfc6SChandra Seetharaman struct mapped_device *md = congested_data; 18058a57dfc6SChandra Seetharaman struct dm_table *map; 18061da177e4SLinus Torvalds 18071eb787ecSAlasdair G Kergon if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 1808e522c039SMike Snitzer if (dm_request_based(md)) { 1809cec47e3dSKiyoshi Ueda /* 1810e522c039SMike Snitzer * With request-based DM we only need to check the 1811e522c039SMike Snitzer * top-level queue for congestion.
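		 * Bio-based DM instead has to ask every device in the live
		 * table, via dm_table_any_congested() below.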
1812cec47e3dSKiyoshi Ueda */ 1813dc3b17ccSJan Kara r = md->queue->backing_dev_info->wb.state & bdi_bits; 1814e522c039SMike Snitzer } else { 1815e522c039SMike Snitzer map = dm_get_live_table_fast(md); 1816e522c039SMike Snitzer if (map) 18171da177e4SLinus Torvalds r = dm_table_any_congested(map, bdi_bits); 181883d5e5b0SMikulas Patocka dm_put_live_table_fast(md); 18198a57dfc6SChandra Seetharaman } 1820e522c039SMike Snitzer } 18218a57dfc6SChandra Seetharaman 18221da177e4SLinus Torvalds return r; 18231da177e4SLinus Torvalds } 18241da177e4SLinus Torvalds 18251da177e4SLinus Torvalds /*----------------------------------------------------------------- 18261da177e4SLinus Torvalds * An IDR is used to keep track of allocated minor numbers. 18271da177e4SLinus Torvalds *---------------------------------------------------------------*/ 18282b06cfffSAlasdair G Kergon static void free_minor(int minor) 18291da177e4SLinus Torvalds { 1830f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 18311da177e4SLinus Torvalds idr_remove(&_minor_idr, minor); 1832f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 18331da177e4SLinus Torvalds } 18341da177e4SLinus Torvalds 18351da177e4SLinus Torvalds /* 18361da177e4SLinus Torvalds * See if the device with a specific minor # is free. 18371da177e4SLinus Torvalds */ 1838cf13ab8eSFrederik Deweerdt static int specific_minor(int minor) 18391da177e4SLinus Torvalds { 1840c9d76be6STejun Heo int r; 18411da177e4SLinus Torvalds 18421da177e4SLinus Torvalds if (minor >= (1 << MINORBITS)) 18431da177e4SLinus Torvalds return -EINVAL; 18441da177e4SLinus Torvalds 1845c9d76be6STejun Heo idr_preload(GFP_KERNEL); 1846f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 18471da177e4SLinus Torvalds 1848c9d76be6STejun Heo r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT); 18491da177e4SLinus Torvalds 1850f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 1851c9d76be6STejun Heo idr_preload_end(); 1852c9d76be6STejun Heo if (r < 0) 1853c9d76be6STejun Heo return r == -ENOSPC ? 
-EBUSY : r; 1854c9d76be6STejun Heo return 0; 18551da177e4SLinus Torvalds } 18561da177e4SLinus Torvalds 1857cf13ab8eSFrederik Deweerdt static int next_free_minor(int *minor) 18581da177e4SLinus Torvalds { 1859c9d76be6STejun Heo int r; 18601da177e4SLinus Torvalds 1861c9d76be6STejun Heo idr_preload(GFP_KERNEL); 1862f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 18631da177e4SLinus Torvalds 1864c9d76be6STejun Heo r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT); 18651da177e4SLinus Torvalds 1866f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 1867c9d76be6STejun Heo idr_preload_end(); 1868c9d76be6STejun Heo if (r < 0) 18691da177e4SLinus Torvalds return r; 1870c9d76be6STejun Heo *minor = r; 1871c9d76be6STejun Heo return 0; 18721da177e4SLinus Torvalds } 18731da177e4SLinus Torvalds 187483d5cde4SAlexey Dobriyan static const struct block_device_operations dm_blk_dops; 1875f26c5719SDan Williams static const struct dax_operations dm_dax_ops; 18761da177e4SLinus Torvalds 187753d5914fSMikulas Patocka static void dm_wq_work(struct work_struct *work); 187853d5914fSMikulas Patocka 1879c12c9a3cSMike Snitzer static void dm_init_normal_md_queue(struct mapped_device *md) 1880bfebd1cdSMike Snitzer { 1881bfebd1cdSMike Snitzer /* 1882bfebd1cdSMike Snitzer * Initialize aspects of queue that aren't relevant for blk-mq 1883bfebd1cdSMike Snitzer */ 1884dc3b17ccSJan Kara md->queue->backing_dev_info->congested_fn = dm_any_congested; 18854a0b4ddfSMike Snitzer } 18864a0b4ddfSMike Snitzer 18870f20972fSMike Snitzer static void cleanup_mapped_device(struct mapped_device *md) 18880f20972fSMike Snitzer { 18890f20972fSMike Snitzer if (md->wq) 18900f20972fSMike Snitzer destroy_workqueue(md->wq); 18916f1c819cSKent Overstreet bioset_exit(&md->bs); 18926f1c819cSKent Overstreet bioset_exit(&md->io_bs); 18930f20972fSMike Snitzer 1894f26c5719SDan Williams if (md->dax_dev) { 1895f26c5719SDan Williams kill_dax(md->dax_dev); 1896f26c5719SDan Williams put_dax(md->dax_dev); 1897f26c5719SDan Williams md->dax_dev = NULL; 1898f26c5719SDan Williams } 1899f26c5719SDan Williams 19000f20972fSMike Snitzer if (md->disk) { 19010f20972fSMike Snitzer spin_lock(&_minor_lock); 19020f20972fSMike Snitzer md->disk->private_data = NULL; 19030f20972fSMike Snitzer spin_unlock(&_minor_lock); 19040f20972fSMike Snitzer del_gendisk(md->disk); 19050f20972fSMike Snitzer put_disk(md->disk); 19060f20972fSMike Snitzer } 19070f20972fSMike Snitzer 19080f20972fSMike Snitzer if (md->queue) 19090f20972fSMike Snitzer blk_cleanup_queue(md->queue); 19100f20972fSMike Snitzer 1911d09960b0STahsin Erdogan cleanup_srcu_struct(&md->io_barrier); 1912d09960b0STahsin Erdogan 19130f20972fSMike Snitzer if (md->bdev) { 19140f20972fSMike Snitzer bdput(md->bdev); 19150f20972fSMike Snitzer md->bdev = NULL; 19160f20972fSMike Snitzer } 19174cc96131SMike Snitzer 1918d5ffebddSMike Snitzer mutex_destroy(&md->suspend_lock); 1919d5ffebddSMike Snitzer mutex_destroy(&md->type_lock); 1920d5ffebddSMike Snitzer mutex_destroy(&md->table_devices_lock); 1921d5ffebddSMike Snitzer 19224cc96131SMike Snitzer dm_mq_cleanup_mapped_device(md); 19230f20972fSMike Snitzer } 19240f20972fSMike Snitzer 19251da177e4SLinus Torvalds /* 19261da177e4SLinus Torvalds * Allocate and initialise a blank device with a given minor. 
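 * Returns NULL on failure; any partially constructed state is torn down
 * through the bad* labels at the end of the function.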
19271da177e4SLinus Torvalds */ 19282b06cfffSAlasdair G Kergon static struct mapped_device *alloc_dev(int minor) 19291da177e4SLinus Torvalds { 1930115485e8SMike Snitzer int r, numa_node_id = dm_get_numa_node(); 1931115485e8SMike Snitzer struct mapped_device *md; 1932ba61fdd1SJeff Mahoney void *old_md; 19331da177e4SLinus Torvalds 1934856eb091SMikulas Patocka md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id); 19351da177e4SLinus Torvalds if (!md) { 19361da177e4SLinus Torvalds DMWARN("unable to allocate device, out of memory."); 19371da177e4SLinus Torvalds return NULL; 19381da177e4SLinus Torvalds } 19391da177e4SLinus Torvalds 194010da4f79SJeff Mahoney if (!try_module_get(THIS_MODULE)) 19416ed7ade8SMilan Broz goto bad_module_get; 194210da4f79SJeff Mahoney 19431da177e4SLinus Torvalds /* get a minor number for the dev */ 19442b06cfffSAlasdair G Kergon if (minor == DM_ANY_MINOR) 1945cf13ab8eSFrederik Deweerdt r = next_free_minor(&minor); 19462b06cfffSAlasdair G Kergon else 1947cf13ab8eSFrederik Deweerdt r = specific_minor(minor); 19481da177e4SLinus Torvalds if (r < 0) 19496ed7ade8SMilan Broz goto bad_minor; 19501da177e4SLinus Torvalds 195183d5e5b0SMikulas Patocka r = init_srcu_struct(&md->io_barrier); 195283d5e5b0SMikulas Patocka if (r < 0) 195383d5e5b0SMikulas Patocka goto bad_io_barrier; 195483d5e5b0SMikulas Patocka 1955115485e8SMike Snitzer md->numa_node_id = numa_node_id; 1956591ddcfcSMike Snitzer md->init_tio_pdu = false; 1957a5664dadSMike Snitzer md->type = DM_TYPE_NONE; 1958e61290a4SDaniel Walker mutex_init(&md->suspend_lock); 1959a5664dadSMike Snitzer mutex_init(&md->type_lock); 196086f1152bSBenjamin Marzinski mutex_init(&md->table_devices_lock); 1961022c2611SMikulas Patocka spin_lock_init(&md->deferred_lock); 19621da177e4SLinus Torvalds atomic_set(&md->holders, 1); 19635c6bd75dSAlasdair G Kergon atomic_set(&md->open_count, 0); 19641da177e4SLinus Torvalds atomic_set(&md->event_nr, 0); 19657a8c3d3bSMike Anderson atomic_set(&md->uevent_seq, 0); 19667a8c3d3bSMike Anderson INIT_LIST_HEAD(&md->uevent_list); 196786f1152bSBenjamin Marzinski INIT_LIST_HEAD(&md->table_devices); 19687a8c3d3bSMike Anderson spin_lock_init(&md->uevent_lock); 19691da177e4SLinus Torvalds 19706d469642SChristoph Hellwig md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id); 19711da177e4SLinus Torvalds if (!md->queue) 19720f20972fSMike Snitzer goto bad; 1973c12c9a3cSMike Snitzer md->queue->queuedata = md; 1974c12c9a3cSMike Snitzer md->queue->backing_dev_info->congested_data = md; 19751da177e4SLinus Torvalds 1976c12c9a3cSMike Snitzer md->disk = alloc_disk_node(1, md->numa_node_id); 19771da177e4SLinus Torvalds if (!md->disk) 19780f20972fSMike Snitzer goto bad; 19791da177e4SLinus Torvalds 1980f0b04115SJeff Mahoney init_waitqueue_head(&md->wait); 198153d5914fSMikulas Patocka INIT_WORK(&md->work, dm_wq_work); 1982f0b04115SJeff Mahoney init_waitqueue_head(&md->eventq); 19832995fa78SMikulas Patocka init_completion(&md->kobj_holder.completion); 1984f0b04115SJeff Mahoney 19851da177e4SLinus Torvalds md->disk->major = _major; 19861da177e4SLinus Torvalds md->disk->first_minor = minor; 19871da177e4SLinus Torvalds md->disk->fops = &dm_blk_dops; 19881da177e4SLinus Torvalds md->disk->queue = md->queue; 19891da177e4SLinus Torvalds md->disk->private_data = md; 19901da177e4SLinus Torvalds sprintf(md->disk->disk_name, "dm-%d", minor); 1991f26c5719SDan Williams 1992976431b0SDan Williams if (IS_ENABLED(CONFIG_DAX_DRIVER)) { 1993fefc1d97SPankaj Gupta md->dax_dev = alloc_dax(md, md->disk->disk_name, 1994fefc1d97SPankaj Gupta 
&dm_dax_ops, 0); 1995514cf4f8SPeng Wang if (!md->dax_dev) 1996f26c5719SDan Williams goto bad; 1997976431b0SDan Williams } 1998f26c5719SDan Williams 1999c100ec49SMike Snitzer add_disk_no_queue_reg(md->disk); 20007e51f257SMike Anderson format_dev_t(md->name, MKDEV(_major, minor)); 20011da177e4SLinus Torvalds 2002670368a8STejun Heo md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0); 2003304f3f6aSMilan Broz if (!md->wq) 20040f20972fSMike Snitzer goto bad; 2005304f3f6aSMilan Broz 200632a926daSMikulas Patocka md->bdev = bdget_disk(md->disk, 0); 200732a926daSMikulas Patocka if (!md->bdev) 20080f20972fSMike Snitzer goto bad; 200932a926daSMikulas Patocka 2010fd2ed4d2SMikulas Patocka dm_stats_init(&md->stats); 2011fd2ed4d2SMikulas Patocka 2012ba61fdd1SJeff Mahoney /* Populate the mapping; nobody knows we exist yet */ 2013f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 2014ba61fdd1SJeff Mahoney old_md = idr_replace(&_minor_idr, md, minor); 2015f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 2016ba61fdd1SJeff Mahoney 2017ba61fdd1SJeff Mahoney BUG_ON(old_md != MINOR_ALLOCED); 2018ba61fdd1SJeff Mahoney 20191da177e4SLinus Torvalds return md; 20201da177e4SLinus Torvalds 20210f20972fSMike Snitzer bad: 20220f20972fSMike Snitzer cleanup_mapped_device(md); 202383d5e5b0SMikulas Patocka bad_io_barrier: 20241da177e4SLinus Torvalds free_minor(minor); 20256ed7ade8SMilan Broz bad_minor: 202610da4f79SJeff Mahoney module_put(THIS_MODULE); 20276ed7ade8SMilan Broz bad_module_get: 2028856eb091SMikulas Patocka kvfree(md); 20291da177e4SLinus Torvalds return NULL; 20301da177e4SLinus Torvalds } 20311da177e4SLinus Torvalds 2032ae9da83fSJun'ichi Nomura static void unlock_fs(struct mapped_device *md); 2033ae9da83fSJun'ichi Nomura 20341da177e4SLinus Torvalds static void free_dev(struct mapped_device *md) 20351da177e4SLinus Torvalds { 2036f331c029STejun Heo int minor = MINOR(disk_devt(md->disk)); 203763d94e48SJun'ichi Nomura 2038ae9da83fSJun'ichi Nomura unlock_fs(md); 20392eb6e1e3SKeith Busch 20400f20972fSMike Snitzer cleanup_mapped_device(md); 20410f20972fSMike Snitzer 20420f20972fSMike Snitzer free_table_devices(&md->table_devices); 20430f20972fSMike Snitzer dm_stats_cleanup(&md->stats); 204463a4f065SMike Snitzer free_minor(minor); 204563a4f065SMike Snitzer 204610da4f79SJeff Mahoney module_put(THIS_MODULE); 2047856eb091SMikulas Patocka kvfree(md); 20481da177e4SLinus Torvalds } 20491da177e4SLinus Torvalds 20502a2a4c51SJens Axboe static int __bind_mempools(struct mapped_device *md, struct dm_table *t) 2051e6ee8c0bSKiyoshi Ueda { 2052c0820cf5SMikulas Patocka struct dm_md_mempools *p = dm_table_get_md_mempools(t); 20532a2a4c51SJens Axboe int ret = 0; 2054e6ee8c0bSKiyoshi Ueda 2055545ed20eSToshi Kani if (dm_table_bio_based(t)) { 2056c0820cf5SMikulas Patocka /* 205764f52b0eSMike Snitzer * The md may already have mempools that need changing. 205864f52b0eSMike Snitzer * If so, reload the bioset because front_pad may have changed 205916245bdcSJun'ichi Nomura * when a different table was loaded. 2060c0820cf5SMikulas Patocka */ 20616f1c819cSKent Overstreet bioset_exit(&md->bs); 20626f1c819cSKent Overstreet bioset_exit(&md->io_bs); 20630776aa0eSMike Snitzer 20646f1c819cSKent Overstreet } else if (bioset_initialized(&md->bs)) { 2065cbc4e3c1SMike Snitzer /* 20664e6e36c3SMike Snitzer * There's no need to reload with request-based dm 20674e6e36c3SMike Snitzer * because the size of front_pad doesn't change.
20684e6e36c3SMike Snitzer * Note for future: if the bioset is ever reloaded, 20694e6e36c3SMike Snitzer * prep-ed requests in the queue may refer 20704e6e36c3SMike Snitzer * to bios from the old bioset, so you must walk 20714e6e36c3SMike Snitzer * through the queue to unprep them. 2072cbc4e3c1SMike Snitzer */ 2073cbc4e3c1SMike Snitzer goto out; 2074cbc4e3c1SMike Snitzer } 2075cbc4e3c1SMike Snitzer 20766f1c819cSKent Overstreet BUG_ON(!p || 20776f1c819cSKent Overstreet bioset_initialized(&md->bs) || 20786f1c819cSKent Overstreet bioset_initialized(&md->io_bs)); 2079e6ee8c0bSKiyoshi Ueda 20802a2a4c51SJens Axboe ret = bioset_init_from_src(&md->bs, &p->bs); 20812a2a4c51SJens Axboe if (ret) 20822a2a4c51SJens Axboe goto out; 20832a2a4c51SJens Axboe ret = bioset_init_from_src(&md->io_bs, &p->io_bs); 20842a2a4c51SJens Axboe if (ret) 20852a2a4c51SJens Axboe bioset_exit(&md->bs); 2086e6ee8c0bSKiyoshi Ueda out: 208702233342SMike Snitzer /* mempool bind completed, no longer need any mempools in the table */ 2088e6ee8c0bSKiyoshi Ueda dm_table_free_md_mempools(t); 20892a2a4c51SJens Axboe return ret; 2090e6ee8c0bSKiyoshi Ueda } 2091e6ee8c0bSKiyoshi Ueda 20921da177e4SLinus Torvalds /* 20931da177e4SLinus Torvalds * Bind a table to the device. 20941da177e4SLinus Torvalds */ 20951da177e4SLinus Torvalds static void event_callback(void *context) 20961da177e4SLinus Torvalds { 20977a8c3d3bSMike Anderson unsigned long flags; 20987a8c3d3bSMike Anderson LIST_HEAD(uevents); 20991da177e4SLinus Torvalds struct mapped_device *md = (struct mapped_device *) context; 21001da177e4SLinus Torvalds 21017a8c3d3bSMike Anderson spin_lock_irqsave(&md->uevent_lock, flags); 21027a8c3d3bSMike Anderson list_splice_init(&md->uevent_list, &uevents); 21037a8c3d3bSMike Anderson spin_unlock_irqrestore(&md->uevent_lock, flags); 21047a8c3d3bSMike Anderson 2105ed9e1982STejun Heo dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); 21067a8c3d3bSMike Anderson 21071da177e4SLinus Torvalds atomic_inc(&md->event_nr); 21081da177e4SLinus Torvalds wake_up(&md->eventq); 210962e08243SMikulas Patocka dm_issue_global_event(); 21101da177e4SLinus Torvalds } 21111da177e4SLinus Torvalds 2112c217649bSMike Snitzer /* 2113c217649bSMike Snitzer * Protected by md->suspend_lock obtained by dm_swap_table(). 2114c217649bSMike Snitzer */ 21154e90188bSAlasdair G Kergon static void __set_size(struct mapped_device *md, sector_t size) 21161da177e4SLinus Torvalds { 21171ea0654eSBart Van Assche lockdep_assert_held(&md->suspend_lock); 21181ea0654eSBart Van Assche 21194e90188bSAlasdair G Kergon set_capacity(md->disk, size); 21201da177e4SLinus Torvalds 2121db8fef4fSMikulas Patocka i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT); 21221da177e4SLinus Torvalds } 21231da177e4SLinus Torvalds 2124042d2a9bSAlasdair G Kergon /* 2125042d2a9bSAlasdair G Kergon * Returns old map, which caller must destroy.
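 * (typically with dm_table_destroy(); see how __dm_destroy() disposes of
 * the table returned by __unbind())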
2126042d2a9bSAlasdair G Kergon */ 2127042d2a9bSAlasdair G Kergon static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, 2128754c5fc7SMike Snitzer struct queue_limits *limits) 21291da177e4SLinus Torvalds { 2130042d2a9bSAlasdair G Kergon struct dm_table *old_map; 2131165125e1SJens Axboe struct request_queue *q = md->queue; 2132978e51baSMike Snitzer bool request_based = dm_table_request_based(t); 21331da177e4SLinus Torvalds sector_t size; 21342a2a4c51SJens Axboe int ret; 21351da177e4SLinus Torvalds 21365a8f1f80SBart Van Assche lockdep_assert_held(&md->suspend_lock); 21375a8f1f80SBart Van Assche 21381da177e4SLinus Torvalds size = dm_table_get_size(t); 21393ac51e74SDarrick J. Wong 21403ac51e74SDarrick J. Wong /* 21413ac51e74SDarrick J. Wong * Wipe any geometry if the size of the table changed. 21423ac51e74SDarrick J. Wong */ 2143fd2ed4d2SMikulas Patocka if (size != dm_get_size(md)) 21443ac51e74SDarrick J. Wong memset(&md->geometry, 0, sizeof(md->geometry)); 21453ac51e74SDarrick J. Wong 21464e90188bSAlasdair G Kergon __set_size(md, size); 21471da177e4SLinus Torvalds 2148cf222b37SAlasdair G Kergon dm_table_event_callback(t, event_callback, md); 21492ca3310eSAlasdair G Kergon 2150e6ee8c0bSKiyoshi Ueda /* 2151e6ee8c0bSKiyoshi Ueda * The queue hasn't been stopped yet if the old table type wasn't 2152e6ee8c0bSKiyoshi Ueda * request-based during suspension, so stop it now to prevent 2153e6ee8c0bSKiyoshi Ueda * I/O from being mapped before resume. 2154e6ee8c0bSKiyoshi Ueda * This must be done before setting the queue restrictions, 2155e6ee8c0bSKiyoshi Ueda * because request-based dm may be run just after the setting. 2156e6ee8c0bSKiyoshi Ueda */ 2157978e51baSMike Snitzer if (request_based) 2158eca7ee6dSMike Snitzer dm_stop_queue(q); 2159978e51baSMike Snitzer 2160978e51baSMike Snitzer if (request_based || md->type == DM_TYPE_NVME_BIO_BASED) { 216116f12266SMike Snitzer /* 2162978e51baSMike Snitzer * Leverage the fact that request-based DM targets and 2163978e51baSMike Snitzer * NVMe bio-based targets are immutable singletons; 2164978e51baSMike Snitzer * this is used to optimize dm_request_fn, dm_mq_queue_rq 2165978e51baSMike Snitzer * and __process_bio. 216616f12266SMike Snitzer */ 216716f12266SMike Snitzer md->immutable_target = dm_table_get_immutable_target(t); 216816f12266SMike Snitzer } 2169e6ee8c0bSKiyoshi Ueda 21702a2a4c51SJens Axboe ret = __bind_mempools(md, t); 21712a2a4c51SJens Axboe if (ret) { 21722a2a4c51SJens Axboe old_map = ERR_PTR(ret); 21732a2a4c51SJens Axboe goto out; 21742a2a4c51SJens Axboe } 2175e6ee8c0bSKiyoshi Ueda 2176a12f5d48SEric Dumazet old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 21771d3aa6f6SMike Snitzer rcu_assign_pointer(md->map, (void *)t); 217836a0456fSAlasdair G Kergon md->immutable_target_type = dm_table_get_immutable_target_type(t); 217936a0456fSAlasdair G Kergon 2180754c5fc7SMike Snitzer dm_table_set_restrictions(t, q, limits); 218141abc4e1SHannes Reinecke if (old_map) 218283d5e5b0SMikulas Patocka dm_sync_table(md); 21832ca3310eSAlasdair G Kergon 21842a2a4c51SJens Axboe out: 2185042d2a9bSAlasdair G Kergon return old_map; 21861da177e4SLinus Torvalds } 21871da177e4SLinus Torvalds 2188a7940155SAlasdair G Kergon /* 2189a7940155SAlasdair G Kergon * Returns unbound table for the caller to free.
2190a7940155SAlasdair G Kergon */ 2191a7940155SAlasdair G Kergon static struct dm_table *__unbind(struct mapped_device *md) 21921da177e4SLinus Torvalds { 2193a12f5d48SEric Dumazet struct dm_table *map = rcu_dereference_protected(md->map, 1); 21941da177e4SLinus Torvalds 21951da177e4SLinus Torvalds if (!map) 2196a7940155SAlasdair G Kergon return NULL; 21971da177e4SLinus Torvalds 21981da177e4SLinus Torvalds dm_table_event_callback(map, NULL, NULL); 21999cdb8520SMonam Agarwal RCU_INIT_POINTER(md->map, NULL); 220083d5e5b0SMikulas Patocka dm_sync_table(md); 2201a7940155SAlasdair G Kergon 2202a7940155SAlasdair G Kergon return map; 22031da177e4SLinus Torvalds } 22041da177e4SLinus Torvalds 22051da177e4SLinus Torvalds /* 22061da177e4SLinus Torvalds * Constructor for a new device. 22071da177e4SLinus Torvalds */ 22082b06cfffSAlasdair G Kergon int dm_create(int minor, struct mapped_device **result) 22091da177e4SLinus Torvalds { 2210c12c9a3cSMike Snitzer int r; 22111da177e4SLinus Torvalds struct mapped_device *md; 22121da177e4SLinus Torvalds 22132b06cfffSAlasdair G Kergon md = alloc_dev(minor); 22141da177e4SLinus Torvalds if (!md) 22151da177e4SLinus Torvalds return -ENXIO; 22161da177e4SLinus Torvalds 2217c12c9a3cSMike Snitzer r = dm_sysfs_init(md); 2218c12c9a3cSMike Snitzer if (r) { 2219c12c9a3cSMike Snitzer free_dev(md); 2220c12c9a3cSMike Snitzer return r; 2221c12c9a3cSMike Snitzer } 2222784aae73SMilan Broz 22231da177e4SLinus Torvalds *result = md; 22241da177e4SLinus Torvalds return 0; 22251da177e4SLinus Torvalds } 22261da177e4SLinus Torvalds 2227a5664dadSMike Snitzer /* 2228a5664dadSMike Snitzer * Functions to manage md->type. 2229a5664dadSMike Snitzer * All are required to hold md->type_lock. 2230a5664dadSMike Snitzer */ 2231a5664dadSMike Snitzer void dm_lock_md_type(struct mapped_device *md) 2232a5664dadSMike Snitzer { 2233a5664dadSMike Snitzer mutex_lock(&md->type_lock); 2234a5664dadSMike Snitzer } 2235a5664dadSMike Snitzer 2236a5664dadSMike Snitzer void dm_unlock_md_type(struct mapped_device *md) 2237a5664dadSMike Snitzer { 2238a5664dadSMike Snitzer mutex_unlock(&md->type_lock); 2239a5664dadSMike Snitzer } 2240a5664dadSMike Snitzer 22417e0d574fSBart Van Assche void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type) 2242a5664dadSMike Snitzer { 224300c4fc3bSMike Snitzer BUG_ON(!mutex_is_locked(&md->type_lock)); 2244a5664dadSMike Snitzer md->type = type; 2245a5664dadSMike Snitzer } 2246a5664dadSMike Snitzer 22477e0d574fSBart Van Assche enum dm_queue_mode dm_get_md_type(struct mapped_device *md) 2248a5664dadSMike Snitzer { 2249a5664dadSMike Snitzer return md->type; 2250a5664dadSMike Snitzer } 2251a5664dadSMike Snitzer 225236a0456fSAlasdair G Kergon struct target_type *dm_get_immutable_target_type(struct mapped_device *md) 225336a0456fSAlasdair G Kergon { 225436a0456fSAlasdair G Kergon return md->immutable_target_type; 225536a0456fSAlasdair G Kergon } 225636a0456fSAlasdair G Kergon 22574a0b4ddfSMike Snitzer /* 2258f84cb8a4SMike Snitzer * The queue_limits are only valid as long as you have a reference 2259f84cb8a4SMike Snitzer * count on 'md'. 
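 * (i.e. while holding a reference taken with dm_get() or dm_get_md();
 * the BUG_ON() below checks that md->holders is non-zero)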
2260f84cb8a4SMike Snitzer */ 2261f84cb8a4SMike Snitzer struct queue_limits *dm_get_queue_limits(struct mapped_device *md) 2262f84cb8a4SMike Snitzer { 2263f84cb8a4SMike Snitzer BUG_ON(!atomic_read(&md->holders)); 2264f84cb8a4SMike Snitzer return &md->queue->limits; 2265f84cb8a4SMike Snitzer } 2266f84cb8a4SMike Snitzer EXPORT_SYMBOL_GPL(dm_get_queue_limits); 2267f84cb8a4SMike Snitzer 22684a0b4ddfSMike Snitzer /* 22694a0b4ddfSMike Snitzer * Setup the DM device's queue based on md's type 22704a0b4ddfSMike Snitzer */ 2271591ddcfcSMike Snitzer int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) 22724a0b4ddfSMike Snitzer { 2273bfebd1cdSMike Snitzer int r; 2274c100ec49SMike Snitzer struct queue_limits limits; 22757e0d574fSBart Van Assche enum dm_queue_mode type = dm_get_md_type(md); 2276bfebd1cdSMike Snitzer 2277545ed20eSToshi Kani switch (type) { 2278bfebd1cdSMike Snitzer case DM_TYPE_REQUEST_BASED: 2279e83068a5SMike Snitzer r = dm_mq_init_request_queue(md, t); 2280bfebd1cdSMike Snitzer if (r) { 2281eca7ee6dSMike Snitzer DMERR("Cannot initialize queue for request-based dm-mq mapped device"); 2282bfebd1cdSMike Snitzer return r; 2283bfebd1cdSMike Snitzer } 2284bfebd1cdSMike Snitzer break; 2285bfebd1cdSMike Snitzer case DM_TYPE_BIO_BASED: 2286545ed20eSToshi Kani case DM_TYPE_DAX_BIO_BASED: 2287978e51baSMike Snitzer case DM_TYPE_NVME_BIO_BASED: 2288978e51baSMike Snitzer dm_init_normal_md_queue(md); 228924113d48SMikulas Patocka blk_queue_make_request(md->queue, dm_make_request); 2290bfebd1cdSMike Snitzer break; 22917e0d574fSBart Van Assche case DM_TYPE_NONE: 22927e0d574fSBart Van Assche WARN_ON_ONCE(true); 22937e0d574fSBart Van Assche break; 2294ff36ab34SMike Snitzer } 22954a0b4ddfSMike Snitzer 2296c100ec49SMike Snitzer r = dm_calculate_queue_limits(t, &limits); 2297c100ec49SMike Snitzer if (r) { 2298c100ec49SMike Snitzer DMERR("Cannot calculate initial queue limits"); 2299c100ec49SMike Snitzer return r; 2300c100ec49SMike Snitzer } 2301c100ec49SMike Snitzer dm_table_set_restrictions(t, md->queue, &limits); 2302c100ec49SMike Snitzer blk_register_queue(md->disk); 2303c100ec49SMike Snitzer 23044a0b4ddfSMike Snitzer return 0; 23054a0b4ddfSMike Snitzer } 23064a0b4ddfSMike Snitzer 23072bec1f4aSMikulas Patocka struct mapped_device *dm_get_md(dev_t dev) 23081da177e4SLinus Torvalds { 23091da177e4SLinus Torvalds struct mapped_device *md; 23101da177e4SLinus Torvalds unsigned minor = MINOR(dev); 23111da177e4SLinus Torvalds 23121da177e4SLinus Torvalds if (MAJOR(dev) != _major || minor >= (1 << MINORBITS)) 23131da177e4SLinus Torvalds return NULL; 23141da177e4SLinus Torvalds 2315f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 23161da177e4SLinus Torvalds 23171da177e4SLinus Torvalds md = idr_find(&_minor_idr, minor); 231849de5769SMike Snitzer if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) || 231949de5769SMike Snitzer test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { 2320637842cfSDavid Teigland md = NULL; 2321fba9f90eSJeff Mahoney goto out; 2322fba9f90eSJeff Mahoney } 23232bec1f4aSMikulas Patocka dm_get(md); 2324fba9f90eSJeff Mahoney out: 2325f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 23261da177e4SLinus Torvalds 2327637842cfSDavid Teigland return md; 2328637842cfSDavid Teigland } 23293cf2e4baSAlasdair G Kergon EXPORT_SYMBOL_GPL(dm_get_md); 2330d229a958SDavid Teigland 23319ade92a9SAlasdair G Kergon void *dm_get_mdptr(struct mapped_device *md) 2332637842cfSDavid Teigland { 23339ade92a9SAlasdair G Kergon return md->interface_ptr; 23341da177e4SLinus Torvalds } 
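/*
 * A minimal usage sketch (illustrative, not part of this file): callers
 * outside dm core typically pair dm_get_md() or dm_hold() with dm_put().
 * The function name and error handling below are hypothetical.
 *
 * static int example_use_md(dev_t dev)
 * {
 *	struct mapped_device *md = dm_get_md(dev);
 *
 *	if (!md)
 *		return -ENXIO;
 *	DMINFO("found device %s", dm_device_name(md));
 *	dm_put(md);
 *	return 0;
 * }
 */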
23351da177e4SLinus Torvalds 23361da177e4SLinus Torvalds void dm_set_mdptr(struct mapped_device *md, void *ptr) 23371da177e4SLinus Torvalds { 23381da177e4SLinus Torvalds md->interface_ptr = ptr; 23391da177e4SLinus Torvalds } 23401da177e4SLinus Torvalds 23411da177e4SLinus Torvalds void dm_get(struct mapped_device *md) 23421da177e4SLinus Torvalds { 23431da177e4SLinus Torvalds atomic_inc(&md->holders); 23443f77316dSKiyoshi Ueda BUG_ON(test_bit(DMF_FREEING, &md->flags)); 23451da177e4SLinus Torvalds } 23461da177e4SLinus Torvalds 234709ee96b2SMikulas Patocka int dm_hold(struct mapped_device *md) 234809ee96b2SMikulas Patocka { 234909ee96b2SMikulas Patocka spin_lock(&_minor_lock); 235009ee96b2SMikulas Patocka if (test_bit(DMF_FREEING, &md->flags)) { 235109ee96b2SMikulas Patocka spin_unlock(&_minor_lock); 235209ee96b2SMikulas Patocka return -EBUSY; 235309ee96b2SMikulas Patocka } 235409ee96b2SMikulas Patocka dm_get(md); 235509ee96b2SMikulas Patocka spin_unlock(&_minor_lock); 235609ee96b2SMikulas Patocka return 0; 235709ee96b2SMikulas Patocka } 235809ee96b2SMikulas Patocka EXPORT_SYMBOL_GPL(dm_hold); 235909ee96b2SMikulas Patocka 236072d94861SAlasdair G Kergon const char *dm_device_name(struct mapped_device *md) 236172d94861SAlasdair G Kergon { 236272d94861SAlasdair G Kergon return md->name; 236372d94861SAlasdair G Kergon } 236472d94861SAlasdair G Kergon EXPORT_SYMBOL_GPL(dm_device_name); 236572d94861SAlasdair G Kergon 23663f77316dSKiyoshi Ueda static void __dm_destroy(struct mapped_device *md, bool wait) 23671da177e4SLinus Torvalds { 23681134e5aeSMike Anderson struct dm_table *map; 236983d5e5b0SMikulas Patocka int srcu_idx; 23701da177e4SLinus Torvalds 23713f77316dSKiyoshi Ueda might_sleep(); 2372fba9f90eSJeff Mahoney 237363a4f065SMike Snitzer spin_lock(&_minor_lock); 23743f77316dSKiyoshi Ueda idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); 2375fba9f90eSJeff Mahoney set_bit(DMF_FREEING, &md->flags); 2376f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 23773f77316dSKiyoshi Ueda 2378c12c9a3cSMike Snitzer blk_set_queue_dying(md->queue); 23793b785fbcSBart Van Assche 2380ab7c7bb6SMikulas Patocka /* 2381ab7c7bb6SMikulas Patocka * Take suspend_lock so that presuspend and postsuspend methods 2382ab7c7bb6SMikulas Patocka * do not race with internal suspend. 2383ab7c7bb6SMikulas Patocka */ 2384ab7c7bb6SMikulas Patocka mutex_lock(&md->suspend_lock); 23852a708cffSJunichi Nomura map = dm_get_live_table(md, &srcu_idx); 23864f186f8bSKiyoshi Ueda if (!dm_suspended_md(md)) { 23871da177e4SLinus Torvalds dm_table_presuspend_targets(map); 23881da177e4SLinus Torvalds dm_table_postsuspend_targets(map); 23891da177e4SLinus Torvalds } 239083d5e5b0SMikulas Patocka /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ 239183d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 23922a708cffSJunichi Nomura mutex_unlock(&md->suspend_lock); 239383d5e5b0SMikulas Patocka 23943f77316dSKiyoshi Ueda /* 23953f77316dSKiyoshi Ueda * Rare, but there may be I/O requests that are still in flight 23963f77316dSKiyoshi Ueda * and need to complete. Wait for all references to disappear. 23973f77316dSKiyoshi Ueda * No one should increment the reference count of the mapped_device 23983f77316dSKiyoshi Ueda * after its state becomes DMF_FREEING.
23993f77316dSKiyoshi Ueda */
24003f77316dSKiyoshi Ueda if (wait)
24013f77316dSKiyoshi Ueda while (atomic_read(&md->holders))
24023f77316dSKiyoshi Ueda msleep(1);
24033f77316dSKiyoshi Ueda else if (atomic_read(&md->holders))
24043f77316dSKiyoshi Ueda DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
24053f77316dSKiyoshi Ueda dm_device_name(md), atomic_read(&md->holders));
24063f77316dSKiyoshi Ueda
2407784aae73SMilan Broz dm_sysfs_exit(md);
2408a7940155SAlasdair G Kergon dm_table_destroy(__unbind(md));
24091da177e4SLinus Torvalds free_dev(md);
24101da177e4SLinus Torvalds }
24113f77316dSKiyoshi Ueda
24123f77316dSKiyoshi Ueda void dm_destroy(struct mapped_device *md)
24133f77316dSKiyoshi Ueda {
24143f77316dSKiyoshi Ueda __dm_destroy(md, true);
24153f77316dSKiyoshi Ueda }
24163f77316dSKiyoshi Ueda
24173f77316dSKiyoshi Ueda void dm_destroy_immediate(struct mapped_device *md)
24183f77316dSKiyoshi Ueda {
24193f77316dSKiyoshi Ueda __dm_destroy(md, false);
24203f77316dSKiyoshi Ueda }
24213f77316dSKiyoshi Ueda
24223f77316dSKiyoshi Ueda void dm_put(struct mapped_device *md)
24233f77316dSKiyoshi Ueda {
24243f77316dSKiyoshi Ueda atomic_dec(&md->holders);
24251da177e4SLinus Torvalds }
242679eb885cSEdward Goggin EXPORT_SYMBOL_GPL(dm_put);
24271da177e4SLinus Torvalds
2428b48633f8SBart Van Assche static int dm_wait_for_completion(struct mapped_device *md, long task_state)
242946125c1cSMilan Broz {
243046125c1cSMilan Broz int r = 0;
24319f4c3f87SBart Van Assche DEFINE_WAIT(wait);
243246125c1cSMilan Broz
243346125c1cSMilan Broz while (1) {
24349f4c3f87SBart Van Assche prepare_to_wait(&md->wait, &wait, task_state);
243546125c1cSMilan Broz
2436b4324feeSKiyoshi Ueda if (!md_in_flight(md))
243746125c1cSMilan Broz break;
243846125c1cSMilan Broz
2439e3fabdfdSBart Van Assche if (signal_pending_state(task_state, current)) {
244046125c1cSMilan Broz r = -EINTR;
244146125c1cSMilan Broz break;
244246125c1cSMilan Broz }
244346125c1cSMilan Broz
244446125c1cSMilan Broz io_schedule();
244546125c1cSMilan Broz }
24469f4c3f87SBart Van Assche finish_wait(&md->wait, &wait);
2447b44ebeb0SMikulas Patocka
244846125c1cSMilan Broz return r;
244946125c1cSMilan Broz }
245046125c1cSMilan Broz
24511da177e4SLinus Torvalds /*
24521da177e4SLinus Torvalds * Process the deferred bios
24531da177e4SLinus Torvalds */
2454ef208587SMikulas Patocka static void dm_wq_work(struct work_struct *work)
24551da177e4SLinus Torvalds {
2456ef208587SMikulas Patocka struct mapped_device *md = container_of(work, struct mapped_device,
2457ef208587SMikulas Patocka work);
24586d6f10dfSMilan Broz struct bio *c;
245983d5e5b0SMikulas Patocka int srcu_idx;
246083d5e5b0SMikulas Patocka struct dm_table *map;
24611da177e4SLinus Torvalds
246283d5e5b0SMikulas Patocka map = dm_get_live_table(md, &srcu_idx);
2463ef208587SMikulas Patocka
24643b00b203SMikulas Patocka while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
2465022c2611SMikulas Patocka spin_lock_irq(&md->deferred_lock);
2466022c2611SMikulas Patocka c = bio_list_pop(&md->deferred);
2467022c2611SMikulas Patocka spin_unlock_irq(&md->deferred_lock);
2468022c2611SMikulas Patocka
24696a8736d1STejun Heo if (!c)
2470df12ee99SAlasdair G Kergon break;
247173d410c0SMilan Broz
2472e6ee8c0bSKiyoshi Ueda if (dm_request_based(md))
24736548c7c5SMike Snitzer (void) generic_make_request(c);
2474af7e466aSMikulas Patocka else
24756548c7c5SMike Snitzer (void) dm_process_bio(md, map, c);
2476e6ee8c0bSKiyoshi Ueda }
24773b00b203SMikulas Patocka
247883d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx);
24791da177e4SLinus Torvalds }
24801da177e4SLinus Torvalds
24819a1fb464SMikulas Patocka static void dm_queue_flush(struct mapped_device *md)
2482304f3f6aSMilan Broz {
24833b00b203SMikulas Patocka clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
24844e857c58SPeter Zijlstra smp_mb__after_atomic();
248553d5914fSMikulas Patocka queue_work(md->wq, &md->work);
2486304f3f6aSMilan Broz }
2487304f3f6aSMilan Broz
24881da177e4SLinus Torvalds /*
2489042d2a9bSAlasdair G Kergon * Swap in a new table, returning the old one for the caller to destroy.
24901da177e4SLinus Torvalds */
2491042d2a9bSAlasdair G Kergon struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
24921da177e4SLinus Torvalds {
249387eb5b21SMike Christie struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
2494754c5fc7SMike Snitzer struct queue_limits limits;
2495042d2a9bSAlasdair G Kergon int r;
24961da177e4SLinus Torvalds
2497e61290a4SDaniel Walker mutex_lock(&md->suspend_lock);
24981da177e4SLinus Torvalds
24991da177e4SLinus Torvalds /* device must be suspended */
25004f186f8bSKiyoshi Ueda if (!dm_suspended_md(md))
250193c534aeSAlasdair G Kergon goto out;
25021da177e4SLinus Torvalds
25033ae70656SMike Snitzer /*
25043ae70656SMike Snitzer * If the new table has no data devices, retain the existing limits.
25053ae70656SMike Snitzer * This helps multipath with queue_if_no_path if all paths disappear,
25063ae70656SMike Snitzer * then new I/O is queued based on these limits, and then some paths
25073ae70656SMike Snitzer * reappear.
25083ae70656SMike Snitzer */
25093ae70656SMike Snitzer if (dm_table_has_no_data_devices(table)) {
251083d5e5b0SMikulas Patocka live_map = dm_get_live_table_fast(md);
25113ae70656SMike Snitzer if (live_map)
25123ae70656SMike Snitzer limits = md->queue->limits;
251383d5e5b0SMikulas Patocka dm_put_live_table_fast(md);
25143ae70656SMike Snitzer }
25153ae70656SMike Snitzer
251687eb5b21SMike Christie if (!live_map) {
2517754c5fc7SMike Snitzer r = dm_calculate_queue_limits(table, &limits);
2518042d2a9bSAlasdair G Kergon if (r) {
2519042d2a9bSAlasdair G Kergon map = ERR_PTR(r);
2520754c5fc7SMike Snitzer goto out;
2521042d2a9bSAlasdair G Kergon }
252287eb5b21SMike Christie }
2523754c5fc7SMike Snitzer
2524042d2a9bSAlasdair G Kergon map = __bind(md, table, &limits);
252562e08243SMikulas Patocka dm_issue_global_event();
25261da177e4SLinus Torvalds
252793c534aeSAlasdair G Kergon out:
2528e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock);
2529042d2a9bSAlasdair G Kergon return map;
25301da177e4SLinus Torvalds }
25311da177e4SLinus Torvalds
25321da177e4SLinus Torvalds /*
25331da177e4SLinus Torvalds * Functions to lock and unlock any filesystem running on the
25341da177e4SLinus Torvalds * device.
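 * (Editorial note: lock_fs() freezes the underlying bdev via
 * freeze_bdev() and sets DMF_FROZEN; unlock_fs() only thaws when
 * DMF_FROZEN is set, so it is safe to call unconditionally on
 * resume and error paths.)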
25351da177e4SLinus Torvalds */
25362ca3310eSAlasdair G Kergon static int lock_fs(struct mapped_device *md)
25371da177e4SLinus Torvalds {
2538e39e2e95SAlasdair G Kergon int r;
25391da177e4SLinus Torvalds
25401da177e4SLinus Torvalds WARN_ON(md->frozen_sb);
2541dfbe03f6SAlasdair G Kergon
2542db8fef4fSMikulas Patocka md->frozen_sb = freeze_bdev(md->bdev);
2543dfbe03f6SAlasdair G Kergon if (IS_ERR(md->frozen_sb)) {
2544cf222b37SAlasdair G Kergon r = PTR_ERR(md->frozen_sb);
2545e39e2e95SAlasdair G Kergon md->frozen_sb = NULL;
2546e39e2e95SAlasdair G Kergon return r;
2547dfbe03f6SAlasdair G Kergon }
2548dfbe03f6SAlasdair G Kergon
2549aa8d7c2fSAlasdair G Kergon set_bit(DMF_FROZEN, &md->flags);
2550aa8d7c2fSAlasdair G Kergon
25511da177e4SLinus Torvalds return 0;
25521da177e4SLinus Torvalds }
25531da177e4SLinus Torvalds
25542ca3310eSAlasdair G Kergon static void unlock_fs(struct mapped_device *md)
25551da177e4SLinus Torvalds {
2556aa8d7c2fSAlasdair G Kergon if (!test_bit(DMF_FROZEN, &md->flags))
2557aa8d7c2fSAlasdair G Kergon return;
2558aa8d7c2fSAlasdair G Kergon
2559db8fef4fSMikulas Patocka thaw_bdev(md->bdev, md->frozen_sb);
25601da177e4SLinus Torvalds md->frozen_sb = NULL;
2561aa8d7c2fSAlasdair G Kergon clear_bit(DMF_FROZEN, &md->flags);
25621da177e4SLinus Torvalds }
25631da177e4SLinus Torvalds
25641da177e4SLinus Torvalds /*
2565b48633f8SBart Van Assche * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG
2566b48633f8SBart Van Assche * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE
2567b48633f8SBart Van Assche * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY
2568b48633f8SBart Van Assche *
2569ffcc3936SMike Snitzer * If __dm_suspend returns 0, the device is completely quiescent
2570ffcc3936SMike Snitzer * now. There is no request-processing activity. All new requests
2571ffcc3936SMike Snitzer * are being added to the md->deferred list.
2572cec47e3dSKiyoshi Ueda */
2573ffcc3936SMike Snitzer static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
2574b48633f8SBart Van Assche unsigned suspend_flags, long task_state,
2575eaf9a736SMike Snitzer int dmf_suspended_flag)
25761da177e4SLinus Torvalds {
2577ffcc3936SMike Snitzer bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
2578ffcc3936SMike Snitzer bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
2579ffcc3936SMike Snitzer int r;
2580cf222b37SAlasdair G Kergon
25815a8f1f80SBart Van Assche lockdep_assert_held(&md->suspend_lock);
25825a8f1f80SBart Van Assche
25832e93ccc1SKiyoshi Ueda /*
25842e93ccc1SKiyoshi Ueda * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
25852e93ccc1SKiyoshi Ueda * This flag is cleared before dm_suspend returns.
25862e93ccc1SKiyoshi Ueda */
25872e93ccc1SKiyoshi Ueda if (noflush)
25882e93ccc1SKiyoshi Ueda set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
258986331f39SBart Van Assche else
259086331f39SBart Van Assche pr_debug("%s: suspending with flush\n", dm_device_name(md));
25912e93ccc1SKiyoshi Ueda
2592d67ee213SMike Snitzer /*
2593d67ee213SMike Snitzer * This gets reverted if there's an error later and the targets
2594d67ee213SMike Snitzer * provide the .presuspend_undo hook.
2595d67ee213SMike Snitzer */
25961da177e4SLinus Torvalds dm_table_presuspend_targets(map);
25971da177e4SLinus Torvalds
25982e93ccc1SKiyoshi Ueda /*
25999f518b27SKiyoshi Ueda * Flush I/O to the device.
26009f518b27SKiyoshi Ueda * Any I/O submitted after lock_fs() may not be flushed.
26019f518b27SKiyoshi Ueda * noflush takes precedence over do_lockfs.
26029f518b27SKiyoshi Ueda * (lock_fs() flushes I/Os and waits for them to complete.)
26032e93ccc1SKiyoshi Ueda */
260432a926daSMikulas Patocka if (!noflush && do_lockfs) {
26052ca3310eSAlasdair G Kergon r = lock_fs(md);
2606d67ee213SMike Snitzer if (r) {
2607d67ee213SMike Snitzer dm_table_presuspend_undo_targets(map);
2608ffcc3936SMike Snitzer return r;
2609aa8d7c2fSAlasdair G Kergon }
2610d67ee213SMike Snitzer }
26111da177e4SLinus Torvalds
26121da177e4SLinus Torvalds /*
26133b00b203SMikulas Patocka * Here we must make sure that no processes are submitting requests
26143b00b203SMikulas Patocka * to target drivers i.e. no one may be executing
26153b00b203SMikulas Patocka * __split_and_process_bio. This is called from dm_request and
26163b00b203SMikulas Patocka * dm_wq_work.
26173b00b203SMikulas Patocka *
26183b00b203SMikulas Patocka * To get all processes out of __split_and_process_bio in dm_request,
26193b00b203SMikulas Patocka * we take the write lock. To prevent any process from reentering
26206a8736d1STejun Heo * __split_and_process_bio from dm_request and quiesce the thread
26216a8736d1STejun Heo * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
26226a8736d1STejun Heo * flush_workqueue(md->wq).
26231da177e4SLinus Torvalds */
26241eb787ecSAlasdair G Kergon set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
262541abc4e1SHannes Reinecke if (map)
262683d5e5b0SMikulas Patocka synchronize_srcu(&md->io_barrier);
26271da177e4SLinus Torvalds
2628d0bcb878SKiyoshi Ueda /*
262929e4013dSTejun Heo * Stop md->queue before flushing md->wq in case request-based
263029e4013dSTejun Heo * dm defers requests to md->wq from md->queue.
2631d0bcb878SKiyoshi Ueda */
26326a23e05cSJens Axboe if (dm_request_based(md))
2633eca7ee6dSMike Snitzer dm_stop_queue(md->queue);
2634cec47e3dSKiyoshi Ueda
2635d0bcb878SKiyoshi Ueda flush_workqueue(md->wq);
2636d0bcb878SKiyoshi Ueda
26371da177e4SLinus Torvalds /*
26383b00b203SMikulas Patocka * At this point no more requests are entering target request routines.
26393b00b203SMikulas Patocka * We call dm_wait_for_completion to wait for all existing requests
26403b00b203SMikulas Patocka * to finish.
26411da177e4SLinus Torvalds */
2642b48633f8SBart Van Assche r = dm_wait_for_completion(md, task_state);
2643eaf9a736SMike Snitzer if (!r)
2644eaf9a736SMike Snitzer set_bit(dmf_suspended_flag, &md->flags);
26451da177e4SLinus Torvalds
26466d6f10dfSMilan Broz if (noflush)
2647022c2611SMikulas Patocka clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
264841abc4e1SHannes Reinecke if (map)
264983d5e5b0SMikulas Patocka synchronize_srcu(&md->io_barrier);
26502e93ccc1SKiyoshi Ueda
26511da177e4SLinus Torvalds /* were we interrupted? */
265246125c1cSMilan Broz if (r < 0) {
26539a1fb464SMikulas Patocka dm_queue_flush(md);
265473d410c0SMilan Broz
2655cec47e3dSKiyoshi Ueda if (dm_request_based(md))
2656eca7ee6dSMike Snitzer dm_start_queue(md->queue);
2657cec47e3dSKiyoshi Ueda
26582ca3310eSAlasdair G Kergon unlock_fs(md);
2659d67ee213SMike Snitzer dm_table_presuspend_undo_targets(map);
2660ffcc3936SMike Snitzer /* pushback list is already flushed, so skip flush */
2661ffcc3936SMike Snitzer }
2662ffcc3936SMike Snitzer
2663ffcc3936SMike Snitzer return r;
26642ca3310eSAlasdair G Kergon }
26652ca3310eSAlasdair G Kergon
26663b00b203SMikulas Patocka /*
2667ffcc3936SMike Snitzer * We need to be able to change a mapping table under a mounted
2668ffcc3936SMike Snitzer * filesystem. For example, we might want to move some data in
2669ffcc3936SMike Snitzer * the background. Before the table can be swapped with
2670ffcc3936SMike Snitzer * dm_bind_table, dm_suspend must be called to flush any in-flight
2671ffcc3936SMike Snitzer * bios and ensure that any further I/O gets deferred.
26723b00b203SMikulas Patocka */
2673ffcc3936SMike Snitzer /*
2674ffcc3936SMike Snitzer * Suspend mechanism in request-based dm.
2675ffcc3936SMike Snitzer *
2676ffcc3936SMike Snitzer * 1. Flush all I/Os by lock_fs() if needed.
2677ffcc3936SMike Snitzer * 2. Stop dispatching any I/O by stopping the request_queue.
2678ffcc3936SMike Snitzer * 3. Wait for all in-flight I/Os to be completed or requeued.
2679ffcc3936SMike Snitzer *
2680ffcc3936SMike Snitzer * To abort suspend, start the request_queue.
2681ffcc3936SMike Snitzer */
2682ffcc3936SMike Snitzer int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
2683ffcc3936SMike Snitzer {
2684ffcc3936SMike Snitzer struct dm_table *map = NULL;
2685ffcc3936SMike Snitzer int r = 0;
2686ffcc3936SMike Snitzer
2687ffcc3936SMike Snitzer retry:
2688ffcc3936SMike Snitzer mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
2689ffcc3936SMike Snitzer
2690ffcc3936SMike Snitzer if (dm_suspended_md(md)) {
2691ffcc3936SMike Snitzer r = -EINVAL;
2692ffcc3936SMike Snitzer goto out_unlock;
2693ffcc3936SMike Snitzer }
2694ffcc3936SMike Snitzer
2695ffcc3936SMike Snitzer if (dm_suspended_internally_md(md)) {
2696ffcc3936SMike Snitzer /* already internally suspended, wait for internal resume */
2697ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock);
2698ffcc3936SMike Snitzer r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
2699ffcc3936SMike Snitzer if (r)
2700ffcc3936SMike Snitzer return r;
2701ffcc3936SMike Snitzer goto retry;
2702ffcc3936SMike Snitzer }
2703ffcc3936SMike Snitzer
2704a12f5d48SEric Dumazet map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2705ffcc3936SMike Snitzer
2706eaf9a736SMike Snitzer r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);
2707ffcc3936SMike Snitzer if (r)
2708ffcc3936SMike Snitzer goto out_unlock;
27093b00b203SMikulas Patocka
27104d4471cbSKiyoshi Ueda dm_table_postsuspend_targets(map);
27114d4471cbSKiyoshi Ueda
2712d287483dSAlasdair G Kergon out_unlock:
2713e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock);
2714cf222b37SAlasdair G Kergon return r;
27151da177e4SLinus Torvalds }
27161da177e4SLinus Torvalds
2717ffcc3936SMike Snitzer static int __dm_resume(struct mapped_device *md, struct dm_table *map)
27181da177e4SLinus Torvalds {
2719ffcc3936SMike Snitzer if (map) {
2720ffcc3936SMike Snitzer int r = dm_table_resume_targets(map);
27218757b776SMilan Broz if (r)
2722ffcc3936SMike Snitzer return r;
2723ffcc3936SMike Snitzer }
27242ca3310eSAlasdair G Kergon
27259a1fb464SMikulas Patocka dm_queue_flush(md);
27262ca3310eSAlasdair G Kergon
2727cec47e3dSKiyoshi Ueda /*
2728cec47e3dSKiyoshi Ueda * Flushing deferred I/Os must be done after targets are resumed
2729cec47e3dSKiyoshi Ueda * so that mapping of targets can work correctly.
2730cec47e3dSKiyoshi Ueda * Request-based dm is queueing the deferred I/Os in its request_queue.
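 * (Editorial note: restarting the queue via dm_start_queue() below is
 * what lets those queued requests dispatch again.)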
2731cec47e3dSKiyoshi Ueda */
2732cec47e3dSKiyoshi Ueda if (dm_request_based(md))
2733eca7ee6dSMike Snitzer dm_start_queue(md->queue);
2734cec47e3dSKiyoshi Ueda
27352ca3310eSAlasdair G Kergon unlock_fs(md);
27362ca3310eSAlasdair G Kergon
2737ffcc3936SMike Snitzer return 0;
2738ffcc3936SMike Snitzer }
2739ffcc3936SMike Snitzer
2740ffcc3936SMike Snitzer int dm_resume(struct mapped_device *md)
2741ffcc3936SMike Snitzer {
27428dc23658SMinfei Huang int r;
2743ffcc3936SMike Snitzer struct dm_table *map = NULL;
2744ffcc3936SMike Snitzer
2745ffcc3936SMike Snitzer retry:
27468dc23658SMinfei Huang r = -EINVAL;
2747ffcc3936SMike Snitzer mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
2748ffcc3936SMike Snitzer
2749ffcc3936SMike Snitzer if (!dm_suspended_md(md))
2750ffcc3936SMike Snitzer goto out;
2751ffcc3936SMike Snitzer
2752ffcc3936SMike Snitzer if (dm_suspended_internally_md(md)) {
2753ffcc3936SMike Snitzer /* already internally suspended, wait for internal resume */
2754ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock);
2755ffcc3936SMike Snitzer r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
2756ffcc3936SMike Snitzer if (r)
2757ffcc3936SMike Snitzer return r;
2758ffcc3936SMike Snitzer goto retry;
2759ffcc3936SMike Snitzer }
2760ffcc3936SMike Snitzer
2761a12f5d48SEric Dumazet map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2762ffcc3936SMike Snitzer if (!map || !dm_table_get_size(map))
2763ffcc3936SMike Snitzer goto out;
2764ffcc3936SMike Snitzer
2765ffcc3936SMike Snitzer r = __dm_resume(md, map);
2766ffcc3936SMike Snitzer if (r)
2767ffcc3936SMike Snitzer goto out;
2768ffcc3936SMike Snitzer
27692ca3310eSAlasdair G Kergon clear_bit(DMF_SUSPENDED, &md->flags);
2770cf222b37SAlasdair G Kergon out:
2771e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock);
27722ca3310eSAlasdair G Kergon
2773cf222b37SAlasdair G Kergon return r;
27741da177e4SLinus Torvalds }
27751da177e4SLinus Torvalds
2776fd2ed4d2SMikulas Patocka /*
2777fd2ed4d2SMikulas Patocka * Internal suspend/resume works like userspace-driven suspend. It waits
2778fd2ed4d2SMikulas Patocka * until all bios finish and prevents issuing new bios to the target drivers.
2779fd2ed4d2SMikulas Patocka * It may be used only from the kernel.
2780fd2ed4d2SMikulas Patocka */
2781fd2ed4d2SMikulas Patocka
2782ffcc3936SMike Snitzer static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags)
2783ffcc3936SMike Snitzer {
2784ffcc3936SMike Snitzer struct dm_table *map = NULL;
2785ffcc3936SMike Snitzer
27861ea0654eSBart Van Assche lockdep_assert_held(&md->suspend_lock);
27871ea0654eSBart Van Assche
278896b26c8cSMikulas Patocka if (md->internal_suspend_count++)
2789ffcc3936SMike Snitzer return; /* nested internal suspend */
2790ffcc3936SMike Snitzer
2791ffcc3936SMike Snitzer if (dm_suspended_md(md)) {
2792ffcc3936SMike Snitzer set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2793ffcc3936SMike Snitzer return; /* nest suspend */
2794ffcc3936SMike Snitzer }
2795ffcc3936SMike Snitzer
2796a12f5d48SEric Dumazet map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2797ffcc3936SMike Snitzer
2798ffcc3936SMike Snitzer /*
2799ffcc3936SMike Snitzer * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is
2800ffcc3936SMike Snitzer * supported. Properly supporting a TASK_INTERRUPTIBLE internal suspend
2801ffcc3936SMike Snitzer * would require changing .presuspend to return an error -- avoid this
2802ffcc3936SMike Snitzer * until there is a need for more elaborate variants of internal suspend.
2803ffcc3936SMike Snitzer */
2804eaf9a736SMike Snitzer (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
2805eaf9a736SMike Snitzer DMF_SUSPENDED_INTERNALLY);
2806ffcc3936SMike Snitzer
2807ffcc3936SMike Snitzer dm_table_postsuspend_targets(map);
2808ffcc3936SMike Snitzer }
2809ffcc3936SMike Snitzer
2810ffcc3936SMike Snitzer static void __dm_internal_resume(struct mapped_device *md)
2811ffcc3936SMike Snitzer {
281296b26c8cSMikulas Patocka BUG_ON(!md->internal_suspend_count);
281396b26c8cSMikulas Patocka
281496b26c8cSMikulas Patocka if (--md->internal_suspend_count)
2815ffcc3936SMike Snitzer return; /* resume from nested internal suspend */
2816ffcc3936SMike Snitzer
2817ffcc3936SMike Snitzer if (dm_suspended_md(md))
2818ffcc3936SMike Snitzer goto done; /* resume from nested suspend */
2819ffcc3936SMike Snitzer
2820ffcc3936SMike Snitzer /*
2821ffcc3936SMike Snitzer * NOTE: existing callers don't need to call dm_table_resume_targets
2822ffcc3936SMike Snitzer * (which may fail -- so best to avoid it for now by passing NULL map)
2823ffcc3936SMike Snitzer */
2824ffcc3936SMike Snitzer (void) __dm_resume(md, NULL);
2825ffcc3936SMike Snitzer
2826ffcc3936SMike Snitzer done:
2827ffcc3936SMike Snitzer clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2828ffcc3936SMike Snitzer smp_mb__after_atomic();
2829ffcc3936SMike Snitzer wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY);
2830ffcc3936SMike Snitzer }
2831ffcc3936SMike Snitzer
2832ffcc3936SMike Snitzer void dm_internal_suspend_noflush(struct mapped_device *md)
2833fd2ed4d2SMikulas Patocka {
2834fd2ed4d2SMikulas Patocka mutex_lock(&md->suspend_lock);
2835ffcc3936SMike Snitzer __dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
2836ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock);
2837ffcc3936SMike Snitzer }
2838ffcc3936SMike Snitzer EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush);
2839ffcc3936SMike Snitzer
2840ffcc3936SMike Snitzer void dm_internal_resume(struct mapped_device *md)
2841ffcc3936SMike Snitzer {
2842ffcc3936SMike Snitzer mutex_lock(&md->suspend_lock);
2843ffcc3936SMike Snitzer __dm_internal_resume(md);
2844ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock);
2845ffcc3936SMike Snitzer }
2846ffcc3936SMike Snitzer EXPORT_SYMBOL_GPL(dm_internal_resume);
2847ffcc3936SMike Snitzer
2848ffcc3936SMike Snitzer /*
2849ffcc3936SMike Snitzer * Fast variants of internal suspend/resume hold md->suspend_lock,
2850ffcc3936SMike Snitzer * which prevents interaction with userspace-driven suspend.
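 * (Editorial note: the lock is taken in dm_internal_suspend_fast()
 * and only released by dm_internal_resume_fast(), so the two must
 * always be paired, e.g.:
 *
 *	dm_internal_suspend_fast(md);
 *	... operate on the quiesced device ...
 *	dm_internal_resume_fast(md);
 * )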
2851ffcc3936SMike Snitzer */
2852ffcc3936SMike Snitzer
2853ffcc3936SMike Snitzer void dm_internal_suspend_fast(struct mapped_device *md)
2854ffcc3936SMike Snitzer {
2855ffcc3936SMike Snitzer mutex_lock(&md->suspend_lock);
2856ffcc3936SMike Snitzer if (dm_suspended_md(md) || dm_suspended_internally_md(md))
2857fd2ed4d2SMikulas Patocka return;
2858fd2ed4d2SMikulas Patocka
2859fd2ed4d2SMikulas Patocka set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2860fd2ed4d2SMikulas Patocka synchronize_srcu(&md->io_barrier);
2861fd2ed4d2SMikulas Patocka flush_workqueue(md->wq);
2862fd2ed4d2SMikulas Patocka dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
2863fd2ed4d2SMikulas Patocka }
2864b735fedeSMikulas Patocka EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);
2865fd2ed4d2SMikulas Patocka
2866ffcc3936SMike Snitzer void dm_internal_resume_fast(struct mapped_device *md)
2867fd2ed4d2SMikulas Patocka {
2868ffcc3936SMike Snitzer if (dm_suspended_md(md) || dm_suspended_internally_md(md))
2869fd2ed4d2SMikulas Patocka goto done;
2870fd2ed4d2SMikulas Patocka
2871fd2ed4d2SMikulas Patocka dm_queue_flush(md);
2872fd2ed4d2SMikulas Patocka
2873fd2ed4d2SMikulas Patocka done:
2874fd2ed4d2SMikulas Patocka mutex_unlock(&md->suspend_lock);
2875fd2ed4d2SMikulas Patocka }
2876b735fedeSMikulas Patocka EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
2877fd2ed4d2SMikulas Patocka
28781da177e4SLinus Torvalds /*-----------------------------------------------------------------
28791da177e4SLinus Torvalds * Event notification.
28801da177e4SLinus Torvalds *---------------------------------------------------------------*/
28813abf85b5SPeter Rajnoha int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
288260935eb2SMilan Broz unsigned cookie)
288369267a30SAlasdair G Kergon {
288460935eb2SMilan Broz char udev_cookie[DM_COOKIE_LENGTH];
288560935eb2SMilan Broz char *envp[] = { udev_cookie, NULL };
288660935eb2SMilan Broz
288760935eb2SMilan Broz if (!cookie)
28883abf85b5SPeter Rajnoha return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
288960935eb2SMilan Broz else {
289060935eb2SMilan Broz snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
289160935eb2SMilan Broz DM_COOKIE_ENV_VAR_NAME, cookie);
28923abf85b5SPeter Rajnoha return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
28933abf85b5SPeter Rajnoha action, envp);
289460935eb2SMilan Broz }
289569267a30SAlasdair G Kergon }
289669267a30SAlasdair G Kergon
28977a8c3d3bSMike Anderson uint32_t dm_next_uevent_seq(struct mapped_device *md)
28987a8c3d3bSMike Anderson {
28997a8c3d3bSMike Anderson return atomic_add_return(1, &md->uevent_seq);
29007a8c3d3bSMike Anderson }
29017a8c3d3bSMike Anderson
29021da177e4SLinus Torvalds uint32_t dm_get_event_nr(struct mapped_device *md)
29031da177e4SLinus Torvalds {
29041da177e4SLinus Torvalds return atomic_read(&md->event_nr);
29051da177e4SLinus Torvalds }
29061da177e4SLinus Torvalds
29071da177e4SLinus Torvalds int dm_wait_event(struct mapped_device *md, int event_nr)
29081da177e4SLinus Torvalds {
29091da177e4SLinus Torvalds return wait_event_interruptible(md->eventq,
29101da177e4SLinus Torvalds (event_nr != atomic_read(&md->event_nr)));
29111da177e4SLinus Torvalds }
29121da177e4SLinus Torvalds
29137a8c3d3bSMike Anderson void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
29147a8c3d3bSMike Anderson {
29157a8c3d3bSMike Anderson unsigned long flags;
29167a8c3d3bSMike Anderson
29177a8c3d3bSMike Anderson spin_lock_irqsave(&md->uevent_lock, flags);
29187a8c3d3bSMike Anderson list_add(elist, &md->uevent_list);
29197a8c3d3bSMike Anderson spin_unlock_irqrestore(&md->uevent_lock, flags);
29207a8c3d3bSMike Anderson }
29217a8c3d3bSMike Anderson
29221da177e4SLinus Torvalds /*
29231da177e4SLinus Torvalds * The gendisk is only valid as long as you have a reference
29241da177e4SLinus Torvalds * count on 'md'.
29251da177e4SLinus Torvalds */
29261da177e4SLinus Torvalds struct gendisk *dm_disk(struct mapped_device *md)
29271da177e4SLinus Torvalds {
29281da177e4SLinus Torvalds return md->disk;
29291da177e4SLinus Torvalds }
293065ff5b7dSSami Tolvanen EXPORT_SYMBOL_GPL(dm_disk);
29311da177e4SLinus Torvalds
2932784aae73SMilan Broz struct kobject *dm_kobject(struct mapped_device *md)
2933784aae73SMilan Broz {
29342995fa78SMikulas Patocka return &md->kobj_holder.kobj;
2935784aae73SMilan Broz }
2936784aae73SMilan Broz
2937784aae73SMilan Broz struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
2938784aae73SMilan Broz {
2939784aae73SMilan Broz struct mapped_device *md;
2940784aae73SMilan Broz
29412995fa78SMikulas Patocka md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
2942784aae73SMilan Broz
2943b9a41d21SHou Tao spin_lock(&_minor_lock);
2944b9a41d21SHou Tao if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
2945b9a41d21SHou Tao md = NULL;
2946b9a41d21SHou Tao goto out;
2947b9a41d21SHou Tao }
2948784aae73SMilan Broz dm_get(md);
2949b9a41d21SHou Tao out:
2950b9a41d21SHou Tao spin_unlock(&_minor_lock);
2951b9a41d21SHou Tao
2952784aae73SMilan Broz return md;
2953784aae73SMilan Broz }
2954784aae73SMilan Broz
29554f186f8bSKiyoshi Ueda int dm_suspended_md(struct mapped_device *md)
29561da177e4SLinus Torvalds {
29571da177e4SLinus Torvalds return test_bit(DMF_SUSPENDED, &md->flags);
29581da177e4SLinus Torvalds }
29591da177e4SLinus Torvalds
2960ffcc3936SMike Snitzer int dm_suspended_internally_md(struct mapped_device *md)
2961ffcc3936SMike Snitzer {
2962ffcc3936SMike Snitzer return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2963ffcc3936SMike Snitzer }
2964ffcc3936SMike Snitzer
29652c140a24SMikulas Patocka int dm_test_deferred_remove_flag(struct mapped_device *md)
29662c140a24SMikulas Patocka {
29672c140a24SMikulas Patocka return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
29682c140a24SMikulas Patocka }
29692c140a24SMikulas Patocka
297064dbce58SKiyoshi Ueda int dm_suspended(struct dm_target *ti)
297164dbce58SKiyoshi Ueda {
2972ecdb2e25SKiyoshi Ueda return dm_suspended_md(dm_table_get_md(ti->table));
297364dbce58SKiyoshi Ueda }
297464dbce58SKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_suspended);
297564dbce58SKiyoshi Ueda
29762e93ccc1SKiyoshi Ueda int dm_noflush_suspending(struct dm_target *ti)
29772e93ccc1SKiyoshi Ueda {
2978ecdb2e25SKiyoshi Ueda return __noflush_suspending(dm_table_get_md(ti->table));
29792e93ccc1SKiyoshi Ueda }
29802e93ccc1SKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_noflush_suspending);
29812e93ccc1SKiyoshi Ueda
29827e0d574fSBart Van Assche struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
29830776aa0eSMike Snitzer unsigned integrity, unsigned per_io_data_size,
29840776aa0eSMike Snitzer unsigned min_pool_size)
2985e6ee8c0bSKiyoshi Ueda {
2986115485e8SMike Snitzer struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
298778d8e58aSMike Snitzer unsigned int pool_size = 0;
298864f52b0eSMike Snitzer unsigned int front_pad, io_front_pad;
29896f1c819cSKent Overstreet int ret;
2990e6ee8c0bSKiyoshi Ueda
2991e6ee8c0bSKiyoshi Ueda if (!pools)
29924e6e36c3SMike Snitzer return NULL;
2993e6ee8c0bSKiyoshi Ueda
299478d8e58aSMike Snitzer switch (type) {
299578d8e58aSMike Snitzer case DM_TYPE_BIO_BASED:
2996545ed20eSToshi Kani case DM_TYPE_DAX_BIO_BASED:
299722c11858SMike Snitzer case DM_TYPE_NVME_BIO_BASED:
29980776aa0eSMike Snitzer pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
299930187e1dSMike Snitzer front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
300064f52b0eSMike Snitzer io_front_pad = roundup(front_pad, __alignof__(struct dm_io)) + offsetof(struct dm_io, tio);
30016f1c819cSKent Overstreet ret = bioset_init(&pools->io_bs, pool_size, io_front_pad, 0);
30026f1c819cSKent Overstreet if (ret)
300364f52b0eSMike Snitzer goto out;
30046f1c819cSKent Overstreet if (integrity && bioset_integrity_create(&pools->io_bs, pool_size))
3005eb8db831SChristoph Hellwig goto out;
300678d8e58aSMike Snitzer break;
300778d8e58aSMike Snitzer case DM_TYPE_REQUEST_BASED:
30080776aa0eSMike Snitzer pool_size = max(dm_get_reserved_rq_based_ios(), min_pool_size);
300978d8e58aSMike Snitzer front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
3010591ddcfcSMike Snitzer /* per_io_data_size is used for blk-mq pdu at queue allocation */
301178d8e58aSMike Snitzer break;
301278d8e58aSMike Snitzer default:
301378d8e58aSMike Snitzer BUG();
301478d8e58aSMike Snitzer }
301578d8e58aSMike Snitzer
30166f1c819cSKent Overstreet ret = bioset_init(&pools->bs, pool_size, front_pad, 0);
30176f1c819cSKent Overstreet if (ret)
30185f015204SJun'ichi Nomura goto out;
3019e6ee8c0bSKiyoshi Ueda
30206f1c819cSKent Overstreet if (integrity && bioset_integrity_create(&pools->bs, pool_size))
30215f015204SJun'ichi Nomura goto out;
3022a91a2785SMartin K. Petersen
3023e6ee8c0bSKiyoshi Ueda return pools;
302478d8e58aSMike Snitzer
30255f015204SJun'ichi Nomura out:
30265f015204SJun'ichi Nomura dm_free_md_mempools(pools);
3027e6ee8c0bSKiyoshi Ueda
30284e6e36c3SMike Snitzer return NULL;
3029e6ee8c0bSKiyoshi Ueda }
3030e6ee8c0bSKiyoshi Ueda
3031e6ee8c0bSKiyoshi Ueda void dm_free_md_mempools(struct dm_md_mempools *pools)
3032e6ee8c0bSKiyoshi Ueda {
3033e6ee8c0bSKiyoshi Ueda if (!pools)
3034e6ee8c0bSKiyoshi Ueda return;
3035e6ee8c0bSKiyoshi Ueda
30366f1c819cSKent Overstreet bioset_exit(&pools->bs);
30376f1c819cSKent Overstreet bioset_exit(&pools->io_bs);
3038e6ee8c0bSKiyoshi Ueda
3039e6ee8c0bSKiyoshi Ueda kfree(pools);
3040e6ee8c0bSKiyoshi Ueda }
3041e6ee8c0bSKiyoshi Ueda
30429c72bad1SChristoph Hellwig struct dm_pr {
30439c72bad1SChristoph Hellwig u64 old_key;
30449c72bad1SChristoph Hellwig u64 new_key;
30459c72bad1SChristoph Hellwig u32 flags;
30469c72bad1SChristoph Hellwig bool fail_early;
30479c72bad1SChristoph Hellwig };
30489c72bad1SChristoph Hellwig
30499c72bad1SChristoph Hellwig static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn,
30509c72bad1SChristoph Hellwig void *data)
30519c72bad1SChristoph Hellwig {
30529c72bad1SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data;
30539c72bad1SChristoph Hellwig struct dm_table *table;
30549c72bad1SChristoph Hellwig struct dm_target *ti;
30559c72bad1SChristoph Hellwig int ret = -ENOTTY, srcu_idx;
30569c72bad1SChristoph Hellwig
30579c72bad1SChristoph Hellwig table = dm_get_live_table(md, &srcu_idx);
30589c72bad1SChristoph Hellwig if (!table || !dm_table_get_size(table))
30599c72bad1SChristoph Hellwig goto out;
30609c72bad1SChristoph Hellwig
30619c72bad1SChristoph Hellwig /* We only support devices that have a single target */
30629c72bad1SChristoph Hellwig if (dm_table_get_num_targets(table) != 1)
30639c72bad1SChristoph Hellwig goto out;
30649c72bad1SChristoph Hellwig ti = dm_table_get_target(table, 0);
30659c72bad1SChristoph Hellwig
30669c72bad1SChristoph Hellwig ret = -EINVAL;
30679c72bad1SChristoph Hellwig if (!ti->type->iterate_devices)
30689c72bad1SChristoph Hellwig goto out;
30699c72bad1SChristoph Hellwig
30709c72bad1SChristoph Hellwig ret = ti->type->iterate_devices(ti, fn, data);
30719c72bad1SChristoph Hellwig out:
30729c72bad1SChristoph Hellwig dm_put_live_table(md, srcu_idx);
30739c72bad1SChristoph Hellwig return ret;
30749c72bad1SChristoph Hellwig }
30759c72bad1SChristoph Hellwig
30769c72bad1SChristoph Hellwig /*
30779c72bad1SChristoph Hellwig * For register / unregister we need to manually call out to every path.
30789c72bad1SChristoph Hellwig */
30799c72bad1SChristoph Hellwig static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev,
30809c72bad1SChristoph Hellwig sector_t start, sector_t len, void *data)
30819c72bad1SChristoph Hellwig {
30829c72bad1SChristoph Hellwig struct dm_pr *pr = data;
30839c72bad1SChristoph Hellwig const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
30849c72bad1SChristoph Hellwig
30859c72bad1SChristoph Hellwig if (!ops || !ops->pr_register)
30869c72bad1SChristoph Hellwig return -EOPNOTSUPP;
30879c72bad1SChristoph Hellwig return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags);
30889c72bad1SChristoph Hellwig }
30899c72bad1SChristoph Hellwig
309071cdb697SChristoph Hellwig static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
309171cdb697SChristoph Hellwig u32 flags)
309271cdb697SChristoph Hellwig {
30939c72bad1SChristoph Hellwig struct dm_pr pr = {
30949c72bad1SChristoph Hellwig .old_key = old_key,
30959c72bad1SChristoph Hellwig .new_key = new_key,
30969c72bad1SChristoph Hellwig .flags = flags,
30979c72bad1SChristoph Hellwig .fail_early = true,
30989c72bad1SChristoph Hellwig };
30999c72bad1SChristoph Hellwig int ret;
310071cdb697SChristoph Hellwig
31019c72bad1SChristoph Hellwig ret = dm_call_pr(bdev, __dm_pr_register, &pr);
31029c72bad1SChristoph Hellwig if (ret && new_key) {
31039c72bad1SChristoph Hellwig /* unregister all paths if we failed to register any path */
31049c72bad1SChristoph Hellwig pr.old_key = new_key;
31059c72bad1SChristoph Hellwig pr.new_key = 0;
31069c72bad1SChristoph Hellwig pr.flags = 0;
31079c72bad1SChristoph Hellwig pr.fail_early = false;
31089c72bad1SChristoph Hellwig dm_call_pr(bdev, __dm_pr_register, &pr);
31099c72bad1SChristoph Hellwig }
311071cdb697SChristoph Hellwig
31119c72bad1SChristoph Hellwig return ret;
311271cdb697SChristoph Hellwig }
311371cdb697SChristoph Hellwig
311471cdb697SChristoph Hellwig static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
311571cdb697SChristoph Hellwig u32 flags)
311671cdb697SChristoph Hellwig {
311771cdb697SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data;
311871cdb697SChristoph Hellwig const struct pr_ops *ops;
3119971888c4SMike Snitzer int r, srcu_idx;
312071cdb697SChristoph Hellwig
31215bd5e8d8SMike Snitzer r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
312271cdb697SChristoph Hellwig if (r < 0)
3123971888c4SMike Snitzer goto out;
312471cdb697SChristoph Hellwig
312571cdb697SChristoph Hellwig ops = bdev->bd_disk->fops->pr_ops;
312671cdb697SChristoph Hellwig if (ops && ops->pr_reserve)
312771cdb697SChristoph Hellwig r = ops->pr_reserve(bdev, key, type, flags);
312871cdb697SChristoph Hellwig else
312971cdb697SChristoph Hellwig r = -EOPNOTSUPP;
3130971888c4SMike Snitzer out:
3131971888c4SMike Snitzer dm_unprepare_ioctl(md, srcu_idx);
313271cdb697SChristoph Hellwig return r;
313371cdb697SChristoph Hellwig }
313471cdb697SChristoph Hellwig
313571cdb697SChristoph Hellwig static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
313671cdb697SChristoph Hellwig {
313771cdb697SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data;
313871cdb697SChristoph Hellwig const struct pr_ops *ops;
3139971888c4SMike Snitzer int r, srcu_idx;
314071cdb697SChristoph Hellwig
31415bd5e8d8SMike Snitzer r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
314271cdb697SChristoph Hellwig if (r < 0)
3143971888c4SMike Snitzer goto out;
314471cdb697SChristoph Hellwig
314571cdb697SChristoph Hellwig ops = bdev->bd_disk->fops->pr_ops;
314671cdb697SChristoph Hellwig if (ops && ops->pr_release)
314771cdb697SChristoph Hellwig r = ops->pr_release(bdev, key, type);
314871cdb697SChristoph Hellwig else
314971cdb697SChristoph Hellwig r = -EOPNOTSUPP;
3150971888c4SMike Snitzer out:
3151971888c4SMike Snitzer dm_unprepare_ioctl(md, srcu_idx);
315271cdb697SChristoph Hellwig return r;
315371cdb697SChristoph Hellwig }
315471cdb697SChristoph Hellwig
315571cdb697SChristoph Hellwig static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
315671cdb697SChristoph Hellwig enum pr_type type, bool abort)
315771cdb697SChristoph Hellwig {
315871cdb697SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data;
315971cdb697SChristoph Hellwig const struct pr_ops *ops;
3160971888c4SMike Snitzer int r, srcu_idx;
316171cdb697SChristoph Hellwig
31625bd5e8d8SMike Snitzer r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
316371cdb697SChristoph Hellwig if (r < 0)
3164971888c4SMike Snitzer goto out;
316571cdb697SChristoph Hellwig
316671cdb697SChristoph Hellwig ops = bdev->bd_disk->fops->pr_ops;
316771cdb697SChristoph Hellwig if (ops && ops->pr_preempt)
316871cdb697SChristoph Hellwig r = ops->pr_preempt(bdev, old_key, new_key, type, abort);
316971cdb697SChristoph Hellwig else
317071cdb697SChristoph Hellwig r = -EOPNOTSUPP;
3171971888c4SMike Snitzer out:
3172971888c4SMike Snitzer dm_unprepare_ioctl(md, srcu_idx);
317371cdb697SChristoph Hellwig return r;
317471cdb697SChristoph Hellwig }
317571cdb697SChristoph Hellwig
317671cdb697SChristoph Hellwig static int dm_pr_clear(struct block_device *bdev, u64 key)
317771cdb697SChristoph Hellwig {
317871cdb697SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data;
317971cdb697SChristoph Hellwig const struct pr_ops *ops;
3180971888c4SMike Snitzer int r, srcu_idx;
318171cdb697SChristoph Hellwig
31825bd5e8d8SMike Snitzer r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
318371cdb697SChristoph Hellwig if (r < 0)
3184971888c4SMike Snitzer goto out;
318571cdb697SChristoph Hellwig
318671cdb697SChristoph Hellwig ops = bdev->bd_disk->fops->pr_ops;
318771cdb697SChristoph Hellwig if (ops && ops->pr_clear)
318871cdb697SChristoph Hellwig r = ops->pr_clear(bdev, key);
318971cdb697SChristoph Hellwig else
319071cdb697SChristoph Hellwig r = -EOPNOTSUPP;
3191971888c4SMike Snitzer out:
3192971888c4SMike Snitzer dm_unprepare_ioctl(md, srcu_idx);
319371cdb697SChristoph Hellwig return r;
319471cdb697SChristoph Hellwig }
319571cdb697SChristoph Hellwig
319671cdb697SChristoph Hellwig static const struct pr_ops dm_pr_ops = {
319771cdb697SChristoph Hellwig .pr_register = dm_pr_register,
319871cdb697SChristoph Hellwig .pr_reserve = dm_pr_reserve,
319971cdb697SChristoph Hellwig .pr_release = dm_pr_release,
320071cdb697SChristoph Hellwig .pr_preempt = dm_pr_preempt,
320171cdb697SChristoph Hellwig .pr_clear = dm_pr_clear,
320271cdb697SChristoph Hellwig };
320371cdb697SChristoph Hellwig
320483d5cde4SAlexey Dobriyan static const struct block_device_operations dm_blk_dops = {
32051da177e4SLinus Torvalds .open = dm_blk_open,
32061da177e4SLinus Torvalds .release = dm_blk_close,
3207aa129a22SMilan Broz .ioctl = dm_blk_ioctl,
32083ac51e74SDarrick J. Wong .getgeo = dm_blk_getgeo,
3209e76239a3SChristoph Hellwig .report_zones = dm_blk_report_zones,
321071cdb697SChristoph Hellwig .pr_ops = &dm_pr_ops,
32111da177e4SLinus Torvalds .owner = THIS_MODULE
32121da177e4SLinus Torvalds };
32131da177e4SLinus Torvalds
3214f26c5719SDan Williams static const struct dax_operations dm_dax_ops = {
3215f26c5719SDan Williams .direct_access = dm_dax_direct_access,
32167bf7eac8SDan Williams .dax_supported = dm_dax_supported,
32177e026c8cSDan Williams .copy_from_iter = dm_dax_copy_from_iter,
3218b3a9a0c3SDan Williams .copy_to_iter = dm_dax_copy_to_iter,
3219f26c5719SDan Williams };
3220f26c5719SDan Williams
32211da177e4SLinus Torvalds /*
32221da177e4SLinus Torvalds * module hooks
32231da177e4SLinus Torvalds */
32241da177e4SLinus Torvalds module_init(dm_init);
32251da177e4SLinus Torvalds module_exit(dm_exit);
32261da177e4SLinus Torvalds
32271da177e4SLinus Torvalds module_param(major, uint, 0);
32281da177e4SLinus Torvalds MODULE_PARM_DESC(major, "The major number of the device mapper");
3229f4790826SMike Snitzer
3230e8603136SMike Snitzer module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
3231e8603136SMike Snitzer MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
3232e8603136SMike Snitzer
3233115485e8SMike Snitzer module_param(dm_numa_node, int, S_IRUGO | S_IWUSR);
3234115485e8SMike Snitzer MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");
3235115485e8SMike Snitzer
32361da177e4SLinus Torvalds MODULE_DESCRIPTION(DM_NAME " driver");
32371da177e4SLinus Torvalds MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
32381da177e4SLinus Torvalds MODULE_LICENSE("GPL");
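/*
 * Illustrative only (editorial addition, not part of the original
 * source): the module parameters declared above can be set at load
 * time; the values shown here are hypothetical:
 *
 *	modprobe dm_mod major=240 reserved_bio_based_ios=32 dm_numa_node=0
 *
 * reserved_bio_based_ios and dm_numa_node are also writable through
 * sysfs (S_IRUGO | S_IWUSR), while major is fixed once loaded.
 */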