/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched/signal.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/uio.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/pr.h>
#include <linux/refcount.h>
#include <linux/part_stat.h>
#include <linux/blk-crypto.h>

#define DM_MSG_PREFIX "core"

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_IDR(_minor_idr);

static DEFINE_SPINLOCK(_minor_lock);

static void do_deferred_remove(struct work_struct *w);

static DECLARE_WORK(deferred_remove_work, do_deferred_remove);

static struct workqueue_struct *deferred_remove_workqueue;

atomic_t dm_global_event_nr = ATOMIC_INIT(0);
DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);

void dm_issue_global_event(void)
{
        atomic_inc(&dm_global_event_nr);
        wake_up(&dm_global_eventq);
}

/*
 * One of these is allocated (on-stack) per original bio.
 */
struct clone_info {
        struct dm_table *map;
        struct bio *bio;
        struct dm_io *io;
        sector_t sector;
        unsigned sector_count;
};

/*
 * One of these is allocated per clone bio.
 */
#define DM_TIO_MAGIC 7282014
struct dm_target_io {
        unsigned magic;
        struct dm_io *io;
        struct dm_target *ti;
        unsigned target_bio_nr;
        unsigned *len_ptr;
        bool inside_dm_io;
        struct bio clone;
};

/*
 * One of these is allocated per original bio.
 * It contains the first clone used for that original.
 */
#define DM_IO_MAGIC 5191977
struct dm_io {
        unsigned magic;
        struct mapped_device *md;
        blk_status_t status;
        atomic_t io_count;
        struct bio *orig_bio;
        unsigned long start_time;
        spinlock_t endio_lock;
        struct dm_stats_aux stats_aux;
        /* last member of dm_target_io is 'struct bio' */
        struct dm_target_io tio;
};

void *dm_per_bio_data(struct bio *bio, size_t data_size)
{
        struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
        if (!tio->inside_dm_io)
                return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
        return (char *)bio - offsetof(struct dm_target_io, clone) - offsetof(struct dm_io, tio) - data_size;
}
EXPORT_SYMBOL_GPL(dm_per_bio_data);

struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
{
        struct dm_io *io = (struct dm_io *)((char *)data + data_size);
        if (io->magic == DM_IO_MAGIC)
                return (struct bio *)((char *)io + offsetof(struct dm_io, tio) + offsetof(struct dm_target_io, clone));
        BUG_ON(io->magic != DM_TIO_MAGIC);
        return (struct bio *)((char *)io + offsetof(struct dm_target_io, clone));
}
EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data);

unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
{
        return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
}
EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);
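/*
 * Example (illustrative sketch, not part of this file): a bio-based target
 * that set ti->per_io_data_size = sizeof(struct my_io) in its constructor
 * can translate between its private per-bio data and the bio with the
 * helpers above. "struct my_io" and "my_map" are hypothetical names, not
 * real DM symbols:
 *
 *	struct my_io {
 *		sector_t orig_sector;
 *	};
 *
 *	static int my_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		struct my_io *pb = dm_per_bio_data(bio, sizeof(struct my_io));
 *
 *		pb->orig_sector = bio->bi_iter.bi_sector;
 *		...
 *		return DM_MAPIO_REMAPPED;
 *	}
 *
 * The reverse mapping, dm_bio_from_per_bio_data(pb, sizeof(struct my_io)),
 * returns the same bio. Both directions are pure pointer arithmetic over the
 * front-padded bioset allocation, which is why they work both for the clone
 * embedded in struct dm_io and for separately allocated clone bios.
 */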
#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7

#define DM_NUMA_NODE NUMA_NO_NODE
static int dm_numa_node = DM_NUMA_NODE;

/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
        struct bio_set bs;
        struct bio_set io_bs;
};

struct table_device {
        struct list_head list;
        refcount_t count;
        struct dm_dev dm_dev;
};

/*
 * Bio-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_BIO_BASED_IOS		16
static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;

static int __dm_get_module_param_int(int *module_param, int min, int max)
{
        int param = READ_ONCE(*module_param);
        int modified_param = 0;
        bool modified = true;

        if (param < min)
                modified_param = min;
        else if (param > max)
                modified_param = max;
        else
                modified = false;

        if (modified) {
                (void)cmpxchg(module_param, param, modified_param);
                param = modified_param;
        }

        return param;
}

unsigned __dm_get_module_param(unsigned *module_param,
                               unsigned def, unsigned max)
{
        unsigned param = READ_ONCE(*module_param);
        unsigned modified_param = 0;

        if (!param)
                modified_param = def;
        else if (param > max)
                modified_param = max;

        if (modified_param) {
                (void)cmpxchg(module_param, param, modified_param);
                param = modified_param;
        }

        return param;
}
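/*
 * Example (illustrative sketch): these helpers validate writable module
 * parameters lazily, at the point of use, instead of rejecting bad values
 * at write time. "my_ios" is a hypothetical knob:
 *
 *	static unsigned my_ios = 32;
 *	module_param(my_ios, uint, 0644);
 *
 *	unsigned ios = __dm_get_module_param(&my_ios, 32, 1024);
 *
 * If userspace wrote 0, the reader gets the default 32; if it wrote 4096,
 * the reader gets 1024 and the cmpxchg() publishes the clamped value back,
 * so a subsequent sysfs read reflects what is actually in use.
 */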
unsigned dm_get_reserved_bio_based_ios(void)
{
        return __dm_get_module_param(&reserved_bio_based_ios,
                                     RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);

static unsigned dm_get_numa_node(void)
{
        return __dm_get_module_param_int(&dm_numa_node,
                                         DM_NUMA_NODE, num_online_nodes() - 1);
}

static int __init local_init(void)
{
        int r;

        r = dm_uevent_init();
        if (r)
                return r;

        deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
        if (!deferred_remove_workqueue) {
                r = -ENOMEM;
                goto out_uevent_exit;
        }

        _major = major;
        r = register_blkdev(_major, _name);
        if (r < 0)
                goto out_free_workqueue;

        if (!_major)
                _major = r;

        return 0;

out_free_workqueue:
        destroy_workqueue(deferred_remove_workqueue);
out_uevent_exit:
        dm_uevent_exit();

        return r;
}

static void local_exit(void)
{
        flush_scheduled_work();
        destroy_workqueue(deferred_remove_workqueue);

        unregister_blkdev(_major, _name);
        dm_uevent_exit();

        _major = 0;

        DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
        local_init,
        dm_target_init,
        dm_linear_init,
        dm_stripe_init,
        dm_io_init,
        dm_kcopyd_init,
        dm_interface_init,
        dm_statistics_init,
};

static void (*_exits[])(void) = {
        local_exit,
        dm_target_exit,
        dm_linear_exit,
        dm_stripe_exit,
        dm_io_exit,
        dm_kcopyd_exit,
        dm_interface_exit,
        dm_statistics_exit,
};

static int __init dm_init(void)
{
        const int count = ARRAY_SIZE(_inits);

        int r, i;

        for (i = 0; i < count; i++) {
                r = _inits[i]();
                if (r)
                        goto bad;
        }

        return 0;

bad:
        while (i--)
                _exits[i]();

        return r;
}

static void __exit dm_exit(void)
{
        int i = ARRAY_SIZE(_exits);

        while (i--)
                _exits[i]();

        /*
         * Should be empty by this point.
         */
        idr_destroy(&_minor_idr);
}

/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{
        return test_bit(DMF_DELETING, &md->flags);
}

static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
        struct mapped_device *md;

        spin_lock(&_minor_lock);

        md = bdev->bd_disk->private_data;
        if (!md)
                goto out;

        if (test_bit(DMF_FREEING, &md->flags) ||
            dm_deleting_md(md)) {
                md = NULL;
                goto out;
        }

        dm_get(md);
        atomic_inc(&md->open_count);
out:
        spin_unlock(&_minor_lock);

        return md ? 0 : -ENXIO;
}

static void dm_blk_close(struct gendisk *disk, fmode_t mode)
{
        struct mapped_device *md;

        spin_lock(&_minor_lock);

        md = disk->private_data;
        if (WARN_ON(!md))
                goto out;

        if (atomic_dec_and_test(&md->open_count) &&
            (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
                queue_work(deferred_remove_workqueue, &deferred_remove_work);

        dm_put(md);
out:
        spin_unlock(&_minor_lock);
}

int dm_open_count(struct mapped_device *md)
{
        return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
{
        int r = 0;

        spin_lock(&_minor_lock);

        if (dm_open_count(md)) {
                r = -EBUSY;
                if (mark_deferred)
                        set_bit(DMF_DEFERRED_REMOVE, &md->flags);
        } else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
                r = -EEXIST;
        else
                set_bit(DMF_DELETING, &md->flags);

        spin_unlock(&_minor_lock);

        return r;
}

int dm_cancel_deferred_remove(struct mapped_device *md)
{
        int r = 0;

        spin_lock(&_minor_lock);

        if (test_bit(DMF_DELETING, &md->flags))
                r = -EBUSY;
        else
                clear_bit(DMF_DEFERRED_REMOVE, &md->flags);

        spin_unlock(&_minor_lock);

        return r;
}

static void do_deferred_remove(struct work_struct *w)
{
        dm_deferred_remove();
}

sector_t dm_get_size(struct mapped_device *md)
{
        return get_capacity(md->disk);
}

struct request_queue *dm_get_md_queue(struct mapped_device *md)
{
        return md->queue;
}

struct dm_stats *dm_get_stats(struct mapped_device *md)
{
        return &md->stats;
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        struct mapped_device *md = bdev->bd_disk->private_data;

        return dm_get_geometry(md, geo);
}

#ifdef CONFIG_BLK_DEV_ZONED
int dm_report_zones_cb(struct blk_zone *zone, unsigned int idx, void *data)
{
        struct dm_report_zones_args *args = data;
        sector_t sector_diff = args->tgt->begin - args->start;

        /*
         * Ignore zones beyond the target range.
         */
        if (zone->start >= args->start + args->tgt->len)
                return 0;

        /*
         * Remap the start sector and write pointer position of the zone
         * to match its position in the target range.
         */
        zone->start += sector_diff;
        if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL) {
                if (zone->cond == BLK_ZONE_COND_FULL)
                        zone->wp = zone->start + zone->len;
                else if (zone->cond == BLK_ZONE_COND_EMPTY)
                        zone->wp = zone->start;
                else
                        zone->wp += sector_diff;
        }

        args->next_sector = zone->start + zone->len;
        return args->orig_cb(zone, args->zone_idx++, args->orig_data);
}
EXPORT_SYMBOL_GPL(dm_report_zones_cb);

static int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
                               unsigned int nr_zones, report_zones_cb cb, void *data)
{
        struct mapped_device *md = disk->private_data;
        struct dm_table *map;
        int srcu_idx, ret;
        struct dm_report_zones_args args = {
                .next_sector = sector,
                .orig_data = data,
                .orig_cb = cb,
        };

        if (dm_suspended_md(md))
                return -EAGAIN;

        map = dm_get_live_table(md, &srcu_idx);
        /* dm_get_live_table() holds the SRCU read lock even on failure */
        if (!map) {
                ret = -EIO;
                goto out;
        }

        do {
                struct dm_target *tgt;

                tgt = dm_table_find_target(map, args.next_sector);
                if (WARN_ON_ONCE(!tgt->type->report_zones)) {
                        ret = -EIO;
                        goto out;
                }

                args.tgt = tgt;
                ret = tgt->type->report_zones(tgt, &args, nr_zones);
                if (ret < 0)
                        goto out;
        } while (args.zone_idx < nr_zones &&
                 args.next_sector < get_capacity(disk));

        ret = args.zone_idx;
out:
        dm_put_live_table(md, srcu_idx);
        return ret;
}
#else
#define dm_blk_report_zones		NULL
#endif /* CONFIG_BLK_DEV_ZONED */
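/*
 * Worked example (illustrative numbers) for dm_report_zones_cb() above: a
 * target at the start of the DM device (args->tgt->begin == 0) whose data
 * lives at offset 524288 of the underlying zoned device sets
 * args->start = 524288, giving sector_diff == (sector_t)(0 - 524288); the
 * unsigned wrap-around turns the addition into an effective subtraction.
 * A zone the low-level driver reports at sector 786432 therefore appears
 * on the DM device at 786432 - 524288 == 262144, and a sequential zone's
 * write pointer at 786944 is shifted the same way to 262656. FULL and
 * EMPTY zones instead have their write pointer pinned to
 * zone->start + zone->len and zone->start respectively, since the
 * device-reported value carries no information for those conditions.
 */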
static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
                            struct block_device **bdev)
        __acquires(md->io_barrier)
{
        struct dm_target *tgt;
        struct dm_table *map;
        int r;

retry:
        r = -ENOTTY;
        map = dm_get_live_table(md, srcu_idx);
        if (!map || !dm_table_get_size(map))
                return r;

        /* We only support devices that have a single target */
        if (dm_table_get_num_targets(map) != 1)
                return r;

        tgt = dm_table_get_target(map, 0);
        if (!tgt->type->prepare_ioctl)
                return r;

        if (dm_suspended_md(md))
                return -EAGAIN;

        r = tgt->type->prepare_ioctl(tgt, bdev);
        if (r == -ENOTCONN && !fatal_signal_pending(current)) {
                dm_put_live_table(md, *srcu_idx);
                msleep(10);
                goto retry;
        }

        return r;
}

static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
        __releases(md->io_barrier)
{
        dm_put_live_table(md, srcu_idx);
}

static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
                        unsigned int cmd, unsigned long arg)
{
        struct mapped_device *md = bdev->bd_disk->private_data;
        int r, srcu_idx;

        r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
        if (r < 0)
                goto out;

        if (r > 0) {
                /*
                 * Target determined this ioctl is being issued against a
                 * subset of the parent bdev; require extra privileges.
                 */
                if (!capable(CAP_SYS_RAWIO)) {
                        DMWARN_LIMIT(
        "%s: sending ioctl %x to DM device without required privilege.",
                                current->comm, cmd);
                        r = -ENOIOCTLCMD;
                        goto out;
                }
        }

        r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
out:
        dm_unprepare_ioctl(md, srcu_idx);
        return r;
}

static void start_io_acct(struct dm_io *io);

static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
{
        struct dm_io *io;
        struct dm_target_io *tio;
        struct bio *clone;

        clone = bio_alloc_bioset(GFP_NOIO, 0, &md->io_bs);
        if (!clone)
                return NULL;

        tio = container_of(clone, struct dm_target_io, clone);
        tio->inside_dm_io = true;
        tio->io = NULL;

        io = container_of(tio, struct dm_io, tio);
        io->magic = DM_IO_MAGIC;
        io->status = 0;
        atomic_set(&io->io_count, 1);
        io->orig_bio = bio;
        io->md = md;
        spin_lock_init(&io->endio_lock);

        start_io_acct(io);

        return io;
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
        bio_put(&io->tio.clone);
}

static struct dm_target_io *alloc_tio(struct clone_info *ci, struct dm_target *ti,
                                      unsigned target_bio_nr, gfp_t gfp_mask)
{
        struct dm_target_io *tio;

        if (!ci->io->tio.io) {
                /* the dm_target_io embedded in ci->io is available */
                tio = &ci->io->tio;
        } else {
                struct bio *clone = bio_alloc_bioset(gfp_mask, 0, &ci->io->md->bs);
                if (!clone)
                        return NULL;

                tio = container_of(clone, struct dm_target_io, clone);
                tio->inside_dm_io = false;
        }

        tio->magic = DM_TIO_MAGIC;
        tio->io = ci->io;
        tio->ti = ti;
        tio->target_bio_nr = target_bio_nr;

        return tio;
}

static void free_tio(struct dm_target_io *tio)
{
        if (tio->inside_dm_io)
                return;
        bio_put(&tio->clone);
}

static bool md_in_flight_bios(struct mapped_device *md)
{
        int cpu;
        struct hd_struct *part = &dm_disk(md)->part0;
        long sum = 0;

        for_each_possible_cpu(cpu) {
                sum += part_stat_local_read_cpu(part, in_flight[0], cpu);
                sum += part_stat_local_read_cpu(part, in_flight[1], cpu);
        }

        return sum != 0;
}

static bool md_in_flight(struct mapped_device *md)
{
        if (queue_is_mq(md->queue))
                return blk_mq_queue_inflight(md->queue);
        else
                return md_in_flight_bios(md);
}

u64 dm_start_time_ns_from_clone(struct bio *bio)
{
        struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
        struct dm_io *io = tio->io;

        return jiffies_to_nsecs(io->start_time);
}
EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone);

static void start_io_acct(struct dm_io *io)
{
        struct mapped_device *md = io->md;
        struct bio *bio = io->orig_bio;

        io->start_time = bio_start_io_acct(bio);
        if (unlikely(dm_stats_used(&md->stats)))
                dm_stats_account_io(&md->stats, bio_data_dir(bio),
                                    bio->bi_iter.bi_sector, bio_sectors(bio),
                                    false, 0, &io->stats_aux);
}

static void end_io_acct(struct dm_io *io)
{
        struct mapped_device *md = io->md;
        struct bio *bio = io->orig_bio;
        unsigned long duration = jiffies - io->start_time;

        bio_end_io_acct(bio, io->start_time);

        if (unlikely(dm_stats_used(&md->stats)))
                dm_stats_account_io(&md->stats, bio_data_dir(bio),
                                    bio->bi_iter.bi_sector, bio_sectors(bio),
                                    true, duration, &io->stats_aux);

        /* nudge anyone waiting on suspend queue */
        if (unlikely(wq_has_sleeper(&md->wait)))
                wake_up(&md->wait);
}
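/*
 * Example (illustrative sketch): a target or path selector that wants
 * per-I/O latency can derive it from the clone in its completion hook.
 * io->start_time is jiffies-based, so compare against jiffies rather than
 * ktime. "my_end_io" is a hypothetical hook, not a real DM symbol:
 *
 *	static int my_end_io(struct dm_target *ti, struct bio *bio,
 *			     blk_status_t *error)
 *	{
 *		u64 lat_ns = jiffies_to_nsecs(jiffies) -
 *			     dm_start_time_ns_from_clone(bio);
 *
 *		...account lat_ns...
 *		return DM_ENDIO_DONE;
 *	}
 */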
/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
        unsigned long flags;

        spin_lock_irqsave(&md->deferred_lock, flags);
        bio_list_add(&md->deferred, bio);
        spin_unlock_irqrestore(&md->deferred_lock, flags);
        queue_work(md->wq, &md->work);
}

/*
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_put_live_table() when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
{
        *srcu_idx = srcu_read_lock(&md->io_barrier);

        return srcu_dereference(md->map, &md->io_barrier);
}

void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
{
        srcu_read_unlock(&md->io_barrier, srcu_idx);
}

void dm_sync_table(struct mapped_device *md)
{
        synchronize_srcu(&md->io_barrier);
        synchronize_rcu_expedited();
}

/*
 * A fast alternative to dm_get_live_table/dm_put_live_table.
 * The caller must not block between these two functions.
 */
static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
{
        rcu_read_lock();
        return rcu_dereference(md->map);
}

static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
{
        rcu_read_unlock();
}

static char *_dm_claim_ptr = "I belong to device-mapper";
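/*
 * Example (illustrative sketch): the access pattern the comment above
 * prescribes, as used throughout this file:
 *
 *	int srcu_idx;
 *	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 *
 *	if (map) {
 *		...use map; the table cannot be freed underneath us...
 *	}
 *	dm_put_live_table(md, srcu_idx);
 *
 * dm_get_live_table() takes the SRCU read lock even when it returns NULL,
 * so dm_put_live_table() must be reached on every path. The _fast variants
 * trade SRCU for plain RCU and must not sleep in between.
 */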
/*
 * Open a table device so we can use it as a map destination.
 */
static int open_table_device(struct table_device *td, dev_t dev,
                             struct mapped_device *md)
{
        struct block_device *bdev;

        int r;

        BUG_ON(td->dm_dev.bdev);

        bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _dm_claim_ptr);
        if (IS_ERR(bdev))
                return PTR_ERR(bdev);

        r = bd_link_disk_holder(bdev, dm_disk(md));
        if (r) {
                blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
                return r;
        }

        td->dm_dev.bdev = bdev;
        td->dm_dev.dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
        return 0;
}

/*
 * Close a table device that we've been using.
 */
static void close_table_device(struct table_device *td, struct mapped_device *md)
{
        if (!td->dm_dev.bdev)
                return;

        bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
        blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
        put_dax(td->dm_dev.dax_dev);
        td->dm_dev.bdev = NULL;
        td->dm_dev.dax_dev = NULL;
}

static struct table_device *find_table_device(struct list_head *l, dev_t dev,
                                              fmode_t mode)
{
        struct table_device *td;

        list_for_each_entry(td, l, list)
                if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
                        return td;

        return NULL;
}

int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
                        struct dm_dev **result)
{
        int r;
        struct table_device *td;

        mutex_lock(&md->table_devices_lock);
        td = find_table_device(&md->table_devices, dev, mode);
        if (!td) {
                td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
                if (!td) {
                        mutex_unlock(&md->table_devices_lock);
                        return -ENOMEM;
                }

                td->dm_dev.mode = mode;
                td->dm_dev.bdev = NULL;

                if ((r = open_table_device(td, dev, md))) {
                        mutex_unlock(&md->table_devices_lock);
                        kfree(td);
                        return r;
                }

                format_dev_t(td->dm_dev.name, dev);

                refcount_set(&td->count, 1);
                list_add(&td->list, &md->table_devices);
        } else {
                refcount_inc(&td->count);
        }
        mutex_unlock(&md->table_devices_lock);

        *result = &td->dm_dev;
        return 0;
}
EXPORT_SYMBOL_GPL(dm_get_table_device);

void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
{
        struct table_device *td = container_of(d, struct table_device, dm_dev);

        mutex_lock(&md->table_devices_lock);
        if (refcount_dec_and_test(&td->count)) {
                close_table_device(td, md);
                list_del(&td->list);
                kfree(td);
        }
        mutex_unlock(&md->table_devices_lock);
}
EXPORT_SYMBOL(dm_put_table_device);

static void free_table_devices(struct list_head *devices)
{
        struct list_head *tmp, *next;

        list_for_each_safe(tmp, next, devices) {
                struct table_device *td = list_entry(tmp, struct table_device, list);

                DMWARN("dm_destroy: %s still exists with %d references",
                       td->dm_dev.name, refcount_read(&td->count));
                kfree(td);
        }
}
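/*
 * Example (illustrative sketch): how the table-device refcounting above
 * pairs up. Repeated gets for the same (dev, mode) share one entry on
 * md->table_devices, and the underlying bdev is only closed on the final
 * put:
 *
 *	struct dm_dev *d;
 *
 *	if (!dm_get_table_device(md, dev, FMODE_READ | FMODE_WRITE, &d)) {
 *		...submit I/O to d->bdev...
 *		dm_put_table_device(md, d);
 *	}
 */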
/*
 * Get the geometry associated with a dm device.
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
        *geo = md->geometry;

        return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
        sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

        if (geo->start > sz) {
                DMWARN("Start sector is beyond the geometry limits.");
                return -EINVAL;
        }

        md->geometry = *geo;

        return 0;
}

static int __noflush_suspending(struct mapped_device *md)
{
        return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, blk_status_t error)
{
        unsigned long flags;
        blk_status_t io_error;
        struct bio *bio;
        struct mapped_device *md = io->md;

        /* Push-back supersedes any I/O errors */
        if (unlikely(error)) {
                spin_lock_irqsave(&io->endio_lock, flags);
                if (!(io->status == BLK_STS_DM_REQUEUE && __noflush_suspending(md)))
                        io->status = error;
                spin_unlock_irqrestore(&io->endio_lock, flags);
        }

        if (atomic_dec_and_test(&io->io_count)) {
                if (io->status == BLK_STS_DM_REQUEUE) {
                        /*
                         * Target requested pushing back the I/O.
                         */
                        spin_lock_irqsave(&md->deferred_lock, flags);
                        if (__noflush_suspending(md))
                                /* NOTE early return due to BLK_STS_DM_REQUEUE below */
                                bio_list_add_head(&md->deferred, io->orig_bio);
                        else
                                /* noflush suspend was interrupted. */
                                io->status = BLK_STS_IOERR;
                        spin_unlock_irqrestore(&md->deferred_lock, flags);
                }

                io_error = io->status;
                bio = io->orig_bio;
                end_io_acct(io);
                free_io(md, io);

                if (io_error == BLK_STS_DM_REQUEUE)
                        return;

                if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
                        /*
                         * Preflush done for flush with data, reissue
                         * without REQ_PREFLUSH.
                         */
                        bio->bi_opf &= ~REQ_PREFLUSH;
                        queue_io(md, bio);
                } else {
                        /* done with normal IO or empty flush */
                        if (io_error)
                                bio->bi_status = io_error;
                        bio_endio(bio);
                }
        }
}

void disable_discard(struct mapped_device *md)
{
        struct queue_limits *limits = dm_get_queue_limits(md);

        /* device doesn't really support DISCARD, disable it */
        limits->max_discard_sectors = 0;
        blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue);
}

void disable_write_same(struct mapped_device *md)
{
        struct queue_limits *limits = dm_get_queue_limits(md);

        /* device doesn't really support WRITE SAME, disable it */
        limits->max_write_same_sectors = 0;
}

void disable_write_zeroes(struct mapped_device *md)
{
        struct queue_limits *limits = dm_get_queue_limits(md);

        /* device doesn't really support WRITE ZEROES, disable it */
        limits->max_write_zeroes_sectors = 0;
}

static void clone_endio(struct bio *bio)
{
        blk_status_t error = bio->bi_status;
        struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
        struct dm_io *io = tio->io;
        struct mapped_device *md = tio->io->md;
        dm_endio_fn endio = tio->ti->type->end_io;

        if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
                if (bio_op(bio) == REQ_OP_DISCARD &&
                    !bio->bi_disk->queue->limits.max_discard_sectors)
                        disable_discard(md);
                else if (bio_op(bio) == REQ_OP_WRITE_SAME &&
                         !bio->bi_disk->queue->limits.max_write_same_sectors)
                        disable_write_same(md);
                else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
                         !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
                        disable_write_zeroes(md);
        }

        if (endio) {
                int r = endio(tio->ti, bio, &error);
                switch (r) {
                case DM_ENDIO_REQUEUE:
                        error = BLK_STS_DM_REQUEUE;
                        /*FALLTHRU*/
                case DM_ENDIO_DONE:
                        break;
                case DM_ENDIO_INCOMPLETE:
                        /* The target will handle the io */
                        return;
                default:
                        DMWARN("unimplemented target endio return value: %d", r);
                        BUG();
                }
        }

        free_tio(tio);
        dec_pending(io, error);
}

/*
 * Return maximum size of I/O possible at the supplied sector up to the current
 * target boundary.
 */
static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
{
        sector_t target_offset = dm_target_offset(ti, sector);

        return ti->len - target_offset;
}

static sector_t max_io_len(sector_t sector, struct dm_target *ti)
{
        sector_t len = max_io_len_target_boundary(sector, ti);
        sector_t offset, max_len;

        /*
         * Does the target need to split even further?
         */
        if (ti->max_io_len) {
                offset = dm_target_offset(ti, sector);
                if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
                        max_len = sector_div(offset, ti->max_io_len);
                else
                        max_len = offset & (ti->max_io_len - 1);
                max_len = ti->max_io_len - max_len;

                if (len > max_len)
                        len = max_len;
        }

        return len;
}

int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
{
        if (len > UINT_MAX) {
                DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
                      (unsigned long long)len, UINT_MAX);
                ti->error = "Maximum size of target IO is too large";
                return -EINVAL;
        }

        ti->max_io_len = (uint32_t) len;

        return 0;
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
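/*
 * Worked example (illustrative numbers) for max_io_len() above: with
 * ti->max_io_len == 128 (a power of two) and an I/O starting 200 sectors
 * into the target, 200 & (128 - 1) == 72, so at most 128 - 72 == 56
 * sectors fit before the next boundary. With a non-power-of-two value
 * such as 96, sector_div() yields the remainder 200 % 96 == 8, allowing
 * 96 - 8 == 88 sectors. Either result is further capped by
 * max_io_len_target_boundary(), the distance to the end of the target
 * itself.
 */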
static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
                                                sector_t sector, int *srcu_idx)
        __acquires(md->io_barrier)
{
        struct dm_table *map;
        struct dm_target *ti;

        map = dm_get_live_table(md, srcu_idx);
        if (!map)
                return NULL;

        ti = dm_table_find_target(map, sector);
        if (!ti)
                return NULL;

        return ti;
}

static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
                                 long nr_pages, void **kaddr, pfn_t *pfn)
{
        struct mapped_device *md = dax_get_private(dax_dev);
        sector_t sector = pgoff * PAGE_SECTORS;
        struct dm_target *ti;
        long len, ret = -EIO;
        int srcu_idx;

        ti = dm_dax_get_live_target(md, sector, &srcu_idx);

        if (!ti)
                goto out;
        if (!ti->type->direct_access)
                goto out;
        len = max_io_len(sector, ti) / PAGE_SECTORS;
        if (len < 1)
                goto out;
        nr_pages = min(len, nr_pages);
        ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);

 out:
        dm_put_live_table(md, srcu_idx);

        return ret;
}

static bool dm_dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
                             int blocksize, sector_t start, sector_t len)
{
        struct mapped_device *md = dax_get_private(dax_dev);
        struct dm_table *map;
        int srcu_idx;
        bool ret = false;

        map = dm_get_live_table(md, &srcu_idx);
        /* dm_get_live_table() holds the SRCU read lock even on failure */
        if (!map)
                goto out;

        ret = dm_table_supports_dax(map, device_supports_dax, &blocksize);

 out:
        dm_put_live_table(md, srcu_idx);

        return ret;
}

static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
                                    void *addr, size_t bytes, struct iov_iter *i)
{
        struct mapped_device *md = dax_get_private(dax_dev);
        sector_t sector = pgoff * PAGE_SECTORS;
        struct dm_target *ti;
        long ret = 0;
        int srcu_idx;

        ti = dm_dax_get_live_target(md, sector, &srcu_idx);

        if (!ti)
                goto out;
        if (!ti->type->dax_copy_from_iter) {
                ret = copy_from_iter(addr, bytes, i);
                goto out;
        }
        ret = ti->type->dax_copy_from_iter(ti, pgoff, addr, bytes, i);
 out:
        dm_put_live_table(md, srcu_idx);

        return ret;
}
dm_dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
1184b3a9a0c3SDan Williams 		void *addr, size_t bytes, struct iov_iter *i)
1185b3a9a0c3SDan Williams {
1186b3a9a0c3SDan Williams 	struct mapped_device *md = dax_get_private(dax_dev);
1187b3a9a0c3SDan Williams 	sector_t sector = pgoff * PAGE_SECTORS;
1188b3a9a0c3SDan Williams 	struct dm_target *ti;
1189b3a9a0c3SDan Williams 	long ret = 0;
1190b3a9a0c3SDan Williams 	int srcu_idx;
1191b3a9a0c3SDan Williams 
1192b3a9a0c3SDan Williams 	ti = dm_dax_get_live_target(md, sector, &srcu_idx);
1193b3a9a0c3SDan Williams 
1194b3a9a0c3SDan Williams 	if (!ti)
1195b3a9a0c3SDan Williams 		goto out;
1196b3a9a0c3SDan Williams 	if (!ti->type->dax_copy_to_iter) {
1197b3a9a0c3SDan Williams 		ret = copy_to_iter(addr, bytes, i);
1198b3a9a0c3SDan Williams 		goto out;
1199b3a9a0c3SDan Williams 	}
1200b3a9a0c3SDan Williams 	ret = ti->type->dax_copy_to_iter(ti, pgoff, addr, bytes, i);
1201b3a9a0c3SDan Williams  out:
1202b3a9a0c3SDan Williams 	dm_put_live_table(md, srcu_idx);
1203b3a9a0c3SDan Williams 
1204b3a9a0c3SDan Williams 	return ret;
1205b3a9a0c3SDan Williams }
1206b3a9a0c3SDan Williams 
1207cdf6cdcdSVivek Goyal static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
1208cdf6cdcdSVivek Goyal 				  size_t nr_pages)
1209cdf6cdcdSVivek Goyal {
1210cdf6cdcdSVivek Goyal 	struct mapped_device *md = dax_get_private(dax_dev);
1211cdf6cdcdSVivek Goyal 	sector_t sector = pgoff * PAGE_SECTORS;
1212cdf6cdcdSVivek Goyal 	struct dm_target *ti;
1213cdf6cdcdSVivek Goyal 	int ret = -EIO;
1214cdf6cdcdSVivek Goyal 	int srcu_idx;
1215cdf6cdcdSVivek Goyal 
1216cdf6cdcdSVivek Goyal 	ti = dm_dax_get_live_target(md, sector, &srcu_idx);
1217cdf6cdcdSVivek Goyal 
1218cdf6cdcdSVivek Goyal 	if (!ti)
1219cdf6cdcdSVivek Goyal 		goto out;
1220cdf6cdcdSVivek Goyal 	if (WARN_ON(!ti->type->dax_zero_page_range)) {
1221cdf6cdcdSVivek Goyal 		/*
1222cdf6cdcdSVivek Goyal 		 * ->zero_page_range() is a mandatory dax operation. If we
1223cdf6cdcdSVivek Goyal 		 * are here, something is wrong.
1224cdf6cdcdSVivek Goyal 		 */
1226cdf6cdcdSVivek Goyal 		goto out;
1227cdf6cdcdSVivek Goyal 	}
1228cdf6cdcdSVivek Goyal 	ret = ti->type->dax_zero_page_range(ti, pgoff, nr_pages);
1229cdf6cdcdSVivek Goyal 
1230cdf6cdcdSVivek Goyal  out:
1231cdf6cdcdSVivek Goyal 	dm_put_live_table(md, srcu_idx);
1232cdf6cdcdSVivek Goyal 
1233cdf6cdcdSVivek Goyal 	return ret;
1234cdf6cdcdSVivek Goyal }
1235cdf6cdcdSVivek Goyal 
12361dd40c3eSMikulas Patocka /*
12371dd40c3eSMikulas Patocka  * A target may call dm_accept_partial_bio only from the map routine. It is
12382e2d6f7eSAjay Joshi  * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_RESET,
12392e2d6f7eSAjay Joshi  * REQ_OP_ZONE_OPEN, REQ_OP_ZONE_CLOSE and REQ_OP_ZONE_FINISH.
12401dd40c3eSMikulas Patocka  *
12411dd40c3eSMikulas Patocka  * dm_accept_partial_bio informs the dm that the target only wants to process
12421dd40c3eSMikulas Patocka  * additional n_sectors sectors of the bio and the rest of the data should be
12431dd40c3eSMikulas Patocka  * sent in a subsequent bio.
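 *
 * A worked example (illustrative numbers, not from the original text):
 * a target's map method is handed a 16-sector clone but only wants to
 * handle the first 8 sectors; it calls dm_accept_partial_bio(bio, 8)
 * and remaps the shrunken bio as usual. DM core sees the reduced
 * *tio->len_ptr and issues the remaining 8 sectors as a fresh clone on
 * the next pass of the __split_and_process_non_flush() loop.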
12441dd40c3eSMikulas Patocka  *
12451dd40c3eSMikulas Patocka  * A diagram that explains the arithmetic:
12461dd40c3eSMikulas Patocka  * +--------------------+---------------+-------+
12471dd40c3eSMikulas Patocka  * |         1          |       2       |   3   |
12481dd40c3eSMikulas Patocka  * +--------------------+---------------+-------+
12491dd40c3eSMikulas Patocka  *
12501dd40c3eSMikulas Patocka  * <-------------- *tio->len_ptr --------------->
12511dd40c3eSMikulas Patocka  *                      <------- bi_size ------->
12521dd40c3eSMikulas Patocka  *                      <-- n_sectors -->
12531dd40c3eSMikulas Patocka  *
12541dd40c3eSMikulas Patocka  * Region 1 was already iterated over with bio_advance or a similar function.
12551dd40c3eSMikulas Patocka  * (it may be empty if the target doesn't use bio_advance)
12561dd40c3eSMikulas Patocka  * Region 2 is the remaining bio size that the target wants to process.
12571dd40c3eSMikulas Patocka  * (it may be empty if region 1 is non-empty, although there is no reason
12581dd40c3eSMikulas Patocka  * to make it empty)
12591dd40c3eSMikulas Patocka  * The target requires that region 3 is to be sent in the next bio.
12601dd40c3eSMikulas Patocka  *
12611dd40c3eSMikulas Patocka  * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
12621dd40c3eSMikulas Patocka  * the partially processed part (the sum of regions 1+2) must be the same for all
12631dd40c3eSMikulas Patocka  * copies of the bio.
12641dd40c3eSMikulas Patocka  */
12651dd40c3eSMikulas Patocka void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
12661dd40c3eSMikulas Patocka {
12671dd40c3eSMikulas Patocka 	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
12681dd40c3eSMikulas Patocka 	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
12691eff9d32SJens Axboe 	BUG_ON(bio->bi_opf & REQ_PREFLUSH);
12701dd40c3eSMikulas Patocka 	BUG_ON(bi_size > *tio->len_ptr);
12711dd40c3eSMikulas Patocka 	BUG_ON(n_sectors > bi_size);
12721dd40c3eSMikulas Patocka 	*tio->len_ptr -= bi_size - n_sectors;
12731dd40c3eSMikulas Patocka 	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
12741dd40c3eSMikulas Patocka }
12751dd40c3eSMikulas Patocka EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
12761dd40c3eSMikulas Patocka 
1277978e51baSMike Snitzer static blk_qc_t __map_bio(struct dm_target_io *tio)
12781da177e4SLinus Torvalds {
12791da177e4SLinus Torvalds 	int r;
12802056a782SJens Axboe 	sector_t sector;
1281dba14160SMikulas Patocka 	struct bio *clone = &tio->clone;
128264f52b0eSMike Snitzer 	struct dm_io *io = tio->io;
1283978e51baSMike Snitzer 	struct mapped_device *md = io->md;
1284bd2a49b8SAlasdair G Kergon 	struct dm_target *ti = tio->ti;
1285978e51baSMike Snitzer 	blk_qc_t ret = BLK_QC_T_NONE;
12861da177e4SLinus Torvalds 
12871da177e4SLinus Torvalds 	clone->bi_end_io = clone_endio;
12881da177e4SLinus Torvalds 
12891da177e4SLinus Torvalds 	/*
12901da177e4SLinus Torvalds 	 * Map the clone. If r == 0 we don't need to do
12911da177e4SLinus Torvalds 	 * anything, the target has assumed ownership of
12921da177e4SLinus Torvalds 	 * this io.
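	 *
	 * The other DM_MAPIO_* values are handled by the switch below:
	 * DM_MAPIO_REMAPPED means the clone was redirected at this layer
	 * and must be dispatched here; DM_MAPIO_KILL fails the io with
	 * BLK_STS_IOERR; DM_MAPIO_REQUEUE completes it with
	 * BLK_STS_DM_REQUEUE so it will be retried.
	 *
	 * For reference, a minimal remapping map method, modelled loosely
	 * on a linear-style target (illustrative sketch only; example_c
	 * and its fields are invented, not part of this file):
	 *
	 *	static int example_map(struct dm_target *ti, struct bio *bio)
	 *	{
	 *		struct example_c *ec = ti->private;
	 *
	 *		bio_set_dev(bio, ec->dev->bdev);
	 *		if (bio_sectors(bio))
	 *			bio->bi_iter.bi_sector = ec->start +
	 *				dm_target_offset(ti, bio->bi_iter.bi_sector);
	 *		return DM_MAPIO_REMAPPED;
	 *	}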
12931da177e4SLinus Torvalds */ 129464f52b0eSMike Snitzer atomic_inc(&io->io_count); 12954f024f37SKent Overstreet sector = clone->bi_iter.bi_sector; 1296d67a5f4bSMikulas Patocka 12977de3ee57SMikulas Patocka r = ti->type->map(ti, clone); 1298846785e6SChristoph Hellwig switch (r) { 1299846785e6SChristoph Hellwig case DM_MAPIO_SUBMITTED: 1300846785e6SChristoph Hellwig break; 1301846785e6SChristoph Hellwig case DM_MAPIO_REMAPPED: 13021da177e4SLinus Torvalds /* the bio has been remapped so dispatch it */ 130374d46992SChristoph Hellwig trace_block_bio_remap(clone->bi_disk->queue, clone, 130464f52b0eSMike Snitzer bio_dev(io->orig_bio), sector); 1305978e51baSMike Snitzer if (md->type == DM_TYPE_NVME_BIO_BASED) 1306978e51baSMike Snitzer ret = direct_make_request(clone); 1307978e51baSMike Snitzer else 1308*ed00aabdSChristoph Hellwig ret = submit_bio_noacct(clone); 1309846785e6SChristoph Hellwig break; 1310846785e6SChristoph Hellwig case DM_MAPIO_KILL: 13114e4cbee9SChristoph Hellwig free_tio(tio); 131264f52b0eSMike Snitzer dec_pending(io, BLK_STS_IOERR); 13134e4cbee9SChristoph Hellwig break; 1314846785e6SChristoph Hellwig case DM_MAPIO_REQUEUE: 1315cfae7529SMike Snitzer free_tio(tio); 131664f52b0eSMike Snitzer dec_pending(io, BLK_STS_DM_REQUEUE); 1317846785e6SChristoph Hellwig break; 1318846785e6SChristoph Hellwig default: 131945cbcd79SKiyoshi Ueda DMWARN("unimplemented target map return value: %d", r); 132045cbcd79SKiyoshi Ueda BUG(); 13211da177e4SLinus Torvalds } 13221da177e4SLinus Torvalds 1323978e51baSMike Snitzer return ret; 13241da177e4SLinus Torvalds } 13251da177e4SLinus Torvalds 1326e0d6609aSMikulas Patocka static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len) 1327bd2a49b8SAlasdair G Kergon { 13284f024f37SKent Overstreet bio->bi_iter.bi_sector = sector; 13294f024f37SKent Overstreet bio->bi_iter.bi_size = to_bytes(len); 13301da177e4SLinus Torvalds } 13311da177e4SLinus Torvalds 13321da177e4SLinus Torvalds /* 13331da177e4SLinus Torvalds * Creates a bio that consists of range of complete bvecs. 13341da177e4SLinus Torvalds */ 1335c80914e8SMike Snitzer static int clone_bio(struct dm_target_io *tio, struct bio *bio, 13361c3b13e6SKent Overstreet sector_t sector, unsigned len) 13371da177e4SLinus Torvalds { 1338dba14160SMikulas Patocka struct bio *clone = &tio->clone; 13391da177e4SLinus Torvalds 13401c3b13e6SKent Overstreet __bio_clone_fast(clone, bio); 13419c47008dSMartin K. 
Petersen 1342a892c8d5SSatya Tangirala bio_crypt_clone(clone, bio, GFP_NOIO); 1343a892c8d5SSatya Tangirala 134457c36519SMike Snitzer if (bio_integrity(bio)) { 1345e2460f2aSMikulas Patocka int r; 1346e2460f2aSMikulas Patocka 1347e2460f2aSMikulas Patocka if (unlikely(!dm_target_has_integrity(tio->ti->type) && 1348e2460f2aSMikulas Patocka !dm_target_passes_integrity(tio->ti->type))) { 1349e2460f2aSMikulas Patocka DMWARN("%s: the target %s doesn't support integrity data.", 1350e2460f2aSMikulas Patocka dm_device_name(tio->io->md), 1351e2460f2aSMikulas Patocka tio->ti->type->name); 1352e2460f2aSMikulas Patocka return -EIO; 1353e2460f2aSMikulas Patocka } 1354e2460f2aSMikulas Patocka 1355e2460f2aSMikulas Patocka r = bio_integrity_clone(clone, bio, GFP_NOIO); 1356c80914e8SMike Snitzer if (r < 0) 1357c80914e8SMike Snitzer return r; 1358c80914e8SMike Snitzer } 13591c3b13e6SKent Overstreet 1360fa8db494SMike Snitzer bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector)); 1361fa8db494SMike Snitzer clone->bi_iter.bi_size = to_bytes(len); 1362fa8db494SMike Snitzer 1363fa8db494SMike Snitzer if (bio_integrity(bio)) 1364fa8db494SMike Snitzer bio_integrity_trim(clone); 1365c80914e8SMike Snitzer 1366c80914e8SMike Snitzer return 0; 13671da177e4SLinus Torvalds } 13681da177e4SLinus Torvalds 1369318716ddSMike Snitzer static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci, 1370318716ddSMike Snitzer struct dm_target *ti, unsigned num_bios) 1371f9ab94ceSMikulas Patocka { 1372dba14160SMikulas Patocka struct dm_target_io *tio; 1373318716ddSMike Snitzer int try; 1374dba14160SMikulas Patocka 1375318716ddSMike Snitzer if (!num_bios) 1376318716ddSMike Snitzer return; 1377f9ab94ceSMikulas Patocka 1378318716ddSMike Snitzer if (num_bios == 1) { 1379318716ddSMike Snitzer tio = alloc_tio(ci, ti, 0, GFP_NOIO); 1380318716ddSMike Snitzer bio_list_add(blist, &tio->clone); 1381318716ddSMike Snitzer return; 13829015df24SAlasdair G Kergon } 13839015df24SAlasdair G Kergon 1384318716ddSMike Snitzer for (try = 0; try < 2; try++) { 1385318716ddSMike Snitzer int bio_nr; 1386318716ddSMike Snitzer struct bio *bio; 1387318716ddSMike Snitzer 1388318716ddSMike Snitzer if (try) 1389bc02cdbeSMike Snitzer mutex_lock(&ci->io->md->table_devices_lock); 1390318716ddSMike Snitzer for (bio_nr = 0; bio_nr < num_bios; bio_nr++) { 1391318716ddSMike Snitzer tio = alloc_tio(ci, ti, bio_nr, try ? 
GFP_NOIO : GFP_NOWAIT); 1392318716ddSMike Snitzer if (!tio) 1393318716ddSMike Snitzer break; 1394318716ddSMike Snitzer 1395318716ddSMike Snitzer bio_list_add(blist, &tio->clone); 1396318716ddSMike Snitzer } 1397318716ddSMike Snitzer if (try) 1398bc02cdbeSMike Snitzer mutex_unlock(&ci->io->md->table_devices_lock); 1399318716ddSMike Snitzer if (bio_nr == num_bios) 1400318716ddSMike Snitzer return; 1401318716ddSMike Snitzer 1402318716ddSMike Snitzer while ((bio = bio_list_pop(blist))) { 1403318716ddSMike Snitzer tio = container_of(bio, struct dm_target_io, clone); 1404318716ddSMike Snitzer free_tio(tio); 1405318716ddSMike Snitzer } 1406318716ddSMike Snitzer } 1407318716ddSMike Snitzer } 1408318716ddSMike Snitzer 1409978e51baSMike Snitzer static blk_qc_t __clone_and_map_simple_bio(struct clone_info *ci, 1410318716ddSMike Snitzer struct dm_target_io *tio, unsigned *len) 14119015df24SAlasdair G Kergon { 1412dba14160SMikulas Patocka struct bio *clone = &tio->clone; 14139015df24SAlasdair G Kergon 14141dd40c3eSMikulas Patocka tio->len_ptr = len; 14151dd40c3eSMikulas Patocka 14161c3b13e6SKent Overstreet __bio_clone_fast(clone, ci->bio); 1417bd2a49b8SAlasdair G Kergon if (len) 14181dd40c3eSMikulas Patocka bio_setup_sector(clone, ci->sector, *len); 1419f9ab94ceSMikulas Patocka 1420978e51baSMike Snitzer return __map_bio(tio); 1421f9ab94ceSMikulas Patocka } 1422f9ab94ceSMikulas Patocka 142314fe594dSAlasdair G Kergon static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti, 14241dd40c3eSMikulas Patocka unsigned num_bios, unsigned *len) 142506a426ceSMike Snitzer { 1426318716ddSMike Snitzer struct bio_list blist = BIO_EMPTY_LIST; 1427318716ddSMike Snitzer struct bio *bio; 1428318716ddSMike Snitzer struct dm_target_io *tio; 142906a426ceSMike Snitzer 1430318716ddSMike Snitzer alloc_multiple_bios(&blist, ci, ti, num_bios); 1431318716ddSMike Snitzer 1432318716ddSMike Snitzer while ((bio = bio_list_pop(&blist))) { 1433318716ddSMike Snitzer tio = container_of(bio, struct dm_target_io, clone); 1434978e51baSMike Snitzer (void) __clone_and_map_simple_bio(ci, tio, len); 1435318716ddSMike Snitzer } 143606a426ceSMike Snitzer } 143706a426ceSMike Snitzer 143814fe594dSAlasdair G Kergon static int __send_empty_flush(struct clone_info *ci) 1439f9ab94ceSMikulas Patocka { 144006a426ceSMike Snitzer unsigned target_nr = 0; 1441f9ab94ceSMikulas Patocka struct dm_target *ti; 1442f9ab94ceSMikulas Patocka 1443892ad71fSDennis Zhou /* 1444dbe3ece1SJens Axboe * Empty flush uses a statically initialized bio, as the base for 1445dbe3ece1SJens Axboe * cloning. However, blkg association requires that a bdev is 1446dbe3ece1SJens Axboe * associated with a gendisk, which doesn't happen until the bdev is 1447dbe3ece1SJens Axboe * opened. So, blkg association is done at issue time of the flush 1448dbe3ece1SJens Axboe * rather than when the device is created in alloc_dev(). 
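	 *
	 * (The bio_set_dev() call just below is what performs that
	 * association, against the mapped device's own bdev. The empty
	 * flush is then fanned out by __send_duplicate_bios(): each target
	 * in the live table receives ti->num_flush_bios copies of it.)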
1449892ad71fSDennis Zhou */ 1450892ad71fSDennis Zhou bio_set_dev(ci->bio, ci->io->md->bdev); 1451892ad71fSDennis Zhou 1452b372d360SMike Snitzer BUG_ON(bio_has_data(ci->bio)); 1453f9ab94ceSMikulas Patocka while ((ti = dm_table_get_target(ci->map, target_nr++))) 14541dd40c3eSMikulas Patocka __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL); 1455f9ab94ceSMikulas Patocka return 0; 1456f9ab94ceSMikulas Patocka } 1457f9ab94ceSMikulas Patocka 1458c80914e8SMike Snitzer static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti, 14591dd40c3eSMikulas Patocka sector_t sector, unsigned *len) 14605ae89a87SMike Snitzer { 1461dba14160SMikulas Patocka struct bio *bio = ci->bio; 14625ae89a87SMike Snitzer struct dm_target_io *tio; 1463f31c21e4SNeilBrown int r; 14645ae89a87SMike Snitzer 1465318716ddSMike Snitzer tio = alloc_tio(ci, ti, 0, GFP_NOIO); 14661dd40c3eSMikulas Patocka tio->len_ptr = len; 1467c80914e8SMike Snitzer r = clone_bio(tio, bio, sector, *len); 1468072623deSMikulas Patocka if (r < 0) { 1469cfae7529SMike Snitzer free_tio(tio); 1470c80914e8SMike Snitzer return r; 1471b0d8ed4dSAlasdair G Kergon } 1472978e51baSMike Snitzer (void) __map_bio(tio); 147355a62eefSAlasdair G Kergon 1474f31c21e4SNeilBrown return 0; 147523508a96SMike Snitzer } 147655a62eefSAlasdair G Kergon 147723508a96SMike Snitzer typedef unsigned (*get_num_bios_fn)(struct dm_target *ti); 147855a62eefSAlasdair G Kergon 147923508a96SMike Snitzer static unsigned get_num_discard_bios(struct dm_target *ti) 148023508a96SMike Snitzer { 148123508a96SMike Snitzer return ti->num_discard_bios; 148223508a96SMike Snitzer } 148323508a96SMike Snitzer 148400716545SDenis Semakin static unsigned get_num_secure_erase_bios(struct dm_target *ti) 148500716545SDenis Semakin { 148600716545SDenis Semakin return ti->num_secure_erase_bios; 148700716545SDenis Semakin } 148800716545SDenis Semakin 148923508a96SMike Snitzer static unsigned get_num_write_same_bios(struct dm_target *ti) 149023508a96SMike Snitzer { 149123508a96SMike Snitzer return ti->num_write_same_bios; 149223508a96SMike Snitzer } 149323508a96SMike Snitzer 1494ac62d620SChristoph Hellwig static unsigned get_num_write_zeroes_bios(struct dm_target *ti) 1495ac62d620SChristoph Hellwig { 1496ac62d620SChristoph Hellwig return ti->num_write_zeroes_bios; 1497ac62d620SChristoph Hellwig } 1498ac62d620SChristoph Hellwig 14993d7f4562SMike Snitzer static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti, 150061697a6aSMike Snitzer unsigned num_bios) 15015ae89a87SMike Snitzer { 150251b86f9aSMichael Lass unsigned len; 15035ae89a87SMike Snitzer 15045ae89a87SMike Snitzer /* 150523508a96SMike Snitzer * Even though the device advertised support for this type of 150623508a96SMike Snitzer * request, that does not mean every target supports it, and 1507936688d7SMike Snitzer * reconfiguration might also have changed that since the 15085ae89a87SMike Snitzer * check was performed. 
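	 *
	 * A num_bios of zero therefore means the target opted out, and the
	 * operation as a whole fails with -EOPNOTSUPP below. Targets opt in
	 * from their constructor by setting the relevant counter, for
	 * example ti->num_discard_bios = 1 (an illustrative value, not
	 * taken from this file).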
15095ae89a87SMike Snitzer */ 151055a62eefSAlasdair G Kergon if (!num_bios) 15115ae89a87SMike Snitzer return -EOPNOTSUPP; 15125ae89a87SMike Snitzer 151351b86f9aSMichael Lass len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti)); 151451b86f9aSMichael Lass 15151dd40c3eSMikulas Patocka __send_duplicate_bios(ci, ti, num_bios, &len); 15165ae89a87SMike Snitzer 1517a79245b3SMike Snitzer ci->sector += len; 15183d7f4562SMike Snitzer ci->sector_count -= len; 15195ae89a87SMike Snitzer 15205ae89a87SMike Snitzer return 0; 15215ae89a87SMike Snitzer } 15225ae89a87SMike Snitzer 15233d7f4562SMike Snitzer static int __send_discard(struct clone_info *ci, struct dm_target *ti) 152423508a96SMike Snitzer { 152561697a6aSMike Snitzer return __send_changing_extent_only(ci, ti, get_num_discard_bios(ti)); 152623508a96SMike Snitzer } 152723508a96SMike Snitzer 152800716545SDenis Semakin static int __send_secure_erase(struct clone_info *ci, struct dm_target *ti) 152900716545SDenis Semakin { 153061697a6aSMike Snitzer return __send_changing_extent_only(ci, ti, get_num_secure_erase_bios(ti)); 153100716545SDenis Semakin } 153200716545SDenis Semakin 15333d7f4562SMike Snitzer static int __send_write_same(struct clone_info *ci, struct dm_target *ti) 153423508a96SMike Snitzer { 153561697a6aSMike Snitzer return __send_changing_extent_only(ci, ti, get_num_write_same_bios(ti)); 153623508a96SMike Snitzer } 153723508a96SMike Snitzer 15383d7f4562SMike Snitzer static int __send_write_zeroes(struct clone_info *ci, struct dm_target *ti) 1539ac62d620SChristoph Hellwig { 154061697a6aSMike Snitzer return __send_changing_extent_only(ci, ti, get_num_write_zeroes_bios(ti)); 1541ac62d620SChristoph Hellwig } 1542ac62d620SChristoph Hellwig 1543568c73a3SMike Snitzer static bool is_abnormal_io(struct bio *bio) 1544568c73a3SMike Snitzer { 1545568c73a3SMike Snitzer bool r = false; 1546568c73a3SMike Snitzer 1547568c73a3SMike Snitzer switch (bio_op(bio)) { 1548568c73a3SMike Snitzer case REQ_OP_DISCARD: 1549568c73a3SMike Snitzer case REQ_OP_SECURE_ERASE: 1550568c73a3SMike Snitzer case REQ_OP_WRITE_SAME: 1551568c73a3SMike Snitzer case REQ_OP_WRITE_ZEROES: 1552568c73a3SMike Snitzer r = true; 1553568c73a3SMike Snitzer break; 1554568c73a3SMike Snitzer } 1555568c73a3SMike Snitzer 1556568c73a3SMike Snitzer return r; 1557568c73a3SMike Snitzer } 1558568c73a3SMike Snitzer 15590519c71eSMike Snitzer static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti, 15600519c71eSMike Snitzer int *result) 15610519c71eSMike Snitzer { 15620519c71eSMike Snitzer struct bio *bio = ci->bio; 15630519c71eSMike Snitzer 15640519c71eSMike Snitzer if (bio_op(bio) == REQ_OP_DISCARD) 15650519c71eSMike Snitzer *result = __send_discard(ci, ti); 156600716545SDenis Semakin else if (bio_op(bio) == REQ_OP_SECURE_ERASE) 156700716545SDenis Semakin *result = __send_secure_erase(ci, ti); 15680519c71eSMike Snitzer else if (bio_op(bio) == REQ_OP_WRITE_SAME) 15690519c71eSMike Snitzer *result = __send_write_same(ci, ti); 15700519c71eSMike Snitzer else if (bio_op(bio) == REQ_OP_WRITE_ZEROES) 15710519c71eSMike Snitzer *result = __send_write_zeroes(ci, ti); 15720519c71eSMike Snitzer else 15730519c71eSMike Snitzer return false; 15740519c71eSMike Snitzer 15750519c71eSMike Snitzer return true; 15760519c71eSMike Snitzer } 15770519c71eSMike Snitzer 1578e4c93811SAlasdair G Kergon /* 1579e4c93811SAlasdair G Kergon * Select the correct strategy for processing a non-flush bio. 
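 * Abnormal requests (discard, secure erase, write same, write zeroes)
 * are fanned out through __process_abnormal_io(); anything else is
 * clipped to max_io_len() and cloned one piece at a time, with the
 * caller's loop driving the remainder of the bio.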
1580e4c93811SAlasdair G Kergon */ 1581e4c93811SAlasdair G Kergon static int __split_and_process_non_flush(struct clone_info *ci) 1582e4c93811SAlasdair G Kergon { 1583e4c93811SAlasdair G Kergon struct dm_target *ti; 15841c3b13e6SKent Overstreet unsigned len; 1585c80914e8SMike Snitzer int r; 1586e4c93811SAlasdair G Kergon 1587e4c93811SAlasdair G Kergon ti = dm_table_find_target(ci->map, ci->sector); 1588123d87d5SMikulas Patocka if (!ti) 1589e4c93811SAlasdair G Kergon return -EIO; 1590e4c93811SAlasdair G Kergon 1591568c73a3SMike Snitzer if (__process_abnormal_io(ci, ti, &r)) 15920519c71eSMike Snitzer return r; 15933d7f4562SMike Snitzer 1594e76239a3SChristoph Hellwig len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count); 1595e4c93811SAlasdair G Kergon 1596c80914e8SMike Snitzer r = __clone_and_map_data_bio(ci, ti, ci->sector, &len); 1597c80914e8SMike Snitzer if (r < 0) 1598c80914e8SMike Snitzer return r; 1599e4c93811SAlasdair G Kergon 1600e4c93811SAlasdair G Kergon ci->sector += len; 1601e4c93811SAlasdair G Kergon ci->sector_count -= len; 1602e4c93811SAlasdair G Kergon 1603e4c93811SAlasdair G Kergon return 0; 1604e4c93811SAlasdair G Kergon } 1605e4c93811SAlasdair G Kergon 1606978e51baSMike Snitzer static void init_clone_info(struct clone_info *ci, struct mapped_device *md, 1607978e51baSMike Snitzer struct dm_table *map, struct bio *bio) 1608978e51baSMike Snitzer { 1609978e51baSMike Snitzer ci->map = map; 1610978e51baSMike Snitzer ci->io = alloc_io(md, bio); 1611978e51baSMike Snitzer ci->sector = bio->bi_iter.bi_sector; 1612978e51baSMike Snitzer } 1613978e51baSMike Snitzer 1614a1e1cb72SMike Snitzer #define __dm_part_stat_sub(part, field, subnd) \ 1615a1e1cb72SMike Snitzer (part_stat_get(part, field) -= (subnd)) 1616a1e1cb72SMike Snitzer 1617e4c93811SAlasdair G Kergon /* 161814fe594dSAlasdair G Kergon * Entry point to split a bio into clones and submit them to the targets. 16191da177e4SLinus Torvalds */ 1620978e51baSMike Snitzer static blk_qc_t __split_and_process_bio(struct mapped_device *md, 162183d5e5b0SMikulas Patocka struct dm_table *map, struct bio *bio) 16221da177e4SLinus Torvalds { 16231da177e4SLinus Torvalds struct clone_info ci; 1624978e51baSMike Snitzer blk_qc_t ret = BLK_QC_T_NONE; 1625512875bdSJun'ichi Nomura int error = 0; 16261da177e4SLinus Torvalds 1627978e51baSMike Snitzer init_clone_info(&ci, md, map, bio); 1628bd2a49b8SAlasdair G Kergon 16291eff9d32SJens Axboe if (bio->bi_opf & REQ_PREFLUSH) { 1630dbe3ece1SJens Axboe struct bio flush_bio; 1631dbe3ece1SJens Axboe 1632dbe3ece1SJens Axboe /* 1633dbe3ece1SJens Axboe * Use an on-stack bio for this, it's safe since we don't 1634dbe3ece1SJens Axboe * need to reference it after submit. It's just used as 1635dbe3ece1SJens Axboe * the basis for the clone(s). 
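		 *
		 * (bio_init() below pairs with the bio_uninit() that follows
		 * __send_empty_flush(); only clones of flush_bio, never
		 * flush_bio itself, reach the targets.)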
1636dbe3ece1SJens Axboe */ 1637dbe3ece1SJens Axboe bio_init(&flush_bio, NULL, 0); 1638dbe3ece1SJens Axboe flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC; 1639dbe3ece1SJens Axboe ci.bio = &flush_bio; 1640b372d360SMike Snitzer ci.sector_count = 0; 164114fe594dSAlasdair G Kergon error = __send_empty_flush(&ci); 16424ef2c5c2SChristoph Hellwig bio_uninit(ci.bio); 1643b372d360SMike Snitzer /* dec_pending submits any data associated with flush */ 16442e2d6f7eSAjay Joshi } else if (op_is_zone_mgmt(bio_op(bio))) { 1645a4aa5e56SDamien Le Moal ci.bio = bio; 1646a4aa5e56SDamien Le Moal ci.sector_count = 0; 1647a4aa5e56SDamien Le Moal error = __split_and_process_non_flush(&ci); 1648b372d360SMike Snitzer } else { 16496a8736d1STejun Heo ci.bio = bio; 16501da177e4SLinus Torvalds ci.sector_count = bio_sectors(bio); 165118a25da8SNeilBrown while (ci.sector_count && !error) { 165214fe594dSAlasdair G Kergon error = __split_and_process_non_flush(&ci); 165318a25da8SNeilBrown if (current->bio_list && ci.sector_count && !error) { 165418a25da8SNeilBrown /* 1655*ed00aabdSChristoph Hellwig * Remainder must be passed to submit_bio_noacct() 165618a25da8SNeilBrown * so that it gets handled *after* bios already submitted 165718a25da8SNeilBrown * have been completely processed. 165818a25da8SNeilBrown * We take a clone of the original to store in 1659745dc570SMike Snitzer * ci.io->orig_bio to be used by end_io_acct() and 166018a25da8SNeilBrown * for dec_pending to use for completion handling. 166118a25da8SNeilBrown */ 1662f21c601aSMike Snitzer struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count, 1663f21c601aSMike Snitzer GFP_NOIO, &md->queue->bio_split); 1664745dc570SMike Snitzer ci.io->orig_bio = b; 1665a1e1cb72SMike Snitzer 1666a1e1cb72SMike Snitzer /* 1667a1e1cb72SMike Snitzer * Adjust IO stats for each split, otherwise upon queue 1668a1e1cb72SMike Snitzer * reentry there will be redundant IO accounting. 1669a1e1cb72SMike Snitzer * NOTE: this is a stop-gap fix, a proper fix involves 1670a1e1cb72SMike Snitzer * significant refactoring of DM core's bio splitting 1671a1e1cb72SMike Snitzer * (by eliminating DM's splitting and just using bio_split) 1672a1e1cb72SMike Snitzer */ 1673a1e1cb72SMike Snitzer part_stat_lock(); 1674a1e1cb72SMike Snitzer __dm_part_stat_sub(&dm_disk(md)->part0, 1675a1e1cb72SMike Snitzer sectors[op_stat_group(bio_op(bio))], ci.sector_count); 1676a1e1cb72SMike Snitzer part_stat_unlock(); 1677a1e1cb72SMike Snitzer 167818a25da8SNeilBrown bio_chain(b, bio); 1679075c18c3SMike Snitzer trace_block_split(md->queue, b, bio->bi_iter.bi_sector); 1680*ed00aabdSChristoph Hellwig ret = submit_bio_noacct(bio); 168118a25da8SNeilBrown break; 168218a25da8SNeilBrown } 168318a25da8SNeilBrown } 1684d87f4c14STejun Heo } 16851da177e4SLinus Torvalds 16861da177e4SLinus Torvalds /* drop the extra reference count */ 168754385bf7SBart Van Assche dec_pending(ci.io, errno_to_blk_status(error)); 1688978e51baSMike Snitzer return ret; 16891da177e4SLinus Torvalds } 16901da177e4SLinus Torvalds 16911da177e4SLinus Torvalds /* 1692978e51baSMike Snitzer * Optimized variant of __split_and_process_bio that leverages the 1693978e51baSMike Snitzer * fact that targets that use it do _not_ have a need to split bios. 
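 *
 * It is only reached from dm_process_bio() for DM_TYPE_NVME_BIO_BASED
 * devices: the single immutable target is handed one clone covering
 * the whole bio (when entered from ->submit_bio context,
 * dm_queue_split() has already trimmed the bio to max_io_len()), plus
 * the usual flush and abnormal-io special cases.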
16941da177e4SLinus Torvalds */ 1695568c73a3SMike Snitzer static blk_qc_t __process_bio(struct mapped_device *md, struct dm_table *map, 1696568c73a3SMike Snitzer struct bio *bio, struct dm_target *ti) 16971da177e4SLinus Torvalds { 1698978e51baSMike Snitzer struct clone_info ci; 1699978e51baSMike Snitzer blk_qc_t ret = BLK_QC_T_NONE; 1700978e51baSMike Snitzer int error = 0; 1701978e51baSMike Snitzer 1702978e51baSMike Snitzer init_clone_info(&ci, md, map, bio); 1703978e51baSMike Snitzer 1704978e51baSMike Snitzer if (bio->bi_opf & REQ_PREFLUSH) { 1705dbe3ece1SJens Axboe struct bio flush_bio; 1706dbe3ece1SJens Axboe 1707dbe3ece1SJens Axboe /* 1708dbe3ece1SJens Axboe * Use an on-stack bio for this, it's safe since we don't 1709dbe3ece1SJens Axboe * need to reference it after submit. It's just used as 1710dbe3ece1SJens Axboe * the basis for the clone(s). 1711dbe3ece1SJens Axboe */ 1712dbe3ece1SJens Axboe bio_init(&flush_bio, NULL, 0); 1713dbe3ece1SJens Axboe flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC; 1714dbe3ece1SJens Axboe ci.bio = &flush_bio; 1715978e51baSMike Snitzer ci.sector_count = 0; 1716978e51baSMike Snitzer error = __send_empty_flush(&ci); 17174ef2c5c2SChristoph Hellwig bio_uninit(ci.bio); 1718978e51baSMike Snitzer /* dec_pending submits any data associated with flush */ 1719978e51baSMike Snitzer } else { 1720978e51baSMike Snitzer struct dm_target_io *tio; 1721978e51baSMike Snitzer 1722978e51baSMike Snitzer ci.bio = bio; 1723978e51baSMike Snitzer ci.sector_count = bio_sectors(bio); 1724568c73a3SMike Snitzer if (__process_abnormal_io(&ci, ti, &error)) 17250519c71eSMike Snitzer goto out; 17260519c71eSMike Snitzer 17270519c71eSMike Snitzer tio = alloc_tio(&ci, ti, 0, GFP_NOIO); 1728978e51baSMike Snitzer ret = __clone_and_map_simple_bio(&ci, tio, NULL); 1729978e51baSMike Snitzer } 1730978e51baSMike Snitzer out: 1731978e51baSMike Snitzer /* drop the extra reference count */ 1732978e51baSMike Snitzer dec_pending(ci.io, errno_to_blk_status(error)); 1733978e51baSMike Snitzer return ret; 1734978e51baSMike Snitzer } 1735978e51baSMike Snitzer 1736568c73a3SMike Snitzer static void dm_queue_split(struct mapped_device *md, struct dm_target *ti, struct bio **bio) 1737568c73a3SMike Snitzer { 1738568c73a3SMike Snitzer unsigned len, sector_count; 1739568c73a3SMike Snitzer 1740568c73a3SMike Snitzer sector_count = bio_sectors(*bio); 1741568c73a3SMike Snitzer len = min_t(sector_t, max_io_len((*bio)->bi_iter.bi_sector, ti), sector_count); 1742568c73a3SMike Snitzer 1743568c73a3SMike Snitzer if (sector_count > len) { 1744568c73a3SMike Snitzer struct bio *split = bio_split(*bio, len, GFP_NOIO, &md->queue->bio_split); 1745568c73a3SMike Snitzer 1746568c73a3SMike Snitzer bio_chain(split, *bio); 1747568c73a3SMike Snitzer trace_block_split(md->queue, split, (*bio)->bi_iter.bi_sector); 1748*ed00aabdSChristoph Hellwig submit_bio_noacct(*bio); 1749568c73a3SMike Snitzer *bio = split; 1750568c73a3SMike Snitzer } 1751568c73a3SMike Snitzer } 1752568c73a3SMike Snitzer 17536548c7c5SMike Snitzer static blk_qc_t dm_process_bio(struct mapped_device *md, 17546548c7c5SMike Snitzer struct dm_table *map, struct bio *bio) 17556548c7c5SMike Snitzer { 1756568c73a3SMike Snitzer blk_qc_t ret = BLK_QC_T_NONE; 1757568c73a3SMike Snitzer struct dm_target *ti = md->immutable_target; 1758568c73a3SMike Snitzer 1759568c73a3SMike Snitzer if (unlikely(!map)) { 1760568c73a3SMike Snitzer bio_io_error(bio); 1761568c73a3SMike Snitzer return ret; 1762568c73a3SMike Snitzer } 1763568c73a3SMike Snitzer 1764568c73a3SMike Snitzer if (!ti) { 
1765568c73a3SMike Snitzer 		ti = dm_table_find_target(map, bio->bi_iter.bi_sector);
1766123d87d5SMikulas Patocka 		if (unlikely(!ti)) {
1767568c73a3SMike Snitzer 			bio_io_error(bio);
1768568c73a3SMike Snitzer 			return ret;
1769568c73a3SMike Snitzer 		}
1770568c73a3SMike Snitzer 	}
1771568c73a3SMike Snitzer 
1772568c73a3SMike Snitzer 	/*
1773c62b37d9SChristoph Hellwig 	 * If we are in ->submit_bio context we need to use blk_queue_split(),
1774568c73a3SMike Snitzer 	 * otherwise queue_limits for abnormal requests (e.g. discard,
1775568c73a3SMike Snitzer 	 * writesame, etc) won't be imposed.
1776568c73a3SMike Snitzer 	 */
1777568c73a3SMike Snitzer 	if (current->bio_list) {
1778120c9257SMike Snitzer 		if (is_abnormal_io(bio))
1779f695ca38SChristoph Hellwig 			blk_queue_split(&bio);
1780120c9257SMike Snitzer 		else
1781568c73a3SMike Snitzer 			dm_queue_split(md, ti, &bio);
1782568c73a3SMike Snitzer 	}
1783568c73a3SMike Snitzer 
17846548c7c5SMike Snitzer 	if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
1785568c73a3SMike Snitzer 		return __process_bio(md, map, bio, ti);
17866548c7c5SMike Snitzer 	else
17876548c7c5SMike Snitzer 		return __split_and_process_bio(md, map, bio);
17886548c7c5SMike Snitzer }
17896548c7c5SMike Snitzer 
1790c62b37d9SChristoph Hellwig static blk_qc_t dm_submit_bio(struct bio *bio)
17911da177e4SLinus Torvalds {
1792c4a59c4eSChristoph Hellwig 	struct mapped_device *md = bio->bi_disk->private_data;
1793978e51baSMike Snitzer 	blk_qc_t ret = BLK_QC_T_NONE;
179483d5e5b0SMikulas Patocka 	int srcu_idx;
179583d5e5b0SMikulas Patocka 	struct dm_table *map;
17961da177e4SLinus Torvalds 
1797ac7c5675SChristoph Hellwig 	if (dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) {
1798ac7c5675SChristoph Hellwig 		/*
1799ac7c5675SChristoph Hellwig 		 * We are called with a live reference on q_usage_counter, but
1800ac7c5675SChristoph Hellwig 		 * that one will be released as soon as we return. Grab an
1801c62b37d9SChristoph Hellwig 		 * extra one as blk_mq_submit_bio expects to be able to consume
1802c62b37d9SChristoph Hellwig 		 * a reference (which lives until the request is freed in case a
1803c62b37d9SChristoph Hellwig 		 * request is allocated).
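		 *
		 * (So the percpu_ref_get() below is not leaked: it is exactly
		 * the reference blk_mq_submit_bio() will put once the request
		 * it allocates has been freed.)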
1804ac7c5675SChristoph Hellwig */ 1805c62b37d9SChristoph Hellwig percpu_ref_get(&bio->bi_disk->queue->q_usage_counter); 1806c62b37d9SChristoph Hellwig return blk_mq_submit_bio(bio); 1807ac7c5675SChristoph Hellwig } 18088cf7961dSChristoph Hellwig 180983d5e5b0SMikulas Patocka map = dm_get_live_table(md, &srcu_idx); 18101da177e4SLinus Torvalds 18116a8736d1STejun Heo /* if we're suspended, we have to queue this io for later */ 18126a8736d1STejun Heo if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { 181383d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 18141da177e4SLinus Torvalds 18151eff9d32SJens Axboe if (!(bio->bi_opf & REQ_RAHEAD)) 181692c63902SMikulas Patocka queue_io(md, bio); 18176a8736d1STejun Heo else 18186a8736d1STejun Heo bio_io_error(bio); 1819978e51baSMike Snitzer return ret; 18201da177e4SLinus Torvalds } 18211da177e4SLinus Torvalds 18226548c7c5SMike Snitzer ret = dm_process_bio(md, map, bio); 1823978e51baSMike Snitzer 182483d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 1825978e51baSMike Snitzer return ret; 1826978e51baSMike Snitzer } 1827978e51baSMike Snitzer 18281da177e4SLinus Torvalds static int dm_any_congested(void *congested_data, int bdi_bits) 18291da177e4SLinus Torvalds { 18308a57dfc6SChandra Seetharaman int r = bdi_bits; 18318a57dfc6SChandra Seetharaman struct mapped_device *md = congested_data; 18328a57dfc6SChandra Seetharaman struct dm_table *map; 18331da177e4SLinus Torvalds 18341eb787ecSAlasdair G Kergon if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 1835e522c039SMike Snitzer if (dm_request_based(md)) { 1836cec47e3dSKiyoshi Ueda /* 1837e522c039SMike Snitzer * With request-based DM we only need to check the 1838e522c039SMike Snitzer * top-level queue for congestion. 1839cec47e3dSKiyoshi Ueda */ 1840974f51e8SHou Tao struct backing_dev_info *bdi = md->queue->backing_dev_info; 1841974f51e8SHou Tao r = bdi->wb.congested->state & bdi_bits; 1842e522c039SMike Snitzer } else { 1843e522c039SMike Snitzer map = dm_get_live_table_fast(md); 1844e522c039SMike Snitzer if (map) 18451da177e4SLinus Torvalds r = dm_table_any_congested(map, bdi_bits); 184683d5e5b0SMikulas Patocka dm_put_live_table_fast(md); 18478a57dfc6SChandra Seetharaman } 1848e522c039SMike Snitzer } 18498a57dfc6SChandra Seetharaman 18501da177e4SLinus Torvalds return r; 18511da177e4SLinus Torvalds } 18521da177e4SLinus Torvalds 18531da177e4SLinus Torvalds /*----------------------------------------------------------------- 18541da177e4SLinus Torvalds * An IDR is used to keep track of allocated minor numbers. 18551da177e4SLinus Torvalds *---------------------------------------------------------------*/ 18562b06cfffSAlasdair G Kergon static void free_minor(int minor) 18571da177e4SLinus Torvalds { 1858f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 18591da177e4SLinus Torvalds idr_remove(&_minor_idr, minor); 1860f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 18611da177e4SLinus Torvalds } 18621da177e4SLinus Torvalds 18631da177e4SLinus Torvalds /* 18641da177e4SLinus Torvalds * See if the device with a specific minor # is free. 
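 *
 * Note the allocation pattern used below: idr_preload(GFP_KERNEL) does
 * the sleeping allocation up front so that idr_alloc() can run with
 * GFP_NOWAIT while _minor_lock (a spinlock) is held; -ENOSPC from
 * idr_alloc() is translated to -EBUSY, meaning the requested minor is
 * already in use.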
18651da177e4SLinus Torvalds */ 1866cf13ab8eSFrederik Deweerdt static int specific_minor(int minor) 18671da177e4SLinus Torvalds { 1868c9d76be6STejun Heo int r; 18691da177e4SLinus Torvalds 18701da177e4SLinus Torvalds if (minor >= (1 << MINORBITS)) 18711da177e4SLinus Torvalds return -EINVAL; 18721da177e4SLinus Torvalds 1873c9d76be6STejun Heo idr_preload(GFP_KERNEL); 1874f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 18751da177e4SLinus Torvalds 1876c9d76be6STejun Heo r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT); 18771da177e4SLinus Torvalds 1878f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 1879c9d76be6STejun Heo idr_preload_end(); 1880c9d76be6STejun Heo if (r < 0) 1881c9d76be6STejun Heo return r == -ENOSPC ? -EBUSY : r; 1882c9d76be6STejun Heo return 0; 18831da177e4SLinus Torvalds } 18841da177e4SLinus Torvalds 1885cf13ab8eSFrederik Deweerdt static int next_free_minor(int *minor) 18861da177e4SLinus Torvalds { 1887c9d76be6STejun Heo int r; 18881da177e4SLinus Torvalds 1889c9d76be6STejun Heo idr_preload(GFP_KERNEL); 1890f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 18911da177e4SLinus Torvalds 1892c9d76be6STejun Heo r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT); 18931da177e4SLinus Torvalds 1894f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 1895c9d76be6STejun Heo idr_preload_end(); 1896c9d76be6STejun Heo if (r < 0) 18971da177e4SLinus Torvalds return r; 1898c9d76be6STejun Heo *minor = r; 1899c9d76be6STejun Heo return 0; 19001da177e4SLinus Torvalds } 19011da177e4SLinus Torvalds 190283d5cde4SAlexey Dobriyan static const struct block_device_operations dm_blk_dops; 1903f26c5719SDan Williams static const struct dax_operations dm_dax_ops; 19041da177e4SLinus Torvalds 190553d5914fSMikulas Patocka static void dm_wq_work(struct work_struct *work); 190653d5914fSMikulas Patocka 19070f20972fSMike Snitzer static void cleanup_mapped_device(struct mapped_device *md) 19080f20972fSMike Snitzer { 19090f20972fSMike Snitzer if (md->wq) 19100f20972fSMike Snitzer destroy_workqueue(md->wq); 19116f1c819cSKent Overstreet bioset_exit(&md->bs); 19126f1c819cSKent Overstreet bioset_exit(&md->io_bs); 19130f20972fSMike Snitzer 1914f26c5719SDan Williams if (md->dax_dev) { 1915f26c5719SDan Williams kill_dax(md->dax_dev); 1916f26c5719SDan Williams put_dax(md->dax_dev); 1917f26c5719SDan Williams md->dax_dev = NULL; 1918f26c5719SDan Williams } 1919f26c5719SDan Williams 19200f20972fSMike Snitzer if (md->disk) { 19210f20972fSMike Snitzer spin_lock(&_minor_lock); 19220f20972fSMike Snitzer md->disk->private_data = NULL; 19230f20972fSMike Snitzer spin_unlock(&_minor_lock); 19240f20972fSMike Snitzer del_gendisk(md->disk); 19250f20972fSMike Snitzer put_disk(md->disk); 19260f20972fSMike Snitzer } 19270f20972fSMike Snitzer 19280f20972fSMike Snitzer if (md->queue) 19290f20972fSMike Snitzer blk_cleanup_queue(md->queue); 19300f20972fSMike Snitzer 1931d09960b0STahsin Erdogan cleanup_srcu_struct(&md->io_barrier); 1932d09960b0STahsin Erdogan 19330f20972fSMike Snitzer if (md->bdev) { 19340f20972fSMike Snitzer bdput(md->bdev); 19350f20972fSMike Snitzer md->bdev = NULL; 19360f20972fSMike Snitzer } 19374cc96131SMike Snitzer 1938d5ffebddSMike Snitzer mutex_destroy(&md->suspend_lock); 1939d5ffebddSMike Snitzer mutex_destroy(&md->type_lock); 1940d5ffebddSMike Snitzer mutex_destroy(&md->table_devices_lock); 1941d5ffebddSMike Snitzer 19424cc96131SMike Snitzer dm_mq_cleanup_mapped_device(md); 19430f20972fSMike Snitzer } 19440f20972fSMike Snitzer 19451da177e4SLinus Torvalds /* 19461da177e4SLinus Torvalds 
* Allocate and initialise a blank device with a given minor. 19471da177e4SLinus Torvalds */ 19482b06cfffSAlasdair G Kergon static struct mapped_device *alloc_dev(int minor) 19491da177e4SLinus Torvalds { 1950115485e8SMike Snitzer int r, numa_node_id = dm_get_numa_node(); 1951115485e8SMike Snitzer struct mapped_device *md; 1952ba61fdd1SJeff Mahoney void *old_md; 19531da177e4SLinus Torvalds 1954856eb091SMikulas Patocka md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id); 19551da177e4SLinus Torvalds if (!md) { 19561da177e4SLinus Torvalds DMWARN("unable to allocate device, out of memory."); 19571da177e4SLinus Torvalds return NULL; 19581da177e4SLinus Torvalds } 19591da177e4SLinus Torvalds 196010da4f79SJeff Mahoney if (!try_module_get(THIS_MODULE)) 19616ed7ade8SMilan Broz goto bad_module_get; 196210da4f79SJeff Mahoney 19631da177e4SLinus Torvalds /* get a minor number for the dev */ 19642b06cfffSAlasdair G Kergon if (minor == DM_ANY_MINOR) 1965cf13ab8eSFrederik Deweerdt r = next_free_minor(&minor); 19662b06cfffSAlasdair G Kergon else 1967cf13ab8eSFrederik Deweerdt r = specific_minor(minor); 19681da177e4SLinus Torvalds if (r < 0) 19696ed7ade8SMilan Broz goto bad_minor; 19701da177e4SLinus Torvalds 197183d5e5b0SMikulas Patocka r = init_srcu_struct(&md->io_barrier); 197283d5e5b0SMikulas Patocka if (r < 0) 197383d5e5b0SMikulas Patocka goto bad_io_barrier; 197483d5e5b0SMikulas Patocka 1975115485e8SMike Snitzer md->numa_node_id = numa_node_id; 1976591ddcfcSMike Snitzer md->init_tio_pdu = false; 1977a5664dadSMike Snitzer md->type = DM_TYPE_NONE; 1978e61290a4SDaniel Walker mutex_init(&md->suspend_lock); 1979a5664dadSMike Snitzer mutex_init(&md->type_lock); 198086f1152bSBenjamin Marzinski mutex_init(&md->table_devices_lock); 1981022c2611SMikulas Patocka spin_lock_init(&md->deferred_lock); 19821da177e4SLinus Torvalds atomic_set(&md->holders, 1); 19835c6bd75dSAlasdair G Kergon atomic_set(&md->open_count, 0); 19841da177e4SLinus Torvalds atomic_set(&md->event_nr, 0); 19857a8c3d3bSMike Anderson atomic_set(&md->uevent_seq, 0); 19867a8c3d3bSMike Anderson INIT_LIST_HEAD(&md->uevent_list); 198786f1152bSBenjamin Marzinski INIT_LIST_HEAD(&md->table_devices); 19887a8c3d3bSMike Anderson spin_lock_init(&md->uevent_lock); 19891da177e4SLinus Torvalds 199047ace7e0SMike Snitzer /* 1991c62b37d9SChristoph Hellwig * default to bio-based until DM table is loaded and md->type 1992c62b37d9SChristoph Hellwig * established. If request-based table is loaded: blk-mq will 1993c62b37d9SChristoph Hellwig * override accordingly. 
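	 *
	 * (The type-specific setup, e.g. blk-mq initialization for
	 * request-based tables, happens later in dm_setup_md_queue() once
	 * the table type is actually known.)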
199447ace7e0SMike Snitzer */ 1995c62b37d9SChristoph Hellwig md->queue = blk_alloc_queue(numa_node_id); 19963d745ea5SChristoph Hellwig if (!md->queue) 19973d745ea5SChristoph Hellwig goto bad; 19981da177e4SLinus Torvalds 1999c12c9a3cSMike Snitzer md->disk = alloc_disk_node(1, md->numa_node_id); 20001da177e4SLinus Torvalds if (!md->disk) 20010f20972fSMike Snitzer goto bad; 20021da177e4SLinus Torvalds 2003f0b04115SJeff Mahoney init_waitqueue_head(&md->wait); 200453d5914fSMikulas Patocka INIT_WORK(&md->work, dm_wq_work); 2005f0b04115SJeff Mahoney init_waitqueue_head(&md->eventq); 20062995fa78SMikulas Patocka init_completion(&md->kobj_holder.completion); 2007f0b04115SJeff Mahoney 20081da177e4SLinus Torvalds md->disk->major = _major; 20091da177e4SLinus Torvalds md->disk->first_minor = minor; 20101da177e4SLinus Torvalds md->disk->fops = &dm_blk_dops; 20111da177e4SLinus Torvalds md->disk->queue = md->queue; 20121da177e4SLinus Torvalds md->disk->private_data = md; 20131da177e4SLinus Torvalds sprintf(md->disk->disk_name, "dm-%d", minor); 2014f26c5719SDan Williams 2015976431b0SDan Williams if (IS_ENABLED(CONFIG_DAX_DRIVER)) { 2016fefc1d97SPankaj Gupta md->dax_dev = alloc_dax(md, md->disk->disk_name, 2017fefc1d97SPankaj Gupta &dm_dax_ops, 0); 20184e4ced93SVivek Goyal if (IS_ERR(md->dax_dev)) 2019f26c5719SDan Williams goto bad; 2020976431b0SDan Williams } 2021f26c5719SDan Williams 2022c100ec49SMike Snitzer add_disk_no_queue_reg(md->disk); 20237e51f257SMike Anderson format_dev_t(md->name, MKDEV(_major, minor)); 20241da177e4SLinus Torvalds 2025670368a8STejun Heo md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0); 2026304f3f6aSMilan Broz if (!md->wq) 20270f20972fSMike Snitzer goto bad; 2028304f3f6aSMilan Broz 202932a926daSMikulas Patocka md->bdev = bdget_disk(md->disk, 0); 203032a926daSMikulas Patocka if (!md->bdev) 20310f20972fSMike Snitzer goto bad; 203232a926daSMikulas Patocka 2033fd2ed4d2SMikulas Patocka dm_stats_init(&md->stats); 2034fd2ed4d2SMikulas Patocka 2035ba61fdd1SJeff Mahoney /* Populate the mapping, nobody knows we exist yet */ 2036f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 2037ba61fdd1SJeff Mahoney old_md = idr_replace(&_minor_idr, md, minor); 2038f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 2039ba61fdd1SJeff Mahoney 2040ba61fdd1SJeff Mahoney BUG_ON(old_md != MINOR_ALLOCED); 2041ba61fdd1SJeff Mahoney 20421da177e4SLinus Torvalds return md; 20431da177e4SLinus Torvalds 20440f20972fSMike Snitzer bad: 20450f20972fSMike Snitzer cleanup_mapped_device(md); 204683d5e5b0SMikulas Patocka bad_io_barrier: 20471da177e4SLinus Torvalds free_minor(minor); 20486ed7ade8SMilan Broz bad_minor: 204910da4f79SJeff Mahoney module_put(THIS_MODULE); 20506ed7ade8SMilan Broz bad_module_get: 2051856eb091SMikulas Patocka kvfree(md); 20521da177e4SLinus Torvalds return NULL; 20531da177e4SLinus Torvalds } 20541da177e4SLinus Torvalds 2055ae9da83fSJun'ichi Nomura static void unlock_fs(struct mapped_device *md); 2056ae9da83fSJun'ichi Nomura 20571da177e4SLinus Torvalds static void free_dev(struct mapped_device *md) 20581da177e4SLinus Torvalds { 2059f331c029STejun Heo int minor = MINOR(disk_devt(md->disk)); 206063d94e48SJun'ichi Nomura 2061ae9da83fSJun'ichi Nomura unlock_fs(md); 20622eb6e1e3SKeith Busch 20630f20972fSMike Snitzer cleanup_mapped_device(md); 20640f20972fSMike Snitzer 20650f20972fSMike Snitzer free_table_devices(&md->table_devices); 20660f20972fSMike Snitzer dm_stats_cleanup(&md->stats); 206763a4f065SMike Snitzer free_minor(minor); 206863a4f065SMike Snitzer 206910da4f79SJeff Mahoney 
module_put(THIS_MODULE); 2070856eb091SMikulas Patocka kvfree(md); 20711da177e4SLinus Torvalds } 20721da177e4SLinus Torvalds 20732a2a4c51SJens Axboe static int __bind_mempools(struct mapped_device *md, struct dm_table *t) 2074e6ee8c0bSKiyoshi Ueda { 2075c0820cf5SMikulas Patocka struct dm_md_mempools *p = dm_table_get_md_mempools(t); 20762a2a4c51SJens Axboe int ret = 0; 2077e6ee8c0bSKiyoshi Ueda 2078545ed20eSToshi Kani if (dm_table_bio_based(t)) { 2079c0820cf5SMikulas Patocka /* 208064f52b0eSMike Snitzer * The md may already have mempools that need changing. 208164f52b0eSMike Snitzer * If so, reload bioset because front_pad may have changed 208216245bdcSJun'ichi Nomura * because a different table was loaded. 2083c0820cf5SMikulas Patocka */ 20846f1c819cSKent Overstreet bioset_exit(&md->bs); 20856f1c819cSKent Overstreet bioset_exit(&md->io_bs); 20860776aa0eSMike Snitzer 20876f1c819cSKent Overstreet } else if (bioset_initialized(&md->bs)) { 2088cbc4e3c1SMike Snitzer /* 20894e6e36c3SMike Snitzer * There's no need to reload with request-based dm 20904e6e36c3SMike Snitzer * because the size of front_pad doesn't change. 20914e6e36c3SMike Snitzer * Note for future: If you are to reload bioset, 20924e6e36c3SMike Snitzer * prep-ed requests in the queue may refer 20934e6e36c3SMike Snitzer * to bio from the old bioset, so you must walk 20944e6e36c3SMike Snitzer * through the queue to unprep. 2095cbc4e3c1SMike Snitzer */ 2096cbc4e3c1SMike Snitzer goto out; 2097cbc4e3c1SMike Snitzer } 2098cbc4e3c1SMike Snitzer 20996f1c819cSKent Overstreet BUG_ON(!p || 21006f1c819cSKent Overstreet bioset_initialized(&md->bs) || 21016f1c819cSKent Overstreet bioset_initialized(&md->io_bs)); 2102e6ee8c0bSKiyoshi Ueda 21032a2a4c51SJens Axboe ret = bioset_init_from_src(&md->bs, &p->bs); 21042a2a4c51SJens Axboe if (ret) 21052a2a4c51SJens Axboe goto out; 21062a2a4c51SJens Axboe ret = bioset_init_from_src(&md->io_bs, &p->io_bs); 21072a2a4c51SJens Axboe if (ret) 21082a2a4c51SJens Axboe bioset_exit(&md->bs); 2109e6ee8c0bSKiyoshi Ueda out: 211002233342SMike Snitzer /* mempool bind completed, no longer need any mempools in the table */ 2111e6ee8c0bSKiyoshi Ueda dm_table_free_md_mempools(t); 21122a2a4c51SJens Axboe return ret; 2113e6ee8c0bSKiyoshi Ueda } 2114e6ee8c0bSKiyoshi Ueda 21151da177e4SLinus Torvalds /* 21161da177e4SLinus Torvalds * Bind a table to the device. 21171da177e4SLinus Torvalds */ 21181da177e4SLinus Torvalds static void event_callback(void *context) 21191da177e4SLinus Torvalds { 21207a8c3d3bSMike Anderson unsigned long flags; 21217a8c3d3bSMike Anderson LIST_HEAD(uevents); 21221da177e4SLinus Torvalds struct mapped_device *md = (struct mapped_device *) context; 21231da177e4SLinus Torvalds 21247a8c3d3bSMike Anderson spin_lock_irqsave(&md->uevent_lock, flags); 21257a8c3d3bSMike Anderson list_splice_init(&md->uevent_list, &uevents); 21267a8c3d3bSMike Anderson spin_unlock_irqrestore(&md->uevent_lock, flags); 21277a8c3d3bSMike Anderson 2128ed9e1982STejun Heo dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); 21297a8c3d3bSMike Anderson 21301da177e4SLinus Torvalds atomic_inc(&md->event_nr); 21311da177e4SLinus Torvalds wake_up(&md->eventq); 213262e08243SMikulas Patocka dm_issue_global_event(); 21331da177e4SLinus Torvalds } 21341da177e4SLinus Torvalds 2135c217649bSMike Snitzer /* 2136c217649bSMike Snitzer * Protected by md->suspend_lock obtained by dm_swap_table(). 
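 *
 * (lockdep_assert_held() below documents that requirement and, with
 * lockdep enabled, enforces it; i_size_write() keeps the bdev inode
 * size in sync with the new gendisk capacity.)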
2137c217649bSMike Snitzer */ 21384e90188bSAlasdair G Kergon static void __set_size(struct mapped_device *md, sector_t size) 21391da177e4SLinus Torvalds { 21401ea0654eSBart Van Assche lockdep_assert_held(&md->suspend_lock); 21411ea0654eSBart Van Assche 21424e90188bSAlasdair G Kergon set_capacity(md->disk, size); 21431da177e4SLinus Torvalds 2144db8fef4fSMikulas Patocka i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT); 21451da177e4SLinus Torvalds } 21461da177e4SLinus Torvalds 2147042d2a9bSAlasdair G Kergon /* 2148042d2a9bSAlasdair G Kergon * Returns old map, which caller must destroy. 2149042d2a9bSAlasdair G Kergon */ 2150042d2a9bSAlasdair G Kergon static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, 2151754c5fc7SMike Snitzer struct queue_limits *limits) 21521da177e4SLinus Torvalds { 2153042d2a9bSAlasdair G Kergon struct dm_table *old_map; 2154165125e1SJens Axboe struct request_queue *q = md->queue; 2155978e51baSMike Snitzer bool request_based = dm_table_request_based(t); 21561da177e4SLinus Torvalds sector_t size; 21572a2a4c51SJens Axboe int ret; 21581da177e4SLinus Torvalds 21595a8f1f80SBart Van Assche lockdep_assert_held(&md->suspend_lock); 21605a8f1f80SBart Van Assche 21611da177e4SLinus Torvalds size = dm_table_get_size(t); 21623ac51e74SDarrick J. Wong 21633ac51e74SDarrick J. Wong /* 21643ac51e74SDarrick J. Wong * Wipe any geometry if the size of the table changed. 21653ac51e74SDarrick J. Wong */ 2166fd2ed4d2SMikulas Patocka if (size != dm_get_size(md)) 21673ac51e74SDarrick J. Wong memset(&md->geometry, 0, sizeof(md->geometry)); 21683ac51e74SDarrick J. Wong 21694e90188bSAlasdair G Kergon __set_size(md, size); 21701da177e4SLinus Torvalds 2171cf222b37SAlasdair G Kergon dm_table_event_callback(t, event_callback, md); 21722ca3310eSAlasdair G Kergon 2173e6ee8c0bSKiyoshi Ueda /* 2174e6ee8c0bSKiyoshi Ueda * The queue hasn't been stopped yet, if the old table type wasn't 2175e6ee8c0bSKiyoshi Ueda * for request-based during suspension. So stop it to prevent 2176e6ee8c0bSKiyoshi Ueda * I/O mapping before resume. 2177e6ee8c0bSKiyoshi Ueda * This must be done before setting the queue restrictions, 2178e6ee8c0bSKiyoshi Ueda * because request-based dm may be run just after the setting. 2179e6ee8c0bSKiyoshi Ueda */ 2180978e51baSMike Snitzer if (request_based) 2181eca7ee6dSMike Snitzer dm_stop_queue(q); 2182978e51baSMike Snitzer 2183978e51baSMike Snitzer if (request_based || md->type == DM_TYPE_NVME_BIO_BASED) { 218416f12266SMike Snitzer /* 2185978e51baSMike Snitzer * Leverage the fact that request-based DM targets and 2186978e51baSMike Snitzer * NVMe bio based targets are immutable singletons 2187978e51baSMike Snitzer * - used to optimize both dm_request_fn and dm_mq_queue_rq; 2188978e51baSMike Snitzer * and __process_bio. 
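		 *
		 * Caching the target here is what lets the hot path start from
		 * md->immutable_target (see dm_process_bio()) instead of doing
		 * a dm_table_find_target() lookup for every bio.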
218916f12266SMike Snitzer */ 219016f12266SMike Snitzer md->immutable_target = dm_table_get_immutable_target(t); 219116f12266SMike Snitzer } 2192e6ee8c0bSKiyoshi Ueda 21932a2a4c51SJens Axboe ret = __bind_mempools(md, t); 21942a2a4c51SJens Axboe if (ret) { 21952a2a4c51SJens Axboe old_map = ERR_PTR(ret); 21962a2a4c51SJens Axboe goto out; 21972a2a4c51SJens Axboe } 2198e6ee8c0bSKiyoshi Ueda 2199a12f5d48SEric Dumazet old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 22001d3aa6f6SMike Snitzer rcu_assign_pointer(md->map, (void *)t); 220136a0456fSAlasdair G Kergon md->immutable_target_type = dm_table_get_immutable_target_type(t); 220236a0456fSAlasdair G Kergon 2203754c5fc7SMike Snitzer dm_table_set_restrictions(t, q, limits); 220441abc4e1SHannes Reinecke if (old_map) 220583d5e5b0SMikulas Patocka dm_sync_table(md); 22062ca3310eSAlasdair G Kergon 22072a2a4c51SJens Axboe out: 2208042d2a9bSAlasdair G Kergon return old_map; 22091da177e4SLinus Torvalds } 22101da177e4SLinus Torvalds 2211a7940155SAlasdair G Kergon /* 2212a7940155SAlasdair G Kergon * Returns unbound table for the caller to free. 2213a7940155SAlasdair G Kergon */ 2214a7940155SAlasdair G Kergon static struct dm_table *__unbind(struct mapped_device *md) 22151da177e4SLinus Torvalds { 2216a12f5d48SEric Dumazet struct dm_table *map = rcu_dereference_protected(md->map, 1); 22171da177e4SLinus Torvalds 22181da177e4SLinus Torvalds if (!map) 2219a7940155SAlasdair G Kergon return NULL; 22201da177e4SLinus Torvalds 22211da177e4SLinus Torvalds dm_table_event_callback(map, NULL, NULL); 22229cdb8520SMonam Agarwal RCU_INIT_POINTER(md->map, NULL); 222383d5e5b0SMikulas Patocka dm_sync_table(md); 2224a7940155SAlasdair G Kergon 2225a7940155SAlasdair G Kergon return map; 22261da177e4SLinus Torvalds } 22271da177e4SLinus Torvalds 22281da177e4SLinus Torvalds /* 22291da177e4SLinus Torvalds * Constructor for a new device. 22301da177e4SLinus Torvalds */ 22312b06cfffSAlasdair G Kergon int dm_create(int minor, struct mapped_device **result) 22321da177e4SLinus Torvalds { 2233c12c9a3cSMike Snitzer int r; 22341da177e4SLinus Torvalds struct mapped_device *md; 22351da177e4SLinus Torvalds 22362b06cfffSAlasdair G Kergon md = alloc_dev(minor); 22371da177e4SLinus Torvalds if (!md) 22381da177e4SLinus Torvalds return -ENXIO; 22391da177e4SLinus Torvalds 2240c12c9a3cSMike Snitzer r = dm_sysfs_init(md); 2241c12c9a3cSMike Snitzer if (r) { 2242c12c9a3cSMike Snitzer free_dev(md); 2243c12c9a3cSMike Snitzer return r; 2244c12c9a3cSMike Snitzer } 2245784aae73SMilan Broz 22461da177e4SLinus Torvalds *result = md; 22471da177e4SLinus Torvalds return 0; 22481da177e4SLinus Torvalds } 22491da177e4SLinus Torvalds 2250a5664dadSMike Snitzer /* 2251a5664dadSMike Snitzer * Functions to manage md->type. 2252a5664dadSMike Snitzer * All are required to hold md->type_lock. 
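 *
 * A typical caller brackets the update like this (an illustrative
 * sketch of a table-load path, not code from this file):
 *
 *	dm_lock_md_type(md);
 *	if (dm_get_md_type(md) == DM_TYPE_NONE)
 *		dm_set_md_type(md, dm_table_get_type(t));
 *	dm_unlock_md_type(md);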
2253a5664dadSMike Snitzer */ 2254a5664dadSMike Snitzer void dm_lock_md_type(struct mapped_device *md) 2255a5664dadSMike Snitzer { 2256a5664dadSMike Snitzer mutex_lock(&md->type_lock); 2257a5664dadSMike Snitzer } 2258a5664dadSMike Snitzer 2259a5664dadSMike Snitzer void dm_unlock_md_type(struct mapped_device *md) 2260a5664dadSMike Snitzer { 2261a5664dadSMike Snitzer mutex_unlock(&md->type_lock); 2262a5664dadSMike Snitzer } 2263a5664dadSMike Snitzer 22647e0d574fSBart Van Assche void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type) 2265a5664dadSMike Snitzer { 226600c4fc3bSMike Snitzer BUG_ON(!mutex_is_locked(&md->type_lock)); 2267a5664dadSMike Snitzer md->type = type; 2268a5664dadSMike Snitzer } 2269a5664dadSMike Snitzer 22707e0d574fSBart Van Assche enum dm_queue_mode dm_get_md_type(struct mapped_device *md) 2271a5664dadSMike Snitzer { 2272a5664dadSMike Snitzer return md->type; 2273a5664dadSMike Snitzer } 2274a5664dadSMike Snitzer 227536a0456fSAlasdair G Kergon struct target_type *dm_get_immutable_target_type(struct mapped_device *md) 227636a0456fSAlasdair G Kergon { 227736a0456fSAlasdair G Kergon return md->immutable_target_type; 227836a0456fSAlasdair G Kergon } 227936a0456fSAlasdair G Kergon 22804a0b4ddfSMike Snitzer /* 2281f84cb8a4SMike Snitzer * The queue_limits are only valid as long as you have a reference 2282f84cb8a4SMike Snitzer * count on 'md'. 2283f84cb8a4SMike Snitzer */ 2284f84cb8a4SMike Snitzer struct queue_limits *dm_get_queue_limits(struct mapped_device *md) 2285f84cb8a4SMike Snitzer { 2286f84cb8a4SMike Snitzer BUG_ON(!atomic_read(&md->holders)); 2287f84cb8a4SMike Snitzer return &md->queue->limits; 2288f84cb8a4SMike Snitzer } 2289f84cb8a4SMike Snitzer EXPORT_SYMBOL_GPL(dm_get_queue_limits); 2290f84cb8a4SMike Snitzer 2291974f51e8SHou Tao static void dm_init_congested_fn(struct mapped_device *md) 2292974f51e8SHou Tao { 2293974f51e8SHou Tao md->queue->backing_dev_info->congested_data = md; 2294974f51e8SHou Tao md->queue->backing_dev_info->congested_fn = dm_any_congested; 2295974f51e8SHou Tao } 2296974f51e8SHou Tao 22974a0b4ddfSMike Snitzer /* 22984a0b4ddfSMike Snitzer * Setup the DM device's queue based on md's type 22994a0b4ddfSMike Snitzer */ 2300591ddcfcSMike Snitzer int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) 23014a0b4ddfSMike Snitzer { 2302bfebd1cdSMike Snitzer int r; 2303c100ec49SMike Snitzer struct queue_limits limits; 23047e0d574fSBart Van Assche enum dm_queue_mode type = dm_get_md_type(md); 2305bfebd1cdSMike Snitzer 2306545ed20eSToshi Kani switch (type) { 2307bfebd1cdSMike Snitzer case DM_TYPE_REQUEST_BASED: 2308e83068a5SMike Snitzer r = dm_mq_init_request_queue(md, t); 2309bfebd1cdSMike Snitzer if (r) { 2310eca7ee6dSMike Snitzer DMERR("Cannot initialize queue for request-based dm-mq mapped device"); 2311bfebd1cdSMike Snitzer return r; 2312bfebd1cdSMike Snitzer } 2313974f51e8SHou Tao dm_init_congested_fn(md); 2314bfebd1cdSMike Snitzer break; 2315bfebd1cdSMike Snitzer case DM_TYPE_BIO_BASED: 2316545ed20eSToshi Kani case DM_TYPE_DAX_BIO_BASED: 2317978e51baSMike Snitzer case DM_TYPE_NVME_BIO_BASED: 2318974f51e8SHou Tao dm_init_congested_fn(md); 2319bfebd1cdSMike Snitzer break; 23207e0d574fSBart Van Assche case DM_TYPE_NONE: 23217e0d574fSBart Van Assche WARN_ON_ONCE(true); 23227e0d574fSBart Van Assche break; 2323ff36ab34SMike Snitzer } 23244a0b4ddfSMike Snitzer 2325c100ec49SMike Snitzer r = dm_calculate_queue_limits(t, &limits); 2326c100ec49SMike Snitzer if (r) { 2327c100ec49SMike Snitzer DMERR("Cannot calculate initial queue limits"); 
2328c100ec49SMike Snitzer return r; 2329c100ec49SMike Snitzer } 2330c100ec49SMike Snitzer dm_table_set_restrictions(t, md->queue, &limits); 2331c100ec49SMike Snitzer blk_register_queue(md->disk); 2332c100ec49SMike Snitzer 23334a0b4ddfSMike Snitzer return 0; 23344a0b4ddfSMike Snitzer } 23354a0b4ddfSMike Snitzer 23362bec1f4aSMikulas Patocka struct mapped_device *dm_get_md(dev_t dev) 23371da177e4SLinus Torvalds { 23381da177e4SLinus Torvalds struct mapped_device *md; 23391da177e4SLinus Torvalds unsigned minor = MINOR(dev); 23401da177e4SLinus Torvalds 23411da177e4SLinus Torvalds if (MAJOR(dev) != _major || minor >= (1 << MINORBITS)) 23421da177e4SLinus Torvalds return NULL; 23431da177e4SLinus Torvalds 2344f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 23451da177e4SLinus Torvalds 23461da177e4SLinus Torvalds md = idr_find(&_minor_idr, minor); 234749de5769SMike Snitzer if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) || 234849de5769SMike Snitzer test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { 2349637842cfSDavid Teigland md = NULL; 2350fba9f90eSJeff Mahoney goto out; 2351fba9f90eSJeff Mahoney } 23522bec1f4aSMikulas Patocka dm_get(md); 2353fba9f90eSJeff Mahoney out: 2354f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 23551da177e4SLinus Torvalds 2356637842cfSDavid Teigland return md; 2357637842cfSDavid Teigland } 23583cf2e4baSAlasdair G Kergon EXPORT_SYMBOL_GPL(dm_get_md); 2359d229a958SDavid Teigland 23609ade92a9SAlasdair G Kergon void *dm_get_mdptr(struct mapped_device *md) 2361637842cfSDavid Teigland { 23629ade92a9SAlasdair G Kergon return md->interface_ptr; 23631da177e4SLinus Torvalds } 23641da177e4SLinus Torvalds 23651da177e4SLinus Torvalds void dm_set_mdptr(struct mapped_device *md, void *ptr) 23661da177e4SLinus Torvalds { 23671da177e4SLinus Torvalds md->interface_ptr = ptr; 23681da177e4SLinus Torvalds } 23691da177e4SLinus Torvalds 23701da177e4SLinus Torvalds void dm_get(struct mapped_device *md) 23711da177e4SLinus Torvalds { 23721da177e4SLinus Torvalds atomic_inc(&md->holders); 23733f77316dSKiyoshi Ueda BUG_ON(test_bit(DMF_FREEING, &md->flags)); 23741da177e4SLinus Torvalds } 23751da177e4SLinus Torvalds 237609ee96b2SMikulas Patocka int dm_hold(struct mapped_device *md) 237709ee96b2SMikulas Patocka { 237809ee96b2SMikulas Patocka spin_lock(&_minor_lock); 237909ee96b2SMikulas Patocka if (test_bit(DMF_FREEING, &md->flags)) { 238009ee96b2SMikulas Patocka spin_unlock(&_minor_lock); 238109ee96b2SMikulas Patocka return -EBUSY; 238209ee96b2SMikulas Patocka } 238309ee96b2SMikulas Patocka dm_get(md); 238409ee96b2SMikulas Patocka spin_unlock(&_minor_lock); 238509ee96b2SMikulas Patocka return 0; 238609ee96b2SMikulas Patocka } 238709ee96b2SMikulas Patocka EXPORT_SYMBOL_GPL(dm_hold); 238809ee96b2SMikulas Patocka 238972d94861SAlasdair G Kergon const char *dm_device_name(struct mapped_device *md) 239072d94861SAlasdair G Kergon { 239172d94861SAlasdair G Kergon return md->name; 239272d94861SAlasdair G Kergon } 239372d94861SAlasdair G Kergon EXPORT_SYMBOL_GPL(dm_device_name); 239472d94861SAlasdair G Kergon 23953f77316dSKiyoshi Ueda static void __dm_destroy(struct mapped_device *md, bool wait) 23961da177e4SLinus Torvalds { 23971134e5aeSMike Anderson struct dm_table *map; 239883d5e5b0SMikulas Patocka int srcu_idx; 23991da177e4SLinus Torvalds 24003f77316dSKiyoshi Ueda might_sleep(); 2401fba9f90eSJeff Mahoney 240263a4f065SMike Snitzer spin_lock(&_minor_lock); 24033f77316dSKiyoshi Ueda idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); 2404fba9f90eSJeff Mahoney 
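/* With DMF_FREEING set under _minor_lock, dm_get_md() and dm_hold() above refuse to take any new reference on this device. */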
set_bit(DMF_FREEING, &md->flags); 2405f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 24063f77316dSKiyoshi Ueda 2407c12c9a3cSMike Snitzer blk_set_queue_dying(md->queue); 24083b785fbcSBart Van Assche 2409ab7c7bb6SMikulas Patocka /* 2410ab7c7bb6SMikulas Patocka * Take suspend_lock so that presuspend and postsuspend methods 2411ab7c7bb6SMikulas Patocka * do not race with internal suspend. 2412ab7c7bb6SMikulas Patocka */ 2413ab7c7bb6SMikulas Patocka mutex_lock(&md->suspend_lock); 24142a708cffSJunichi Nomura map = dm_get_live_table(md, &srcu_idx); 24154f186f8bSKiyoshi Ueda if (!dm_suspended_md(md)) { 24161da177e4SLinus Torvalds dm_table_presuspend_targets(map); 2417adc0daadSMikulas Patocka set_bit(DMF_SUSPENDED, &md->flags); 24181da177e4SLinus Torvalds dm_table_postsuspend_targets(map); 24191da177e4SLinus Torvalds } 242083d5e5b0SMikulas Patocka /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ 242183d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 24222a708cffSJunichi Nomura mutex_unlock(&md->suspend_lock); 242383d5e5b0SMikulas Patocka 24243f77316dSKiyoshi Ueda /* 24253f77316dSKiyoshi Ueda * Rare, but there may be I/O requests still going to complete, 24263f77316dSKiyoshi Ueda * for example. Wait for all references to disappear. 24273f77316dSKiyoshi Ueda * No one should increment the reference count of the mapped_device 24283f77316dSKiyoshi Ueda * after the mapped_device state becomes DMF_FREEING. 24293f77316dSKiyoshi Ueda */ 24303f77316dSKiyoshi Ueda if (wait) 24313f77316dSKiyoshi Ueda while (atomic_read(&md->holders)) 24323f77316dSKiyoshi Ueda msleep(1); 24333f77316dSKiyoshi Ueda else if (atomic_read(&md->holders)) 24343f77316dSKiyoshi Ueda DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)", 24353f77316dSKiyoshi Ueda dm_device_name(md), atomic_read(&md->holders)); 24363f77316dSKiyoshi Ueda 2437784aae73SMilan Broz dm_sysfs_exit(md); 2438a7940155SAlasdair G Kergon dm_table_destroy(__unbind(md)); 24391da177e4SLinus Torvalds free_dev(md); 24401da177e4SLinus Torvalds } 24413f77316dSKiyoshi Ueda 24423f77316dSKiyoshi Ueda void dm_destroy(struct mapped_device *md) 24433f77316dSKiyoshi Ueda { 24443f77316dSKiyoshi Ueda __dm_destroy(md, true); 24453f77316dSKiyoshi Ueda } 24463f77316dSKiyoshi Ueda 24473f77316dSKiyoshi Ueda void dm_destroy_immediate(struct mapped_device *md) 24483f77316dSKiyoshi Ueda { 24493f77316dSKiyoshi Ueda __dm_destroy(md, false); 24503f77316dSKiyoshi Ueda } 24513f77316dSKiyoshi Ueda 24523f77316dSKiyoshi Ueda void dm_put(struct mapped_device *md) 24533f77316dSKiyoshi Ueda { 24543f77316dSKiyoshi Ueda atomic_dec(&md->holders); 24551da177e4SLinus Torvalds } 245679eb885cSEdward Goggin EXPORT_SYMBOL_GPL(dm_put); 24571da177e4SLinus Torvalds 2458b48633f8SBart Van Assche static int dm_wait_for_completion(struct mapped_device *md, long task_state) 245946125c1cSMilan Broz { 246046125c1cSMilan Broz int r = 0; 24619f4c3f87SBart Van Assche DEFINE_WAIT(wait); 246246125c1cSMilan Broz 246346125c1cSMilan Broz while (1) { 24649f4c3f87SBart Van Assche prepare_to_wait(&md->wait, &wait, task_state); 246546125c1cSMilan Broz 2466b4324feeSKiyoshi Ueda if (!md_in_flight(md)) 246746125c1cSMilan Broz break; 246846125c1cSMilan Broz 2469e3fabdfdSBart Van Assche if (signal_pending_state(task_state, current)) { 247046125c1cSMilan Broz r = -EINTR; 247146125c1cSMilan Broz break; 247246125c1cSMilan Broz } 247346125c1cSMilan Broz 247446125c1cSMilan Broz io_schedule(); 247546125c1cSMilan Broz } 24769f4c3f87SBart Van Assche finish_wait(&md->wait, &wait);
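/* r is 0 once all in-flight I/O has completed, or -EINTR if a signal arrived first. */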
2477b44ebeb0SMikulas Patocka 247846125c1cSMilan Broz return r; 247946125c1cSMilan Broz } 248046125c1cSMilan Broz 24811da177e4SLinus Torvalds /* 24821da177e4SLinus Torvalds * Process the deferred bios 24831da177e4SLinus Torvalds */ 2484ef208587SMikulas Patocka static void dm_wq_work(struct work_struct *work) 24851da177e4SLinus Torvalds { 2486ef208587SMikulas Patocka struct mapped_device *md = container_of(work, struct mapped_device, 2487ef208587SMikulas Patocka work); 24886d6f10dfSMilan Broz struct bio *c; 248983d5e5b0SMikulas Patocka int srcu_idx; 249083d5e5b0SMikulas Patocka struct dm_table *map; 24911da177e4SLinus Torvalds 249283d5e5b0SMikulas Patocka map = dm_get_live_table(md, &srcu_idx); 2493ef208587SMikulas Patocka 24943b00b203SMikulas Patocka while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 2495022c2611SMikulas Patocka spin_lock_irq(&md->deferred_lock); 2496022c2611SMikulas Patocka c = bio_list_pop(&md->deferred); 2497022c2611SMikulas Patocka spin_unlock_irq(&md->deferred_lock); 2498022c2611SMikulas Patocka 24996a8736d1STejun Heo if (!c) 2500df12ee99SAlasdair G Kergon break; 250173d410c0SMilan Broz 2502e6ee8c0bSKiyoshi Ueda if (dm_request_based(md)) 2503*ed00aabdSChristoph Hellwig (void) submit_bio_noacct(c); 2504af7e466aSMikulas Patocka else 25056548c7c5SMike Snitzer (void) dm_process_bio(md, map, c); 2506e6ee8c0bSKiyoshi Ueda } 25073b00b203SMikulas Patocka 250883d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 25091da177e4SLinus Torvalds } 25101da177e4SLinus Torvalds 25119a1fb464SMikulas Patocka static void dm_queue_flush(struct mapped_device *md) 2512304f3f6aSMilan Broz { 25133b00b203SMikulas Patocka clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 25144e857c58SPeter Zijlstra smp_mb__after_atomic(); 251553d5914fSMikulas Patocka queue_work(md->wq, &md->work); 2516304f3f6aSMilan Broz } 2517304f3f6aSMilan Broz 25181da177e4SLinus Torvalds /* 2519042d2a9bSAlasdair G Kergon * Swap in a new table, returning the old one for the caller to destroy. 25201da177e4SLinus Torvalds */ 2521042d2a9bSAlasdair G Kergon struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) 25221da177e4SLinus Torvalds { 252387eb5b21SMike Christie struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL); 2524754c5fc7SMike Snitzer struct queue_limits limits; 2525042d2a9bSAlasdair G Kergon int r; 25261da177e4SLinus Torvalds 2527e61290a4SDaniel Walker mutex_lock(&md->suspend_lock); 25281da177e4SLinus Torvalds 25291da177e4SLinus Torvalds /* device must be suspended */ 25304f186f8bSKiyoshi Ueda if (!dm_suspended_md(md)) 253193c534aeSAlasdair G Kergon goto out; 25321da177e4SLinus Torvalds 25333ae70656SMike Snitzer /* 25343ae70656SMike Snitzer * If the new table has no data devices, retain the existing limits. 25353ae70656SMike Snitzer * This helps multipath with queue_if_no_path if all paths disappear, 25363ae70656SMike Snitzer * then new I/O is queued based on these limits, and then some paths 25373ae70656SMike Snitzer * reappear. 
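 * (On error an ERR_PTR is returned instead: -EINVAL if the device is not
 * suspended, or the error from dm_calculate_queue_limits() below.)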
25383ae70656SMike Snitzer */ 25393ae70656SMike Snitzer if (dm_table_has_no_data_devices(table)) { 254083d5e5b0SMikulas Patocka live_map = dm_get_live_table_fast(md); 25413ae70656SMike Snitzer if (live_map) 25423ae70656SMike Snitzer limits = md->queue->limits; 254383d5e5b0SMikulas Patocka dm_put_live_table_fast(md); 25443ae70656SMike Snitzer } 25453ae70656SMike Snitzer 254687eb5b21SMike Christie if (!live_map) { 2547754c5fc7SMike Snitzer r = dm_calculate_queue_limits(table, &limits); 2548042d2a9bSAlasdair G Kergon if (r) { 2549042d2a9bSAlasdair G Kergon map = ERR_PTR(r); 2550754c5fc7SMike Snitzer goto out; 2551042d2a9bSAlasdair G Kergon } 255287eb5b21SMike Christie } 2553754c5fc7SMike Snitzer 2554042d2a9bSAlasdair G Kergon map = __bind(md, table, &limits); 255562e08243SMikulas Patocka dm_issue_global_event(); 25561da177e4SLinus Torvalds 255793c534aeSAlasdair G Kergon out: 2558e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock); 2559042d2a9bSAlasdair G Kergon return map; 25601da177e4SLinus Torvalds } 25611da177e4SLinus Torvalds 25621da177e4SLinus Torvalds /* 25631da177e4SLinus Torvalds * Functions to lock and unlock any filesystem running on the 25641da177e4SLinus Torvalds * device. 25651da177e4SLinus Torvalds */ 25662ca3310eSAlasdair G Kergon static int lock_fs(struct mapped_device *md) 25671da177e4SLinus Torvalds { 2568e39e2e95SAlasdair G Kergon int r; 25691da177e4SLinus Torvalds 25701da177e4SLinus Torvalds WARN_ON(md->frozen_sb); 2571dfbe03f6SAlasdair G Kergon 2572db8fef4fSMikulas Patocka md->frozen_sb = freeze_bdev(md->bdev); 2573dfbe03f6SAlasdair G Kergon if (IS_ERR(md->frozen_sb)) { 2574cf222b37SAlasdair G Kergon r = PTR_ERR(md->frozen_sb); 2575e39e2e95SAlasdair G Kergon md->frozen_sb = NULL; 2576e39e2e95SAlasdair G Kergon return r; 2577dfbe03f6SAlasdair G Kergon } 2578dfbe03f6SAlasdair G Kergon 2579aa8d7c2fSAlasdair G Kergon set_bit(DMF_FROZEN, &md->flags); 2580aa8d7c2fSAlasdair G Kergon 25811da177e4SLinus Torvalds return 0; 25821da177e4SLinus Torvalds } 25831da177e4SLinus Torvalds 25842ca3310eSAlasdair G Kergon static void unlock_fs(struct mapped_device *md) 25851da177e4SLinus Torvalds { 2586aa8d7c2fSAlasdair G Kergon if (!test_bit(DMF_FROZEN, &md->flags)) 2587aa8d7c2fSAlasdair G Kergon return; 2588aa8d7c2fSAlasdair G Kergon 2589db8fef4fSMikulas Patocka thaw_bdev(md->bdev, md->frozen_sb); 25901da177e4SLinus Torvalds md->frozen_sb = NULL; 2591aa8d7c2fSAlasdair G Kergon clear_bit(DMF_FROZEN, &md->flags); 25921da177e4SLinus Torvalds } 25931da177e4SLinus Torvalds 25941da177e4SLinus Torvalds /* 2595b48633f8SBart Van Assche * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG 2596b48633f8SBart Van Assche * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE 2597b48633f8SBart Van Assche * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY 2598b48633f8SBart Van Assche * 2599ffcc3936SMike Snitzer * If __dm_suspend returns 0, the device is completely quiescent 2600ffcc3936SMike Snitzer * now. There is no request-processing activity. All new requests 2601ffcc3936SMike Snitzer * are being added to md->deferred list. 
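 *
 * For reference, dm_suspend() below invokes this as:
 *	__dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);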
2602cec47e3dSKiyoshi Ueda */ 2603ffcc3936SMike Snitzer static int __dm_suspend(struct mapped_device *md, struct dm_table *map, 2604b48633f8SBart Van Assche unsigned suspend_flags, long task_state, 2605eaf9a736SMike Snitzer int dmf_suspended_flag) 26061da177e4SLinus Torvalds { 2607ffcc3936SMike Snitzer bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG; 2608ffcc3936SMike Snitzer bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG; 2609ffcc3936SMike Snitzer int r; 2610cf222b37SAlasdair G Kergon 26115a8f1f80SBart Van Assche lockdep_assert_held(&md->suspend_lock); 26125a8f1f80SBart Van Assche 26132e93ccc1SKiyoshi Ueda /* 26142e93ccc1SKiyoshi Ueda * DMF_NOFLUSH_SUSPENDING must be set before presuspend. 26152e93ccc1SKiyoshi Ueda * This flag is cleared before dm_suspend returns. 26162e93ccc1SKiyoshi Ueda */ 26172e93ccc1SKiyoshi Ueda if (noflush) 26182e93ccc1SKiyoshi Ueda set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 261986331f39SBart Van Assche else 2620ac75b09fSMike Snitzer DMDEBUG("%s: suspending with flush", dm_device_name(md)); 26212e93ccc1SKiyoshi Ueda 2622d67ee213SMike Snitzer /* 2623d67ee213SMike Snitzer * This gets reverted if there's an error later and the targets 2624d67ee213SMike Snitzer * provide the .presuspend_undo hook. 2625d67ee213SMike Snitzer */ 26261da177e4SLinus Torvalds dm_table_presuspend_targets(map); 26271da177e4SLinus Torvalds 26282e93ccc1SKiyoshi Ueda /* 26299f518b27SKiyoshi Ueda * Flush I/O to the device. 26309f518b27SKiyoshi Ueda * Any I/O submitted after lock_fs() may not be flushed. 26319f518b27SKiyoshi Ueda * noflush takes precedence over do_lockfs. 26329f518b27SKiyoshi Ueda * (lock_fs() flushes I/Os and waits for them to complete.) 26332e93ccc1SKiyoshi Ueda */ 263432a926daSMikulas Patocka if (!noflush && do_lockfs) { 26352ca3310eSAlasdair G Kergon r = lock_fs(md); 2636d67ee213SMike Snitzer if (r) { 2637d67ee213SMike Snitzer dm_table_presuspend_undo_targets(map); 2638ffcc3936SMike Snitzer return r; 2639aa8d7c2fSAlasdair G Kergon } 2640d67ee213SMike Snitzer } 26411da177e4SLinus Torvalds 26421da177e4SLinus Torvalds /* 26433b00b203SMikulas Patocka * Here we must make sure that no processes are submitting requests 26443b00b203SMikulas Patocka * to target drivers i.e. no one may be executing 26453b00b203SMikulas Patocka * __split_and_process_bio. This is called from dm_submit_bio and 26463b00b203SMikulas Patocka * dm_wq_work. 26473b00b203SMikulas Patocka * 26483b00b203SMikulas Patocka * To get all processes out of __split_and_process_bio in dm_submit_bio, 26493b00b203SMikulas Patocka * we take the write lock. To prevent any process from reentering 26503b00b203SMikulas Patocka * __split_and_process_bio from dm_submit_bio and quiesce the thread 26516a8736d1STejun Heo * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call 26526a8736d1STejun Heo * flush_workqueue(md->wq). 26531da177e4SLinus Torvalds */ 26541eb787ecSAlasdair G Kergon set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 265541abc4e1SHannes Reinecke if (map) 265683d5e5b0SMikulas Patocka synchronize_srcu(&md->io_barrier); 26571da177e4SLinus Torvalds 2658d0bcb878SKiyoshi Ueda /* 265929e4013dSTejun Heo * Stop md->queue before flushing md->wq in case request-based 266029e4013dSTejun Heo * dm defers requests to md->wq from md->queue.
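 * (dm_start_queue() restarts it in the error path below and in __dm_resume().)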
2661d0bcb878SKiyoshi Ueda */ 26626a23e05cSJens Axboe if (dm_request_based(md)) 2663eca7ee6dSMike Snitzer dm_stop_queue(md->queue); 2664cec47e3dSKiyoshi Ueda 2665d0bcb878SKiyoshi Ueda flush_workqueue(md->wq); 2666d0bcb878SKiyoshi Ueda 26671da177e4SLinus Torvalds /* 26683b00b203SMikulas Patocka * At this point no more requests are entering target request routines. 26693b00b203SMikulas Patocka * We call dm_wait_for_completion to wait for all existing requests 26703b00b203SMikulas Patocka * to finish. 26711da177e4SLinus Torvalds */ 2672b48633f8SBart Van Assche r = dm_wait_for_completion(md, task_state); 2673eaf9a736SMike Snitzer if (!r) 2674eaf9a736SMike Snitzer set_bit(dmf_suspended_flag, &md->flags); 26751da177e4SLinus Torvalds 26766d6f10dfSMilan Broz if (noflush) 2677022c2611SMikulas Patocka clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 267841abc4e1SHannes Reinecke if (map) 267983d5e5b0SMikulas Patocka synchronize_srcu(&md->io_barrier); 26802e93ccc1SKiyoshi Ueda 26811da177e4SLinus Torvalds /* were we interrupted ? */ 268246125c1cSMilan Broz if (r < 0) { 26839a1fb464SMikulas Patocka dm_queue_flush(md); 268473d410c0SMilan Broz 2685cec47e3dSKiyoshi Ueda if (dm_request_based(md)) 2686eca7ee6dSMike Snitzer dm_start_queue(md->queue); 2687cec47e3dSKiyoshi Ueda 26882ca3310eSAlasdair G Kergon unlock_fs(md); 2689d67ee213SMike Snitzer dm_table_presuspend_undo_targets(map); 2690ffcc3936SMike Snitzer /* pushback list is already flushed, so skip flush */ 2691ffcc3936SMike Snitzer } 2692ffcc3936SMike Snitzer 2693ffcc3936SMike Snitzer return r; 26942ca3310eSAlasdair G Kergon } 26952ca3310eSAlasdair G Kergon 26963b00b203SMikulas Patocka /* 2697ffcc3936SMike Snitzer * We need to be able to change a mapping table under a mounted 2698ffcc3936SMike Snitzer * filesystem. For example we might want to move some data in 2699ffcc3936SMike Snitzer * the background. Before the table can be swapped with 2700ffcc3936SMike Snitzer * dm_bind_table, dm_suspend must be called to flush any in 2701ffcc3936SMike Snitzer * flight bios and ensure that any further io gets deferred. 27023b00b203SMikulas Patocka */ 2703ffcc3936SMike Snitzer /* 2704ffcc3936SMike Snitzer * Suspend mechanism in request-based dm. 2705ffcc3936SMike Snitzer * 2706ffcc3936SMike Snitzer * 1. Flush all I/Os by lock_fs() if needed. 2707ffcc3936SMike Snitzer * 2. Stop dispatching any I/O by stopping the request_queue. 2708ffcc3936SMike Snitzer * 3. Wait for all in-flight I/Os to be completed or requeued. 2709ffcc3936SMike Snitzer * 2710ffcc3936SMike Snitzer * To abort suspend, start the request_queue. 
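 *
 * A minimal caller sketch (hypothetical, for illustration only):
 *	r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
 *	if (!r) {
 *		old_map = dm_swap_table(md, new_table);
 *		r = dm_resume(md);
 *	}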
2711ffcc3936SMike Snitzer */ 2712ffcc3936SMike Snitzer int dm_suspend(struct mapped_device *md, unsigned suspend_flags) 2713ffcc3936SMike Snitzer { 2714ffcc3936SMike Snitzer struct dm_table *map = NULL; 2715ffcc3936SMike Snitzer int r = 0; 2716ffcc3936SMike Snitzer 2717ffcc3936SMike Snitzer retry: 2718ffcc3936SMike Snitzer mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); 2719ffcc3936SMike Snitzer 2720ffcc3936SMike Snitzer if (dm_suspended_md(md)) { 2721ffcc3936SMike Snitzer r = -EINVAL; 2722ffcc3936SMike Snitzer goto out_unlock; 2723ffcc3936SMike Snitzer } 2724ffcc3936SMike Snitzer 2725ffcc3936SMike Snitzer if (dm_suspended_internally_md(md)) { 2726ffcc3936SMike Snitzer /* already internally suspended, wait for internal resume */ 2727ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 2728ffcc3936SMike Snitzer r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); 2729ffcc3936SMike Snitzer if (r) 2730ffcc3936SMike Snitzer return r; 2731ffcc3936SMike Snitzer goto retry; 2732ffcc3936SMike Snitzer } 2733ffcc3936SMike Snitzer 2734a12f5d48SEric Dumazet map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2735ffcc3936SMike Snitzer 2736eaf9a736SMike Snitzer r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED); 2737ffcc3936SMike Snitzer if (r) 2738ffcc3936SMike Snitzer goto out_unlock; 27393b00b203SMikulas Patocka 27404d4471cbSKiyoshi Ueda dm_table_postsuspend_targets(map); 27414d4471cbSKiyoshi Ueda 2742d287483dSAlasdair G Kergon out_unlock: 2743e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock); 2744cf222b37SAlasdair G Kergon return r; 27451da177e4SLinus Torvalds } 27461da177e4SLinus Torvalds 2747ffcc3936SMike Snitzer static int __dm_resume(struct mapped_device *md, struct dm_table *map) 27481da177e4SLinus Torvalds { 2749ffcc3936SMike Snitzer if (map) { 2750ffcc3936SMike Snitzer int r = dm_table_resume_targets(map); 27518757b776SMilan Broz if (r) 2752ffcc3936SMike Snitzer return r; 2753ffcc3936SMike Snitzer } 27542ca3310eSAlasdair G Kergon 27559a1fb464SMikulas Patocka dm_queue_flush(md); 27562ca3310eSAlasdair G Kergon 2757cec47e3dSKiyoshi Ueda /* 2758cec47e3dSKiyoshi Ueda * Flushing deferred I/Os must be done after targets are resumed 2759cec47e3dSKiyoshi Ueda * so that mapping of targets can work correctly. 2760cec47e3dSKiyoshi Ueda * Request-based dm is queueing the deferred I/Os in its request_queue. 
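 * Bio-based dm instead drains md->deferred through dm_wq_work() once
 * dm_queue_flush() has cleared DMF_BLOCK_IO_FOR_SUSPEND.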
2761cec47e3dSKiyoshi Ueda */ 2762cec47e3dSKiyoshi Ueda if (dm_request_based(md)) 2763eca7ee6dSMike Snitzer dm_start_queue(md->queue); 2764cec47e3dSKiyoshi Ueda 27652ca3310eSAlasdair G Kergon unlock_fs(md); 27662ca3310eSAlasdair G Kergon 2767ffcc3936SMike Snitzer return 0; 2768ffcc3936SMike Snitzer } 2769ffcc3936SMike Snitzer 2770ffcc3936SMike Snitzer int dm_resume(struct mapped_device *md) 2771ffcc3936SMike Snitzer { 27728dc23658SMinfei Huang int r; 2773ffcc3936SMike Snitzer struct dm_table *map = NULL; 2774ffcc3936SMike Snitzer 2775ffcc3936SMike Snitzer retry: 27768dc23658SMinfei Huang r = -EINVAL; 2777ffcc3936SMike Snitzer mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); 2778ffcc3936SMike Snitzer 2779ffcc3936SMike Snitzer if (!dm_suspended_md(md)) 2780ffcc3936SMike Snitzer goto out; 2781ffcc3936SMike Snitzer 2782ffcc3936SMike Snitzer if (dm_suspended_internally_md(md)) { 2783ffcc3936SMike Snitzer /* already internally suspended, wait for internal resume */ 2784ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 2785ffcc3936SMike Snitzer r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); 2786ffcc3936SMike Snitzer if (r) 2787ffcc3936SMike Snitzer return r; 2788ffcc3936SMike Snitzer goto retry; 2789ffcc3936SMike Snitzer } 2790ffcc3936SMike Snitzer 2791a12f5d48SEric Dumazet map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2792ffcc3936SMike Snitzer if (!map || !dm_table_get_size(map)) 2793ffcc3936SMike Snitzer goto out; 2794ffcc3936SMike Snitzer 2795ffcc3936SMike Snitzer r = __dm_resume(md, map); 2796ffcc3936SMike Snitzer if (r) 2797ffcc3936SMike Snitzer goto out; 2798ffcc3936SMike Snitzer 27992ca3310eSAlasdair G Kergon clear_bit(DMF_SUSPENDED, &md->flags); 2800cf222b37SAlasdair G Kergon out: 2801e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock); 28022ca3310eSAlasdair G Kergon 2803cf222b37SAlasdair G Kergon return r; 28041da177e4SLinus Torvalds } 28051da177e4SLinus Torvalds 2806fd2ed4d2SMikulas Patocka /* 2807fd2ed4d2SMikulas Patocka * Internal suspend/resume works like userspace-driven suspend. It waits 2808fd2ed4d2SMikulas Patocka * until all bios finish and prevents issuing new bios to the target drivers. 2809fd2ed4d2SMikulas Patocka * It may be used only from the kernel. 2810fd2ed4d2SMikulas Patocka */ 2811fd2ed4d2SMikulas Patocka 2812ffcc3936SMike Snitzer static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags) 2813ffcc3936SMike Snitzer { 2814ffcc3936SMike Snitzer struct dm_table *map = NULL; 2815ffcc3936SMike Snitzer 28161ea0654eSBart Van Assche lockdep_assert_held(&md->suspend_lock); 28171ea0654eSBart Van Assche 281896b26c8cSMikulas Patocka if (md->internal_suspend_count++) 2819ffcc3936SMike Snitzer return; /* nested internal suspend */ 2820ffcc3936SMike Snitzer 2821ffcc3936SMike Snitzer if (dm_suspended_md(md)) { 2822ffcc3936SMike Snitzer set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 2823ffcc3936SMike Snitzer return; /* nest suspend */ 2824ffcc3936SMike Snitzer } 2825ffcc3936SMike Snitzer 2826a12f5d48SEric Dumazet map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2827ffcc3936SMike Snitzer 2828ffcc3936SMike Snitzer /* 2829ffcc3936SMike Snitzer * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is 2830ffcc3936SMike Snitzer * supported. 
Properly supporting a TASK_INTERRUPTIBLE internal suspend 2831ffcc3936SMike Snitzer * would require changing .presuspend to return an error -- avoid this 2832ffcc3936SMike Snitzer * until there is a need for more elaborate variants of internal suspend. 2833ffcc3936SMike Snitzer */ 2834eaf9a736SMike Snitzer (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE, 2835eaf9a736SMike Snitzer DMF_SUSPENDED_INTERNALLY); 2836ffcc3936SMike Snitzer 2837ffcc3936SMike Snitzer dm_table_postsuspend_targets(map); 2838ffcc3936SMike Snitzer } 2839ffcc3936SMike Snitzer 2840ffcc3936SMike Snitzer static void __dm_internal_resume(struct mapped_device *md) 2841ffcc3936SMike Snitzer { 284296b26c8cSMikulas Patocka BUG_ON(!md->internal_suspend_count); 284396b26c8cSMikulas Patocka 284496b26c8cSMikulas Patocka if (--md->internal_suspend_count) 2845ffcc3936SMike Snitzer return; /* resume from nested internal suspend */ 2846ffcc3936SMike Snitzer 2847ffcc3936SMike Snitzer if (dm_suspended_md(md)) 2848ffcc3936SMike Snitzer goto done; /* resume from nested suspend */ 2849ffcc3936SMike Snitzer 2850ffcc3936SMike Snitzer /* 2851ffcc3936SMike Snitzer * NOTE: existing callers don't need to call dm_table_resume_targets 2852ffcc3936SMike Snitzer * (which may fail -- so best to avoid it for now by passing NULL map) 2853ffcc3936SMike Snitzer */ 2854ffcc3936SMike Snitzer (void) __dm_resume(md, NULL); 2855ffcc3936SMike Snitzer 2856ffcc3936SMike Snitzer done: 2857ffcc3936SMike Snitzer clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 2858ffcc3936SMike Snitzer smp_mb__after_atomic(); 2859ffcc3936SMike Snitzer wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY); 2860ffcc3936SMike Snitzer } 2861ffcc3936SMike Snitzer 2862ffcc3936SMike Snitzer void dm_internal_suspend_noflush(struct mapped_device *md) 2863fd2ed4d2SMikulas Patocka { 2864fd2ed4d2SMikulas Patocka mutex_lock(&md->suspend_lock); 2865ffcc3936SMike Snitzer __dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG); 2866ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 2867ffcc3936SMike Snitzer } 2868ffcc3936SMike Snitzer EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush); 2869ffcc3936SMike Snitzer 2870ffcc3936SMike Snitzer void dm_internal_resume(struct mapped_device *md) 2871ffcc3936SMike Snitzer { 2872ffcc3936SMike Snitzer mutex_lock(&md->suspend_lock); 2873ffcc3936SMike Snitzer __dm_internal_resume(md); 2874ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 2875ffcc3936SMike Snitzer } 2876ffcc3936SMike Snitzer EXPORT_SYMBOL_GPL(dm_internal_resume); 2877ffcc3936SMike Snitzer 2878ffcc3936SMike Snitzer /* 2879ffcc3936SMike Snitzer * Fast variants of internal suspend/resume hold md->suspend_lock, 2880ffcc3936SMike Snitzer * which prevents interaction with userspace-driven suspend. 
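 *
 * dm_internal_suspend_fast() returns with md->suspend_lock held and
 * dm_internal_resume_fast() releases it, so the calls must be paired:
 *	dm_internal_suspend_fast(md);
 *	... operate on the quiesced device ...
 *	dm_internal_resume_fast(md);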
2881ffcc3936SMike Snitzer */ 2882ffcc3936SMike Snitzer 2883ffcc3936SMike Snitzer void dm_internal_suspend_fast(struct mapped_device *md) 2884ffcc3936SMike Snitzer { 2885ffcc3936SMike Snitzer mutex_lock(&md->suspend_lock); 2886ffcc3936SMike Snitzer if (dm_suspended_md(md) || dm_suspended_internally_md(md)) 2887fd2ed4d2SMikulas Patocka return; 2888fd2ed4d2SMikulas Patocka 2889fd2ed4d2SMikulas Patocka set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 2890fd2ed4d2SMikulas Patocka synchronize_srcu(&md->io_barrier); 2891fd2ed4d2SMikulas Patocka flush_workqueue(md->wq); 2892fd2ed4d2SMikulas Patocka dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); 2893fd2ed4d2SMikulas Patocka } 2894b735fedeSMikulas Patocka EXPORT_SYMBOL_GPL(dm_internal_suspend_fast); 2895fd2ed4d2SMikulas Patocka 2896ffcc3936SMike Snitzer void dm_internal_resume_fast(struct mapped_device *md) 2897fd2ed4d2SMikulas Patocka { 2898ffcc3936SMike Snitzer if (dm_suspended_md(md) || dm_suspended_internally_md(md)) 2899fd2ed4d2SMikulas Patocka goto done; 2900fd2ed4d2SMikulas Patocka 2901fd2ed4d2SMikulas Patocka dm_queue_flush(md); 2902fd2ed4d2SMikulas Patocka 2903fd2ed4d2SMikulas Patocka done: 2904fd2ed4d2SMikulas Patocka mutex_unlock(&md->suspend_lock); 2905fd2ed4d2SMikulas Patocka } 2906b735fedeSMikulas Patocka EXPORT_SYMBOL_GPL(dm_internal_resume_fast); 2907fd2ed4d2SMikulas Patocka 29081da177e4SLinus Torvalds /*----------------------------------------------------------------- 29091da177e4SLinus Torvalds * Event notification. 29101da177e4SLinus Torvalds *---------------------------------------------------------------*/ 29113abf85b5SPeter Rajnoha int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, 291260935eb2SMilan Broz unsigned cookie) 291369267a30SAlasdair G Kergon { 291460935eb2SMilan Broz char udev_cookie[DM_COOKIE_LENGTH]; 291560935eb2SMilan Broz char *envp[] = { udev_cookie, NULL }; 291660935eb2SMilan Broz 291760935eb2SMilan Broz if (!cookie) 29183abf85b5SPeter Rajnoha return kobject_uevent(&disk_to_dev(md->disk)->kobj, action); 291960935eb2SMilan Broz else { 292060935eb2SMilan Broz snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u", 292160935eb2SMilan Broz DM_COOKIE_ENV_VAR_NAME, cookie); 29223abf85b5SPeter Rajnoha return kobject_uevent_env(&disk_to_dev(md->disk)->kobj, 29233abf85b5SPeter Rajnoha action, envp); 292460935eb2SMilan Broz } 292569267a30SAlasdair G Kergon } 292669267a30SAlasdair G Kergon 29277a8c3d3bSMike Anderson uint32_t dm_next_uevent_seq(struct mapped_device *md) 29287a8c3d3bSMike Anderson { 29297a8c3d3bSMike Anderson return atomic_add_return(1, &md->uevent_seq); 29307a8c3d3bSMike Anderson } 29317a8c3d3bSMike Anderson 29321da177e4SLinus Torvalds uint32_t dm_get_event_nr(struct mapped_device *md) 29331da177e4SLinus Torvalds { 29341da177e4SLinus Torvalds return atomic_read(&md->event_nr); 29351da177e4SLinus Torvalds } 29361da177e4SLinus Torvalds 29371da177e4SLinus Torvalds int dm_wait_event(struct mapped_device *md, int event_nr) 29381da177e4SLinus Torvalds { 29391da177e4SLinus Torvalds return wait_event_interruptible(md->eventq, 29401da177e4SLinus Torvalds (event_nr != atomic_read(&md->event_nr))); 29411da177e4SLinus Torvalds } 29421da177e4SLinus Torvalds 29437a8c3d3bSMike Anderson void dm_uevent_add(struct mapped_device *md, struct list_head *elist) 29447a8c3d3bSMike Anderson { 29457a8c3d3bSMike Anderson unsigned long flags; 29467a8c3d3bSMike Anderson 29477a8c3d3bSMike Anderson spin_lock_irqsave(&md->uevent_lock, flags); 29487a8c3d3bSMike Anderson list_add(elist, &md->uevent_list); 
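/* The list is spliced off and delivered later by event_callback() via dm_send_uevents(). */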
29497a8c3d3bSMike Anderson spin_unlock_irqrestore(&md->uevent_lock, flags); 29507a8c3d3bSMike Anderson } 29517a8c3d3bSMike Anderson 29521da177e4SLinus Torvalds /* 29531da177e4SLinus Torvalds * The gendisk is only valid as long as you have a reference 29541da177e4SLinus Torvalds * count on 'md'. 29551da177e4SLinus Torvalds */ 29561da177e4SLinus Torvalds struct gendisk *dm_disk(struct mapped_device *md) 29571da177e4SLinus Torvalds { 29581da177e4SLinus Torvalds return md->disk; 29591da177e4SLinus Torvalds } 296065ff5b7dSSami Tolvanen EXPORT_SYMBOL_GPL(dm_disk); 29611da177e4SLinus Torvalds 2962784aae73SMilan Broz struct kobject *dm_kobject(struct mapped_device *md) 2963784aae73SMilan Broz { 29642995fa78SMikulas Patocka return &md->kobj_holder.kobj; 2965784aae73SMilan Broz } 2966784aae73SMilan Broz 2967784aae73SMilan Broz struct mapped_device *dm_get_from_kobject(struct kobject *kobj) 2968784aae73SMilan Broz { 2969784aae73SMilan Broz struct mapped_device *md; 2970784aae73SMilan Broz 29712995fa78SMikulas Patocka md = container_of(kobj, struct mapped_device, kobj_holder.kobj); 2972784aae73SMilan Broz 2973b9a41d21SHou Tao spin_lock(&_minor_lock); 2974b9a41d21SHou Tao if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { 2975b9a41d21SHou Tao md = NULL; 2976b9a41d21SHou Tao goto out; 2977b9a41d21SHou Tao } 2978784aae73SMilan Broz dm_get(md); 2979b9a41d21SHou Tao out: 2980b9a41d21SHou Tao spin_unlock(&_minor_lock); 2981b9a41d21SHou Tao 2982784aae73SMilan Broz return md; 2983784aae73SMilan Broz } 2984784aae73SMilan Broz 29854f186f8bSKiyoshi Ueda int dm_suspended_md(struct mapped_device *md) 29861da177e4SLinus Torvalds { 29871da177e4SLinus Torvalds return test_bit(DMF_SUSPENDED, &md->flags); 29881da177e4SLinus Torvalds } 29891da177e4SLinus Torvalds 2990ffcc3936SMike Snitzer int dm_suspended_internally_md(struct mapped_device *md) 2991ffcc3936SMike Snitzer { 2992ffcc3936SMike Snitzer return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 2993ffcc3936SMike Snitzer } 2994ffcc3936SMike Snitzer 29952c140a24SMikulas Patocka int dm_test_deferred_remove_flag(struct mapped_device *md) 29962c140a24SMikulas Patocka { 29972c140a24SMikulas Patocka return test_bit(DMF_DEFERRED_REMOVE, &md->flags); 29982c140a24SMikulas Patocka } 29992c140a24SMikulas Patocka 300064dbce58SKiyoshi Ueda int dm_suspended(struct dm_target *ti) 300164dbce58SKiyoshi Ueda { 3002ecdb2e25SKiyoshi Ueda return dm_suspended_md(dm_table_get_md(ti->table)); 300364dbce58SKiyoshi Ueda } 300464dbce58SKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_suspended); 300564dbce58SKiyoshi Ueda 30062e93ccc1SKiyoshi Ueda int dm_noflush_suspending(struct dm_target *ti) 30072e93ccc1SKiyoshi Ueda { 3008ecdb2e25SKiyoshi Ueda return __noflush_suspending(dm_table_get_md(ti->table)); 30092e93ccc1SKiyoshi Ueda } 30102e93ccc1SKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_noflush_suspending); 30112e93ccc1SKiyoshi Ueda 30127e0d574fSBart Van Assche struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type, 30130776aa0eSMike Snitzer unsigned integrity, unsigned per_io_data_size, 30140776aa0eSMike Snitzer unsigned min_pool_size) 3015e6ee8c0bSKiyoshi Ueda { 3016115485e8SMike Snitzer struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id); 301778d8e58aSMike Snitzer unsigned int pool_size = 0; 301864f52b0eSMike Snitzer unsigned int front_pad, io_front_pad; 30196f1c819cSKent Overstreet int ret; 3020e6ee8c0bSKiyoshi Ueda 3021e6ee8c0bSKiyoshi Ueda if (!pools) 30224e6e36c3SMike Snitzer return NULL; 3023e6ee8c0bSKiyoshi Ueda 
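/*
 * Pool size and front padding depend on the queue type: bio-based
 * variants embed struct dm_target_io (and struct dm_io for io_bs) in
 * front of each clone bio, while request-based dm only needs
 * struct dm_rq_clone_bio_info.
 */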
302478d8e58aSMike Snitzer switch (type) { 302578d8e58aSMike Snitzer case DM_TYPE_BIO_BASED: 3026545ed20eSToshi Kani case DM_TYPE_DAX_BIO_BASED: 302722c11858SMike Snitzer case DM_TYPE_NVME_BIO_BASED: 30280776aa0eSMike Snitzer pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size); 302930187e1dSMike Snitzer front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone); 303064f52b0eSMike Snitzer io_front_pad = roundup(front_pad, __alignof__(struct dm_io)) + offsetof(struct dm_io, tio); 30316f1c819cSKent Overstreet ret = bioset_init(&pools->io_bs, pool_size, io_front_pad, 0); 30326f1c819cSKent Overstreet if (ret) 303364f52b0eSMike Snitzer goto out; 30346f1c819cSKent Overstreet if (integrity && bioset_integrity_create(&pools->io_bs, pool_size)) 3035eb8db831SChristoph Hellwig goto out; 303678d8e58aSMike Snitzer break; 303778d8e58aSMike Snitzer case DM_TYPE_REQUEST_BASED: 30380776aa0eSMike Snitzer pool_size = max(dm_get_reserved_rq_based_ios(), min_pool_size); 303978d8e58aSMike Snitzer front_pad = offsetof(struct dm_rq_clone_bio_info, clone); 3040591ddcfcSMike Snitzer /* per_io_data_size is used for blk-mq pdu at queue allocation */ 304178d8e58aSMike Snitzer break; 304278d8e58aSMike Snitzer default: 304378d8e58aSMike Snitzer BUG(); 304478d8e58aSMike Snitzer } 304578d8e58aSMike Snitzer 30466f1c819cSKent Overstreet ret = bioset_init(&pools->bs, pool_size, front_pad, 0); 30476f1c819cSKent Overstreet if (ret) 30485f015204SJun'ichi Nomura goto out; 3049e6ee8c0bSKiyoshi Ueda 30506f1c819cSKent Overstreet if (integrity && bioset_integrity_create(&pools->bs, pool_size)) 30515f015204SJun'ichi Nomura goto out; 3052a91a2785SMartin K. Petersen 3053e6ee8c0bSKiyoshi Ueda return pools; 305478d8e58aSMike Snitzer 30555f015204SJun'ichi Nomura out: 30565f015204SJun'ichi Nomura dm_free_md_mempools(pools); 3057e6ee8c0bSKiyoshi Ueda 30584e6e36c3SMike Snitzer return NULL; 3059e6ee8c0bSKiyoshi Ueda } 3060e6ee8c0bSKiyoshi Ueda 3061e6ee8c0bSKiyoshi Ueda void dm_free_md_mempools(struct dm_md_mempools *pools) 3062e6ee8c0bSKiyoshi Ueda { 3063e6ee8c0bSKiyoshi Ueda if (!pools) 3064e6ee8c0bSKiyoshi Ueda return; 3065e6ee8c0bSKiyoshi Ueda 30666f1c819cSKent Overstreet bioset_exit(&pools->bs); 30676f1c819cSKent Overstreet bioset_exit(&pools->io_bs); 3068e6ee8c0bSKiyoshi Ueda 3069e6ee8c0bSKiyoshi Ueda kfree(pools); 3070e6ee8c0bSKiyoshi Ueda } 3071e6ee8c0bSKiyoshi Ueda 30729c72bad1SChristoph Hellwig struct dm_pr { 30739c72bad1SChristoph Hellwig u64 old_key; 30749c72bad1SChristoph Hellwig u64 new_key; 30759c72bad1SChristoph Hellwig u32 flags; 30769c72bad1SChristoph Hellwig bool fail_early; 30779c72bad1SChristoph Hellwig }; 30789c72bad1SChristoph Hellwig 30799c72bad1SChristoph Hellwig static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn, 30809c72bad1SChristoph Hellwig void *data) 30819c72bad1SChristoph Hellwig { 30829c72bad1SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data; 30839c72bad1SChristoph Hellwig struct dm_table *table; 30849c72bad1SChristoph Hellwig struct dm_target *ti; 30859c72bad1SChristoph Hellwig int ret = -ENOTTY, srcu_idx; 30869c72bad1SChristoph Hellwig 30879c72bad1SChristoph Hellwig table = dm_get_live_table(md, &srcu_idx); 30889c72bad1SChristoph Hellwig if (!table || !dm_table_get_size(table)) 30899c72bad1SChristoph Hellwig goto out; 30909c72bad1SChristoph Hellwig 30919c72bad1SChristoph Hellwig /* We only support devices that have a single target */ 30929c72bad1SChristoph Hellwig if 
(dm_table_get_num_targets(table) != 1) 30939c72bad1SChristoph Hellwig goto out; 30949c72bad1SChristoph Hellwig ti = dm_table_get_target(table, 0); 30959c72bad1SChristoph Hellwig 30969c72bad1SChristoph Hellwig ret = -EINVAL; 30979c72bad1SChristoph Hellwig if (!ti->type->iterate_devices) 30989c72bad1SChristoph Hellwig goto out; 30999c72bad1SChristoph Hellwig 31009c72bad1SChristoph Hellwig ret = ti->type->iterate_devices(ti, fn, data); 31019c72bad1SChristoph Hellwig out: 31029c72bad1SChristoph Hellwig dm_put_live_table(md, srcu_idx); 31039c72bad1SChristoph Hellwig return ret; 31049c72bad1SChristoph Hellwig } 31059c72bad1SChristoph Hellwig 31069c72bad1SChristoph Hellwig /* 31079c72bad1SChristoph Hellwig * For register / unregister we need to manually call out to every path. 31089c72bad1SChristoph Hellwig */ 31099c72bad1SChristoph Hellwig static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev, 31109c72bad1SChristoph Hellwig sector_t start, sector_t len, void *data) 31119c72bad1SChristoph Hellwig { 31129c72bad1SChristoph Hellwig struct dm_pr *pr = data; 31139c72bad1SChristoph Hellwig const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; 31149c72bad1SChristoph Hellwig 31159c72bad1SChristoph Hellwig if (!ops || !ops->pr_register) 31169c72bad1SChristoph Hellwig return -EOPNOTSUPP; 31179c72bad1SChristoph Hellwig return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags); 31189c72bad1SChristoph Hellwig } 31199c72bad1SChristoph Hellwig 312071cdb697SChristoph Hellwig static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key, 312171cdb697SChristoph Hellwig u32 flags) 312271cdb697SChristoph Hellwig { 31239c72bad1SChristoph Hellwig struct dm_pr pr = { 31249c72bad1SChristoph Hellwig .old_key = old_key, 31259c72bad1SChristoph Hellwig .new_key = new_key, 31269c72bad1SChristoph Hellwig .flags = flags, 31279c72bad1SChristoph Hellwig .fail_early = true, 31289c72bad1SChristoph Hellwig }; 31299c72bad1SChristoph Hellwig int ret; 313071cdb697SChristoph Hellwig 31319c72bad1SChristoph Hellwig ret = dm_call_pr(bdev, __dm_pr_register, &pr); 31329c72bad1SChristoph Hellwig if (ret && new_key) { 31339c72bad1SChristoph Hellwig /* unregister all paths if we failed to register any path */ 31349c72bad1SChristoph Hellwig pr.old_key = new_key; 31359c72bad1SChristoph Hellwig pr.new_key = 0; 31369c72bad1SChristoph Hellwig pr.flags = 0; 31379c72bad1SChristoph Hellwig pr.fail_early = false; 31389c72bad1SChristoph Hellwig dm_call_pr(bdev, __dm_pr_register, &pr); 31399c72bad1SChristoph Hellwig } 314071cdb697SChristoph Hellwig 31419c72bad1SChristoph Hellwig return ret; 314271cdb697SChristoph Hellwig } 314371cdb697SChristoph Hellwig 314471cdb697SChristoph Hellwig static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type, 314571cdb697SChristoph Hellwig u32 flags) 314671cdb697SChristoph Hellwig { 314771cdb697SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data; 314871cdb697SChristoph Hellwig const struct pr_ops *ops; 3149971888c4SMike Snitzer int r, srcu_idx; 315071cdb697SChristoph Hellwig 31515bd5e8d8SMike Snitzer r = dm_prepare_ioctl(md, &srcu_idx, &bdev); 315271cdb697SChristoph Hellwig if (r < 0) 3153971888c4SMike Snitzer goto out; 315471cdb697SChristoph Hellwig 315571cdb697SChristoph Hellwig ops = bdev->bd_disk->fops->pr_ops; 315671cdb697SChristoph Hellwig if (ops && ops->pr_reserve) 315771cdb697SChristoph Hellwig r = ops->pr_reserve(bdev, key, type, flags); 315871cdb697SChristoph Hellwig else 315971cdb697SChristoph Hellwig r = 
-EOPNOTSUPP; 3160971888c4SMike Snitzer out: 3161971888c4SMike Snitzer dm_unprepare_ioctl(md, srcu_idx); 316271cdb697SChristoph Hellwig return r; 316371cdb697SChristoph Hellwig } 316471cdb697SChristoph Hellwig 316571cdb697SChristoph Hellwig static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type) 316671cdb697SChristoph Hellwig { 316771cdb697SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data; 316871cdb697SChristoph Hellwig const struct pr_ops *ops; 3169971888c4SMike Snitzer int r, srcu_idx; 317071cdb697SChristoph Hellwig 31715bd5e8d8SMike Snitzer r = dm_prepare_ioctl(md, &srcu_idx, &bdev); 317271cdb697SChristoph Hellwig if (r < 0) 3173971888c4SMike Snitzer goto out; 317471cdb697SChristoph Hellwig 317571cdb697SChristoph Hellwig ops = bdev->bd_disk->fops->pr_ops; 317671cdb697SChristoph Hellwig if (ops && ops->pr_release) 317771cdb697SChristoph Hellwig r = ops->pr_release(bdev, key, type); 317871cdb697SChristoph Hellwig else 317971cdb697SChristoph Hellwig r = -EOPNOTSUPP; 3180971888c4SMike Snitzer out: 3181971888c4SMike Snitzer dm_unprepare_ioctl(md, srcu_idx); 318271cdb697SChristoph Hellwig return r; 318371cdb697SChristoph Hellwig } 318471cdb697SChristoph Hellwig 318571cdb697SChristoph Hellwig static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key, 318671cdb697SChristoph Hellwig enum pr_type type, bool abort) 318771cdb697SChristoph Hellwig { 318871cdb697SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data; 318971cdb697SChristoph Hellwig const struct pr_ops *ops; 3190971888c4SMike Snitzer int r, srcu_idx; 319171cdb697SChristoph Hellwig 31925bd5e8d8SMike Snitzer r = dm_prepare_ioctl(md, &srcu_idx, &bdev); 319371cdb697SChristoph Hellwig if (r < 0) 3194971888c4SMike Snitzer goto out; 319571cdb697SChristoph Hellwig 319671cdb697SChristoph Hellwig ops = bdev->bd_disk->fops->pr_ops; 319771cdb697SChristoph Hellwig if (ops && ops->pr_preempt) 319871cdb697SChristoph Hellwig r = ops->pr_preempt(bdev, old_key, new_key, type, abort); 319971cdb697SChristoph Hellwig else 320071cdb697SChristoph Hellwig r = -EOPNOTSUPP; 3201971888c4SMike Snitzer out: 3202971888c4SMike Snitzer dm_unprepare_ioctl(md, srcu_idx); 320371cdb697SChristoph Hellwig return r; 320471cdb697SChristoph Hellwig } 320571cdb697SChristoph Hellwig 320671cdb697SChristoph Hellwig static int dm_pr_clear(struct block_device *bdev, u64 key) 320771cdb697SChristoph Hellwig { 320871cdb697SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data; 320971cdb697SChristoph Hellwig const struct pr_ops *ops; 3210971888c4SMike Snitzer int r, srcu_idx; 321171cdb697SChristoph Hellwig 32125bd5e8d8SMike Snitzer r = dm_prepare_ioctl(md, &srcu_idx, &bdev); 321371cdb697SChristoph Hellwig if (r < 0) 3214971888c4SMike Snitzer goto out; 321571cdb697SChristoph Hellwig 321671cdb697SChristoph Hellwig ops = bdev->bd_disk->fops->pr_ops; 321771cdb697SChristoph Hellwig if (ops && ops->pr_clear) 321871cdb697SChristoph Hellwig r = ops->pr_clear(bdev, key); 321971cdb697SChristoph Hellwig else 322071cdb697SChristoph Hellwig r = -EOPNOTSUPP; 3221971888c4SMike Snitzer out: 3222971888c4SMike Snitzer dm_unprepare_ioctl(md, srcu_idx); 322371cdb697SChristoph Hellwig return r; 322471cdb697SChristoph Hellwig } 322571cdb697SChristoph Hellwig 322671cdb697SChristoph Hellwig static const struct pr_ops dm_pr_ops = { 322771cdb697SChristoph Hellwig .pr_register = dm_pr_register, 322871cdb697SChristoph Hellwig .pr_reserve = dm_pr_reserve, 322971cdb697SChristoph Hellwig .pr_release = 
dm_pr_release, 323071cdb697SChristoph Hellwig .pr_preempt = dm_pr_preempt, 323171cdb697SChristoph Hellwig .pr_clear = dm_pr_clear, 323271cdb697SChristoph Hellwig }; 323371cdb697SChristoph Hellwig 323483d5cde4SAlexey Dobriyan static const struct block_device_operations dm_blk_dops = { 3235c62b37d9SChristoph Hellwig .submit_bio = dm_submit_bio, 32361da177e4SLinus Torvalds .open = dm_blk_open, 32371da177e4SLinus Torvalds .release = dm_blk_close, 3238aa129a22SMilan Broz .ioctl = dm_blk_ioctl, 32393ac51e74SDarrick J. Wong .getgeo = dm_blk_getgeo, 3240e76239a3SChristoph Hellwig .report_zones = dm_blk_report_zones, 324171cdb697SChristoph Hellwig .pr_ops = &dm_pr_ops, 32421da177e4SLinus Torvalds .owner = THIS_MODULE 32431da177e4SLinus Torvalds }; 32441da177e4SLinus Torvalds 3245f26c5719SDan Williams static const struct dax_operations dm_dax_ops = { 3246f26c5719SDan Williams .direct_access = dm_dax_direct_access, 32477bf7eac8SDan Williams .dax_supported = dm_dax_supported, 32487e026c8cSDan Williams .copy_from_iter = dm_dax_copy_from_iter, 3249b3a9a0c3SDan Williams .copy_to_iter = dm_dax_copy_to_iter, 3250cdf6cdcdSVivek Goyal .zero_page_range = dm_dax_zero_page_range, 3251f26c5719SDan Williams }; 3252f26c5719SDan Williams 32531da177e4SLinus Torvalds /* 32541da177e4SLinus Torvalds * module hooks 32551da177e4SLinus Torvalds */ 32561da177e4SLinus Torvalds module_init(dm_init); 32571da177e4SLinus Torvalds module_exit(dm_exit); 32581da177e4SLinus Torvalds 32591da177e4SLinus Torvalds module_param(major, uint, 0); 32601da177e4SLinus Torvalds MODULE_PARM_DESC(major, "The major number of the device mapper"); 3261f4790826SMike Snitzer 3262e8603136SMike Snitzer module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR); 3263e8603136SMike Snitzer MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools"); 3264e8603136SMike Snitzer 3265115485e8SMike Snitzer module_param(dm_numa_node, int, S_IRUGO | S_IWUSR); 3266115485e8SMike Snitzer MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations"); 3267115485e8SMike Snitzer 32681da177e4SLinus Torvalds MODULE_DESCRIPTION(DM_NAME " driver"); 32691da177e4SLinus Torvalds MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); 32701da177e4SLinus Torvalds MODULE_LICENSE("GPL"); 3271