/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/uio.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/pr.h>
#include <linux/refcount.h>
#include <linux/part_stat.h>
#include <linux/blk-crypto.h>
#include <trace/events/block.h>

#define DM_MSG_PREFIX "core"

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_IDR(_minor_idr);

static DEFINE_SPINLOCK(_minor_lock);

static void do_deferred_remove(struct work_struct *w);

static DECLARE_WORK(deferred_remove_work, do_deferred_remove);

static struct workqueue_struct *deferred_remove_workqueue;

atomic_t dm_global_event_nr = ATOMIC_INIT(0);
DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);

void dm_issue_global_event(void)
{
	atomic_inc(&dm_global_event_nr);
	wake_up(&dm_global_eventq);
}
/*
 * One of these is allocated (on-stack) per original bio.
 */
struct clone_info {
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	unsigned sector_count;
};

/*
 * One of these is allocated per clone bio.
 */
#define DM_TIO_MAGIC 7282014
struct dm_target_io {
	unsigned magic;
	struct dm_io *io;
	struct dm_target *ti;
	unsigned target_bio_nr;
	unsigned *len_ptr;
	bool inside_dm_io;
	struct bio clone;
};

/*
 * One of these is allocated per original bio.
 * It contains the first clone used for that original.
 */
#define DM_IO_MAGIC 5191977
struct dm_io {
	unsigned magic;
	struct mapped_device *md;
	blk_status_t status;
	atomic_t io_count;
	struct bio *orig_bio;
	unsigned long start_time;
	spinlock_t endio_lock;
	struct dm_stats_aux stats_aux;
	/* last member of dm_target_io is 'struct bio' */
	struct dm_target_io tio;
};

void *dm_per_bio_data(struct bio *bio, size_t data_size)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);

	if (!tio->inside_dm_io)
		return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
	return (char *)bio - offsetof(struct dm_target_io, clone) - offsetof(struct dm_io, tio) - data_size;
}
EXPORT_SYMBOL_GPL(dm_per_bio_data);

struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
{
	struct dm_io *io = (struct dm_io *)((char *)data + data_size);

	if (io->magic == DM_IO_MAGIC)
		return (struct bio *)((char *)io + offsetof(struct dm_io, tio) + offsetof(struct dm_target_io, clone));
	BUG_ON(io->magic != DM_TIO_MAGIC);
	return (struct bio *)((char *)io + offsetof(struct dm_target_io, clone));
}
EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data);

unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
{
	return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
}
EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);
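/*
 * Illustrative sketch (not part of this file): a target that set
 * ti->per_io_data_size = sizeof(struct my_pb) in its constructor can map
 * between a clone bio and its per-bio data in ->map()/->end_io(); the
 * 'my_pb' names are hypothetical:
 *
 *	struct my_pb *pb = dm_per_bio_data(bio, sizeof(struct my_pb));
 *	...
 *	struct bio *clone = dm_bio_from_per_bio_data(pb, sizeof(struct my_pb));
 */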
#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7
#define DMF_POST_SUSPENDING 8

#define DM_NUMA_NODE NUMA_NO_NODE
static int dm_numa_node = DM_NUMA_NODE;

/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
	struct bio_set bs;
	struct bio_set io_bs;
};

struct table_device {
	struct list_head list;
	refcount_t count;
	struct dm_dev dm_dev;
};

/*
 * Bio-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_BIO_BASED_IOS 16
static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;

static int __dm_get_module_param_int(int *module_param, int min, int max)
{
	int param = READ_ONCE(*module_param);
	int modified_param = 0;
	bool modified = true;

	if (param < min)
		modified_param = min;
	else if (param > max)
		modified_param = max;
	else
		modified = false;

	if (modified) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned __dm_get_module_param(unsigned *module_param,
			       unsigned def, unsigned max)
{
	unsigned param = READ_ONCE(*module_param);
	unsigned modified_param = 0;

	if (!param)
		modified_param = def;
	else if (param > max)
		modified_param = max;

	if (modified_param) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned dm_get_reserved_bio_based_ios(void)
{
	return __dm_get_module_param(&reserved_bio_based_ios,
				     RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);
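/*
 * Illustrative behaviour of the clamping helpers above (a sketch, not a
 * guarantee of the exact sysfs interaction): with reserved_bio_based_ios
 * set to 0 via the module parameter, dm_get_reserved_bio_based_ios()
 * yields the default RESERVED_BIO_BASED_IOS (16); a value above
 * DM_RESERVED_MAX_IOS is capped to that maximum, and the clamped value is
 * written back with cmpxchg() so concurrent readers converge on it.
 */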
static unsigned dm_get_numa_node(void)
{
	return __dm_get_module_param_int(&dm_numa_node,
					 DM_NUMA_NODE, num_online_nodes() - 1);
}

static int __init local_init(void)
{
	int r;

	r = dm_uevent_init();
	if (r)
		return r;

	deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
	if (!deferred_remove_workqueue) {
		r = -ENOMEM;
		goto out_uevent_exit;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_free_workqueue;

	if (!_major)
		_major = r;

	return 0;

out_free_workqueue:
	destroy_workqueue(deferred_remove_workqueue);
out_uevent_exit:
	dm_uevent_exit();

	return r;
}

static void local_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(deferred_remove_workqueue);

	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_io_init,
	dm_kcopyd_init,
	dm_interface_init,
	dm_statistics_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_io_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
	dm_statistics_exit,
};
static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();

	/*
	 * Should be empty by this point.
	 */
	idr_destroy(&_minor_idr);
}

/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{
	return test_bit(DMF_DELETING, &md->flags);
}

static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);
out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}

static void dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = disk->private_data;
	if (WARN_ON(!md))
		goto out;

	if (atomic_dec_and_test(&md->open_count) &&
	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
		queue_work(deferred_remove_workqueue, &deferred_remove_work);

	dm_put(md);
out:
	spin_unlock(&_minor_lock);
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}
/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md)) {
		r = -EBUSY;
		if (mark_deferred)
			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
		r = -EEXIST;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

int dm_cancel_deferred_remove(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (test_bit(DMF_DELETING, &md->flags))
		r = -EBUSY;
	else
		clear_bit(DMF_DEFERRED_REMOVE, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static void do_deferred_remove(struct work_struct *w)
{
	dm_deferred_remove();
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

#ifdef CONFIG_BLK_DEV_ZONED
int dm_report_zones_cb(struct blk_zone *zone, unsigned int idx, void *data)
{
	struct dm_report_zones_args *args = data;
	sector_t sector_diff = args->tgt->begin - args->start;

	/*
	 * Ignore zones beyond the target range.
	 */
	if (zone->start >= args->start + args->tgt->len)
		return 0;

	/*
	 * Remap the start sector and write pointer position of the zone
	 * to match its position in the target range.
	 */
	zone->start += sector_diff;
	if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL) {
		if (zone->cond == BLK_ZONE_COND_FULL)
			zone->wp = zone->start + zone->len;
		else if (zone->cond == BLK_ZONE_COND_EMPTY)
			zone->wp = zone->start;
		else
			zone->wp += sector_diff;
	}

	args->next_sector = zone->start + zone->len;
	return args->orig_cb(zone, args->zone_idx++, args->orig_data);
}
EXPORT_SYMBOL_GPL(dm_report_zones_cb);
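/*
 * Illustrative sketch: a zoned target's ->report_zones hook is expected to
 * set args->start to the mapped region's offset on the underlying device
 * and pass dm_report_zones_cb() as the callback so each zone is remapped
 * into the target's range. Shape (the 'lc' context is hypothetical;
 * dm-linear's hook looks much like this):
 *
 *	args->start = lc->start;
 *	return blkdev_report_zones(lc->dev->bdev,
 *				   lc->start + dm_target_offset(ti, args->next_sector),
 *				   nr_zones, dm_report_zones_cb, args);
 */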
static int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
			       unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct mapped_device *md = disk->private_data;
	struct dm_table *map;
	int srcu_idx, ret;
	struct dm_report_zones_args args = {
		.next_sector = sector,
		.orig_data = data,
		.orig_cb = cb,
	};

	if (dm_suspended_md(md))
		return -EAGAIN;

	map = dm_get_live_table(md, &srcu_idx);
	if (!map)
		return -EIO;

	do {
		struct dm_target *tgt;

		tgt = dm_table_find_target(map, args.next_sector);
		if (WARN_ON_ONCE(!tgt->type->report_zones)) {
			ret = -EIO;
			goto out;
		}

		args.tgt = tgt;
		ret = tgt->type->report_zones(tgt, &args,
					      nr_zones - args.zone_idx);
		if (ret < 0)
			goto out;
	} while (args.zone_idx < nr_zones &&
		 args.next_sector < get_capacity(disk));

	ret = args.zone_idx;
out:
	dm_put_live_table(md, srcu_idx);
	return ret;
}
#else
#define dm_blk_report_zones NULL
#endif /* CONFIG_BLK_DEV_ZONED */
static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
			    struct block_device **bdev)
	__acquires(md->io_barrier)
{
	struct dm_target *tgt;
	struct dm_table *map;
	int r;

retry:
	r = -ENOTTY;
	map = dm_get_live_table(md, srcu_idx);
	if (!map || !dm_table_get_size(map))
		return r;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		return r;

	tgt = dm_table_get_target(map, 0);
	if (!tgt->type->prepare_ioctl)
		return r;

	if (dm_suspended_md(md))
		return -EAGAIN;

	r = tgt->type->prepare_ioctl(tgt, bdev);
	if (r == -ENOTCONN && !fatal_signal_pending(current)) {
		dm_put_live_table(md, *srcu_idx);
		msleep(10);
		goto retry;
	}

	return r;
}

static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
	__releases(md->io_barrier)
{
	dm_put_live_table(md, srcu_idx);
}
static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	if (r > 0) {
		/*
		 * Target determined this ioctl is being issued against a
		 * subset of the parent bdev; require extra privileges.
		 */
		if (!capable(CAP_SYS_RAWIO)) {
			DMWARN_LIMIT("%s: sending ioctl %x to DM device without required privilege.",
				     current->comm, cmd);
			r = -ENOIOCTLCMD;
			goto out;
		}
	}

	r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}

u64 dm_start_time_ns_from_clone(struct bio *bio)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	struct dm_io *io = tio->io;

	return jiffies_to_nsecs(io->start_time);
}
EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone);
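/*
 * Illustrative sketch: a target handed a clone bio can recover when DM
 * began accounting the original bio, e.g. to feed its own latency stats
 * (approximate, since the stored start time is jiffies-based):
 *
 *	u64 start_ns = dm_start_time_ns_from_clone(bio);
 */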
static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->orig_bio;

	io->start_time = bio_start_io_acct(bio);
	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio_data_dir(bio),
				    bio->bi_iter.bi_sector, bio_sectors(bio),
				    false, 0, &io->stats_aux);
}

static void end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->orig_bio;
	unsigned long duration = jiffies - io->start_time;

	bio_end_io_acct(bio, io->start_time);

	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio_data_dir(bio),
				    bio->bi_iter.bi_sector, bio_sectors(bio),
				    true, duration, &io->stats_aux);

	/* nudge anyone waiting on suspend queue */
	if (unlikely(wq_has_sleeper(&md->wait)))
		wake_up(&md->wait);
}

static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
{
	struct dm_io *io;
	struct dm_target_io *tio;
	struct bio *clone;

	clone = bio_alloc_bioset(GFP_NOIO, 0, &md->io_bs);
	if (!clone)
		return NULL;

	tio = container_of(clone, struct dm_target_io, clone);
	tio->inside_dm_io = true;
	tio->io = NULL;

	io = container_of(tio, struct dm_io, tio);
	io->magic = DM_IO_MAGIC;
	io->status = 0;
	atomic_set(&io->io_count, 1);
	io->orig_bio = bio;
	io->md = md;
	spin_lock_init(&io->endio_lock);

	start_io_acct(io);

	return io;
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	bio_put(&io->tio.clone);
}

static struct dm_target_io *alloc_tio(struct clone_info *ci, struct dm_target *ti,
				      unsigned target_bio_nr, gfp_t gfp_mask)
{
	struct dm_target_io *tio;

	if (!ci->io->tio.io) {
		/* the dm_target_io embedded in ci->io is available */
		tio = &ci->io->tio;
	} else {
		struct bio *clone = bio_alloc_bioset(gfp_mask, 0, &ci->io->md->bs);
		if (!clone)
			return NULL;

		tio = container_of(clone, struct dm_target_io, clone);
		tio->inside_dm_io = false;
	}

	tio->magic = DM_TIO_MAGIC;
	tio->io = ci->io;
	tio->ti = ti;
	tio->target_bio_nr = target_bio_nr;

	return tio;
}

static void free_tio(struct dm_target_io *tio)
{
	if (tio->inside_dm_io)
		return;
	bio_put(&tio->clone);
}

/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&md->deferred_lock, flags);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irqrestore(&md->deferred_lock, flags);
	queue_work(md->wq, &md->work);
}
/*
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_put_live_table() when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
{
	*srcu_idx = srcu_read_lock(&md->io_barrier);

	return srcu_dereference(md->map, &md->io_barrier);
}

void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
{
	srcu_read_unlock(&md->io_barrier, srcu_idx);
}

void dm_sync_table(struct mapped_device *md)
{
	synchronize_srcu(&md->io_barrier);
	synchronize_rcu_expedited();
}

/*
 * A fast alternative to dm_get_live_table/dm_put_live_table.
 * The caller must not block between these two functions.
 */
static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
{
	rcu_read_lock();
	return rcu_dereference(md->map);
}

static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
{
	rcu_read_unlock();
}
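/*
 * Illustrative sketch of the canonical pattern used throughout this file
 * (SRCU readers may sleep, unlike users of the _fast variants above):
 *
 *	int srcu_idx;
 *	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 *
 *	if (map)
 *		... inspect the table ...
 *	dm_put_live_table(md, srcu_idx);
 */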
static char *_dm_claim_ptr = "I belong to device-mapper";

/*
 * Open a table device so we can use it as a map destination.
 */
static int open_table_device(struct table_device *td, dev_t dev,
			     struct mapped_device *md)
{
	struct block_device *bdev;

	int r;

	BUG_ON(td->dm_dev.bdev);

	bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _dm_claim_ptr);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	r = bd_link_disk_holder(bdev, dm_disk(md));
	if (r) {
		blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
		return r;
	}

	td->dm_dev.bdev = bdev;
	td->dm_dev.dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	return 0;
}

/*
 * Close a table device that we've been using.
 */
static void close_table_device(struct table_device *td, struct mapped_device *md)
{
	if (!td->dm_dev.bdev)
		return;

	bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
	blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
	put_dax(td->dm_dev.dax_dev);
	td->dm_dev.bdev = NULL;
	td->dm_dev.dax_dev = NULL;
}

static struct table_device *find_table_device(struct list_head *l, dev_t dev,
					      fmode_t mode)
{
	struct table_device *td;

	list_for_each_entry(td, l, list)
		if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
			return td;

	return NULL;
}

int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
			struct dm_dev **result)
{
	int r;
	struct table_device *td;

	mutex_lock(&md->table_devices_lock);
	td = find_table_device(&md->table_devices, dev, mode);
	if (!td) {
		td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
		if (!td) {
			mutex_unlock(&md->table_devices_lock);
			return -ENOMEM;
		}

		td->dm_dev.mode = mode;
		td->dm_dev.bdev = NULL;

		if ((r = open_table_device(td, dev, md))) {
			mutex_unlock(&md->table_devices_lock);
			kfree(td);
			return r;
		}

		format_dev_t(td->dm_dev.name, dev);

		refcount_set(&td->count, 1);
		list_add(&td->list, &md->table_devices);
	} else {
		refcount_inc(&td->count);
	}
	mutex_unlock(&md->table_devices_lock);

	*result = &td->dm_dev;
	return 0;
}
EXPORT_SYMBOL_GPL(dm_get_table_device);

void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
{
	struct table_device *td = container_of(d, struct table_device, dm_dev);

	mutex_lock(&md->table_devices_lock);
	if (refcount_dec_and_test(&td->count)) {
		close_table_device(td, md);
		list_del(&td->list);
		kfree(td);
	}
	mutex_unlock(&md->table_devices_lock);
}
EXPORT_SYMBOL(dm_put_table_device);
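/*
 * Illustrative sketch: targets do not normally call the above directly;
 * dm_get_device() in a target's constructor and dm_put_device() in its
 * destructor route through dm_get_table_device()/dm_put_table_device(),
 * e.g. (argv[0] being the ctr's device path argument):
 *
 *	struct dm_dev *dev;
 *
 *	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dev))
 *		return -EINVAL;
 *	...
 *	dm_put_device(ti, dev);
 */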
static void free_table_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct table_device *td = list_entry(tmp, struct table_device, list);

		DMWARN("dm_destroy: %s still exists with %d references",
		       td->dm_dev.name, refcount_read(&td->count));
		kfree(td);
	}
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}
/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, blk_status_t error)
{
	unsigned long flags;
	blk_status_t io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;

	/* Push-back supersedes any I/O errors */
	if (unlikely(error)) {
		spin_lock_irqsave(&io->endio_lock, flags);
		if (!(io->status == BLK_STS_DM_REQUEUE && __noflush_suspending(md)))
			io->status = error;
		spin_unlock_irqrestore(&io->endio_lock, flags);
	}

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->status == BLK_STS_DM_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md))
				/* NOTE early return due to BLK_STS_DM_REQUEUE below */
				bio_list_add_head(&md->deferred, io->orig_bio);
			else
				/* noflush suspend was interrupted. */
				io->status = BLK_STS_IOERR;
			spin_unlock_irqrestore(&md->deferred_lock, flags);
		}

		io_error = io->status;
		bio = io->orig_bio;
		end_io_acct(io);
		free_io(md, io);

		if (io_error == BLK_STS_DM_REQUEUE)
			return;

		if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
			/*
			 * Preflush done for flush with data, reissue
			 * without REQ_PREFLUSH.
			 */
			bio->bi_opf &= ~REQ_PREFLUSH;
			queue_io(md, bio);
		} else {
			/* done with normal IO or empty flush */
			if (io_error)
				bio->bi_status = io_error;
			bio_endio(bio);
		}
	}
}
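/*
 * Illustrative lifecycle sketch: io->io_count starts at 1 in alloc_io(),
 * __map_bio() below takes one extra reference per clone it issues, and the
 * submission path drops the initial reference with a final dec_pending()
 * once splitting is done, so the last completing clone ends the original
 * bio:
 *
 *	io = alloc_io(md, bio);		// io_count == 1
 *	__map_bio(tio);			// io_count == 2
 *	dec_pending(io, 0);		// submission reference dropped
 *	clone_endio() -> dec_pending()	// io_count == 0 -> bio_endio()
 */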
void disable_discard(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support DISCARD, disable it */
	limits->max_discard_sectors = 0;
	blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue);
}

void disable_write_same(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE SAME, disable it */
	limits->max_write_same_sectors = 0;
}

void disable_write_zeroes(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE ZEROES, disable it */
	limits->max_write_zeroes_sectors = 0;
}

static void clone_endio(struct bio *bio)
{
	blk_status_t error = bio->bi_status;
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;
	struct bio *orig_bio = io->orig_bio;

	if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
		if (bio_op(bio) == REQ_OP_DISCARD &&
		    !bio->bi_disk->queue->limits.max_discard_sectors)
			disable_discard(md);
		else if (bio_op(bio) == REQ_OP_WRITE_SAME &&
			 !bio->bi_disk->queue->limits.max_write_same_sectors)
			disable_write_same(md);
		else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
			 !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
			disable_write_zeroes(md);
	}

	/*
	 * For zone-append bios get offset in zone of the written
	 * sector and add that to the original bio sector pos.
	 */
	if (bio_op(orig_bio) == REQ_OP_ZONE_APPEND) {
		sector_t written_sector = bio->bi_iter.bi_sector;
		struct request_queue *q = orig_bio->bi_disk->queue;
		u64 mask = (u64)blk_queue_zone_sectors(q) - 1;

		orig_bio->bi_iter.bi_sector += written_sector & mask;
	}

	if (endio) {
		int r = endio(tio->ti, bio, &error);
		switch (r) {
		case DM_ENDIO_REQUEUE:
			error = BLK_STS_DM_REQUEUE;
			fallthrough;
		case DM_ENDIO_DONE:
			break;
		case DM_ENDIO_INCOMPLETE:
			/* The target will handle the io */
			return;
		default:
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	free_tio(tio);
	dec_pending(io, error);
}
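/*
 * Illustrative sketch: the ->end_io hook invoked by clone_endio() above
 * can complete, retry or defer the clone; a hypothetical target:
 *
 *	static int my_end_io(struct dm_target *ti, struct bio *bio,
 *			     blk_status_t *error)
 *	{
 *		if (*error && my_can_retry(ti))
 *			return DM_ENDIO_REQUEUE;  // DM pushes the io back
 *		return DM_ENDIO_DONE;             // normal completion
 *	}
 */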
/*
 * Return maximum size of I/O possible at the supplied sector up to the current
 * target boundary.
 */
static inline sector_t max_io_len_target_boundary(struct dm_target *ti,
						  sector_t target_offset)
{
	return ti->len - target_offset;
}

static sector_t max_io_len(struct dm_target *ti, sector_t sector)
{
	sector_t target_offset = dm_target_offset(ti, sector);
	sector_t len = max_io_len_target_boundary(ti, target_offset);
	sector_t max_len;

	/*
	 * Does the target need to split even further?
	 * - q->limits.chunk_sectors reflects ti->max_io_len so
	 *   blk_max_size_offset() provides required splitting.
	 * - blk_max_size_offset() also respects q->limits.max_sectors
	 */
	max_len = blk_max_size_offset(ti->table->md->queue,
				      target_offset);
	if (len > max_len)
		len = max_len;

	return len;
}

int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
{
	if (len > UINT_MAX) {
		DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
		      (unsigned long long)len, UINT_MAX);
		ti->error = "Maximum size of target IO is too large";
		return -EINVAL;
	}

	ti->max_io_len = (uint32_t) len;

	return 0;
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
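/*
 * Illustrative sketch: a target whose I/O must not span an internal chunk
 * boundary calls this from its constructor ('my_chunk_sectors' is a
 * hypothetical per-target limit):
 *
 *	r = dm_set_target_max_io_len(ti, my_chunk_sectors);
 *	if (r)
 *		return r;
 */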
static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
						sector_t sector, int *srcu_idx)
	__acquires(md->io_barrier)
{
	struct dm_table *map;
	struct dm_target *ti;

	map = dm_get_live_table(md, srcu_idx);
	if (!map)
		return NULL;

	ti = dm_table_find_target(map, sector);
	if (!ti)
		return NULL;

	return ti;
}

static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
				 long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long len, ret = -EIO;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->direct_access)
		goto out;
	len = max_io_len(ti, sector) / PAGE_SECTORS;
	if (len < 1)
		goto out;
	nr_pages = min(len, nr_pages);
	ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);

 out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}
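/*
 * Illustrative sketch: a pass-through target's ->direct_access remaps the
 * page offset onto the underlying DAX device and forwards the call
 * (dm-linear takes roughly this shape; the 'lc' context is hypothetical
 * here):
 *
 *	sector_t dev_sector = lc->start +
 *		dm_target_offset(ti, pgoff * PAGE_SECTORS);
 *	long ret = bdev_dax_pgoff(lc->dev->bdev, dev_sector,
 *				  nr_pages * PAGE_SIZE, &pgoff);
 *
 *	if (ret)
 *		return ret;
 *	return dax_direct_access(lc->dev->dax_dev, pgoff, nr_pages, kaddr, pfn);
 */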
static bool dm_dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
			     int blocksize, sector_t start, sector_t len)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	struct dm_table *map;
	bool ret = false;
	int srcu_idx;

	map = dm_get_live_table(md, &srcu_idx);
	if (!map)
		goto out;

	ret = dm_table_supports_dax(map, device_supports_dax, &blocksize);

out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
				    void *addr, size_t bytes, struct iov_iter *i)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long ret = 0;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->dax_copy_from_iter) {
		ret = copy_from_iter(addr, bytes, i);
		goto out;
	}
	ret = ti->type->dax_copy_from_iter(ti, pgoff, addr, bytes, i);
out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

static size_t dm_dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
				  void *addr, size_t bytes, struct iov_iter *i)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long ret = 0;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->dax_copy_to_iter) {
		ret = copy_to_iter(addr, bytes, i);
		goto out;
	}
	ret = ti->type->dax_copy_to_iter(ti, pgoff, addr, bytes, i);
out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}
1192cdf6cdcdSVivek Goyal if (!ti)
1193cdf6cdcdSVivek Goyal goto out;
1194cdf6cdcdSVivek Goyal if (WARN_ON(!ti->type->dax_zero_page_range)) {
1195cdf6cdcdSVivek Goyal /*
1196cdf6cdcdSVivek Goyal * ->zero_page_range() is a mandatory dax operation. If we are
1197cdf6cdcdSVivek Goyal * here, something is wrong.
1198cdf6cdcdSVivek Goyal */
1200cdf6cdcdSVivek Goyal goto out;
1201cdf6cdcdSVivek Goyal }
1202cdf6cdcdSVivek Goyal ret = ti->type->dax_zero_page_range(ti, pgoff, nr_pages);
1203cdf6cdcdSVivek Goyal 
1204cdf6cdcdSVivek Goyal out:
1205cdf6cdcdSVivek Goyal dm_put_live_table(md, srcu_idx);
1206cdf6cdcdSVivek Goyal 
1207cdf6cdcdSVivek Goyal return ret;
1208cdf6cdcdSVivek Goyal }
1209cdf6cdcdSVivek Goyal 
12101dd40c3eSMikulas Patocka /*
12111dd40c3eSMikulas Patocka * A target may call dm_accept_partial_bio only from the map routine. It is
12122e2d6f7eSAjay Joshi * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_RESET,
12132e2d6f7eSAjay Joshi * REQ_OP_ZONE_OPEN, REQ_OP_ZONE_CLOSE and REQ_OP_ZONE_FINISH.
12141dd40c3eSMikulas Patocka *
12151dd40c3eSMikulas Patocka * dm_accept_partial_bio informs the dm that the target only wants to process
12161dd40c3eSMikulas Patocka * additional n_sectors sectors of the bio and the rest of the data should be
12171dd40c3eSMikulas Patocka * sent in the next bio.
12181dd40c3eSMikulas Patocka *
12191dd40c3eSMikulas Patocka * A diagram that explains the arithmetic:
12201dd40c3eSMikulas Patocka * +--------------------+---------------+-------+
12211dd40c3eSMikulas Patocka * | 1 | 2 | 3 |
12221dd40c3eSMikulas Patocka * +--------------------+---------------+-------+
12231dd40c3eSMikulas Patocka *
12241dd40c3eSMikulas Patocka * <-------------- *tio->len_ptr --------------->
12251dd40c3eSMikulas Patocka * <------- bi_size ------->
12261dd40c3eSMikulas Patocka * <-- n_sectors -->
12271dd40c3eSMikulas Patocka *
12281dd40c3eSMikulas Patocka * Region 1 was already iterated over with bio_advance or a similar function.
12291dd40c3eSMikulas Patocka * (it may be empty if the target doesn't use bio_advance)
12301dd40c3eSMikulas Patocka * Region 2 is the remaining bio size that the target wants to process.
12311dd40c3eSMikulas Patocka * (it may be empty if region 1 is non-empty, although there is no reason
12321dd40c3eSMikulas Patocka * to make it empty)
12331dd40c3eSMikulas Patocka * The target requires that region 3 be sent in the next bio.
12341dd40c3eSMikulas Patocka *
12351dd40c3eSMikulas Patocka * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
12361dd40c3eSMikulas Patocka * the partially processed part (the sum of regions 1+2) must be the same for all
12371dd40c3eSMikulas Patocka * copies of the bio.
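 *
 * An illustrative sketch (hypothetical target, not taken from this file),
 * for a map routine that can handle at most max_sectors per bio, where
 * sketch_map, max_sectors and underlying_bdev are made-up names:
 *
 *	static int sketch_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		if (bio_sectors(bio) > max_sectors)
 *			dm_accept_partial_bio(bio, max_sectors);
 *		bio_set_dev(bio, underlying_bdev);
 *		return DM_MAPIO_REMAPPED;
 *	}
 *
 * DM core then resubmits the rest of the data (region 3) to the target in
 * one or more subsequent bios.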
12381dd40c3eSMikulas Patocka */
12391dd40c3eSMikulas Patocka void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
12401dd40c3eSMikulas Patocka {
12411dd40c3eSMikulas Patocka struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
12421dd40c3eSMikulas Patocka unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
12431eff9d32SJens Axboe BUG_ON(bio->bi_opf & REQ_PREFLUSH);
12441dd40c3eSMikulas Patocka BUG_ON(bi_size > *tio->len_ptr);
12451dd40c3eSMikulas Patocka BUG_ON(n_sectors > bi_size);
12461dd40c3eSMikulas Patocka *tio->len_ptr -= bi_size - n_sectors;
12471dd40c3eSMikulas Patocka bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
12481dd40c3eSMikulas Patocka }
12491dd40c3eSMikulas Patocka EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
12501dd40c3eSMikulas Patocka 
1251978e51baSMike Snitzer static blk_qc_t __map_bio(struct dm_target_io *tio)
12521da177e4SLinus Torvalds {
12531da177e4SLinus Torvalds int r;
12542056a782SJens Axboe sector_t sector;
1255dba14160SMikulas Patocka struct bio *clone = &tio->clone;
125664f52b0eSMike Snitzer struct dm_io *io = tio->io;
1257bd2a49b8SAlasdair G Kergon struct dm_target *ti = tio->ti;
1258978e51baSMike Snitzer blk_qc_t ret = BLK_QC_T_NONE;
12591da177e4SLinus Torvalds 
12601da177e4SLinus Torvalds clone->bi_end_io = clone_endio;
12611da177e4SLinus Torvalds 
12621da177e4SLinus Torvalds /*
12631da177e4SLinus Torvalds * Map the clone. If r == 0 we don't need to do
12641da177e4SLinus Torvalds * anything, the target has assumed ownership of
12651da177e4SLinus Torvalds * this io.
12661da177e4SLinus Torvalds */
126764f52b0eSMike Snitzer atomic_inc(&io->io_count);
12684f024f37SKent Overstreet sector = clone->bi_iter.bi_sector;
1269d67a5f4bSMikulas Patocka 
12707de3ee57SMikulas Patocka r = ti->type->map(ti, clone);
1271846785e6SChristoph Hellwig switch (r) {
1272846785e6SChristoph Hellwig case DM_MAPIO_SUBMITTED:
1273846785e6SChristoph Hellwig break;
1274846785e6SChristoph Hellwig case DM_MAPIO_REMAPPED:
12751da177e4SLinus Torvalds /* the bio has been remapped so dispatch it */
127674d46992SChristoph Hellwig trace_block_bio_remap(clone->bi_disk->queue, clone,
127764f52b0eSMike Snitzer bio_dev(io->orig_bio), sector);
1278ed00aabdSChristoph Hellwig ret = submit_bio_noacct(clone);
1279846785e6SChristoph Hellwig break;
1280846785e6SChristoph Hellwig case DM_MAPIO_KILL:
12814e4cbee9SChristoph Hellwig free_tio(tio);
128264f52b0eSMike Snitzer dec_pending(io, BLK_STS_IOERR);
12834e4cbee9SChristoph Hellwig break;
1284846785e6SChristoph Hellwig case DM_MAPIO_REQUEUE:
1285cfae7529SMike Snitzer free_tio(tio);
128664f52b0eSMike Snitzer dec_pending(io, BLK_STS_DM_REQUEUE);
1287846785e6SChristoph Hellwig break;
1288846785e6SChristoph Hellwig default:
128945cbcd79SKiyoshi Ueda DMWARN("unimplemented target map return value: %d", r);
129045cbcd79SKiyoshi Ueda BUG();
12911da177e4SLinus Torvalds }
12921da177e4SLinus Torvalds 
1293978e51baSMike Snitzer return ret;
12941da177e4SLinus Torvalds }
12951da177e4SLinus Torvalds 
1296e0d6609aSMikulas Patocka static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
1297bd2a49b8SAlasdair G Kergon {
12984f024f37SKent Overstreet bio->bi_iter.bi_sector = sector;
12994f024f37SKent Overstreet bio->bi_iter.bi_size = to_bytes(len);
13001da177e4SLinus Torvalds }
13011da177e4SLinus Torvalds 
13021da177e4SLinus Torvalds /*
13031da177e4SLinus Torvalds * Creates a bio that consists of a range of complete bvecs.
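 * Besides the data, the clone also inherits the original bio's crypto
 * context and, when present and supported by the target, its integrity
 * payload (trimmed to the cloned range).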
13041da177e4SLinus Torvalds */ 1305c80914e8SMike Snitzer static int clone_bio(struct dm_target_io *tio, struct bio *bio, 13061c3b13e6SKent Overstreet sector_t sector, unsigned len) 13071da177e4SLinus Torvalds { 1308dba14160SMikulas Patocka struct bio *clone = &tio->clone; 13091da177e4SLinus Torvalds 13101c3b13e6SKent Overstreet __bio_clone_fast(clone, bio); 13119c47008dSMartin K. Petersen 1312a892c8d5SSatya Tangirala bio_crypt_clone(clone, bio, GFP_NOIO); 1313a892c8d5SSatya Tangirala 131457c36519SMike Snitzer if (bio_integrity(bio)) { 1315e2460f2aSMikulas Patocka int r; 1316e2460f2aSMikulas Patocka 1317e2460f2aSMikulas Patocka if (unlikely(!dm_target_has_integrity(tio->ti->type) && 1318e2460f2aSMikulas Patocka !dm_target_passes_integrity(tio->ti->type))) { 1319e2460f2aSMikulas Patocka DMWARN("%s: the target %s doesn't support integrity data.", 1320e2460f2aSMikulas Patocka dm_device_name(tio->io->md), 1321e2460f2aSMikulas Patocka tio->ti->type->name); 1322e2460f2aSMikulas Patocka return -EIO; 1323e2460f2aSMikulas Patocka } 1324e2460f2aSMikulas Patocka 1325e2460f2aSMikulas Patocka r = bio_integrity_clone(clone, bio, GFP_NOIO); 1326c80914e8SMike Snitzer if (r < 0) 1327c80914e8SMike Snitzer return r; 1328c80914e8SMike Snitzer } 13291c3b13e6SKent Overstreet 1330fa8db494SMike Snitzer bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector)); 1331fa8db494SMike Snitzer clone->bi_iter.bi_size = to_bytes(len); 1332fa8db494SMike Snitzer 1333fa8db494SMike Snitzer if (bio_integrity(bio)) 1334fa8db494SMike Snitzer bio_integrity_trim(clone); 1335c80914e8SMike Snitzer 1336c80914e8SMike Snitzer return 0; 13371da177e4SLinus Torvalds } 13381da177e4SLinus Torvalds 1339318716ddSMike Snitzer static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci, 1340318716ddSMike Snitzer struct dm_target *ti, unsigned num_bios) 1341f9ab94ceSMikulas Patocka { 1342dba14160SMikulas Patocka struct dm_target_io *tio; 1343318716ddSMike Snitzer int try; 1344dba14160SMikulas Patocka 1345318716ddSMike Snitzer if (!num_bios) 1346318716ddSMike Snitzer return; 1347f9ab94ceSMikulas Patocka 1348318716ddSMike Snitzer if (num_bios == 1) { 1349318716ddSMike Snitzer tio = alloc_tio(ci, ti, 0, GFP_NOIO); 1350318716ddSMike Snitzer bio_list_add(blist, &tio->clone); 1351318716ddSMike Snitzer return; 13529015df24SAlasdair G Kergon } 13539015df24SAlasdair G Kergon 1354318716ddSMike Snitzer for (try = 0; try < 2; try++) { 1355318716ddSMike Snitzer int bio_nr; 1356318716ddSMike Snitzer struct bio *bio; 1357318716ddSMike Snitzer 1358318716ddSMike Snitzer if (try) 1359bc02cdbeSMike Snitzer mutex_lock(&ci->io->md->table_devices_lock); 1360318716ddSMike Snitzer for (bio_nr = 0; bio_nr < num_bios; bio_nr++) { 1361318716ddSMike Snitzer tio = alloc_tio(ci, ti, bio_nr, try ? 
GFP_NOIO : GFP_NOWAIT); 1362318716ddSMike Snitzer if (!tio) 1363318716ddSMike Snitzer break; 1364318716ddSMike Snitzer 1365318716ddSMike Snitzer bio_list_add(blist, &tio->clone); 1366318716ddSMike Snitzer } 1367318716ddSMike Snitzer if (try) 1368bc02cdbeSMike Snitzer mutex_unlock(&ci->io->md->table_devices_lock); 1369318716ddSMike Snitzer if (bio_nr == num_bios) 1370318716ddSMike Snitzer return; 1371318716ddSMike Snitzer 1372318716ddSMike Snitzer while ((bio = bio_list_pop(blist))) { 1373318716ddSMike Snitzer tio = container_of(bio, struct dm_target_io, clone); 1374318716ddSMike Snitzer free_tio(tio); 1375318716ddSMike Snitzer } 1376318716ddSMike Snitzer } 1377318716ddSMike Snitzer } 1378318716ddSMike Snitzer 1379978e51baSMike Snitzer static blk_qc_t __clone_and_map_simple_bio(struct clone_info *ci, 1380318716ddSMike Snitzer struct dm_target_io *tio, unsigned *len) 13819015df24SAlasdair G Kergon { 1382dba14160SMikulas Patocka struct bio *clone = &tio->clone; 13839015df24SAlasdair G Kergon 13841dd40c3eSMikulas Patocka tio->len_ptr = len; 13851dd40c3eSMikulas Patocka 13861c3b13e6SKent Overstreet __bio_clone_fast(clone, ci->bio); 1387bd2a49b8SAlasdair G Kergon if (len) 13881dd40c3eSMikulas Patocka bio_setup_sector(clone, ci->sector, *len); 1389f9ab94ceSMikulas Patocka 1390978e51baSMike Snitzer return __map_bio(tio); 1391f9ab94ceSMikulas Patocka } 1392f9ab94ceSMikulas Patocka 139314fe594dSAlasdair G Kergon static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti, 13941dd40c3eSMikulas Patocka unsigned num_bios, unsigned *len) 139506a426ceSMike Snitzer { 1396318716ddSMike Snitzer struct bio_list blist = BIO_EMPTY_LIST; 1397318716ddSMike Snitzer struct bio *bio; 1398318716ddSMike Snitzer struct dm_target_io *tio; 139906a426ceSMike Snitzer 1400318716ddSMike Snitzer alloc_multiple_bios(&blist, ci, ti, num_bios); 1401318716ddSMike Snitzer 1402318716ddSMike Snitzer while ((bio = bio_list_pop(&blist))) { 1403318716ddSMike Snitzer tio = container_of(bio, struct dm_target_io, clone); 1404978e51baSMike Snitzer (void) __clone_and_map_simple_bio(ci, tio, len); 1405318716ddSMike Snitzer } 140606a426ceSMike Snitzer } 140706a426ceSMike Snitzer 140814fe594dSAlasdair G Kergon static int __send_empty_flush(struct clone_info *ci) 1409f9ab94ceSMikulas Patocka { 141006a426ceSMike Snitzer unsigned target_nr = 0; 1411f9ab94ceSMikulas Patocka struct dm_target *ti; 1412828678b8SMike Snitzer struct bio flush_bio; 1413828678b8SMike Snitzer 1414828678b8SMike Snitzer /* 1415828678b8SMike Snitzer * Use an on-stack bio for this, it's safe since we don't 1416828678b8SMike Snitzer * need to reference it after submit. It's just used as 1417828678b8SMike Snitzer * the basis for the clone(s). 1418828678b8SMike Snitzer */ 1419828678b8SMike Snitzer bio_init(&flush_bio, NULL, 0); 1420828678b8SMike Snitzer flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC; 1421828678b8SMike Snitzer ci->bio = &flush_bio; 1422828678b8SMike Snitzer ci->sector_count = 0; 1423f9ab94ceSMikulas Patocka 1424892ad71fSDennis Zhou /* 1425dbe3ece1SJens Axboe * Empty flush uses a statically initialized bio, as the base for 1426dbe3ece1SJens Axboe * cloning. However, blkg association requires that a bdev is 1427dbe3ece1SJens Axboe * associated with a gendisk, which doesn't happen until the bdev is 1428dbe3ece1SJens Axboe * opened. So, blkg association is done at issue time of the flush 1429dbe3ece1SJens Axboe * rather than when the device is created in alloc_dev(). 
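 * (bio_set_dev() below is what performs that association.)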
1430892ad71fSDennis Zhou */ 1431892ad71fSDennis Zhou bio_set_dev(ci->bio, ci->io->md->bdev); 1432892ad71fSDennis Zhou 1433b372d360SMike Snitzer BUG_ON(bio_has_data(ci->bio)); 1434f9ab94ceSMikulas Patocka while ((ti = dm_table_get_target(ci->map, target_nr++))) 14351dd40c3eSMikulas Patocka __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL); 1436828678b8SMike Snitzer 1437828678b8SMike Snitzer bio_uninit(ci->bio); 1438f9ab94ceSMikulas Patocka return 0; 1439f9ab94ceSMikulas Patocka } 1440f9ab94ceSMikulas Patocka 1441c80914e8SMike Snitzer static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti, 14421dd40c3eSMikulas Patocka sector_t sector, unsigned *len) 14435ae89a87SMike Snitzer { 1444dba14160SMikulas Patocka struct bio *bio = ci->bio; 14455ae89a87SMike Snitzer struct dm_target_io *tio; 1446f31c21e4SNeilBrown int r; 14475ae89a87SMike Snitzer 1448318716ddSMike Snitzer tio = alloc_tio(ci, ti, 0, GFP_NOIO); 14491dd40c3eSMikulas Patocka tio->len_ptr = len; 1450c80914e8SMike Snitzer r = clone_bio(tio, bio, sector, *len); 1451072623deSMikulas Patocka if (r < 0) { 1452cfae7529SMike Snitzer free_tio(tio); 1453c80914e8SMike Snitzer return r; 1454b0d8ed4dSAlasdair G Kergon } 1455978e51baSMike Snitzer (void) __map_bio(tio); 145655a62eefSAlasdair G Kergon 1457f31c21e4SNeilBrown return 0; 145823508a96SMike Snitzer } 145955a62eefSAlasdair G Kergon 14603d7f4562SMike Snitzer static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti, 146161697a6aSMike Snitzer unsigned num_bios) 14625ae89a87SMike Snitzer { 146351b86f9aSMichael Lass unsigned len; 14645ae89a87SMike Snitzer 14655ae89a87SMike Snitzer /* 146623508a96SMike Snitzer * Even though the device advertised support for this type of 146723508a96SMike Snitzer * request, that does not mean every target supports it, and 1468936688d7SMike Snitzer * reconfiguration might also have changed that since the 14695ae89a87SMike Snitzer * check was performed. 
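 * For example, a table reload may have swapped in a target whose
 * num_discard_bios is zero; in that case -EOPNOTSUPP is returned below.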
14705ae89a87SMike Snitzer */ 147155a62eefSAlasdair G Kergon if (!num_bios) 14725ae89a87SMike Snitzer return -EOPNOTSUPP; 14735ae89a87SMike Snitzer 14743720281dSMike Snitzer len = min_t(sector_t, ci->sector_count, 14753720281dSMike Snitzer max_io_len_target_boundary(ti, dm_target_offset(ti, ci->sector))); 147651b86f9aSMichael Lass 14771dd40c3eSMikulas Patocka __send_duplicate_bios(ci, ti, num_bios, &len); 14785ae89a87SMike Snitzer 1479a79245b3SMike Snitzer ci->sector += len; 14803d7f4562SMike Snitzer ci->sector_count -= len; 14815ae89a87SMike Snitzer 14825ae89a87SMike Snitzer return 0; 14835ae89a87SMike Snitzer } 14845ae89a87SMike Snitzer 1485568c73a3SMike Snitzer static bool is_abnormal_io(struct bio *bio) 1486568c73a3SMike Snitzer { 1487568c73a3SMike Snitzer bool r = false; 1488568c73a3SMike Snitzer 1489568c73a3SMike Snitzer switch (bio_op(bio)) { 1490568c73a3SMike Snitzer case REQ_OP_DISCARD: 1491568c73a3SMike Snitzer case REQ_OP_SECURE_ERASE: 1492568c73a3SMike Snitzer case REQ_OP_WRITE_SAME: 1493568c73a3SMike Snitzer case REQ_OP_WRITE_ZEROES: 1494568c73a3SMike Snitzer r = true; 1495568c73a3SMike Snitzer break; 1496568c73a3SMike Snitzer } 1497568c73a3SMike Snitzer 1498568c73a3SMike Snitzer return r; 1499568c73a3SMike Snitzer } 1500568c73a3SMike Snitzer 15010519c71eSMike Snitzer static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti, 15020519c71eSMike Snitzer int *result) 15030519c71eSMike Snitzer { 15040519c71eSMike Snitzer struct bio *bio = ci->bio; 15059679b5a7SMike Snitzer unsigned num_bios = 0; 15060519c71eSMike Snitzer 15079679b5a7SMike Snitzer switch (bio_op(bio)) { 15089679b5a7SMike Snitzer case REQ_OP_DISCARD: 15099679b5a7SMike Snitzer num_bios = ti->num_discard_bios; 15109679b5a7SMike Snitzer break; 15119679b5a7SMike Snitzer case REQ_OP_SECURE_ERASE: 15129679b5a7SMike Snitzer num_bios = ti->num_secure_erase_bios; 15139679b5a7SMike Snitzer break; 15149679b5a7SMike Snitzer case REQ_OP_WRITE_SAME: 15159679b5a7SMike Snitzer num_bios = ti->num_write_same_bios; 15169679b5a7SMike Snitzer break; 15179679b5a7SMike Snitzer case REQ_OP_WRITE_ZEROES: 15189679b5a7SMike Snitzer num_bios = ti->num_write_zeroes_bios; 15199679b5a7SMike Snitzer break; 15209679b5a7SMike Snitzer default: 15210519c71eSMike Snitzer return false; 15229679b5a7SMike Snitzer } 15230519c71eSMike Snitzer 15249679b5a7SMike Snitzer *result = __send_changing_extent_only(ci, ti, num_bios); 15250519c71eSMike Snitzer return true; 15260519c71eSMike Snitzer } 15270519c71eSMike Snitzer 1528e4c93811SAlasdair G Kergon /* 1529e4c93811SAlasdair G Kergon * Select the correct strategy for processing a non-flush bio. 
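 * Abnormal IO (discard, secure erase, write same, write zeroes) is fanned
 * out with __send_changing_extent_only(); all other bios are cloned and
 * mapped with __clone_and_map_data_bio().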
1530e4c93811SAlasdair G Kergon */ 1531e4c93811SAlasdair G Kergon static int __split_and_process_non_flush(struct clone_info *ci) 1532e4c93811SAlasdair G Kergon { 1533e4c93811SAlasdair G Kergon struct dm_target *ti; 15341c3b13e6SKent Overstreet unsigned len; 1535c80914e8SMike Snitzer int r; 1536e4c93811SAlasdair G Kergon 1537e4c93811SAlasdair G Kergon ti = dm_table_find_target(ci->map, ci->sector); 1538123d87d5SMikulas Patocka if (!ti) 1539e4c93811SAlasdair G Kergon return -EIO; 1540e4c93811SAlasdair G Kergon 1541568c73a3SMike Snitzer if (__process_abnormal_io(ci, ti, &r)) 15420519c71eSMike Snitzer return r; 15433d7f4562SMike Snitzer 15443720281dSMike Snitzer len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count); 1545e4c93811SAlasdair G Kergon 1546c80914e8SMike Snitzer r = __clone_and_map_data_bio(ci, ti, ci->sector, &len); 1547c80914e8SMike Snitzer if (r < 0) 1548c80914e8SMike Snitzer return r; 1549e4c93811SAlasdair G Kergon 1550e4c93811SAlasdair G Kergon ci->sector += len; 1551e4c93811SAlasdair G Kergon ci->sector_count -= len; 1552e4c93811SAlasdair G Kergon 1553e4c93811SAlasdair G Kergon return 0; 1554e4c93811SAlasdair G Kergon } 1555e4c93811SAlasdair G Kergon 1556978e51baSMike Snitzer static void init_clone_info(struct clone_info *ci, struct mapped_device *md, 1557978e51baSMike Snitzer struct dm_table *map, struct bio *bio) 1558978e51baSMike Snitzer { 1559978e51baSMike Snitzer ci->map = map; 1560978e51baSMike Snitzer ci->io = alloc_io(md, bio); 1561978e51baSMike Snitzer ci->sector = bio->bi_iter.bi_sector; 1562978e51baSMike Snitzer } 1563978e51baSMike Snitzer 1564a1e1cb72SMike Snitzer #define __dm_part_stat_sub(part, field, subnd) \ 1565a1e1cb72SMike Snitzer (part_stat_get(part, field) -= (subnd)) 1566a1e1cb72SMike Snitzer 1567e4c93811SAlasdair G Kergon /* 156814fe594dSAlasdair G Kergon * Entry point to split a bio into clones and submit them to the targets. 15691da177e4SLinus Torvalds */ 1570978e51baSMike Snitzer static blk_qc_t __split_and_process_bio(struct mapped_device *md, 157183d5e5b0SMikulas Patocka struct dm_table *map, struct bio *bio) 15721da177e4SLinus Torvalds { 15731da177e4SLinus Torvalds struct clone_info ci; 1574978e51baSMike Snitzer blk_qc_t ret = BLK_QC_T_NONE; 1575512875bdSJun'ichi Nomura int error = 0; 15761da177e4SLinus Torvalds 1577978e51baSMike Snitzer init_clone_info(&ci, md, map, bio); 1578bd2a49b8SAlasdair G Kergon 15791eff9d32SJens Axboe if (bio->bi_opf & REQ_PREFLUSH) { 158014fe594dSAlasdair G Kergon error = __send_empty_flush(&ci); 1581b372d360SMike Snitzer /* dec_pending submits any data associated with flush */ 15822e2d6f7eSAjay Joshi } else if (op_is_zone_mgmt(bio_op(bio))) { 1583a4aa5e56SDamien Le Moal ci.bio = bio; 1584a4aa5e56SDamien Le Moal ci.sector_count = 0; 1585a4aa5e56SDamien Le Moal error = __split_and_process_non_flush(&ci); 1586b372d360SMike Snitzer } else { 15876a8736d1STejun Heo ci.bio = bio; 15881da177e4SLinus Torvalds ci.sector_count = bio_sectors(bio); 158918a25da8SNeilBrown while (ci.sector_count && !error) { 159014fe594dSAlasdair G Kergon error = __split_and_process_non_flush(&ci); 159118a25da8SNeilBrown if (current->bio_list && ci.sector_count && !error) { 159218a25da8SNeilBrown /* 1593ed00aabdSChristoph Hellwig * Remainder must be passed to submit_bio_noacct() 159418a25da8SNeilBrown * so that it gets handled *after* bios already submitted 159518a25da8SNeilBrown * have been completely processed. 
159618a25da8SNeilBrown * We take a clone of the original to store in 1597745dc570SMike Snitzer * ci.io->orig_bio to be used by end_io_acct() and 159818a25da8SNeilBrown * for dec_pending to use for completion handling. 159918a25da8SNeilBrown */ 1600f21c601aSMike Snitzer struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count, 1601f21c601aSMike Snitzer GFP_NOIO, &md->queue->bio_split); 1602745dc570SMike Snitzer ci.io->orig_bio = b; 1603a1e1cb72SMike Snitzer 1604a1e1cb72SMike Snitzer /* 1605a1e1cb72SMike Snitzer * Adjust IO stats for each split, otherwise upon queue 1606a1e1cb72SMike Snitzer * reentry there will be redundant IO accounting. 1607a1e1cb72SMike Snitzer * NOTE: this is a stop-gap fix, a proper fix involves 1608a1e1cb72SMike Snitzer * significant refactoring of DM core's bio splitting 1609a1e1cb72SMike Snitzer * (by eliminating DM's splitting and just using bio_split) 1610a1e1cb72SMike Snitzer */ 1611a1e1cb72SMike Snitzer part_stat_lock(); 1612a1e1cb72SMike Snitzer __dm_part_stat_sub(&dm_disk(md)->part0, 1613a1e1cb72SMike Snitzer sectors[op_stat_group(bio_op(bio))], ci.sector_count); 1614a1e1cb72SMike Snitzer part_stat_unlock(); 1615a1e1cb72SMike Snitzer 161618a25da8SNeilBrown bio_chain(b, bio); 1617075c18c3SMike Snitzer trace_block_split(md->queue, b, bio->bi_iter.bi_sector); 1618ed00aabdSChristoph Hellwig ret = submit_bio_noacct(bio); 161918a25da8SNeilBrown break; 162018a25da8SNeilBrown } 162118a25da8SNeilBrown } 1622d87f4c14STejun Heo } 16231da177e4SLinus Torvalds 16241da177e4SLinus Torvalds /* drop the extra reference count */ 162554385bf7SBart Van Assche dec_pending(ci.io, errno_to_blk_status(error)); 1626978e51baSMike Snitzer return ret; 16271da177e4SLinus Torvalds } 16281da177e4SLinus Torvalds 16291da177e4SLinus Torvalds /* 1630978e51baSMike Snitzer * Optimized variant of __split_and_process_bio that leverages the 1631978e51baSMike Snitzer * fact that targets that use it do _not_ have a need to split bios. 
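 * It is only reached for DM_TYPE_NVME_BIO_BASED devices (see
 * dm_process_bio() below), whose single immutable target never needs
 * bio splitting.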
16321da177e4SLinus Torvalds */ 1633568c73a3SMike Snitzer static blk_qc_t __process_bio(struct mapped_device *md, struct dm_table *map, 1634094ee64dSMike Snitzer struct bio *bio) 16351da177e4SLinus Torvalds { 1636978e51baSMike Snitzer struct clone_info ci; 1637978e51baSMike Snitzer blk_qc_t ret = BLK_QC_T_NONE; 1638978e51baSMike Snitzer int error = 0; 1639978e51baSMike Snitzer 1640978e51baSMike Snitzer init_clone_info(&ci, md, map, bio); 1641978e51baSMike Snitzer 1642978e51baSMike Snitzer if (bio->bi_opf & REQ_PREFLUSH) { 1643978e51baSMike Snitzer error = __send_empty_flush(&ci); 1644978e51baSMike Snitzer /* dec_pending submits any data associated with flush */ 1645978e51baSMike Snitzer } else { 1646978e51baSMike Snitzer struct dm_target_io *tio; 1647094ee64dSMike Snitzer struct dm_target *ti = md->immutable_target; 1648094ee64dSMike Snitzer 1649094ee64dSMike Snitzer if (WARN_ON_ONCE(!ti)) { 1650094ee64dSMike Snitzer error = -EIO; 1651094ee64dSMike Snitzer goto out; 1652094ee64dSMike Snitzer } 1653978e51baSMike Snitzer 1654978e51baSMike Snitzer ci.bio = bio; 1655978e51baSMike Snitzer ci.sector_count = bio_sectors(bio); 1656568c73a3SMike Snitzer if (__process_abnormal_io(&ci, ti, &error)) 16570519c71eSMike Snitzer goto out; 16580519c71eSMike Snitzer 16590519c71eSMike Snitzer tio = alloc_tio(&ci, ti, 0, GFP_NOIO); 1660978e51baSMike Snitzer ret = __clone_and_map_simple_bio(&ci, tio, NULL); 1661978e51baSMike Snitzer } 1662978e51baSMike Snitzer out: 1663978e51baSMike Snitzer /* drop the extra reference count */ 1664978e51baSMike Snitzer dec_pending(ci.io, errno_to_blk_status(error)); 1665978e51baSMike Snitzer return ret; 1666978e51baSMike Snitzer } 1667978e51baSMike Snitzer 16686548c7c5SMike Snitzer static blk_qc_t dm_process_bio(struct mapped_device *md, 16696548c7c5SMike Snitzer struct dm_table *map, struct bio *bio) 16706548c7c5SMike Snitzer { 1671568c73a3SMike Snitzer blk_qc_t ret = BLK_QC_T_NONE; 1672568c73a3SMike Snitzer 1673568c73a3SMike Snitzer if (unlikely(!map)) { 1674568c73a3SMike Snitzer bio_io_error(bio); 1675568c73a3SMike Snitzer return ret; 1676568c73a3SMike Snitzer } 1677568c73a3SMike Snitzer 1678568c73a3SMike Snitzer /* 1679*0c2915b8SMike Snitzer * Use blk_queue_split() for abnormal IO (e.g. discard, writesame, etc) 1680*0c2915b8SMike Snitzer * otherwise associated queue_limits won't be imposed. 1681568c73a3SMike Snitzer */ 1682120c9257SMike Snitzer if (is_abnormal_io(bio)) 1683f695ca38SChristoph Hellwig blk_queue_split(&bio); 1684568c73a3SMike Snitzer 16856548c7c5SMike Snitzer if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED) 1686094ee64dSMike Snitzer return __process_bio(md, map, bio); 16876548c7c5SMike Snitzer return __split_and_process_bio(md, map, bio); 16886548c7c5SMike Snitzer } 16896548c7c5SMike Snitzer 1690c62b37d9SChristoph Hellwig static blk_qc_t dm_submit_bio(struct bio *bio) 16911da177e4SLinus Torvalds { 1692c4a59c4eSChristoph Hellwig struct mapped_device *md = bio->bi_disk->private_data; 1693978e51baSMike Snitzer blk_qc_t ret = BLK_QC_T_NONE; 169483d5e5b0SMikulas Patocka int srcu_idx; 169583d5e5b0SMikulas Patocka struct dm_table *map; 16961da177e4SLinus Torvalds 1697ac7c5675SChristoph Hellwig if (dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) { 1698ac7c5675SChristoph Hellwig /* 1699ac7c5675SChristoph Hellwig * We are called with a live reference on q_usage_counter, but 1700ac7c5675SChristoph Hellwig * that one will be released as soon as we return. 
Grab an 1701c62b37d9SChristoph Hellwig * extra one as blk_mq_submit_bio expects to be able to consume 1702c62b37d9SChristoph Hellwig * a reference (which lives until the request is freed in case a 1703c62b37d9SChristoph Hellwig * request is allocated). 1704ac7c5675SChristoph Hellwig */ 1705c62b37d9SChristoph Hellwig percpu_ref_get(&bio->bi_disk->queue->q_usage_counter); 1706c62b37d9SChristoph Hellwig return blk_mq_submit_bio(bio); 1707ac7c5675SChristoph Hellwig } 17088cf7961dSChristoph Hellwig 170983d5e5b0SMikulas Patocka map = dm_get_live_table(md, &srcu_idx); 17101da177e4SLinus Torvalds 17116a8736d1STejun Heo /* if we're suspended, we have to queue this io for later */ 17126a8736d1STejun Heo if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { 171383d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 17141da177e4SLinus Torvalds 17156abc4946SKonstantin Khlebnikov if (bio->bi_opf & REQ_NOWAIT) 17166abc4946SKonstantin Khlebnikov bio_wouldblock_error(bio); 17176abc4946SKonstantin Khlebnikov else if (!(bio->bi_opf & REQ_RAHEAD)) 171892c63902SMikulas Patocka queue_io(md, bio); 17196a8736d1STejun Heo else 17206a8736d1STejun Heo bio_io_error(bio); 1721978e51baSMike Snitzer return ret; 17221da177e4SLinus Torvalds } 17231da177e4SLinus Torvalds 17246548c7c5SMike Snitzer ret = dm_process_bio(md, map, bio); 1725978e51baSMike Snitzer 172683d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 1727978e51baSMike Snitzer return ret; 1728978e51baSMike Snitzer } 1729978e51baSMike Snitzer 17301da177e4SLinus Torvalds /*----------------------------------------------------------------- 17311da177e4SLinus Torvalds * An IDR is used to keep track of allocated minor numbers. 17321da177e4SLinus Torvalds *---------------------------------------------------------------*/ 17332b06cfffSAlasdair G Kergon static void free_minor(int minor) 17341da177e4SLinus Torvalds { 1735f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 17361da177e4SLinus Torvalds idr_remove(&_minor_idr, minor); 1737f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 17381da177e4SLinus Torvalds } 17391da177e4SLinus Torvalds 17401da177e4SLinus Torvalds /* 17411da177e4SLinus Torvalds * See if the device with a specific minor # is free. 17421da177e4SLinus Torvalds */ 1743cf13ab8eSFrederik Deweerdt static int specific_minor(int minor) 17441da177e4SLinus Torvalds { 1745c9d76be6STejun Heo int r; 17461da177e4SLinus Torvalds 17471da177e4SLinus Torvalds if (minor >= (1 << MINORBITS)) 17481da177e4SLinus Torvalds return -EINVAL; 17491da177e4SLinus Torvalds 1750c9d76be6STejun Heo idr_preload(GFP_KERNEL); 1751f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 17521da177e4SLinus Torvalds 1753c9d76be6STejun Heo r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT); 17541da177e4SLinus Torvalds 1755f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 1756c9d76be6STejun Heo idr_preload_end(); 1757c9d76be6STejun Heo if (r < 0) 1758c9d76be6STejun Heo return r == -ENOSPC ? 
-EBUSY : r; 1759c9d76be6STejun Heo return 0; 17601da177e4SLinus Torvalds } 17611da177e4SLinus Torvalds 1762cf13ab8eSFrederik Deweerdt static int next_free_minor(int *minor) 17631da177e4SLinus Torvalds { 1764c9d76be6STejun Heo int r; 17651da177e4SLinus Torvalds 1766c9d76be6STejun Heo idr_preload(GFP_KERNEL); 1767f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 17681da177e4SLinus Torvalds 1769c9d76be6STejun Heo r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT); 17701da177e4SLinus Torvalds 1771f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 1772c9d76be6STejun Heo idr_preload_end(); 1773c9d76be6STejun Heo if (r < 0) 17741da177e4SLinus Torvalds return r; 1775c9d76be6STejun Heo *minor = r; 1776c9d76be6STejun Heo return 0; 17771da177e4SLinus Torvalds } 17781da177e4SLinus Torvalds 177983d5cde4SAlexey Dobriyan static const struct block_device_operations dm_blk_dops; 1780f26c5719SDan Williams static const struct dax_operations dm_dax_ops; 17811da177e4SLinus Torvalds 178253d5914fSMikulas Patocka static void dm_wq_work(struct work_struct *work); 178353d5914fSMikulas Patocka 17840f20972fSMike Snitzer static void cleanup_mapped_device(struct mapped_device *md) 17850f20972fSMike Snitzer { 17860f20972fSMike Snitzer if (md->wq) 17870f20972fSMike Snitzer destroy_workqueue(md->wq); 17886f1c819cSKent Overstreet bioset_exit(&md->bs); 17896f1c819cSKent Overstreet bioset_exit(&md->io_bs); 17900f20972fSMike Snitzer 1791f26c5719SDan Williams if (md->dax_dev) { 1792f26c5719SDan Williams kill_dax(md->dax_dev); 1793f26c5719SDan Williams put_dax(md->dax_dev); 1794f26c5719SDan Williams md->dax_dev = NULL; 1795f26c5719SDan Williams } 1796f26c5719SDan Williams 17970f20972fSMike Snitzer if (md->disk) { 17980f20972fSMike Snitzer spin_lock(&_minor_lock); 17990f20972fSMike Snitzer md->disk->private_data = NULL; 18000f20972fSMike Snitzer spin_unlock(&_minor_lock); 18010f20972fSMike Snitzer del_gendisk(md->disk); 18020f20972fSMike Snitzer put_disk(md->disk); 18030f20972fSMike Snitzer } 18040f20972fSMike Snitzer 18050f20972fSMike Snitzer if (md->queue) 18060f20972fSMike Snitzer blk_cleanup_queue(md->queue); 18070f20972fSMike Snitzer 1808d09960b0STahsin Erdogan cleanup_srcu_struct(&md->io_barrier); 1809d09960b0STahsin Erdogan 18100f20972fSMike Snitzer if (md->bdev) { 18110f20972fSMike Snitzer bdput(md->bdev); 18120f20972fSMike Snitzer md->bdev = NULL; 18130f20972fSMike Snitzer } 18144cc96131SMike Snitzer 1815d5ffebddSMike Snitzer mutex_destroy(&md->suspend_lock); 1816d5ffebddSMike Snitzer mutex_destroy(&md->type_lock); 1817d5ffebddSMike Snitzer mutex_destroy(&md->table_devices_lock); 1818d5ffebddSMike Snitzer 18194cc96131SMike Snitzer dm_mq_cleanup_mapped_device(md); 18200f20972fSMike Snitzer } 18210f20972fSMike Snitzer 18221da177e4SLinus Torvalds /* 18231da177e4SLinus Torvalds * Allocate and initialise a blank device with a given minor. 
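 * On failure the partially constructed device is torn down again via the
 * bad* labels at the bottom of the function, in reverse order of setup.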
18241da177e4SLinus Torvalds */ 18252b06cfffSAlasdair G Kergon static struct mapped_device *alloc_dev(int minor) 18261da177e4SLinus Torvalds { 1827115485e8SMike Snitzer int r, numa_node_id = dm_get_numa_node(); 1828115485e8SMike Snitzer struct mapped_device *md; 1829ba61fdd1SJeff Mahoney void *old_md; 18301da177e4SLinus Torvalds 1831856eb091SMikulas Patocka md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id); 18321da177e4SLinus Torvalds if (!md) { 18331da177e4SLinus Torvalds DMWARN("unable to allocate device, out of memory."); 18341da177e4SLinus Torvalds return NULL; 18351da177e4SLinus Torvalds } 18361da177e4SLinus Torvalds 183710da4f79SJeff Mahoney if (!try_module_get(THIS_MODULE)) 18386ed7ade8SMilan Broz goto bad_module_get; 183910da4f79SJeff Mahoney 18401da177e4SLinus Torvalds /* get a minor number for the dev */ 18412b06cfffSAlasdair G Kergon if (minor == DM_ANY_MINOR) 1842cf13ab8eSFrederik Deweerdt r = next_free_minor(&minor); 18432b06cfffSAlasdair G Kergon else 1844cf13ab8eSFrederik Deweerdt r = specific_minor(minor); 18451da177e4SLinus Torvalds if (r < 0) 18466ed7ade8SMilan Broz goto bad_minor; 18471da177e4SLinus Torvalds 184883d5e5b0SMikulas Patocka r = init_srcu_struct(&md->io_barrier); 184983d5e5b0SMikulas Patocka if (r < 0) 185083d5e5b0SMikulas Patocka goto bad_io_barrier; 185183d5e5b0SMikulas Patocka 1852115485e8SMike Snitzer md->numa_node_id = numa_node_id; 1853591ddcfcSMike Snitzer md->init_tio_pdu = false; 1854a5664dadSMike Snitzer md->type = DM_TYPE_NONE; 1855e61290a4SDaniel Walker mutex_init(&md->suspend_lock); 1856a5664dadSMike Snitzer mutex_init(&md->type_lock); 185786f1152bSBenjamin Marzinski mutex_init(&md->table_devices_lock); 1858022c2611SMikulas Patocka spin_lock_init(&md->deferred_lock); 18591da177e4SLinus Torvalds atomic_set(&md->holders, 1); 18605c6bd75dSAlasdair G Kergon atomic_set(&md->open_count, 0); 18611da177e4SLinus Torvalds atomic_set(&md->event_nr, 0); 18627a8c3d3bSMike Anderson atomic_set(&md->uevent_seq, 0); 18637a8c3d3bSMike Anderson INIT_LIST_HEAD(&md->uevent_list); 186486f1152bSBenjamin Marzinski INIT_LIST_HEAD(&md->table_devices); 18657a8c3d3bSMike Anderson spin_lock_init(&md->uevent_lock); 18661da177e4SLinus Torvalds 186747ace7e0SMike Snitzer /* 1868c62b37d9SChristoph Hellwig * default to bio-based until DM table is loaded and md->type 1869c62b37d9SChristoph Hellwig * established. If request-based table is loaded: blk-mq will 1870c62b37d9SChristoph Hellwig * override accordingly. 
187147ace7e0SMike Snitzer */ 1872c62b37d9SChristoph Hellwig md->queue = blk_alloc_queue(numa_node_id); 18733d745ea5SChristoph Hellwig if (!md->queue) 18743d745ea5SChristoph Hellwig goto bad; 18751da177e4SLinus Torvalds 1876c12c9a3cSMike Snitzer md->disk = alloc_disk_node(1, md->numa_node_id); 18771da177e4SLinus Torvalds if (!md->disk) 18780f20972fSMike Snitzer goto bad; 18791da177e4SLinus Torvalds 1880f0b04115SJeff Mahoney init_waitqueue_head(&md->wait); 188153d5914fSMikulas Patocka INIT_WORK(&md->work, dm_wq_work); 1882f0b04115SJeff Mahoney init_waitqueue_head(&md->eventq); 18832995fa78SMikulas Patocka init_completion(&md->kobj_holder.completion); 1884f0b04115SJeff Mahoney 18851da177e4SLinus Torvalds md->disk->major = _major; 18861da177e4SLinus Torvalds md->disk->first_minor = minor; 18871da177e4SLinus Torvalds md->disk->fops = &dm_blk_dops; 18881da177e4SLinus Torvalds md->disk->queue = md->queue; 18891da177e4SLinus Torvalds md->disk->private_data = md; 18901da177e4SLinus Torvalds sprintf(md->disk->disk_name, "dm-%d", minor); 1891f26c5719SDan Williams 1892976431b0SDan Williams if (IS_ENABLED(CONFIG_DAX_DRIVER)) { 1893fefc1d97SPankaj Gupta md->dax_dev = alloc_dax(md, md->disk->disk_name, 1894fefc1d97SPankaj Gupta &dm_dax_ops, 0); 18954e4ced93SVivek Goyal if (IS_ERR(md->dax_dev)) 1896f26c5719SDan Williams goto bad; 1897976431b0SDan Williams } 1898f26c5719SDan Williams 1899c100ec49SMike Snitzer add_disk_no_queue_reg(md->disk); 19007e51f257SMike Anderson format_dev_t(md->name, MKDEV(_major, minor)); 19011da177e4SLinus Torvalds 1902670368a8STejun Heo md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0); 1903304f3f6aSMilan Broz if (!md->wq) 19040f20972fSMike Snitzer goto bad; 1905304f3f6aSMilan Broz 190632a926daSMikulas Patocka md->bdev = bdget_disk(md->disk, 0); 190732a926daSMikulas Patocka if (!md->bdev) 19080f20972fSMike Snitzer goto bad; 190932a926daSMikulas Patocka 1910fd2ed4d2SMikulas Patocka dm_stats_init(&md->stats); 1911fd2ed4d2SMikulas Patocka 1912ba61fdd1SJeff Mahoney /* Populate the mapping, nobody knows we exist yet */ 1913f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 1914ba61fdd1SJeff Mahoney old_md = idr_replace(&_minor_idr, md, minor); 1915f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 1916ba61fdd1SJeff Mahoney 1917ba61fdd1SJeff Mahoney BUG_ON(old_md != MINOR_ALLOCED); 1918ba61fdd1SJeff Mahoney 19191da177e4SLinus Torvalds return md; 19201da177e4SLinus Torvalds 19210f20972fSMike Snitzer bad: 19220f20972fSMike Snitzer cleanup_mapped_device(md); 192383d5e5b0SMikulas Patocka bad_io_barrier: 19241da177e4SLinus Torvalds free_minor(minor); 19256ed7ade8SMilan Broz bad_minor: 192610da4f79SJeff Mahoney module_put(THIS_MODULE); 19276ed7ade8SMilan Broz bad_module_get: 1928856eb091SMikulas Patocka kvfree(md); 19291da177e4SLinus Torvalds return NULL; 19301da177e4SLinus Torvalds } 19311da177e4SLinus Torvalds 1932ae9da83fSJun'ichi Nomura static void unlock_fs(struct mapped_device *md); 1933ae9da83fSJun'ichi Nomura 19341da177e4SLinus Torvalds static void free_dev(struct mapped_device *md) 19351da177e4SLinus Torvalds { 1936f331c029STejun Heo int minor = MINOR(disk_devt(md->disk)); 193763d94e48SJun'ichi Nomura 1938ae9da83fSJun'ichi Nomura unlock_fs(md); 19392eb6e1e3SKeith Busch 19400f20972fSMike Snitzer cleanup_mapped_device(md); 19410f20972fSMike Snitzer 19420f20972fSMike Snitzer free_table_devices(&md->table_devices); 19430f20972fSMike Snitzer dm_stats_cleanup(&md->stats); 194463a4f065SMike Snitzer free_minor(minor); 194563a4f065SMike Snitzer 194610da4f79SJeff Mahoney 
module_put(THIS_MODULE); 1947856eb091SMikulas Patocka kvfree(md); 19481da177e4SLinus Torvalds } 19491da177e4SLinus Torvalds 19502a2a4c51SJens Axboe static int __bind_mempools(struct mapped_device *md, struct dm_table *t) 1951e6ee8c0bSKiyoshi Ueda { 1952c0820cf5SMikulas Patocka struct dm_md_mempools *p = dm_table_get_md_mempools(t); 19532a2a4c51SJens Axboe int ret = 0; 1954e6ee8c0bSKiyoshi Ueda 1955545ed20eSToshi Kani if (dm_table_bio_based(t)) { 1956c0820cf5SMikulas Patocka /* 195764f52b0eSMike Snitzer * The md may already have mempools that need changing. 195864f52b0eSMike Snitzer * If so, reload bioset because front_pad may have changed 195916245bdcSJun'ichi Nomura * because a different table was loaded. 1960c0820cf5SMikulas Patocka */ 19616f1c819cSKent Overstreet bioset_exit(&md->bs); 19626f1c819cSKent Overstreet bioset_exit(&md->io_bs); 19630776aa0eSMike Snitzer 19646f1c819cSKent Overstreet } else if (bioset_initialized(&md->bs)) { 1965cbc4e3c1SMike Snitzer /* 19664e6e36c3SMike Snitzer * There's no need to reload with request-based dm 19674e6e36c3SMike Snitzer * because the size of front_pad doesn't change. 19684e6e36c3SMike Snitzer * Note for future: If you are to reload bioset, 19694e6e36c3SMike Snitzer * prep-ed requests in the queue may refer 19704e6e36c3SMike Snitzer * to bio from the old bioset, so you must walk 19714e6e36c3SMike Snitzer * through the queue to unprep. 1972cbc4e3c1SMike Snitzer */ 1973cbc4e3c1SMike Snitzer goto out; 1974cbc4e3c1SMike Snitzer } 1975cbc4e3c1SMike Snitzer 19766f1c819cSKent Overstreet BUG_ON(!p || 19776f1c819cSKent Overstreet bioset_initialized(&md->bs) || 19786f1c819cSKent Overstreet bioset_initialized(&md->io_bs)); 1979e6ee8c0bSKiyoshi Ueda 19802a2a4c51SJens Axboe ret = bioset_init_from_src(&md->bs, &p->bs); 19812a2a4c51SJens Axboe if (ret) 19822a2a4c51SJens Axboe goto out; 19832a2a4c51SJens Axboe ret = bioset_init_from_src(&md->io_bs, &p->io_bs); 19842a2a4c51SJens Axboe if (ret) 19852a2a4c51SJens Axboe bioset_exit(&md->bs); 1986e6ee8c0bSKiyoshi Ueda out: 198702233342SMike Snitzer /* mempool bind completed, no longer need any mempools in the table */ 1988e6ee8c0bSKiyoshi Ueda dm_table_free_md_mempools(t); 19892a2a4c51SJens Axboe return ret; 1990e6ee8c0bSKiyoshi Ueda } 1991e6ee8c0bSKiyoshi Ueda 19921da177e4SLinus Torvalds /* 19931da177e4SLinus Torvalds * Bind a table to the device. 19941da177e4SLinus Torvalds */ 19951da177e4SLinus Torvalds static void event_callback(void *context) 19961da177e4SLinus Torvalds { 19977a8c3d3bSMike Anderson unsigned long flags; 19987a8c3d3bSMike Anderson LIST_HEAD(uevents); 19991da177e4SLinus Torvalds struct mapped_device *md = (struct mapped_device *) context; 20001da177e4SLinus Torvalds 20017a8c3d3bSMike Anderson spin_lock_irqsave(&md->uevent_lock, flags); 20027a8c3d3bSMike Anderson list_splice_init(&md->uevent_list, &uevents); 20037a8c3d3bSMike Anderson spin_unlock_irqrestore(&md->uevent_lock, flags); 20047a8c3d3bSMike Anderson 2005ed9e1982STejun Heo dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); 20067a8c3d3bSMike Anderson 20071da177e4SLinus Torvalds atomic_inc(&md->event_nr); 20081da177e4SLinus Torvalds wake_up(&md->eventq); 200962e08243SMikulas Patocka dm_issue_global_event(); 20101da177e4SLinus Torvalds } 20111da177e4SLinus Torvalds 2012c217649bSMike Snitzer /* 2013042d2a9bSAlasdair G Kergon * Returns old map, which caller must destroy. 
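 * Caller must hold md->suspend_lock (lockdep asserts this below).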
2014042d2a9bSAlasdair G Kergon */
2015042d2a9bSAlasdair G Kergon static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
2016754c5fc7SMike Snitzer struct queue_limits *limits)
20171da177e4SLinus Torvalds {
2018042d2a9bSAlasdair G Kergon struct dm_table *old_map;
2019165125e1SJens Axboe struct request_queue *q = md->queue;
2020978e51baSMike Snitzer bool request_based = dm_table_request_based(t);
20211da177e4SLinus Torvalds sector_t size;
20222a2a4c51SJens Axboe int ret;
20231da177e4SLinus Torvalds 
20245a8f1f80SBart Van Assche lockdep_assert_held(&md->suspend_lock);
20255a8f1f80SBart Van Assche 
20261da177e4SLinus Torvalds size = dm_table_get_size(t);
20273ac51e74SDarrick J. Wong 
20283ac51e74SDarrick J. Wong /*
20293ac51e74SDarrick J. Wong * Wipe any geometry if the size of the table changed.
20303ac51e74SDarrick J. Wong */
2031fd2ed4d2SMikulas Patocka if (size != dm_get_size(md))
20323ac51e74SDarrick J. Wong memset(&md->geometry, 0, sizeof(md->geometry));
20333ac51e74SDarrick J. Wong 
2034c2b4bb8cSChristoph Hellwig set_capacity(md->disk, size);
2035c2b4bb8cSChristoph Hellwig bd_set_nr_sectors(md->bdev, size);
20361da177e4SLinus Torvalds 
2037cf222b37SAlasdair G Kergon dm_table_event_callback(t, event_callback, md);
20382ca3310eSAlasdair G Kergon 
2039e6ee8c0bSKiyoshi Ueda /*
2040e6ee8c0bSKiyoshi Ueda * If the old table type wasn't request-based, the queue has not been
2041e6ee8c0bSKiyoshi Ueda * stopped during suspension, so stop it now to prevent I/O from being
2042e6ee8c0bSKiyoshi Ueda * mapped before resume.
2043e6ee8c0bSKiyoshi Ueda * This must be done before setting the queue restrictions,
2044e6ee8c0bSKiyoshi Ueda * because request-based dm may start running right after they are set.
2045e6ee8c0bSKiyoshi Ueda */
2046978e51baSMike Snitzer if (request_based)
2047eca7ee6dSMike Snitzer dm_stop_queue(q);
2048978e51baSMike Snitzer 
2049978e51baSMike Snitzer if (request_based || md->type == DM_TYPE_NVME_BIO_BASED) {
205016f12266SMike Snitzer /*
2051978e51baSMike Snitzer * Leverage the fact that request-based DM targets and
2052978e51baSMike Snitzer * NVMe bio-based targets are immutable singletons
2053094ee64dSMike Snitzer * - used to optimize both __process_bio and dm_mq_queue_rq
205416f12266SMike Snitzer */
205516f12266SMike Snitzer md->immutable_target = dm_table_get_immutable_target(t);
205616f12266SMike Snitzer }
2057e6ee8c0bSKiyoshi Ueda 
20582a2a4c51SJens Axboe ret = __bind_mempools(md, t);
20592a2a4c51SJens Axboe if (ret) {
20602a2a4c51SJens Axboe old_map = ERR_PTR(ret);
20612a2a4c51SJens Axboe goto out;
20622a2a4c51SJens Axboe }
2063e6ee8c0bSKiyoshi Ueda 
2064a12f5d48SEric Dumazet old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
20651d3aa6f6SMike Snitzer rcu_assign_pointer(md->map, (void *)t);
206636a0456fSAlasdair G Kergon md->immutable_target_type = dm_table_get_immutable_target_type(t);
206736a0456fSAlasdair G Kergon 
2068754c5fc7SMike Snitzer dm_table_set_restrictions(t, q, limits);
206941abc4e1SHannes Reinecke if (old_map)
207083d5e5b0SMikulas Patocka dm_sync_table(md);
20712ca3310eSAlasdair G Kergon 
20722a2a4c51SJens Axboe out:
2073042d2a9bSAlasdair G Kergon return old_map;
20741da177e4SLinus Torvalds }
20751da177e4SLinus Torvalds 
2076a7940155SAlasdair G Kergon /*
2077a7940155SAlasdair G Kergon * Returns unbound table for the caller to free.
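 * The unbound table is typically handed straight to dm_table_destroy(),
 * as __dm_destroy() does.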
2078a7940155SAlasdair G Kergon */ 2079a7940155SAlasdair G Kergon static struct dm_table *__unbind(struct mapped_device *md) 20801da177e4SLinus Torvalds { 2081a12f5d48SEric Dumazet struct dm_table *map = rcu_dereference_protected(md->map, 1); 20821da177e4SLinus Torvalds 20831da177e4SLinus Torvalds if (!map) 2084a7940155SAlasdair G Kergon return NULL; 20851da177e4SLinus Torvalds 20861da177e4SLinus Torvalds dm_table_event_callback(map, NULL, NULL); 20879cdb8520SMonam Agarwal RCU_INIT_POINTER(md->map, NULL); 208883d5e5b0SMikulas Patocka dm_sync_table(md); 2089a7940155SAlasdair G Kergon 2090a7940155SAlasdair G Kergon return map; 20911da177e4SLinus Torvalds } 20921da177e4SLinus Torvalds 20931da177e4SLinus Torvalds /* 20941da177e4SLinus Torvalds * Constructor for a new device. 20951da177e4SLinus Torvalds */ 20962b06cfffSAlasdair G Kergon int dm_create(int minor, struct mapped_device **result) 20971da177e4SLinus Torvalds { 2098c12c9a3cSMike Snitzer int r; 20991da177e4SLinus Torvalds struct mapped_device *md; 21001da177e4SLinus Torvalds 21012b06cfffSAlasdair G Kergon md = alloc_dev(minor); 21021da177e4SLinus Torvalds if (!md) 21031da177e4SLinus Torvalds return -ENXIO; 21041da177e4SLinus Torvalds 2105c12c9a3cSMike Snitzer r = dm_sysfs_init(md); 2106c12c9a3cSMike Snitzer if (r) { 2107c12c9a3cSMike Snitzer free_dev(md); 2108c12c9a3cSMike Snitzer return r; 2109c12c9a3cSMike Snitzer } 2110784aae73SMilan Broz 21111da177e4SLinus Torvalds *result = md; 21121da177e4SLinus Torvalds return 0; 21131da177e4SLinus Torvalds } 21141da177e4SLinus Torvalds 2115a5664dadSMike Snitzer /* 2116a5664dadSMike Snitzer * Functions to manage md->type. 2117a5664dadSMike Snitzer * All are required to hold md->type_lock. 2118a5664dadSMike Snitzer */ 2119a5664dadSMike Snitzer void dm_lock_md_type(struct mapped_device *md) 2120a5664dadSMike Snitzer { 2121a5664dadSMike Snitzer mutex_lock(&md->type_lock); 2122a5664dadSMike Snitzer } 2123a5664dadSMike Snitzer 2124a5664dadSMike Snitzer void dm_unlock_md_type(struct mapped_device *md) 2125a5664dadSMike Snitzer { 2126a5664dadSMike Snitzer mutex_unlock(&md->type_lock); 2127a5664dadSMike Snitzer } 2128a5664dadSMike Snitzer 21297e0d574fSBart Van Assche void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type) 2130a5664dadSMike Snitzer { 213100c4fc3bSMike Snitzer BUG_ON(!mutex_is_locked(&md->type_lock)); 2132a5664dadSMike Snitzer md->type = type; 2133a5664dadSMike Snitzer } 2134a5664dadSMike Snitzer 21357e0d574fSBart Van Assche enum dm_queue_mode dm_get_md_type(struct mapped_device *md) 2136a5664dadSMike Snitzer { 2137a5664dadSMike Snitzer return md->type; 2138a5664dadSMike Snitzer } 2139a5664dadSMike Snitzer 214036a0456fSAlasdair G Kergon struct target_type *dm_get_immutable_target_type(struct mapped_device *md) 214136a0456fSAlasdair G Kergon { 214236a0456fSAlasdair G Kergon return md->immutable_target_type; 214336a0456fSAlasdair G Kergon } 214436a0456fSAlasdair G Kergon 21454a0b4ddfSMike Snitzer /* 2146f84cb8a4SMike Snitzer * The queue_limits are only valid as long as you have a reference 2147f84cb8a4SMike Snitzer * count on 'md'. 
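 *
 * A minimal usage sketch (hypothetical caller that already holds a
 * reference on 'md'):
 *
 *	struct queue_limits *limits = dm_get_queue_limits(md);
 *	unsigned int max_sectors = limits->max_sectors;
 *
 * The returned pointer is invalidated once the last reference is dropped.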
2148f84cb8a4SMike Snitzer */ 2149f84cb8a4SMike Snitzer struct queue_limits *dm_get_queue_limits(struct mapped_device *md) 2150f84cb8a4SMike Snitzer { 2151f84cb8a4SMike Snitzer BUG_ON(!atomic_read(&md->holders)); 2152f84cb8a4SMike Snitzer return &md->queue->limits; 2153f84cb8a4SMike Snitzer } 2154f84cb8a4SMike Snitzer EXPORT_SYMBOL_GPL(dm_get_queue_limits); 2155f84cb8a4SMike Snitzer 21564a0b4ddfSMike Snitzer /* 21574a0b4ddfSMike Snitzer * Setup the DM device's queue based on md's type 21584a0b4ddfSMike Snitzer */ 2159591ddcfcSMike Snitzer int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) 21604a0b4ddfSMike Snitzer { 2161bfebd1cdSMike Snitzer int r; 2162c100ec49SMike Snitzer struct queue_limits limits; 21637e0d574fSBart Van Assche enum dm_queue_mode type = dm_get_md_type(md); 2164bfebd1cdSMike Snitzer 2165545ed20eSToshi Kani switch (type) { 2166bfebd1cdSMike Snitzer case DM_TYPE_REQUEST_BASED: 2167e83068a5SMike Snitzer r = dm_mq_init_request_queue(md, t); 2168bfebd1cdSMike Snitzer if (r) { 2169eca7ee6dSMike Snitzer DMERR("Cannot initialize queue for request-based dm-mq mapped device"); 2170bfebd1cdSMike Snitzer return r; 2171bfebd1cdSMike Snitzer } 2172bfebd1cdSMike Snitzer break; 2173bfebd1cdSMike Snitzer case DM_TYPE_BIO_BASED: 2174545ed20eSToshi Kani case DM_TYPE_DAX_BIO_BASED: 2175978e51baSMike Snitzer case DM_TYPE_NVME_BIO_BASED: 2176bfebd1cdSMike Snitzer break; 21777e0d574fSBart Van Assche case DM_TYPE_NONE: 21787e0d574fSBart Van Assche WARN_ON_ONCE(true); 21797e0d574fSBart Van Assche break; 2180ff36ab34SMike Snitzer } 21814a0b4ddfSMike Snitzer 2182c100ec49SMike Snitzer r = dm_calculate_queue_limits(t, &limits); 2183c100ec49SMike Snitzer if (r) { 2184c100ec49SMike Snitzer DMERR("Cannot calculate initial queue limits"); 2185c100ec49SMike Snitzer return r; 2186c100ec49SMike Snitzer } 2187c100ec49SMike Snitzer dm_table_set_restrictions(t, md->queue, &limits); 2188c100ec49SMike Snitzer blk_register_queue(md->disk); 2189c100ec49SMike Snitzer 21904a0b4ddfSMike Snitzer return 0; 21914a0b4ddfSMike Snitzer } 21924a0b4ddfSMike Snitzer 21932bec1f4aSMikulas Patocka struct mapped_device *dm_get_md(dev_t dev) 21941da177e4SLinus Torvalds { 21951da177e4SLinus Torvalds struct mapped_device *md; 21961da177e4SLinus Torvalds unsigned minor = MINOR(dev); 21971da177e4SLinus Torvalds 21981da177e4SLinus Torvalds if (MAJOR(dev) != _major || minor >= (1 << MINORBITS)) 21991da177e4SLinus Torvalds return NULL; 22001da177e4SLinus Torvalds 2201f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 22021da177e4SLinus Torvalds 22031da177e4SLinus Torvalds md = idr_find(&_minor_idr, minor); 220449de5769SMike Snitzer if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) || 220549de5769SMike Snitzer test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { 2206637842cfSDavid Teigland md = NULL; 2207fba9f90eSJeff Mahoney goto out; 2208fba9f90eSJeff Mahoney } 22092bec1f4aSMikulas Patocka dm_get(md); 2210fba9f90eSJeff Mahoney out: 2211f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 22121da177e4SLinus Torvalds 2213637842cfSDavid Teigland return md; 2214637842cfSDavid Teigland } 22153cf2e4baSAlasdair G Kergon EXPORT_SYMBOL_GPL(dm_get_md); 2216d229a958SDavid Teigland 22179ade92a9SAlasdair G Kergon void *dm_get_mdptr(struct mapped_device *md) 2218637842cfSDavid Teigland { 22199ade92a9SAlasdair G Kergon return md->interface_ptr; 22201da177e4SLinus Torvalds } 22211da177e4SLinus Torvalds 22221da177e4SLinus Torvalds void dm_set_mdptr(struct mapped_device *md, void *ptr) 22231da177e4SLinus Torvalds { 
22241da177e4SLinus Torvalds md->interface_ptr = ptr;
22251da177e4SLinus Torvalds }
22261da177e4SLinus Torvalds 
22271da177e4SLinus Torvalds void dm_get(struct mapped_device *md)
22281da177e4SLinus Torvalds {
22291da177e4SLinus Torvalds atomic_inc(&md->holders);
22303f77316dSKiyoshi Ueda BUG_ON(test_bit(DMF_FREEING, &md->flags));
22311da177e4SLinus Torvalds }
22321da177e4SLinus Torvalds 
223309ee96b2SMikulas Patocka int dm_hold(struct mapped_device *md)
223409ee96b2SMikulas Patocka {
223509ee96b2SMikulas Patocka spin_lock(&_minor_lock);
223609ee96b2SMikulas Patocka if (test_bit(DMF_FREEING, &md->flags)) {
223709ee96b2SMikulas Patocka spin_unlock(&_minor_lock);
223809ee96b2SMikulas Patocka return -EBUSY;
223909ee96b2SMikulas Patocka }
224009ee96b2SMikulas Patocka dm_get(md);
224109ee96b2SMikulas Patocka spin_unlock(&_minor_lock);
224209ee96b2SMikulas Patocka return 0;
224309ee96b2SMikulas Patocka }
224409ee96b2SMikulas Patocka EXPORT_SYMBOL_GPL(dm_hold);
224509ee96b2SMikulas Patocka 
224672d94861SAlasdair G Kergon const char *dm_device_name(struct mapped_device *md)
224772d94861SAlasdair G Kergon {
224872d94861SAlasdair G Kergon return md->name;
224972d94861SAlasdair G Kergon }
225072d94861SAlasdair G Kergon EXPORT_SYMBOL_GPL(dm_device_name);
225172d94861SAlasdair G Kergon 
22523f77316dSKiyoshi Ueda static void __dm_destroy(struct mapped_device *md, bool wait)
22531da177e4SLinus Torvalds {
22541134e5aeSMike Anderson struct dm_table *map;
225583d5e5b0SMikulas Patocka int srcu_idx;
22561da177e4SLinus Torvalds 
22573f77316dSKiyoshi Ueda might_sleep();
2258fba9f90eSJeff Mahoney 
225963a4f065SMike Snitzer spin_lock(&_minor_lock);
22603f77316dSKiyoshi Ueda idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
2261fba9f90eSJeff Mahoney set_bit(DMF_FREEING, &md->flags);
2262f32c10b0SJeff Mahoney spin_unlock(&_minor_lock);
22633f77316dSKiyoshi Ueda 
2264c12c9a3cSMike Snitzer blk_set_queue_dying(md->queue);
22653b785fbcSBart Van Assche 
2266ab7c7bb6SMikulas Patocka /*
2267ab7c7bb6SMikulas Patocka * Take suspend_lock so that presuspend and postsuspend methods
2268ab7c7bb6SMikulas Patocka * do not race with internal suspend.
2269ab7c7bb6SMikulas Patocka */
2270ab7c7bb6SMikulas Patocka mutex_lock(&md->suspend_lock);
22712a708cffSJunichi Nomura map = dm_get_live_table(md, &srcu_idx);
22724f186f8bSKiyoshi Ueda if (!dm_suspended_md(md)) {
22731da177e4SLinus Torvalds dm_table_presuspend_targets(map);
2274adc0daadSMikulas Patocka set_bit(DMF_SUSPENDED, &md->flags);
22755df96f2bSMikulas Patocka set_bit(DMF_POST_SUSPENDING, &md->flags);
22761da177e4SLinus Torvalds dm_table_postsuspend_targets(map);
22771da177e4SLinus Torvalds }
227883d5e5b0SMikulas Patocka /* dm_put_live_table must be before msleep, otherwise deadlock is possible */
227983d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx);
22802a708cffSJunichi Nomura mutex_unlock(&md->suspend_lock);
228183d5e5b0SMikulas Patocka 
22823f77316dSKiyoshi Ueda /*
22833f77316dSKiyoshi Ueda * Rare, but there may be I/O requests still in flight that have yet
22843f77316dSKiyoshi Ueda * to complete. Wait for all references to disappear: no one should
22853f77316dSKiyoshi Ueda * increment the reference count of the mapped_device after its
22863f77316dSKiyoshi Ueda * state becomes DMF_FREEING.
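 * (dm_hold() above checks DMF_FREEING under _minor_lock for exactly this
 * reason, failing with -EBUSY once teardown has begun.)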
22873f77316dSKiyoshi Ueda */ 22883f77316dSKiyoshi Ueda if (wait) 22893f77316dSKiyoshi Ueda while (atomic_read(&md->holders)) 22903f77316dSKiyoshi Ueda msleep(1); 22913f77316dSKiyoshi Ueda else if (atomic_read(&md->holders)) 22923f77316dSKiyoshi Ueda DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)", 22933f77316dSKiyoshi Ueda dm_device_name(md), atomic_read(&md->holders)); 22943f77316dSKiyoshi Ueda 2295784aae73SMilan Broz dm_sysfs_exit(md); 2296a7940155SAlasdair G Kergon dm_table_destroy(__unbind(md)); 22971da177e4SLinus Torvalds free_dev(md); 22981da177e4SLinus Torvalds } 22993f77316dSKiyoshi Ueda 23003f77316dSKiyoshi Ueda void dm_destroy(struct mapped_device *md) 23013f77316dSKiyoshi Ueda { 23023f77316dSKiyoshi Ueda __dm_destroy(md, true); 23033f77316dSKiyoshi Ueda } 23043f77316dSKiyoshi Ueda 23053f77316dSKiyoshi Ueda void dm_destroy_immediate(struct mapped_device *md) 23063f77316dSKiyoshi Ueda { 23073f77316dSKiyoshi Ueda __dm_destroy(md, false); 23083f77316dSKiyoshi Ueda } 23093f77316dSKiyoshi Ueda 23103f77316dSKiyoshi Ueda void dm_put(struct mapped_device *md) 23113f77316dSKiyoshi Ueda { 23123f77316dSKiyoshi Ueda atomic_dec(&md->holders); 23131da177e4SLinus Torvalds } 231479eb885cSEdward Goggin EXPORT_SYMBOL_GPL(dm_put); 23151da177e4SLinus Torvalds 231685067747SMing Lei static bool md_in_flight_bios(struct mapped_device *md) 231785067747SMing Lei { 231885067747SMing Lei int cpu; 231985067747SMing Lei struct hd_struct *part = &dm_disk(md)->part0; 232085067747SMing Lei long sum = 0; 232185067747SMing Lei 232285067747SMing Lei for_each_possible_cpu(cpu) { 232385067747SMing Lei sum += part_stat_local_read_cpu(part, in_flight[0], cpu); 232485067747SMing Lei sum += part_stat_local_read_cpu(part, in_flight[1], cpu); 232585067747SMing Lei } 232685067747SMing Lei 232785067747SMing Lei return sum != 0; 232885067747SMing Lei } 232985067747SMing Lei 233085067747SMing Lei static int dm_wait_for_bios_completion(struct mapped_device *md, long task_state) 233146125c1cSMilan Broz { 233246125c1cSMilan Broz int r = 0; 23339f4c3f87SBart Van Assche DEFINE_WAIT(wait); 233446125c1cSMilan Broz 233585067747SMing Lei while (true) { 23369f4c3f87SBart Van Assche prepare_to_wait(&md->wait, &wait, task_state); 233746125c1cSMilan Broz 233885067747SMing Lei if (!md_in_flight_bios(md)) 233946125c1cSMilan Broz break; 234046125c1cSMilan Broz 2341e3fabdfdSBart Van Assche if (signal_pending_state(task_state, current)) { 234246125c1cSMilan Broz r = -EINTR; 234346125c1cSMilan Broz break; 234446125c1cSMilan Broz } 234546125c1cSMilan Broz 234646125c1cSMilan Broz io_schedule(); 234746125c1cSMilan Broz } 23489f4c3f87SBart Van Assche finish_wait(&md->wait, &wait); 2349b44ebeb0SMikulas Patocka 235046125c1cSMilan Broz return r; 235146125c1cSMilan Broz } 235246125c1cSMilan Broz 235385067747SMing Lei static int dm_wait_for_completion(struct mapped_device *md, long task_state) 235485067747SMing Lei { 235585067747SMing Lei int r = 0; 235685067747SMing Lei 235785067747SMing Lei if (!queue_is_mq(md->queue)) 235885067747SMing Lei return dm_wait_for_bios_completion(md, task_state); 235985067747SMing Lei 236085067747SMing Lei while (true) { 236185067747SMing Lei if (!blk_mq_queue_inflight(md->queue)) 236285067747SMing Lei break; 236385067747SMing Lei 236485067747SMing Lei if (signal_pending_state(task_state, current)) { 236585067747SMing Lei r = -EINTR; 236685067747SMing Lei break; 236785067747SMing Lei } 236885067747SMing Lei 236985067747SMing Lei msleep(5); 237085067747SMing Lei } 237185067747SMing Lei 
237285067747SMing Lei return r; 237385067747SMing Lei } 237485067747SMing Lei 23751da177e4SLinus Torvalds /* 23761da177e4SLinus Torvalds * Process the deferred bios 23771da177e4SLinus Torvalds */ 2378ef208587SMikulas Patocka static void dm_wq_work(struct work_struct *work) 23791da177e4SLinus Torvalds { 2380*0c2915b8SMike Snitzer struct mapped_device *md = container_of(work, struct mapped_device, work); 2381*0c2915b8SMike Snitzer struct bio *bio; 2382ef208587SMikulas Patocka 23833b00b203SMikulas Patocka while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 2384022c2611SMikulas Patocka spin_lock_irq(&md->deferred_lock); 2385*0c2915b8SMike Snitzer bio = bio_list_pop(&md->deferred); 2386022c2611SMikulas Patocka spin_unlock_irq(&md->deferred_lock); 2387022c2611SMikulas Patocka 2388*0c2915b8SMike Snitzer if (!bio) 2389df12ee99SAlasdair G Kergon break; 239073d410c0SMilan Broz 2391*0c2915b8SMike Snitzer submit_bio_noacct(bio); 2392e6ee8c0bSKiyoshi Ueda } 23931da177e4SLinus Torvalds } 23941da177e4SLinus Torvalds 23959a1fb464SMikulas Patocka static void dm_queue_flush(struct mapped_device *md) 2396304f3f6aSMilan Broz { 23973b00b203SMikulas Patocka clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 23984e857c58SPeter Zijlstra smp_mb__after_atomic(); 239953d5914fSMikulas Patocka queue_work(md->wq, &md->work); 2400304f3f6aSMilan Broz } 2401304f3f6aSMilan Broz 24021da177e4SLinus Torvalds /* 2403042d2a9bSAlasdair G Kergon * Swap in a new table, returning the old one for the caller to destroy. 24041da177e4SLinus Torvalds */ 2405042d2a9bSAlasdair G Kergon struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) 24061da177e4SLinus Torvalds { 240787eb5b21SMike Christie struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL); 2408754c5fc7SMike Snitzer struct queue_limits limits; 2409042d2a9bSAlasdair G Kergon int r; 24101da177e4SLinus Torvalds 2411e61290a4SDaniel Walker mutex_lock(&md->suspend_lock); 24121da177e4SLinus Torvalds 24131da177e4SLinus Torvalds /* device must be suspended */ 24144f186f8bSKiyoshi Ueda if (!dm_suspended_md(md)) 241593c534aeSAlasdair G Kergon goto out; 24161da177e4SLinus Torvalds 24173ae70656SMike Snitzer /* 24183ae70656SMike Snitzer * If the new table has no data devices, retain the existing limits. 24193ae70656SMike Snitzer * This helps multipath with queue_if_no_path if all paths disappear, 24203ae70656SMike Snitzer * then new I/O is queued based on these limits, and then some paths 24213ae70656SMike Snitzer * reappear. 
24223ae70656SMike Snitzer */ 24233ae70656SMike Snitzer if (dm_table_has_no_data_devices(table)) { 242483d5e5b0SMikulas Patocka live_map = dm_get_live_table_fast(md); 24253ae70656SMike Snitzer if (live_map) 24263ae70656SMike Snitzer limits = md->queue->limits; 242783d5e5b0SMikulas Patocka dm_put_live_table_fast(md); 24283ae70656SMike Snitzer } 24293ae70656SMike Snitzer 243087eb5b21SMike Christie if (!live_map) { 2431754c5fc7SMike Snitzer r = dm_calculate_queue_limits(table, &limits); 2432042d2a9bSAlasdair G Kergon if (r) { 2433042d2a9bSAlasdair G Kergon map = ERR_PTR(r); 2434754c5fc7SMike Snitzer goto out; 2435042d2a9bSAlasdair G Kergon } 243687eb5b21SMike Christie } 2437754c5fc7SMike Snitzer 2438042d2a9bSAlasdair G Kergon map = __bind(md, table, &limits); 243962e08243SMikulas Patocka dm_issue_global_event(); 24401da177e4SLinus Torvalds 244193c534aeSAlasdair G Kergon out: 2442e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock); 2443042d2a9bSAlasdair G Kergon return map; 24441da177e4SLinus Torvalds } 24451da177e4SLinus Torvalds 24461da177e4SLinus Torvalds /* 24471da177e4SLinus Torvalds * Functions to lock and unlock any filesystem running on the 24481da177e4SLinus Torvalds * device. 24491da177e4SLinus Torvalds */ 24502ca3310eSAlasdair G Kergon static int lock_fs(struct mapped_device *md) 24511da177e4SLinus Torvalds { 2452e39e2e95SAlasdair G Kergon int r; 24531da177e4SLinus Torvalds 24541da177e4SLinus Torvalds WARN_ON(md->frozen_sb); 2455dfbe03f6SAlasdair G Kergon 2456db8fef4fSMikulas Patocka md->frozen_sb = freeze_bdev(md->bdev); 2457dfbe03f6SAlasdair G Kergon if (IS_ERR(md->frozen_sb)) { 2458cf222b37SAlasdair G Kergon r = PTR_ERR(md->frozen_sb); 2459e39e2e95SAlasdair G Kergon md->frozen_sb = NULL; 2460e39e2e95SAlasdair G Kergon return r; 2461dfbe03f6SAlasdair G Kergon } 2462dfbe03f6SAlasdair G Kergon 2463aa8d7c2fSAlasdair G Kergon set_bit(DMF_FROZEN, &md->flags); 2464aa8d7c2fSAlasdair G Kergon 24651da177e4SLinus Torvalds return 0; 24661da177e4SLinus Torvalds } 24671da177e4SLinus Torvalds 24682ca3310eSAlasdair G Kergon static void unlock_fs(struct mapped_device *md) 24691da177e4SLinus Torvalds { 2470aa8d7c2fSAlasdair G Kergon if (!test_bit(DMF_FROZEN, &md->flags)) 2471aa8d7c2fSAlasdair G Kergon return; 2472aa8d7c2fSAlasdair G Kergon 2473db8fef4fSMikulas Patocka thaw_bdev(md->bdev, md->frozen_sb); 24741da177e4SLinus Torvalds md->frozen_sb = NULL; 2475aa8d7c2fSAlasdair G Kergon clear_bit(DMF_FROZEN, &md->flags); 24761da177e4SLinus Torvalds } 24771da177e4SLinus Torvalds 24781da177e4SLinus Torvalds /* 2479b48633f8SBart Van Assche * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG 2480b48633f8SBart Van Assche * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE 2481b48633f8SBart Van Assche * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY 2482b48633f8SBart Van Assche * 2483ffcc3936SMike Snitzer * If __dm_suspend returns 0, the device is completely quiescent 2484ffcc3936SMike Snitzer * now. There is no request-processing activity. All new requests 2485ffcc3936SMike Snitzer * are being added to md->deferred list. 
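 * On failure (a lock_fs() error, or a wait for completion interrupted
 * by a signal, returning -EINTR) presuspend is undone and deferred I/O
 * is restarted before the error is returned to the caller.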
2486cec47e3dSKiyoshi Ueda */ 2487ffcc3936SMike Snitzer static int __dm_suspend(struct mapped_device *md, struct dm_table *map, 2488b48633f8SBart Van Assche unsigned suspend_flags, long task_state, 2489eaf9a736SMike Snitzer int dmf_suspended_flag) 24901da177e4SLinus Torvalds { 2491ffcc3936SMike Snitzer bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG; 2492ffcc3936SMike Snitzer bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG; 2493ffcc3936SMike Snitzer int r; 2494cf222b37SAlasdair G Kergon 24955a8f1f80SBart Van Assche lockdep_assert_held(&md->suspend_lock); 24965a8f1f80SBart Van Assche 24972e93ccc1SKiyoshi Ueda /* 24982e93ccc1SKiyoshi Ueda * DMF_NOFLUSH_SUSPENDING must be set before presuspend. 24992e93ccc1SKiyoshi Ueda * This flag is cleared before dm_suspend returns. 25002e93ccc1SKiyoshi Ueda */ 25012e93ccc1SKiyoshi Ueda if (noflush) 25022e93ccc1SKiyoshi Ueda set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 250386331f39SBart Van Assche else 2504ac75b09fSMike Snitzer DMDEBUG("%s: suspending with flush", dm_device_name(md)); 25052e93ccc1SKiyoshi Ueda 2506d67ee213SMike Snitzer /* 2507d67ee213SMike Snitzer * This gets reverted if there's an error later and the targets 2508d67ee213SMike Snitzer * provide the .presuspend_undo hook. 2509d67ee213SMike Snitzer */ 25101da177e4SLinus Torvalds dm_table_presuspend_targets(map); 25111da177e4SLinus Torvalds 25122e93ccc1SKiyoshi Ueda /* 25139f518b27SKiyoshi Ueda * Flush I/O to the device. 25149f518b27SKiyoshi Ueda * Any I/O submitted after lock_fs() may not be flushed. 25159f518b27SKiyoshi Ueda * noflush takes precedence over do_lockfs. 25169f518b27SKiyoshi Ueda * (lock_fs() flushes I/Os and waits for them to complete.) 25172e93ccc1SKiyoshi Ueda */ 251832a926daSMikulas Patocka if (!noflush && do_lockfs) { 25192ca3310eSAlasdair G Kergon r = lock_fs(md); 2520d67ee213SMike Snitzer if (r) { 2521d67ee213SMike Snitzer dm_table_presuspend_undo_targets(map); 2522ffcc3936SMike Snitzer return r; 2523aa8d7c2fSAlasdair G Kergon } 2524d67ee213SMike Snitzer } 25251da177e4SLinus Torvalds 25261da177e4SLinus Torvalds /* 25273b00b203SMikulas Patocka * Here we must make sure that no processes are submitting requests 25283b00b203SMikulas Patocka * to target drivers, i.e. no one may be executing 25293b00b203SMikulas Patocka * __split_and_process_bio, which is called from dm_request and 25303b00b203SMikulas Patocka * dm_wq_work. 25313b00b203SMikulas Patocka * 25323b00b203SMikulas Patocka * To get all processes out of __split_and_process_bio in dm_request, 25333b00b203SMikulas Patocka * we take the write lock. To prevent any process from reentering 25346a8736d1STejun Heo * __split_and_process_bio from dm_request and to quiesce the thread 25356a8736d1STejun Heo * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call 25366a8736d1STejun Heo * flush_workqueue(md->wq). 25371da177e4SLinus Torvalds */ 25381eb787ecSAlasdair G Kergon set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 253941abc4e1SHannes Reinecke if (map) 254083d5e5b0SMikulas Patocka synchronize_srcu(&md->io_barrier); 25411da177e4SLinus Torvalds 2542d0bcb878SKiyoshi Ueda /* 254329e4013dSTejun Heo * Stop md->queue before flushing md->wq in case request-based 254429e4013dSTejun Heo * dm defers requests to md->wq from md->queue.
2545d0bcb878SKiyoshi Ueda */ 25466a23e05cSJens Axboe if (dm_request_based(md)) 2547eca7ee6dSMike Snitzer dm_stop_queue(md->queue); 2548cec47e3dSKiyoshi Ueda 2549d0bcb878SKiyoshi Ueda flush_workqueue(md->wq); 2550d0bcb878SKiyoshi Ueda 25511da177e4SLinus Torvalds /* 25523b00b203SMikulas Patocka * At this point no more requests are entering target request routines. 25533b00b203SMikulas Patocka * We call dm_wait_for_completion to wait for all existing requests 25543b00b203SMikulas Patocka * to finish. 25551da177e4SLinus Torvalds */ 2556b48633f8SBart Van Assche r = dm_wait_for_completion(md, task_state); 2557eaf9a736SMike Snitzer if (!r) 2558eaf9a736SMike Snitzer set_bit(dmf_suspended_flag, &md->flags); 25591da177e4SLinus Torvalds 25606d6f10dfSMilan Broz if (noflush) 2561022c2611SMikulas Patocka clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 256241abc4e1SHannes Reinecke if (map) 256383d5e5b0SMikulas Patocka synchronize_srcu(&md->io_barrier); 25642e93ccc1SKiyoshi Ueda 25651da177e4SLinus Torvalds /* were we interrupted ? */ 256646125c1cSMilan Broz if (r < 0) { 25679a1fb464SMikulas Patocka dm_queue_flush(md); 256873d410c0SMilan Broz 2569cec47e3dSKiyoshi Ueda if (dm_request_based(md)) 2570eca7ee6dSMike Snitzer dm_start_queue(md->queue); 2571cec47e3dSKiyoshi Ueda 25722ca3310eSAlasdair G Kergon unlock_fs(md); 2573d67ee213SMike Snitzer dm_table_presuspend_undo_targets(map); 2574ffcc3936SMike Snitzer /* pushback list is already flushed, so skip flush */ 2575ffcc3936SMike Snitzer } 2576ffcc3936SMike Snitzer 2577ffcc3936SMike Snitzer return r; 25782ca3310eSAlasdair G Kergon } 25792ca3310eSAlasdair G Kergon 25803b00b203SMikulas Patocka /* 2581ffcc3936SMike Snitzer * We need to be able to change a mapping table under a mounted 2582ffcc3936SMike Snitzer * filesystem. For example we might want to move some data in 2583ffcc3936SMike Snitzer * the background. Before the table can be swapped with 2584ffcc3936SMike Snitzer * dm_bind_table, dm_suspend must be called to flush any in 2585ffcc3936SMike Snitzer * flight bios and ensure that any further io gets deferred. 25863b00b203SMikulas Patocka */ 2587ffcc3936SMike Snitzer /* 2588ffcc3936SMike Snitzer * Suspend mechanism in request-based dm. 2589ffcc3936SMike Snitzer * 2590ffcc3936SMike Snitzer * 1. Flush all I/Os by lock_fs() if needed. 2591ffcc3936SMike Snitzer * 2. Stop dispatching any I/O by stopping the request_queue. 2592ffcc3936SMike Snitzer * 3. Wait for all in-flight I/Os to be completed or requeued. 2593ffcc3936SMike Snitzer * 2594ffcc3936SMike Snitzer * To abort suspend, start the request_queue. 
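 *
 * Bio-based dm instead blocks new submissions by setting
 * DMF_BLOCK_IO_FOR_SUSPEND (bios are deferred onto md->deferred) and
 * drains the in-flight ones via md->wait.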
2595ffcc3936SMike Snitzer */ 2596ffcc3936SMike Snitzer int dm_suspend(struct mapped_device *md, unsigned suspend_flags) 2597ffcc3936SMike Snitzer { 2598ffcc3936SMike Snitzer struct dm_table *map = NULL; 2599ffcc3936SMike Snitzer int r = 0; 2600ffcc3936SMike Snitzer 2601ffcc3936SMike Snitzer retry: 2602ffcc3936SMike Snitzer mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); 2603ffcc3936SMike Snitzer 2604ffcc3936SMike Snitzer if (dm_suspended_md(md)) { 2605ffcc3936SMike Snitzer r = -EINVAL; 2606ffcc3936SMike Snitzer goto out_unlock; 2607ffcc3936SMike Snitzer } 2608ffcc3936SMike Snitzer 2609ffcc3936SMike Snitzer if (dm_suspended_internally_md(md)) { 2610ffcc3936SMike Snitzer /* already internally suspended, wait for internal resume */ 2611ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 2612ffcc3936SMike Snitzer r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); 2613ffcc3936SMike Snitzer if (r) 2614ffcc3936SMike Snitzer return r; 2615ffcc3936SMike Snitzer goto retry; 2616ffcc3936SMike Snitzer } 2617ffcc3936SMike Snitzer 2618a12f5d48SEric Dumazet map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2619ffcc3936SMike Snitzer 2620eaf9a736SMike Snitzer r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED); 2621ffcc3936SMike Snitzer if (r) 2622ffcc3936SMike Snitzer goto out_unlock; 26233b00b203SMikulas Patocka 26245df96f2bSMikulas Patocka set_bit(DMF_POST_SUSPENDING, &md->flags); 26254d4471cbSKiyoshi Ueda dm_table_postsuspend_targets(map); 26265df96f2bSMikulas Patocka clear_bit(DMF_POST_SUSPENDING, &md->flags); 26274d4471cbSKiyoshi Ueda 2628d287483dSAlasdair G Kergon out_unlock: 2629e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock); 2630cf222b37SAlasdair G Kergon return r; 26311da177e4SLinus Torvalds } 26321da177e4SLinus Torvalds 2633ffcc3936SMike Snitzer static int __dm_resume(struct mapped_device *md, struct dm_table *map) 26341da177e4SLinus Torvalds { 2635ffcc3936SMike Snitzer if (map) { 2636ffcc3936SMike Snitzer int r = dm_table_resume_targets(map); 26378757b776SMilan Broz if (r) 2638ffcc3936SMike Snitzer return r; 2639ffcc3936SMike Snitzer } 26402ca3310eSAlasdair G Kergon 26419a1fb464SMikulas Patocka dm_queue_flush(md); 26422ca3310eSAlasdair G Kergon 2643cec47e3dSKiyoshi Ueda /* 2644cec47e3dSKiyoshi Ueda * Flushing deferred I/Os must be done after targets are resumed 2645cec47e3dSKiyoshi Ueda * so that mapping of targets can work correctly. 2646cec47e3dSKiyoshi Ueda * Request-based dm is queueing the deferred I/Os in its request_queue. 
2647cec47e3dSKiyoshi Ueda */ 2648cec47e3dSKiyoshi Ueda if (dm_request_based(md)) 2649eca7ee6dSMike Snitzer dm_start_queue(md->queue); 2650cec47e3dSKiyoshi Ueda 26512ca3310eSAlasdair G Kergon unlock_fs(md); 26522ca3310eSAlasdair G Kergon 2653ffcc3936SMike Snitzer return 0; 2654ffcc3936SMike Snitzer } 2655ffcc3936SMike Snitzer 2656ffcc3936SMike Snitzer int dm_resume(struct mapped_device *md) 2657ffcc3936SMike Snitzer { 26588dc23658SMinfei Huang int r; 2659ffcc3936SMike Snitzer struct dm_table *map = NULL; 2660ffcc3936SMike Snitzer 2661ffcc3936SMike Snitzer retry: 26628dc23658SMinfei Huang r = -EINVAL; 2663ffcc3936SMike Snitzer mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); 2664ffcc3936SMike Snitzer 2665ffcc3936SMike Snitzer if (!dm_suspended_md(md)) 2666ffcc3936SMike Snitzer goto out; 2667ffcc3936SMike Snitzer 2668ffcc3936SMike Snitzer if (dm_suspended_internally_md(md)) { 2669ffcc3936SMike Snitzer /* already internally suspended, wait for internal resume */ 2670ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 2671ffcc3936SMike Snitzer r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); 2672ffcc3936SMike Snitzer if (r) 2673ffcc3936SMike Snitzer return r; 2674ffcc3936SMike Snitzer goto retry; 2675ffcc3936SMike Snitzer } 2676ffcc3936SMike Snitzer 2677a12f5d48SEric Dumazet map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2678ffcc3936SMike Snitzer if (!map || !dm_table_get_size(map)) 2679ffcc3936SMike Snitzer goto out; 2680ffcc3936SMike Snitzer 2681ffcc3936SMike Snitzer r = __dm_resume(md, map); 2682ffcc3936SMike Snitzer if (r) 2683ffcc3936SMike Snitzer goto out; 2684ffcc3936SMike Snitzer 26852ca3310eSAlasdair G Kergon clear_bit(DMF_SUSPENDED, &md->flags); 2686cf222b37SAlasdair G Kergon out: 2687e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock); 26882ca3310eSAlasdair G Kergon 2689cf222b37SAlasdair G Kergon return r; 26901da177e4SLinus Torvalds } 26911da177e4SLinus Torvalds 2692fd2ed4d2SMikulas Patocka /* 2693fd2ed4d2SMikulas Patocka * Internal suspend/resume works like userspace-driven suspend. It waits 2694fd2ed4d2SMikulas Patocka * until all bios finish and prevents issuing new bios to the target drivers. 2695fd2ed4d2SMikulas Patocka * It may be used only from the kernel. 2696fd2ed4d2SMikulas Patocka */ 2697fd2ed4d2SMikulas Patocka 2698ffcc3936SMike Snitzer static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags) 2699ffcc3936SMike Snitzer { 2700ffcc3936SMike Snitzer struct dm_table *map = NULL; 2701ffcc3936SMike Snitzer 27021ea0654eSBart Van Assche lockdep_assert_held(&md->suspend_lock); 27031ea0654eSBart Van Assche 270496b26c8cSMikulas Patocka if (md->internal_suspend_count++) 2705ffcc3936SMike Snitzer return; /* nested internal suspend */ 2706ffcc3936SMike Snitzer 2707ffcc3936SMike Snitzer if (dm_suspended_md(md)) { 2708ffcc3936SMike Snitzer set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 2709ffcc3936SMike Snitzer return; /* nest suspend */ 2710ffcc3936SMike Snitzer } 2711ffcc3936SMike Snitzer 2712a12f5d48SEric Dumazet map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2713ffcc3936SMike Snitzer 2714ffcc3936SMike Snitzer /* 2715ffcc3936SMike Snitzer * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is 2716ffcc3936SMike Snitzer * supported. 
Properly supporting a TASK_INTERRUPTIBLE internal suspend 2717ffcc3936SMike Snitzer * would require changing .presuspend to return an error -- avoid this 2718ffcc3936SMike Snitzer * until there is a need for more elaborate variants of internal suspend. 2719ffcc3936SMike Snitzer */ 2720eaf9a736SMike Snitzer (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE, 2721eaf9a736SMike Snitzer DMF_SUSPENDED_INTERNALLY); 2722ffcc3936SMike Snitzer 27235df96f2bSMikulas Patocka set_bit(DMF_POST_SUSPENDING, &md->flags); 2724ffcc3936SMike Snitzer dm_table_postsuspend_targets(map); 27255df96f2bSMikulas Patocka clear_bit(DMF_POST_SUSPENDING, &md->flags); 2726ffcc3936SMike Snitzer } 2727ffcc3936SMike Snitzer 2728ffcc3936SMike Snitzer static void __dm_internal_resume(struct mapped_device *md) 2729ffcc3936SMike Snitzer { 273096b26c8cSMikulas Patocka BUG_ON(!md->internal_suspend_count); 273196b26c8cSMikulas Patocka 273296b26c8cSMikulas Patocka if (--md->internal_suspend_count) 2733ffcc3936SMike Snitzer return; /* resume from nested internal suspend */ 2734ffcc3936SMike Snitzer 2735ffcc3936SMike Snitzer if (dm_suspended_md(md)) 2736ffcc3936SMike Snitzer goto done; /* resume from nested suspend */ 2737ffcc3936SMike Snitzer 2738ffcc3936SMike Snitzer /* 2739ffcc3936SMike Snitzer * NOTE: existing callers don't need to call dm_table_resume_targets 2740ffcc3936SMike Snitzer * (which may fail -- so best to avoid it for now by passing NULL map) 2741ffcc3936SMike Snitzer */ 2742ffcc3936SMike Snitzer (void) __dm_resume(md, NULL); 2743ffcc3936SMike Snitzer 2744ffcc3936SMike Snitzer done: 2745ffcc3936SMike Snitzer clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 2746ffcc3936SMike Snitzer smp_mb__after_atomic(); 2747ffcc3936SMike Snitzer wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY); 2748ffcc3936SMike Snitzer } 2749ffcc3936SMike Snitzer 2750ffcc3936SMike Snitzer void dm_internal_suspend_noflush(struct mapped_device *md) 2751fd2ed4d2SMikulas Patocka { 2752fd2ed4d2SMikulas Patocka mutex_lock(&md->suspend_lock); 2753ffcc3936SMike Snitzer __dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG); 2754ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 2755ffcc3936SMike Snitzer } 2756ffcc3936SMike Snitzer EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush); 2757ffcc3936SMike Snitzer 2758ffcc3936SMike Snitzer void dm_internal_resume(struct mapped_device *md) 2759ffcc3936SMike Snitzer { 2760ffcc3936SMike Snitzer mutex_lock(&md->suspend_lock); 2761ffcc3936SMike Snitzer __dm_internal_resume(md); 2762ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 2763ffcc3936SMike Snitzer } 2764ffcc3936SMike Snitzer EXPORT_SYMBOL_GPL(dm_internal_resume); 2765ffcc3936SMike Snitzer 2766ffcc3936SMike Snitzer /* 2767ffcc3936SMike Snitzer * Fast variants of internal suspend/resume hold md->suspend_lock, 2768ffcc3936SMike Snitzer * which prevents interaction with userspace-driven suspend. 
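 * Note that dm_internal_suspend_fast() returns with md->suspend_lock
 * still held; the matching dm_internal_resume_fast() releases it.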
2769ffcc3936SMike Snitzer */ 2770ffcc3936SMike Snitzer 2771ffcc3936SMike Snitzer void dm_internal_suspend_fast(struct mapped_device *md) 2772ffcc3936SMike Snitzer { 2773ffcc3936SMike Snitzer mutex_lock(&md->suspend_lock); 2774ffcc3936SMike Snitzer if (dm_suspended_md(md) || dm_suspended_internally_md(md)) 2775fd2ed4d2SMikulas Patocka return; 2776fd2ed4d2SMikulas Patocka 2777fd2ed4d2SMikulas Patocka set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 2778fd2ed4d2SMikulas Patocka synchronize_srcu(&md->io_barrier); 2779fd2ed4d2SMikulas Patocka flush_workqueue(md->wq); 2780fd2ed4d2SMikulas Patocka dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); 2781fd2ed4d2SMikulas Patocka } 2782b735fedeSMikulas Patocka EXPORT_SYMBOL_GPL(dm_internal_suspend_fast); 2783fd2ed4d2SMikulas Patocka 2784ffcc3936SMike Snitzer void dm_internal_resume_fast(struct mapped_device *md) 2785fd2ed4d2SMikulas Patocka { 2786ffcc3936SMike Snitzer if (dm_suspended_md(md) || dm_suspended_internally_md(md)) 2787fd2ed4d2SMikulas Patocka goto done; 2788fd2ed4d2SMikulas Patocka 2789fd2ed4d2SMikulas Patocka dm_queue_flush(md); 2790fd2ed4d2SMikulas Patocka 2791fd2ed4d2SMikulas Patocka done: 2792fd2ed4d2SMikulas Patocka mutex_unlock(&md->suspend_lock); 2793fd2ed4d2SMikulas Patocka } 2794b735fedeSMikulas Patocka EXPORT_SYMBOL_GPL(dm_internal_resume_fast); 2795fd2ed4d2SMikulas Patocka 27961da177e4SLinus Torvalds /*----------------------------------------------------------------- 27971da177e4SLinus Torvalds * Event notification. 27981da177e4SLinus Torvalds *---------------------------------------------------------------*/ 27993abf85b5SPeter Rajnoha int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, 280060935eb2SMilan Broz unsigned cookie) 280169267a30SAlasdair G Kergon { 28026958c1c6SMikulas Patocka int r; 28036958c1c6SMikulas Patocka unsigned noio_flag; 280460935eb2SMilan Broz char udev_cookie[DM_COOKIE_LENGTH]; 280560935eb2SMilan Broz char *envp[] = { udev_cookie, NULL }; 280660935eb2SMilan Broz 28076958c1c6SMikulas Patocka noio_flag = memalloc_noio_save(); 28086958c1c6SMikulas Patocka 280960935eb2SMilan Broz if (!cookie) 28106958c1c6SMikulas Patocka r = kobject_uevent(&disk_to_dev(md->disk)->kobj, action); 281160935eb2SMilan Broz else { 281260935eb2SMilan Broz snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u", 281360935eb2SMilan Broz DM_COOKIE_ENV_VAR_NAME, cookie); 28146958c1c6SMikulas Patocka r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj, 28153abf85b5SPeter Rajnoha action, envp); 281660935eb2SMilan Broz } 28176958c1c6SMikulas Patocka 28186958c1c6SMikulas Patocka memalloc_noio_restore(noio_flag); 28196958c1c6SMikulas Patocka 28206958c1c6SMikulas Patocka return r; 282169267a30SAlasdair G Kergon } 282269267a30SAlasdair G Kergon 28237a8c3d3bSMike Anderson uint32_t dm_next_uevent_seq(struct mapped_device *md) 28247a8c3d3bSMike Anderson { 28257a8c3d3bSMike Anderson return atomic_add_return(1, &md->uevent_seq); 28267a8c3d3bSMike Anderson } 28277a8c3d3bSMike Anderson 28281da177e4SLinus Torvalds uint32_t dm_get_event_nr(struct mapped_device *md) 28291da177e4SLinus Torvalds { 28301da177e4SLinus Torvalds return atomic_read(&md->event_nr); 28311da177e4SLinus Torvalds } 28321da177e4SLinus Torvalds 28331da177e4SLinus Torvalds int dm_wait_event(struct mapped_device *md, int event_nr) 28341da177e4SLinus Torvalds { 28351da177e4SLinus Torvalds return wait_event_interruptible(md->eventq, 28361da177e4SLinus Torvalds (event_nr != atomic_read(&md->event_nr))); 28371da177e4SLinus Torvalds } 28381da177e4SLinus Torvalds 
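/*
 * Illustrative sketch (not part of the original file): one way a
 * kernel-side caller might consume the event counter above. The helper
 * name is hypothetical; dm_get_event_nr() and dm_wait_event() are the
 * interfaces defined in this file.
 */
static int __maybe_unused example_wait_for_next_event(struct mapped_device *md)
{
	int event_nr = dm_get_event_nr(md);	/* snapshot the counter */

	/* returns 0 once the counter changes, -ERESTARTSYS on a signal */
	return dm_wait_event(md, event_nr);
}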
28397a8c3d3bSMike Anderson void dm_uevent_add(struct mapped_device *md, struct list_head *elist) 28407a8c3d3bSMike Anderson { 28417a8c3d3bSMike Anderson unsigned long flags; 28427a8c3d3bSMike Anderson 28437a8c3d3bSMike Anderson spin_lock_irqsave(&md->uevent_lock, flags); 28447a8c3d3bSMike Anderson list_add(elist, &md->uevent_list); 28457a8c3d3bSMike Anderson spin_unlock_irqrestore(&md->uevent_lock, flags); 28467a8c3d3bSMike Anderson } 28477a8c3d3bSMike Anderson 28481da177e4SLinus Torvalds /* 28491da177e4SLinus Torvalds * The gendisk is only valid as long as you have a reference 28501da177e4SLinus Torvalds * count on 'md'. 28511da177e4SLinus Torvalds */ 28521da177e4SLinus Torvalds struct gendisk *dm_disk(struct mapped_device *md) 28531da177e4SLinus Torvalds { 28541da177e4SLinus Torvalds return md->disk; 28551da177e4SLinus Torvalds } 285665ff5b7dSSami Tolvanen EXPORT_SYMBOL_GPL(dm_disk); 28571da177e4SLinus Torvalds 2858784aae73SMilan Broz struct kobject *dm_kobject(struct mapped_device *md) 2859784aae73SMilan Broz { 28602995fa78SMikulas Patocka return &md->kobj_holder.kobj; 2861784aae73SMilan Broz } 2862784aae73SMilan Broz 2863784aae73SMilan Broz struct mapped_device *dm_get_from_kobject(struct kobject *kobj) 2864784aae73SMilan Broz { 2865784aae73SMilan Broz struct mapped_device *md; 2866784aae73SMilan Broz 28672995fa78SMikulas Patocka md = container_of(kobj, struct mapped_device, kobj_holder.kobj); 2868784aae73SMilan Broz 2869b9a41d21SHou Tao spin_lock(&_minor_lock); 2870b9a41d21SHou Tao if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { 2871b9a41d21SHou Tao md = NULL; 2872b9a41d21SHou Tao goto out; 2873b9a41d21SHou Tao } 2874784aae73SMilan Broz dm_get(md); 2875b9a41d21SHou Tao out: 2876b9a41d21SHou Tao spin_unlock(&_minor_lock); 2877b9a41d21SHou Tao 2878784aae73SMilan Broz return md; 2879784aae73SMilan Broz } 2880784aae73SMilan Broz 28814f186f8bSKiyoshi Ueda int dm_suspended_md(struct mapped_device *md) 28821da177e4SLinus Torvalds { 28831da177e4SLinus Torvalds return test_bit(DMF_SUSPENDED, &md->flags); 28841da177e4SLinus Torvalds } 28851da177e4SLinus Torvalds 28865df96f2bSMikulas Patocka static int dm_post_suspending_md(struct mapped_device *md) 28875df96f2bSMikulas Patocka { 28885df96f2bSMikulas Patocka return test_bit(DMF_POST_SUSPENDING, &md->flags); 28895df96f2bSMikulas Patocka } 28905df96f2bSMikulas Patocka 2891ffcc3936SMike Snitzer int dm_suspended_internally_md(struct mapped_device *md) 2892ffcc3936SMike Snitzer { 2893ffcc3936SMike Snitzer return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 2894ffcc3936SMike Snitzer } 2895ffcc3936SMike Snitzer 28962c140a24SMikulas Patocka int dm_test_deferred_remove_flag(struct mapped_device *md) 28972c140a24SMikulas Patocka { 28982c140a24SMikulas Patocka return test_bit(DMF_DEFERRED_REMOVE, &md->flags); 28992c140a24SMikulas Patocka } 29002c140a24SMikulas Patocka 290164dbce58SKiyoshi Ueda int dm_suspended(struct dm_target *ti) 290264dbce58SKiyoshi Ueda { 290333bd6f06SMike Snitzer return dm_suspended_md(ti->table->md); 290464dbce58SKiyoshi Ueda } 290564dbce58SKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_suspended); 290664dbce58SKiyoshi Ueda 29075df96f2bSMikulas Patocka int dm_post_suspending(struct dm_target *ti) 29085df96f2bSMikulas Patocka { 290933bd6f06SMike Snitzer return dm_post_suspending_md(ti->table->md); 29105df96f2bSMikulas Patocka } 29115df96f2bSMikulas Patocka EXPORT_SYMBOL_GPL(dm_post_suspending); 29125df96f2bSMikulas Patocka 29132e93ccc1SKiyoshi Ueda int dm_noflush_suspending(struct dm_target *ti) 29142e93ccc1SKiyoshi Ueda { 
291533bd6f06SMike Snitzer return __noflush_suspending(ti->table->md); 29162e93ccc1SKiyoshi Ueda } 29172e93ccc1SKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_noflush_suspending); 29182e93ccc1SKiyoshi Ueda 29197e0d574fSBart Van Assche struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type, 29200776aa0eSMike Snitzer unsigned integrity, unsigned per_io_data_size, 29210776aa0eSMike Snitzer unsigned min_pool_size) 2922e6ee8c0bSKiyoshi Ueda { 2923115485e8SMike Snitzer struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id); 292478d8e58aSMike Snitzer unsigned int pool_size = 0; 292564f52b0eSMike Snitzer unsigned int front_pad, io_front_pad; 29266f1c819cSKent Overstreet int ret; 2927e6ee8c0bSKiyoshi Ueda 2928e6ee8c0bSKiyoshi Ueda if (!pools) 29294e6e36c3SMike Snitzer return NULL; 2930e6ee8c0bSKiyoshi Ueda 293178d8e58aSMike Snitzer switch (type) { 293278d8e58aSMike Snitzer case DM_TYPE_BIO_BASED: 2933545ed20eSToshi Kani case DM_TYPE_DAX_BIO_BASED: 293422c11858SMike Snitzer case DM_TYPE_NVME_BIO_BASED: 29350776aa0eSMike Snitzer pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size); 293630187e1dSMike Snitzer front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone); 293764f52b0eSMike Snitzer io_front_pad = roundup(front_pad, __alignof__(struct dm_io)) + offsetof(struct dm_io, tio); 29386f1c819cSKent Overstreet ret = bioset_init(&pools->io_bs, pool_size, io_front_pad, 0); 29396f1c819cSKent Overstreet if (ret) 294064f52b0eSMike Snitzer goto out; 29416f1c819cSKent Overstreet if (integrity && bioset_integrity_create(&pools->io_bs, pool_size)) 2942eb8db831SChristoph Hellwig goto out; 294378d8e58aSMike Snitzer break; 294478d8e58aSMike Snitzer case DM_TYPE_REQUEST_BASED: 29450776aa0eSMike Snitzer pool_size = max(dm_get_reserved_rq_based_ios(), min_pool_size); 294678d8e58aSMike Snitzer front_pad = offsetof(struct dm_rq_clone_bio_info, clone); 2947591ddcfcSMike Snitzer /* per_io_data_size is used for blk-mq pdu at queue allocation */ 294878d8e58aSMike Snitzer break; 294978d8e58aSMike Snitzer default: 295078d8e58aSMike Snitzer BUG(); 295178d8e58aSMike Snitzer } 295278d8e58aSMike Snitzer 29536f1c819cSKent Overstreet ret = bioset_init(&pools->bs, pool_size, front_pad, 0); 29546f1c819cSKent Overstreet if (ret) 29555f015204SJun'ichi Nomura goto out; 2956e6ee8c0bSKiyoshi Ueda 29576f1c819cSKent Overstreet if (integrity && bioset_integrity_create(&pools->bs, pool_size)) 29585f015204SJun'ichi Nomura goto out; 2959a91a2785SMartin K. 
Petersen 2960e6ee8c0bSKiyoshi Ueda return pools; 296178d8e58aSMike Snitzer 29625f015204SJun'ichi Nomura out: 29635f015204SJun'ichi Nomura dm_free_md_mempools(pools); 2964e6ee8c0bSKiyoshi Ueda 29654e6e36c3SMike Snitzer return NULL; 2966e6ee8c0bSKiyoshi Ueda } 2967e6ee8c0bSKiyoshi Ueda 2968e6ee8c0bSKiyoshi Ueda void dm_free_md_mempools(struct dm_md_mempools *pools) 2969e6ee8c0bSKiyoshi Ueda { 2970e6ee8c0bSKiyoshi Ueda if (!pools) 2971e6ee8c0bSKiyoshi Ueda return; 2972e6ee8c0bSKiyoshi Ueda 29736f1c819cSKent Overstreet bioset_exit(&pools->bs); 29746f1c819cSKent Overstreet bioset_exit(&pools->io_bs); 2975e6ee8c0bSKiyoshi Ueda 2976e6ee8c0bSKiyoshi Ueda kfree(pools); 2977e6ee8c0bSKiyoshi Ueda } 2978e6ee8c0bSKiyoshi Ueda 29799c72bad1SChristoph Hellwig struct dm_pr { 29809c72bad1SChristoph Hellwig u64 old_key; 29819c72bad1SChristoph Hellwig u64 new_key; 29829c72bad1SChristoph Hellwig u32 flags; 29839c72bad1SChristoph Hellwig bool fail_early; 29849c72bad1SChristoph Hellwig }; 29859c72bad1SChristoph Hellwig 29869c72bad1SChristoph Hellwig static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn, 29879c72bad1SChristoph Hellwig void *data) 29889c72bad1SChristoph Hellwig { 29899c72bad1SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data; 29909c72bad1SChristoph Hellwig struct dm_table *table; 29919c72bad1SChristoph Hellwig struct dm_target *ti; 29929c72bad1SChristoph Hellwig int ret = -ENOTTY, srcu_idx; 29939c72bad1SChristoph Hellwig 29949c72bad1SChristoph Hellwig table = dm_get_live_table(md, &srcu_idx); 29959c72bad1SChristoph Hellwig if (!table || !dm_table_get_size(table)) 29969c72bad1SChristoph Hellwig goto out; 29979c72bad1SChristoph Hellwig 29989c72bad1SChristoph Hellwig /* We only support devices that have a single target */ 29999c72bad1SChristoph Hellwig if (dm_table_get_num_targets(table) != 1) 30009c72bad1SChristoph Hellwig goto out; 30019c72bad1SChristoph Hellwig ti = dm_table_get_target(table, 0); 30029c72bad1SChristoph Hellwig 30039c72bad1SChristoph Hellwig ret = -EINVAL; 30049c72bad1SChristoph Hellwig if (!ti->type->iterate_devices) 30059c72bad1SChristoph Hellwig goto out; 30069c72bad1SChristoph Hellwig 30079c72bad1SChristoph Hellwig ret = ti->type->iterate_devices(ti, fn, data); 30089c72bad1SChristoph Hellwig out: 30099c72bad1SChristoph Hellwig dm_put_live_table(md, srcu_idx); 30109c72bad1SChristoph Hellwig return ret; 30119c72bad1SChristoph Hellwig } 30129c72bad1SChristoph Hellwig 30139c72bad1SChristoph Hellwig /* 30149c72bad1SChristoph Hellwig * For register / unregister we need to manually call out to every path. 
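 * dm_pr_register() below relies on this: if registration fails on any
 * path, it rolls back by unregistering the new key from every path.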
30159c72bad1SChristoph Hellwig */ 30169c72bad1SChristoph Hellwig static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev, 30179c72bad1SChristoph Hellwig sector_t start, sector_t len, void *data) 30189c72bad1SChristoph Hellwig { 30199c72bad1SChristoph Hellwig struct dm_pr *pr = data; 30209c72bad1SChristoph Hellwig const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; 30219c72bad1SChristoph Hellwig 30229c72bad1SChristoph Hellwig if (!ops || !ops->pr_register) 30239c72bad1SChristoph Hellwig return -EOPNOTSUPP; 30249c72bad1SChristoph Hellwig return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags); 30259c72bad1SChristoph Hellwig } 30269c72bad1SChristoph Hellwig 302771cdb697SChristoph Hellwig static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key, 302871cdb697SChristoph Hellwig u32 flags) 302971cdb697SChristoph Hellwig { 30309c72bad1SChristoph Hellwig struct dm_pr pr = { 30319c72bad1SChristoph Hellwig .old_key = old_key, 30329c72bad1SChristoph Hellwig .new_key = new_key, 30339c72bad1SChristoph Hellwig .flags = flags, 30349c72bad1SChristoph Hellwig .fail_early = true, 30359c72bad1SChristoph Hellwig }; 30369c72bad1SChristoph Hellwig int ret; 303771cdb697SChristoph Hellwig 30389c72bad1SChristoph Hellwig ret = dm_call_pr(bdev, __dm_pr_register, &pr); 30399c72bad1SChristoph Hellwig if (ret && new_key) { 30409c72bad1SChristoph Hellwig /* unregister all paths if we failed to register any path */ 30419c72bad1SChristoph Hellwig pr.old_key = new_key; 30429c72bad1SChristoph Hellwig pr.new_key = 0; 30439c72bad1SChristoph Hellwig pr.flags = 0; 30449c72bad1SChristoph Hellwig pr.fail_early = false; 30459c72bad1SChristoph Hellwig dm_call_pr(bdev, __dm_pr_register, &pr); 30469c72bad1SChristoph Hellwig } 304771cdb697SChristoph Hellwig 30489c72bad1SChristoph Hellwig return ret; 304971cdb697SChristoph Hellwig } 305071cdb697SChristoph Hellwig 305171cdb697SChristoph Hellwig static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type, 305271cdb697SChristoph Hellwig u32 flags) 305371cdb697SChristoph Hellwig { 305471cdb697SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data; 305571cdb697SChristoph Hellwig const struct pr_ops *ops; 3056971888c4SMike Snitzer int r, srcu_idx; 305771cdb697SChristoph Hellwig 30585bd5e8d8SMike Snitzer r = dm_prepare_ioctl(md, &srcu_idx, &bdev); 305971cdb697SChristoph Hellwig if (r < 0) 3060971888c4SMike Snitzer goto out; 306171cdb697SChristoph Hellwig 306271cdb697SChristoph Hellwig ops = bdev->bd_disk->fops->pr_ops; 306371cdb697SChristoph Hellwig if (ops && ops->pr_reserve) 306471cdb697SChristoph Hellwig r = ops->pr_reserve(bdev, key, type, flags); 306571cdb697SChristoph Hellwig else 306671cdb697SChristoph Hellwig r = -EOPNOTSUPP; 3067971888c4SMike Snitzer out: 3068971888c4SMike Snitzer dm_unprepare_ioctl(md, srcu_idx); 306971cdb697SChristoph Hellwig return r; 307071cdb697SChristoph Hellwig } 307171cdb697SChristoph Hellwig 307271cdb697SChristoph Hellwig static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type) 307371cdb697SChristoph Hellwig { 307471cdb697SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data; 307571cdb697SChristoph Hellwig const struct pr_ops *ops; 3076971888c4SMike Snitzer int r, srcu_idx; 307771cdb697SChristoph Hellwig 30785bd5e8d8SMike Snitzer r = dm_prepare_ioctl(md, &srcu_idx, &bdev); 307971cdb697SChristoph Hellwig if (r < 0) 3080971888c4SMike Snitzer goto out; 308171cdb697SChristoph Hellwig 308271cdb697SChristoph Hellwig ops = 
bdev->bd_disk->fops->pr_ops; 308371cdb697SChristoph Hellwig if (ops && ops->pr_release) 308471cdb697SChristoph Hellwig r = ops->pr_release(bdev, key, type); 308571cdb697SChristoph Hellwig else 308671cdb697SChristoph Hellwig r = -EOPNOTSUPP; 3087971888c4SMike Snitzer out: 3088971888c4SMike Snitzer dm_unprepare_ioctl(md, srcu_idx); 308971cdb697SChristoph Hellwig return r; 309071cdb697SChristoph Hellwig } 309171cdb697SChristoph Hellwig 309271cdb697SChristoph Hellwig static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key, 309371cdb697SChristoph Hellwig enum pr_type type, bool abort) 309471cdb697SChristoph Hellwig { 309571cdb697SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data; 309671cdb697SChristoph Hellwig const struct pr_ops *ops; 3097971888c4SMike Snitzer int r, srcu_idx; 309871cdb697SChristoph Hellwig 30995bd5e8d8SMike Snitzer r = dm_prepare_ioctl(md, &srcu_idx, &bdev); 310071cdb697SChristoph Hellwig if (r < 0) 3101971888c4SMike Snitzer goto out; 310271cdb697SChristoph Hellwig 310371cdb697SChristoph Hellwig ops = bdev->bd_disk->fops->pr_ops; 310471cdb697SChristoph Hellwig if (ops && ops->pr_preempt) 310571cdb697SChristoph Hellwig r = ops->pr_preempt(bdev, old_key, new_key, type, abort); 310671cdb697SChristoph Hellwig else 310771cdb697SChristoph Hellwig r = -EOPNOTSUPP; 3108971888c4SMike Snitzer out: 3109971888c4SMike Snitzer dm_unprepare_ioctl(md, srcu_idx); 311071cdb697SChristoph Hellwig return r; 311171cdb697SChristoph Hellwig } 311271cdb697SChristoph Hellwig 311371cdb697SChristoph Hellwig static int dm_pr_clear(struct block_device *bdev, u64 key) 311471cdb697SChristoph Hellwig { 311571cdb697SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data; 311671cdb697SChristoph Hellwig const struct pr_ops *ops; 3117971888c4SMike Snitzer int r, srcu_idx; 311871cdb697SChristoph Hellwig 31195bd5e8d8SMike Snitzer r = dm_prepare_ioctl(md, &srcu_idx, &bdev); 312071cdb697SChristoph Hellwig if (r < 0) 3121971888c4SMike Snitzer goto out; 312271cdb697SChristoph Hellwig 312371cdb697SChristoph Hellwig ops = bdev->bd_disk->fops->pr_ops; 312471cdb697SChristoph Hellwig if (ops && ops->pr_clear) 312571cdb697SChristoph Hellwig r = ops->pr_clear(bdev, key); 312671cdb697SChristoph Hellwig else 312771cdb697SChristoph Hellwig r = -EOPNOTSUPP; 3128971888c4SMike Snitzer out: 3129971888c4SMike Snitzer dm_unprepare_ioctl(md, srcu_idx); 313071cdb697SChristoph Hellwig return r; 313171cdb697SChristoph Hellwig } 313271cdb697SChristoph Hellwig 313371cdb697SChristoph Hellwig static const struct pr_ops dm_pr_ops = { 313471cdb697SChristoph Hellwig .pr_register = dm_pr_register, 313571cdb697SChristoph Hellwig .pr_reserve = dm_pr_reserve, 313671cdb697SChristoph Hellwig .pr_release = dm_pr_release, 313771cdb697SChristoph Hellwig .pr_preempt = dm_pr_preempt, 313871cdb697SChristoph Hellwig .pr_clear = dm_pr_clear, 313971cdb697SChristoph Hellwig }; 314071cdb697SChristoph Hellwig 314183d5cde4SAlexey Dobriyan static const struct block_device_operations dm_blk_dops = { 3142c62b37d9SChristoph Hellwig .submit_bio = dm_submit_bio, 31431da177e4SLinus Torvalds .open = dm_blk_open, 31441da177e4SLinus Torvalds .release = dm_blk_close, 3145aa129a22SMilan Broz .ioctl = dm_blk_ioctl, 31463ac51e74SDarrick J. 
Wong .getgeo = dm_blk_getgeo, 3147e76239a3SChristoph Hellwig .report_zones = dm_blk_report_zones, 314871cdb697SChristoph Hellwig .pr_ops = &dm_pr_ops, 31491da177e4SLinus Torvalds .owner = THIS_MODULE 31501da177e4SLinus Torvalds }; 31511da177e4SLinus Torvalds 3152f26c5719SDan Williams static const struct dax_operations dm_dax_ops = { 3153f26c5719SDan Williams .direct_access = dm_dax_direct_access, 31547bf7eac8SDan Williams .dax_supported = dm_dax_supported, 31557e026c8cSDan Williams .copy_from_iter = dm_dax_copy_from_iter, 3156b3a9a0c3SDan Williams .copy_to_iter = dm_dax_copy_to_iter, 3157cdf6cdcdSVivek Goyal .zero_page_range = dm_dax_zero_page_range, 3158f26c5719SDan Williams }; 3159f26c5719SDan Williams 31601da177e4SLinus Torvalds /* 31611da177e4SLinus Torvalds * module hooks 31621da177e4SLinus Torvalds */ 31631da177e4SLinus Torvalds module_init(dm_init); 31641da177e4SLinus Torvalds module_exit(dm_exit); 31651da177e4SLinus Torvalds 31661da177e4SLinus Torvalds module_param(major, uint, 0); 31671da177e4SLinus Torvalds MODULE_PARM_DESC(major, "The major number of the device mapper"); 3168f4790826SMike Snitzer 3169e8603136SMike Snitzer module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR); 3170e8603136SMike Snitzer MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools"); 3171e8603136SMike Snitzer 3172115485e8SMike Snitzer module_param(dm_numa_node, int, S_IRUGO | S_IWUSR); 3173115485e8SMike Snitzer MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations"); 3174115485e8SMike Snitzer 31751da177e4SLinus Torvalds MODULE_DESCRIPTION(DM_NAME " driver"); 31761da177e4SLinus Torvalds MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); 31771da177e4SLinus Torvalds MODULE_LICENSE("GPL"); 3178
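/*
 * Illustrative sketch (not part of the original file): dm_call_pr()
 * dispatches a small per-device callback through the single live
 * target, exactly as __dm_pr_register() above. A hypothetical per-path
 * PR clear would follow the same shape:
 */
static int __maybe_unused __example_pr_clear(struct dm_target *ti,
					     struct dm_dev *dev,
					     sector_t start, sector_t len,
					     void *data)
{
	const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
	u64 *key = data;

	if (!ops || !ops->pr_clear)
		return -EOPNOTSUPP;
	return ops->pr_clear(dev->bdev, *key);
}
/* usage sketch: u64 key = ...; dm_call_pr(bdev, __example_pr_clear, &key); */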