/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/uio.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/pr.h>
#include <linux/refcount.h>
#include <linux/part_stat.h>
#include <linux/blk-crypto.h>
#include <linux/keyslot-manager.h>

#define DM_MSG_PREFIX "core"

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_IDR(_minor_idr);

static DEFINE_SPINLOCK(_minor_lock);

static void do_deferred_remove(struct work_struct *w);

static DECLARE_WORK(deferred_remove_work, do_deferred_remove);

static struct workqueue_struct *deferred_remove_workqueue;

atomic_t dm_global_event_nr = ATOMIC_INIT(0);
DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);

void dm_issue_global_event(void)
{
	atomic_inc(&dm_global_event_nr);
	wake_up(&dm_global_eventq);
}

/*
 * One of these is allocated (on-stack) per original bio.
 */
struct clone_info {
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	unsigned sector_count;
};

#define DM_TARGET_IO_BIO_OFFSET (offsetof(struct dm_target_io, clone))
#define DM_IO_BIO_OFFSET \
	(offsetof(struct dm_target_io, clone) + offsetof(struct dm_io, tio))

void *dm_per_bio_data(struct bio *bio, size_t data_size)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	if (!tio->inside_dm_io)
		return (char *)bio - DM_TARGET_IO_BIO_OFFSET - data_size;
	return (char *)bio - DM_IO_BIO_OFFSET - data_size;
}
EXPORT_SYMBOL_GPL(dm_per_bio_data);

struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
{
	struct dm_io *io = (struct dm_io *)((char *)data + data_size);
	if (io->magic == DM_IO_MAGIC)
		return (struct bio *)((char *)io + DM_IO_BIO_OFFSET);
	BUG_ON(io->magic != DM_TIO_MAGIC);
	return (struct bio *)((char *)io + DM_TARGET_IO_BIO_OFFSET);
}
EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data);

unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
{
	return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
}
EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);
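
/*
 * Illustrative sketch (not part of this file): a target that set
 * per_io_data_size in its constructor can recover its private data from
 * a clone bio and map back again; "struct my_data" is a hypothetical name.
 *
 *	struct my_data *p = dm_per_bio_data(bio, sizeof(struct my_data));
 *	struct bio *b = dm_bio_from_per_bio_data(p, sizeof(struct my_data));
 *
 * b == bio here: for a given data_size the two helpers invert each other,
 * for both the embedded (inside_dm_io) and the separately allocated case.
 */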

#define MINOR_ALLOCED ((void *)-1)

#define DM_NUMA_NODE NUMA_NO_NODE
static int dm_numa_node = DM_NUMA_NODE;

#define DEFAULT_SWAP_BIOS	(8 * 1048576 / PAGE_SIZE)
static int swap_bios = DEFAULT_SWAP_BIOS;
static int get_swap_bios(void)
{
	int latch = READ_ONCE(swap_bios);
	if (unlikely(latch <= 0))
		latch = DEFAULT_SWAP_BIOS;
	return latch;
}

/*
 * For mempool pre-allocation at table load time.
 */
struct dm_md_mempools {
	struct bio_set bs;
	struct bio_set io_bs;
};

struct table_device {
	struct list_head list;
	refcount_t count;
	struct dm_dev dm_dev;
};

/*
 * Bio-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_BIO_BASED_IOS		16
static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;

static int __dm_get_module_param_int(int *module_param, int min, int max)
{
	int param = READ_ONCE(*module_param);
	int modified_param = 0;
	bool modified = true;

	if (param < min)
		modified_param = min;
	else if (param > max)
		modified_param = max;
	else
		modified = false;

	if (modified) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned __dm_get_module_param(unsigned *module_param,
			       unsigned def, unsigned max)
{
	unsigned param = READ_ONCE(*module_param);
	unsigned modified_param = 0;

	if (!param)
		modified_param = def;
	else if (param > max)
		modified_param = max;

	if (modified_param) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned dm_get_reserved_bio_based_ios(void)
{
	return __dm_get_module_param(&reserved_bio_based_ios,
				     RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);

static unsigned dm_get_numa_node(void)
{
	return __dm_get_module_param_int(&dm_numa_node,
					 DM_NUMA_NODE, num_online_nodes() - 1);
}
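
/*
 * Illustrative sketch (not part of this file): the clamping helpers above
 * sanitize user-tunable module parameters on each read. For example, with
 * the "reserved_bio_based_ios" module parameter set to 0:
 *
 *	dm_get_reserved_bio_based_ios()
 *	  -> __dm_get_module_param(&reserved_bio_based_ios,
 *				   RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS)
 *	  -> returns 16 (the default) and writes the corrected value back
 *	     with cmpxchg(), so a concurrent writer is not clobbered.
 */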

static int __init local_init(void)
{
	int r;

	r = dm_uevent_init();
	if (r)
		return r;

	deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
	if (!deferred_remove_workqueue) {
		r = -ENOMEM;
		goto out_uevent_exit;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_free_workqueue;

	if (!_major)
		_major = r;

	return 0;

out_free_workqueue:
	destroy_workqueue(deferred_remove_workqueue);
out_uevent_exit:
	dm_uevent_exit();

	return r;
}

static void local_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(deferred_remove_workqueue);

	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_io_init,
	dm_kcopyd_init,
	dm_interface_init,
	dm_statistics_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_io_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
	dm_statistics_exit,
};
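
/*
 * _inits[] and _exits[] are kept in matching order: if _inits[i] fails,
 * dm_init() below unwinds with _exits[i-1] ... _exits[0], so only the
 * stages that actually succeeded get torn down.
 */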

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();

	/*
	 * Should be empty by this point.
	 */
	idr_destroy(&_minor_idr);
}

/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{
	return test_bit(DMF_DELETING, &md->flags);
}

static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);
out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}

static void dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = disk->private_data;
	if (WARN_ON(!md))
		goto out;

	if (atomic_dec_and_test(&md->open_count) &&
	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
		queue_work(deferred_remove_workqueue, &deferred_remove_work);

	dm_put(md);
out:
	spin_unlock(&_minor_lock);
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md)) {
		r = -EBUSY;
		if (mark_deferred)
			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
		r = -EEXIST;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

int dm_cancel_deferred_remove(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (test_bit(DMF_DELETING, &md->flags))
		r = -EBUSY;
	else
		clear_bit(DMF_DEFERRED_REMOVE, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static void do_deferred_remove(struct work_struct *w)
{
	dm_deferred_remove();
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
			    struct block_device **bdev)
{
	struct dm_target *tgt;
	struct dm_table *map;
	int r;

retry:
	r = -ENOTTY;
	map = dm_get_live_table(md, srcu_idx);
	if (!map || !dm_table_get_size(map))
		return r;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		return r;

	tgt = dm_table_get_target(map, 0);
	if (!tgt->type->prepare_ioctl)
		return r;

	if (dm_suspended_md(md))
		return -EAGAIN;

	r = tgt->type->prepare_ioctl(tgt, bdev);
	if (r == -ENOTCONN && !fatal_signal_pending(current)) {
		dm_put_live_table(md, *srcu_idx);
		msleep(10);
		goto retry;
	}

	return r;
}

static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
{
	dm_put_live_table(md, srcu_idx);
}
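
/*
 * Illustrative sketch (not part of this file): dm_prepare_ioctl() and
 * dm_unprepare_ioctl() must bracket any use of the returned bdev, since
 * the live table (and thus the bdev) is only pinned in between; the
 * unprepare call is needed even on failure, because the SRCU read lock
 * is taken regardless:
 *
 *	int r, srcu_idx;
 *	struct block_device *bdev;
 *
 *	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
 *	if (r >= 0)
 *		... use bdev ...
 *	dm_unprepare_ioctl(md, srcu_idx);
 *
 * dm_blk_ioctl() below follows exactly this pattern.
 */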

static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	if (r > 0) {
		/*
		 * Target determined this ioctl is being issued against a
		 * subset of the parent bdev; require extra privileges.
		 */
		if (!capable(CAP_SYS_RAWIO)) {
			DMDEBUG_LIMIT(
				"%s: sending ioctl %x to DM device without required privilege.",
				current->comm, cmd);
			r = -ENOIOCTLCMD;
			goto out;
		}
	}

	if (!bdev->bd_disk->fops->ioctl)
		r = -ENOTTY;
	else
		r = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}

u64 dm_start_time_ns_from_clone(struct bio *bio)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	struct dm_io *io = tio->io;

	return jiffies_to_nsecs(io->start_time);
}
EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone);

static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->orig_bio;

	io->start_time = bio_start_io_acct(bio);
	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio_data_dir(bio),
				    bio->bi_iter.bi_sector, bio_sectors(bio),
				    false, 0, &io->stats_aux);
}

static void end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->orig_bio;
	unsigned long duration = jiffies - io->start_time;

	bio_end_io_acct(bio, io->start_time);

	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio_data_dir(bio),
				    bio->bi_iter.bi_sector, bio_sectors(bio),
				    true, duration, &io->stats_aux);

	/* nudge anyone waiting on suspend queue */
	if (unlikely(wq_has_sleeper(&md->wait)))
		wake_up(&md->wait);
}

static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
{
	struct dm_io *io;
	struct dm_target_io *tio;
	struct bio *clone;

	clone = bio_alloc_bioset(GFP_NOIO, 0, &md->io_bs);
	if (!clone)
		return NULL;

	tio = container_of(clone, struct dm_target_io, clone);
	tio->inside_dm_io = true;
	tio->io = NULL;

	io = container_of(tio, struct dm_io, tio);
	io->magic = DM_IO_MAGIC;
	io->status = 0;
	atomic_set(&io->io_count, 1);
	io->orig_bio = bio;
	io->md = md;
	spin_lock_init(&io->endio_lock);

	start_io_acct(io);

	return io;
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	bio_put(&io->tio.clone);
}

static struct dm_target_io *alloc_tio(struct clone_info *ci, struct dm_target *ti,
				      unsigned target_bio_nr, gfp_t gfp_mask)
{
	struct dm_target_io *tio;

	if (!ci->io->tio.io) {
		/* the dm_target_io embedded in ci->io is available */
		tio = &ci->io->tio;
	} else {
		struct bio *clone = bio_alloc_bioset(gfp_mask, 0, &ci->io->md->bs);
		if (!clone)
			return NULL;

		tio = container_of(clone, struct dm_target_io, clone);
		tio->inside_dm_io = false;
	}

	tio->magic = DM_TIO_MAGIC;
	tio->io = ci->io;
	tio->ti = ti;
	tio->target_bio_nr = target_bio_nr;

	return tio;
}

static void free_tio(struct dm_target_io *tio)
{
	if (tio->inside_dm_io)
		return;
	bio_put(&tio->clone);
}
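
/*
 * Illustrative note on the allocation scheme above: each original bio gets
 * one struct dm_io from md->io_bs, and the dm_target_io embedded in it
 * serves the first clone for free. Only when further clones are needed
 * (e.g. multiple targets, or num_bios > 1) does alloc_tio() dip into
 * md->bs; free_tio() mirrors this by only putting the non-embedded clones.
 */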

/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&md->deferred_lock, flags);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irqrestore(&md->deferred_lock, flags);
	queue_work(md->wq, &md->work);
}

/*
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_put_live_table() when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
{
	*srcu_idx = srcu_read_lock(&md->io_barrier);

	return srcu_dereference(md->map, &md->io_barrier);
}

void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
{
	srcu_read_unlock(&md->io_barrier, srcu_idx);
}

void dm_sync_table(struct mapped_device *md)
{
	synchronize_srcu(&md->io_barrier);
	synchronize_rcu_expedited();
}

/*
 * A fast alternative to dm_get_live_table/dm_put_live_table.
 * The caller must not block between these two functions.
 */
static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
{
	rcu_read_lock();
	return rcu_dereference(md->map);
}

static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
{
	rcu_read_unlock();
}
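
/*
 * Illustrative sketch (not part of this file) of the canonical live-table
 * access pattern; the SRCU read lock taken by dm_get_live_table() is what
 * lets table swaps wait out in-flight readers via dm_sync_table():
 *
 *	int srcu_idx;
 *	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 *
 *	if (map) {
 *		... use map, may block ...
 *	}
 *	dm_put_live_table(md, srcu_idx);
 */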

static char *_dm_claim_ptr = "I belong to device-mapper";

/*
 * Open a table device so we can use it as a map destination.
 */
static int open_table_device(struct table_device *td, dev_t dev,
			     struct mapped_device *md)
{
	struct block_device *bdev;

	int r;

	BUG_ON(td->dm_dev.bdev);

	bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _dm_claim_ptr);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	r = bd_link_disk_holder(bdev, dm_disk(md));
	if (r) {
		blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
		return r;
	}

	td->dm_dev.bdev = bdev;
	td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev);
	return 0;
}

/*
 * Close a table device that we've been using.
 */
static void close_table_device(struct table_device *td, struct mapped_device *md)
{
	if (!td->dm_dev.bdev)
		return;

	bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
	blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
	put_dax(td->dm_dev.dax_dev);
	td->dm_dev.bdev = NULL;
	td->dm_dev.dax_dev = NULL;
}

static struct table_device *find_table_device(struct list_head *l, dev_t dev,
					      fmode_t mode)
{
	struct table_device *td;

	list_for_each_entry(td, l, list)
		if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
			return td;

	return NULL;
}

int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
			struct dm_dev **result)
{
	int r;
	struct table_device *td;

	mutex_lock(&md->table_devices_lock);
	td = find_table_device(&md->table_devices, dev, mode);
	if (!td) {
		td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
		if (!td) {
			mutex_unlock(&md->table_devices_lock);
			return -ENOMEM;
		}

		td->dm_dev.mode = mode;
		td->dm_dev.bdev = NULL;

		if ((r = open_table_device(td, dev, md))) {
			mutex_unlock(&md->table_devices_lock);
			kfree(td);
			return r;
		}

		format_dev_t(td->dm_dev.name, dev);

		refcount_set(&td->count, 1);
		list_add(&td->list, &md->table_devices);
	} else {
		refcount_inc(&td->count);
	}
	mutex_unlock(&md->table_devices_lock);

	*result = &td->dm_dev;
	return 0;
}
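
/*
 * Illustrative sketch (not part of this file): table device references are
 * shared and counted, so repeated opens of the same (dev, mode) pair return
 * the same dm_dev, and each dm_get_table_device() must be balanced by a
 * dm_put_table_device():
 *
 *	struct dm_dev *d;
 *
 *	if (!dm_get_table_device(md, dev, mode, &d)) {
 *		... use d->bdev ...
 *		dm_put_table_device(md, d);
 *	}
 */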

void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
{
	struct table_device *td = container_of(d, struct table_device, dm_dev);

	mutex_lock(&md->table_devices_lock);
	if (refcount_dec_and_test(&td->count)) {
		close_table_device(td, md);
		list_del(&td->list);
		kfree(td);
	}
	mutex_unlock(&md->table_devices_lock);
}

static void free_table_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct table_device *td = list_entry(tmp, struct table_device, list);

		DMWARN("dm_destroy: %s still exists with %d references",
		       td->dm_dev.name, refcount_read(&td->count));
		kfree(td);
	}
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
void dm_io_dec_pending(struct dm_io *io, blk_status_t error)
{
	unsigned long flags;
	blk_status_t io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;

	/* Push-back supersedes any I/O errors */
	if (unlikely(error)) {
		spin_lock_irqsave(&io->endio_lock, flags);
		if (!(io->status == BLK_STS_DM_REQUEUE && __noflush_suspending(md)))
			io->status = error;
		spin_unlock_irqrestore(&io->endio_lock, flags);
	}

	if (atomic_dec_and_test(&io->io_count)) {
		bio = io->orig_bio;
		if (io->status == BLK_STS_DM_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md) &&
			    !WARN_ON_ONCE(dm_is_zone_write(md, bio))) {
				/* NOTE early return due to BLK_STS_DM_REQUEUE below */
				bio_list_add_head(&md->deferred, bio);
			} else {
				/*
				 * noflush suspend was interrupted or this is
				 * a write to a zoned target.
				 */
				io->status = BLK_STS_IOERR;
			}
			spin_unlock_irqrestore(&md->deferred_lock, flags);
		}

		io_error = io->status;
		end_io_acct(io);
		free_io(md, io);

		if (io_error == BLK_STS_DM_REQUEUE)
			return;

		if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
			/*
			 * Preflush done for flush with data, reissue
			 * without REQ_PREFLUSH.
			 */
			bio->bi_opf &= ~REQ_PREFLUSH;
			queue_io(md, bio);
		} else {
			/* done with normal IO or empty flush */
			if (io_error)
				bio->bi_status = io_error;
			bio_endio(bio);
		}
	}
}
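
/*
 * Illustrative note on the reference counting above: alloc_io() starts
 * io_count at 1 on behalf of the submitter; __map_bio() takes one extra
 * reference per clone via dm_io_inc_pending(), and each completion or
 * error path drops one via dm_io_dec_pending(). Only the final drop
 * completes io->orig_bio.
 */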

void disable_discard(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support DISCARD, disable it */
	limits->max_discard_sectors = 0;
	blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue);
}

void disable_write_same(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE SAME, disable it */
	limits->max_write_same_sectors = 0;
}

void disable_write_zeroes(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE ZEROES, disable it */
	limits->max_write_zeroes_sectors = 0;
}

static bool swap_bios_limit(struct dm_target *ti, struct bio *bio)
{
	return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios);
}
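
/*
 * Illustrative note on the swap-bios throttle: for targets that set
 * ti->limit_swap_bios, __map_bio() does down(&md->swap_bios_semaphore)
 * before mapping a REQ_SWAP bio, and clone_endio() (or the map error
 * paths) performs the matching up(), capping in-flight swap bios at
 * get_swap_bios().
 */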

static void clone_endio(struct bio *bio)
{
	blk_status_t error = bio->bi_status;
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;
	struct request_queue *q = bio->bi_bdev->bd_disk->queue;

	if (unlikely(error == BLK_STS_TARGET)) {
		if (bio_op(bio) == REQ_OP_DISCARD &&
		    !q->limits.max_discard_sectors)
			disable_discard(md);
		else if (bio_op(bio) == REQ_OP_WRITE_SAME &&
			 !q->limits.max_write_same_sectors)
			disable_write_same(md);
		else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
			 !q->limits.max_write_zeroes_sectors)
			disable_write_zeroes(md);
	}

	if (blk_queue_is_zoned(q))
		dm_zone_endio(io, bio);

	if (endio) {
		int r = endio(tio->ti, bio, &error);
		switch (r) {
		case DM_ENDIO_REQUEUE:
			/*
			 * Requeuing writes to a sequential zone of a zoned
			 * target will break the sequential write pattern:
			 * fail such IO.
			 */
			if (WARN_ON_ONCE(dm_is_zone_write(md, bio)))
				error = BLK_STS_IOERR;
			else
				error = BLK_STS_DM_REQUEUE;
			fallthrough;
		case DM_ENDIO_DONE:
			break;
		case DM_ENDIO_INCOMPLETE:
			/* The target will handle the io */
			return;
		default:
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	if (unlikely(swap_bios_limit(tio->ti, bio))) {
		struct mapped_device *md = io->md;
		up(&md->swap_bios_semaphore);
	}

	free_tio(tio);
	dm_io_dec_pending(io, error);
}

/*
 * Return maximum size of I/O possible at the supplied sector up to the current
 * target boundary.
 */
static inline sector_t max_io_len_target_boundary(struct dm_target *ti,
						  sector_t target_offset)
{
	return ti->len - target_offset;
}

static sector_t max_io_len(struct dm_target *ti, sector_t sector)
{
	sector_t target_offset = dm_target_offset(ti, sector);
	sector_t len = max_io_len_target_boundary(ti, target_offset);
	sector_t max_len;

	/*
	 * Does the target need to split IO even further?
	 * - varied (per target) IO splitting is a tenet of DM; this
	 *   explains why stacked chunk_sectors based splitting via
	 *   blk_max_size_offset() isn't possible here. So pass in
	 *   ti->max_io_len to override stacked chunk_sectors.
	 */
	if (ti->max_io_len) {
		max_len = blk_max_size_offset(ti->table->md->queue,
					      target_offset, ti->max_io_len);
		if (len > max_len)
			len = max_len;
	}

	return len;
}

int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
{
	if (len > UINT_MAX) {
		DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
		      (unsigned long long)len, UINT_MAX);
		ti->error = "Maximum size of target IO is too large";
		return -EINVAL;
	}

	ti->max_io_len = (uint32_t) len;

	return 0;
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
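
/*
 * Illustrative worked example (hypothetical numbers, ignoring the queue's
 * max_sectors cap): for a target spanning 1000 sectors with
 * ti->max_io_len == 64, an I/O at target offset 900 sees a boundary of
 * 1000 - 900 = 100 sectors, but blk_max_size_offset() only allows up to
 * the next 64-sector chunk border at 960, i.e. 60 sectors, so
 * max_io_len() returns min(100, 60) = 60.
 */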

static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
						sector_t sector, int *srcu_idx)
	__acquires(md->io_barrier)
{
	struct dm_table *map;
	struct dm_target *ti;

	map = dm_get_live_table(md, srcu_idx);
	if (!map)
		return NULL;

	ti = dm_table_find_target(map, sector);
	if (!ti)
		return NULL;

	return ti;
}

static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
				 long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long len, ret = -EIO;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->direct_access)
		goto out;
	len = max_io_len(ti, sector) / PAGE_SECTORS;
	if (len < 1)
		goto out;
	nr_pages = min(len, nr_pages);
	ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);

out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

static bool dm_dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
			     int blocksize, sector_t start, sector_t len)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	struct dm_table *map;
	bool ret = false;
	int srcu_idx;

	map = dm_get_live_table(md, &srcu_idx);
	if (!map)
		goto out;

	ret = dm_table_supports_dax(map, device_not_dax_capable, &blocksize);

out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
				    void *addr, size_t bytes, struct iov_iter *i)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long ret = 0;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->dax_copy_from_iter) {
		ret = copy_from_iter(addr, bytes, i);
		goto out;
	}
	ret = ti->type->dax_copy_from_iter(ti, pgoff, addr, bytes, i);
out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

static size_t dm_dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
				  void *addr, size_t bytes, struct iov_iter *i)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long ret = 0;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->dax_copy_to_iter) {
		ret = copy_to_iter(addr, bytes, i);
		goto out;
	}
	ret = ti->type->dax_copy_to_iter(ti, pgoff, addr, bytes, i);
out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
				  size_t nr_pages)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	int ret = -EIO;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (WARN_ON(!ti->type->dax_zero_page_range)) {
		/*
		 * ->zero_page_range() is a mandatory dax operation. If we are
		 * here, something is wrong.
		 */
		goto out;
	}
	ret = ti->type->dax_zero_page_range(ti, pgoff, nr_pages);
out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

/*
 * A target may call dm_accept_partial_bio only from the map routine. It is
 * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_* zone management
 * operations and REQ_OP_ZONE_APPEND (zone append writes).
 *
 * dm_accept_partial_bio informs the dm that the target only wants to process
 * additional n_sectors sectors of the bio and the rest of the data should be
 * sent in a next bio.
 *
 * A diagram that explains the arithmetic:
 * +--------------------+---------------+-------+
 * |         1          |       2       |   3   |
 * +--------------------+---------------+-------+
 *
 * <-------------- *tio->len_ptr --------------->
 *                      <------- bi_size ------->
 *                      <-- n_sectors -->
 *
 * Region 1 was already iterated over with bio_advance or similar function.
 *	(it may be empty if the target doesn't use bio_advance)
 * Region 2 is the remaining bio size that the target wants to process.
 *	(it may be empty if region 1 is non-empty, although there is no reason
 *	 to make it empty)
 * The target requires that region 3 is to be sent in the next bio.
 *
 * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
 * the partially processed part (the sum of regions 1+2) must be the same for all
 * copies of the bio.
 */
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;

	BUG_ON(bio->bi_opf & REQ_PREFLUSH);
	BUG_ON(op_is_zone_mgmt(bio_op(bio)));
	BUG_ON(bio_op(bio) == REQ_OP_ZONE_APPEND);
	BUG_ON(bi_size > *tio->len_ptr);
	BUG_ON(n_sectors > bi_size);

	*tio->len_ptr -= bi_size - n_sectors;
	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
}
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
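
/*
 * Illustrative worked example (hypothetical numbers): suppose a map routine
 * receives a clone whose bi_size covers 8 sectors and *tio->len_ptr == 8,
 * but it can only handle the first 3 sectors:
 *
 *	dm_accept_partial_bio(bio, 3);
 *
 * Afterwards *tio->len_ptr == 8 - (8 - 3) == 3 and bi_size covers 3
 * sectors; the remaining 5 sectors are sent to the target in a next bio.
 */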
1210bb37d772SDamien Le Moal */ 1211bb37d772SDamien Le Moal if (dm_emulate_zone_append(io->md)) 1212bb37d772SDamien Le Moal r = dm_zone_map_bio(tio); 1213bb37d772SDamien Le Moal else 12147de3ee57SMikulas Patocka r = ti->type->map(ti, clone); 1215bb37d772SDamien Le Moal 1216846785e6SChristoph Hellwig switch (r) { 1217846785e6SChristoph Hellwig case DM_MAPIO_SUBMITTED: 1218846785e6SChristoph Hellwig break; 1219846785e6SChristoph Hellwig case DM_MAPIO_REMAPPED: 12201da177e4SLinus Torvalds /* the bio has been remapped so dispatch it */ 12211c02fca6SChristoph Hellwig trace_block_bio_remap(clone, bio_dev(io->orig_bio), sector); 1222ed00aabdSChristoph Hellwig ret = submit_bio_noacct(clone); 1223846785e6SChristoph Hellwig break; 1224846785e6SChristoph Hellwig case DM_MAPIO_KILL: 1225a666e5c0SMikulas Patocka if (unlikely(swap_bios_limit(ti, clone))) { 1226a666e5c0SMikulas Patocka struct mapped_device *md = io->md; 1227a666e5c0SMikulas Patocka up(&md->swap_bios_semaphore); 1228a666e5c0SMikulas Patocka } 12294e4cbee9SChristoph Hellwig free_tio(tio); 1230e2118b3cSDamien Le Moal dm_io_dec_pending(io, BLK_STS_IOERR); 12314e4cbee9SChristoph Hellwig break; 1232846785e6SChristoph Hellwig case DM_MAPIO_REQUEUE: 1233a666e5c0SMikulas Patocka if (unlikely(swap_bios_limit(ti, clone))) { 1234a666e5c0SMikulas Patocka struct mapped_device *md = io->md; 1235a666e5c0SMikulas Patocka up(&md->swap_bios_semaphore); 1236a666e5c0SMikulas Patocka } 1237cfae7529SMike Snitzer free_tio(tio); 1238e2118b3cSDamien Le Moal dm_io_dec_pending(io, BLK_STS_DM_REQUEUE); 1239846785e6SChristoph Hellwig break; 1240846785e6SChristoph Hellwig default: 124145cbcd79SKiyoshi Ueda DMWARN("unimplemented target map return value: %d", r); 124245cbcd79SKiyoshi Ueda BUG(); 12431da177e4SLinus Torvalds } 12441da177e4SLinus Torvalds 1245978e51baSMike Snitzer return ret; 12461da177e4SLinus Torvalds } 12471da177e4SLinus Torvalds 1248e0d6609aSMikulas Patocka static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len) 1249bd2a49b8SAlasdair G Kergon { 12504f024f37SKent Overstreet bio->bi_iter.bi_sector = sector; 12514f024f37SKent Overstreet bio->bi_iter.bi_size = to_bytes(len); 12521da177e4SLinus Torvalds } 12531da177e4SLinus Torvalds 12541da177e4SLinus Torvalds /* 12551da177e4SLinus Torvalds * Creates a bio that consists of range of complete bvecs. 12561da177e4SLinus Torvalds */ 1257c80914e8SMike Snitzer static int clone_bio(struct dm_target_io *tio, struct bio *bio, 12581c3b13e6SKent Overstreet sector_t sector, unsigned len) 12591da177e4SLinus Torvalds { 1260dba14160SMikulas Patocka struct bio *clone = &tio->clone; 126107560151SEric Biggers int r; 12621da177e4SLinus Torvalds 12631c3b13e6SKent Overstreet __bio_clone_fast(clone, bio); 12649c47008dSMartin K. 
Petersen 126507560151SEric Biggers r = bio_crypt_clone(clone, bio, GFP_NOIO); 126607560151SEric Biggers if (r < 0) 126707560151SEric Biggers return r; 1268a892c8d5SSatya Tangirala 126957c36519SMike Snitzer if (bio_integrity(bio)) { 1270e2460f2aSMikulas Patocka if (unlikely(!dm_target_has_integrity(tio->ti->type) && 1271e2460f2aSMikulas Patocka !dm_target_passes_integrity(tio->ti->type))) { 1272e2460f2aSMikulas Patocka DMWARN("%s: the target %s doesn't support integrity data.", 1273e2460f2aSMikulas Patocka dm_device_name(tio->io->md), 1274e2460f2aSMikulas Patocka tio->ti->type->name); 1275e2460f2aSMikulas Patocka return -EIO; 1276e2460f2aSMikulas Patocka } 1277e2460f2aSMikulas Patocka 1278e2460f2aSMikulas Patocka r = bio_integrity_clone(clone, bio, GFP_NOIO); 1279c80914e8SMike Snitzer if (r < 0) 1280c80914e8SMike Snitzer return r; 1281c80914e8SMike Snitzer } 12821c3b13e6SKent Overstreet 1283fa8db494SMike Snitzer bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector)); 1284fa8db494SMike Snitzer clone->bi_iter.bi_size = to_bytes(len); 1285fa8db494SMike Snitzer 1286fa8db494SMike Snitzer if (bio_integrity(bio)) 1287fa8db494SMike Snitzer bio_integrity_trim(clone); 1288c80914e8SMike Snitzer 1289c80914e8SMike Snitzer return 0; 12901da177e4SLinus Torvalds } 12911da177e4SLinus Torvalds 1292318716ddSMike Snitzer static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci, 1293318716ddSMike Snitzer struct dm_target *ti, unsigned num_bios) 1294f9ab94ceSMikulas Patocka { 1295dba14160SMikulas Patocka struct dm_target_io *tio; 1296318716ddSMike Snitzer int try; 1297dba14160SMikulas Patocka 1298318716ddSMike Snitzer if (!num_bios) 1299318716ddSMike Snitzer return; 1300f9ab94ceSMikulas Patocka 1301318716ddSMike Snitzer if (num_bios == 1) { 1302318716ddSMike Snitzer tio = alloc_tio(ci, ti, 0, GFP_NOIO); 1303318716ddSMike Snitzer bio_list_add(blist, &tio->clone); 1304318716ddSMike Snitzer return; 13059015df24SAlasdair G Kergon } 13069015df24SAlasdair G Kergon 1307318716ddSMike Snitzer for (try = 0; try < 2; try++) { 1308318716ddSMike Snitzer int bio_nr; 1309318716ddSMike Snitzer struct bio *bio; 1310318716ddSMike Snitzer 1311318716ddSMike Snitzer if (try) 1312bc02cdbeSMike Snitzer mutex_lock(&ci->io->md->table_devices_lock); 1313318716ddSMike Snitzer for (bio_nr = 0; bio_nr < num_bios; bio_nr++) { 1314318716ddSMike Snitzer tio = alloc_tio(ci, ti, bio_nr, try ? 
GFP_NOIO : GFP_NOWAIT); 1315318716ddSMike Snitzer if (!tio) 1316318716ddSMike Snitzer break; 1317318716ddSMike Snitzer 1318318716ddSMike Snitzer bio_list_add(blist, &tio->clone); 1319318716ddSMike Snitzer } 1320318716ddSMike Snitzer if (try) 1321bc02cdbeSMike Snitzer mutex_unlock(&ci->io->md->table_devices_lock); 1322318716ddSMike Snitzer if (bio_nr == num_bios) 1323318716ddSMike Snitzer return; 1324318716ddSMike Snitzer 1325318716ddSMike Snitzer while ((bio = bio_list_pop(blist))) { 1326318716ddSMike Snitzer tio = container_of(bio, struct dm_target_io, clone); 1327318716ddSMike Snitzer free_tio(tio); 1328318716ddSMike Snitzer } 1329318716ddSMike Snitzer } 1330318716ddSMike Snitzer } 1331318716ddSMike Snitzer 1332978e51baSMike Snitzer static blk_qc_t __clone_and_map_simple_bio(struct clone_info *ci, 1333318716ddSMike Snitzer struct dm_target_io *tio, unsigned *len) 13349015df24SAlasdair G Kergon { 1335dba14160SMikulas Patocka struct bio *clone = &tio->clone; 13369015df24SAlasdair G Kergon 13371dd40c3eSMikulas Patocka tio->len_ptr = len; 13381dd40c3eSMikulas Patocka 13391c3b13e6SKent Overstreet __bio_clone_fast(clone, ci->bio); 1340bd2a49b8SAlasdair G Kergon if (len) 13411dd40c3eSMikulas Patocka bio_setup_sector(clone, ci->sector, *len); 1342f9ab94ceSMikulas Patocka 1343978e51baSMike Snitzer return __map_bio(tio); 1344f9ab94ceSMikulas Patocka } 1345f9ab94ceSMikulas Patocka 134614fe594dSAlasdair G Kergon static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti, 13471dd40c3eSMikulas Patocka unsigned num_bios, unsigned *len) 134806a426ceSMike Snitzer { 1349318716ddSMike Snitzer struct bio_list blist = BIO_EMPTY_LIST; 1350318716ddSMike Snitzer struct bio *bio; 1351318716ddSMike Snitzer struct dm_target_io *tio; 135206a426ceSMike Snitzer 1353318716ddSMike Snitzer alloc_multiple_bios(&blist, ci, ti, num_bios); 1354318716ddSMike Snitzer 1355318716ddSMike Snitzer while ((bio = bio_list_pop(&blist))) { 1356318716ddSMike Snitzer tio = container_of(bio, struct dm_target_io, clone); 1357978e51baSMike Snitzer (void) __clone_and_map_simple_bio(ci, tio, len); 1358318716ddSMike Snitzer } 135906a426ceSMike Snitzer } 136006a426ceSMike Snitzer 136114fe594dSAlasdair G Kergon static int __send_empty_flush(struct clone_info *ci) 1362f9ab94ceSMikulas Patocka { 136306a426ceSMike Snitzer unsigned target_nr = 0; 1364f9ab94ceSMikulas Patocka struct dm_target *ti; 1365828678b8SMike Snitzer struct bio flush_bio; 1366828678b8SMike Snitzer 1367828678b8SMike Snitzer /* 1368828678b8SMike Snitzer * Use an on-stack bio for this, it's safe since we don't 1369828678b8SMike Snitzer * need to reference it after submit. It's just used as 1370828678b8SMike Snitzer * the basis for the clone(s). 
1371828678b8SMike Snitzer */ 1372828678b8SMike Snitzer bio_init(&flush_bio, NULL, 0); 1373828678b8SMike Snitzer flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC; 1374309dca30SChristoph Hellwig bio_set_dev(&flush_bio, ci->io->md->disk->part0); 137547d95102SChristoph Hellwig 1376828678b8SMike Snitzer ci->bio = &flush_bio; 1377828678b8SMike Snitzer ci->sector_count = 0; 1378f9ab94ceSMikulas Patocka 1379b372d360SMike Snitzer BUG_ON(bio_has_data(ci->bio)); 1380f9ab94ceSMikulas Patocka while ((ti = dm_table_get_target(ci->map, target_nr++))) 13811dd40c3eSMikulas Patocka __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL); 1382828678b8SMike Snitzer 1383828678b8SMike Snitzer bio_uninit(ci->bio); 1384f9ab94ceSMikulas Patocka return 0; 1385f9ab94ceSMikulas Patocka } 1386f9ab94ceSMikulas Patocka 1387c80914e8SMike Snitzer static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti, 13881dd40c3eSMikulas Patocka sector_t sector, unsigned *len) 13895ae89a87SMike Snitzer { 1390dba14160SMikulas Patocka struct bio *bio = ci->bio; 13915ae89a87SMike Snitzer struct dm_target_io *tio; 1392f31c21e4SNeilBrown int r; 13935ae89a87SMike Snitzer 1394318716ddSMike Snitzer tio = alloc_tio(ci, ti, 0, GFP_NOIO); 13951dd40c3eSMikulas Patocka tio->len_ptr = len; 1396c80914e8SMike Snitzer r = clone_bio(tio, bio, sector, *len); 1397072623deSMikulas Patocka if (r < 0) { 1398cfae7529SMike Snitzer free_tio(tio); 1399c80914e8SMike Snitzer return r; 1400b0d8ed4dSAlasdair G Kergon } 1401978e51baSMike Snitzer (void) __map_bio(tio); 140255a62eefSAlasdair G Kergon 1403f31c21e4SNeilBrown return 0; 140423508a96SMike Snitzer } 140555a62eefSAlasdair G Kergon 14063d7f4562SMike Snitzer static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti, 140761697a6aSMike Snitzer unsigned num_bios) 14085ae89a87SMike Snitzer { 140951b86f9aSMichael Lass unsigned len; 14105ae89a87SMike Snitzer 14115ae89a87SMike Snitzer /* 141223508a96SMike Snitzer * Even though the device advertised support for this type of 141323508a96SMike Snitzer * request, that does not mean every target supports it, and 1414936688d7SMike Snitzer * reconfiguration might also have changed that since the 14155ae89a87SMike Snitzer * check was performed. 
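 * For example (editor's note): a REQ_OP_DISCARD that arrives after a
 * reload to a table whose target cleared num_discard_bios reaches this
 * point with num_bios == 0 and is failed with -EOPNOTSUPP below.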
14165ae89a87SMike Snitzer */ 141755a62eefSAlasdair G Kergon if (!num_bios) 14185ae89a87SMike Snitzer return -EOPNOTSUPP; 14195ae89a87SMike Snitzer 14203720281dSMike Snitzer len = min_t(sector_t, ci->sector_count, 14213720281dSMike Snitzer max_io_len_target_boundary(ti, dm_target_offset(ti, ci->sector))); 142251b86f9aSMichael Lass 14231dd40c3eSMikulas Patocka __send_duplicate_bios(ci, ti, num_bios, &len); 14245ae89a87SMike Snitzer 1425a79245b3SMike Snitzer ci->sector += len; 14263d7f4562SMike Snitzer ci->sector_count -= len; 14275ae89a87SMike Snitzer 14285ae89a87SMike Snitzer return 0; 14295ae89a87SMike Snitzer } 14305ae89a87SMike Snitzer 1431568c73a3SMike Snitzer static bool is_abnormal_io(struct bio *bio) 1432568c73a3SMike Snitzer { 1433568c73a3SMike Snitzer bool r = false; 1434568c73a3SMike Snitzer 1435568c73a3SMike Snitzer switch (bio_op(bio)) { 1436568c73a3SMike Snitzer case REQ_OP_DISCARD: 1437568c73a3SMike Snitzer case REQ_OP_SECURE_ERASE: 1438568c73a3SMike Snitzer case REQ_OP_WRITE_SAME: 1439568c73a3SMike Snitzer case REQ_OP_WRITE_ZEROES: 1440568c73a3SMike Snitzer r = true; 1441568c73a3SMike Snitzer break; 1442568c73a3SMike Snitzer } 1443568c73a3SMike Snitzer 1444568c73a3SMike Snitzer return r; 1445568c73a3SMike Snitzer } 1446568c73a3SMike Snitzer 14470519c71eSMike Snitzer static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti, 14480519c71eSMike Snitzer int *result) 14490519c71eSMike Snitzer { 14500519c71eSMike Snitzer struct bio *bio = ci->bio; 14519679b5a7SMike Snitzer unsigned num_bios = 0; 14520519c71eSMike Snitzer 14539679b5a7SMike Snitzer switch (bio_op(bio)) { 14549679b5a7SMike Snitzer case REQ_OP_DISCARD: 14559679b5a7SMike Snitzer num_bios = ti->num_discard_bios; 14569679b5a7SMike Snitzer break; 14579679b5a7SMike Snitzer case REQ_OP_SECURE_ERASE: 14589679b5a7SMike Snitzer num_bios = ti->num_secure_erase_bios; 14599679b5a7SMike Snitzer break; 14609679b5a7SMike Snitzer case REQ_OP_WRITE_SAME: 14619679b5a7SMike Snitzer num_bios = ti->num_write_same_bios; 14629679b5a7SMike Snitzer break; 14639679b5a7SMike Snitzer case REQ_OP_WRITE_ZEROES: 14649679b5a7SMike Snitzer num_bios = ti->num_write_zeroes_bios; 14659679b5a7SMike Snitzer break; 14669679b5a7SMike Snitzer default: 14670519c71eSMike Snitzer return false; 14689679b5a7SMike Snitzer } 14690519c71eSMike Snitzer 14709679b5a7SMike Snitzer *result = __send_changing_extent_only(ci, ti, num_bios); 14710519c71eSMike Snitzer return true; 14720519c71eSMike Snitzer } 14730519c71eSMike Snitzer 1474e4c93811SAlasdair G Kergon /* 1475e4c93811SAlasdair G Kergon * Select the correct strategy for processing a non-flush bio. 
1476e4c93811SAlasdair G Kergon */ 1477e4c93811SAlasdair G Kergon static int __split_and_process_non_flush(struct clone_info *ci) 1478e4c93811SAlasdair G Kergon { 1479e4c93811SAlasdair G Kergon struct dm_target *ti; 14801c3b13e6SKent Overstreet unsigned len; 1481c80914e8SMike Snitzer int r; 1482e4c93811SAlasdair G Kergon 1483e4c93811SAlasdair G Kergon ti = dm_table_find_target(ci->map, ci->sector); 1484123d87d5SMikulas Patocka if (!ti) 1485e4c93811SAlasdair G Kergon return -EIO; 1486e4c93811SAlasdair G Kergon 1487568c73a3SMike Snitzer if (__process_abnormal_io(ci, ti, &r)) 14880519c71eSMike Snitzer return r; 14893d7f4562SMike Snitzer 14903720281dSMike Snitzer len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count); 1491e4c93811SAlasdair G Kergon 1492c80914e8SMike Snitzer r = __clone_and_map_data_bio(ci, ti, ci->sector, &len); 1493c80914e8SMike Snitzer if (r < 0) 1494c80914e8SMike Snitzer return r; 1495e4c93811SAlasdair G Kergon 1496e4c93811SAlasdair G Kergon ci->sector += len; 1497e4c93811SAlasdair G Kergon ci->sector_count -= len; 1498e4c93811SAlasdair G Kergon 1499e4c93811SAlasdair G Kergon return 0; 1500e4c93811SAlasdair G Kergon } 1501e4c93811SAlasdair G Kergon 1502978e51baSMike Snitzer static void init_clone_info(struct clone_info *ci, struct mapped_device *md, 1503978e51baSMike Snitzer struct dm_table *map, struct bio *bio) 1504978e51baSMike Snitzer { 1505978e51baSMike Snitzer ci->map = map; 1506978e51baSMike Snitzer ci->io = alloc_io(md, bio); 1507978e51baSMike Snitzer ci->sector = bio->bi_iter.bi_sector; 1508978e51baSMike Snitzer } 1509978e51baSMike Snitzer 1510a1e1cb72SMike Snitzer #define __dm_part_stat_sub(part, field, subnd) \ 1511a1e1cb72SMike Snitzer (part_stat_get(part, field) -= (subnd)) 1512a1e1cb72SMike Snitzer 1513e4c93811SAlasdair G Kergon /* 151414fe594dSAlasdair G Kergon * Entry point to split a bio into clones and submit them to the targets. 15151da177e4SLinus Torvalds */ 1516978e51baSMike Snitzer static blk_qc_t __split_and_process_bio(struct mapped_device *md, 151783d5e5b0SMikulas Patocka struct dm_table *map, struct bio *bio) 15181da177e4SLinus Torvalds { 15191da177e4SLinus Torvalds struct clone_info ci; 1520978e51baSMike Snitzer blk_qc_t ret = BLK_QC_T_NONE; 1521512875bdSJun'ichi Nomura int error = 0; 15221da177e4SLinus Torvalds 1523978e51baSMike Snitzer init_clone_info(&ci, md, map, bio); 1524bd2a49b8SAlasdair G Kergon 15251eff9d32SJens Axboe if (bio->bi_opf & REQ_PREFLUSH) { 152614fe594dSAlasdair G Kergon error = __send_empty_flush(&ci); 1527e2118b3cSDamien Le Moal /* dm_io_dec_pending submits any data associated with flush */ 15282e2d6f7eSAjay Joshi } else if (op_is_zone_mgmt(bio_op(bio))) { 1529a4aa5e56SDamien Le Moal ci.bio = bio; 1530a4aa5e56SDamien Le Moal ci.sector_count = 0; 1531a4aa5e56SDamien Le Moal error = __split_and_process_non_flush(&ci); 1532b372d360SMike Snitzer } else { 15336a8736d1STejun Heo ci.bio = bio; 15341da177e4SLinus Torvalds ci.sector_count = bio_sectors(bio); 153514fe594dSAlasdair G Kergon error = __split_and_process_non_flush(&ci); 1536985eabdcSJeffle Xu if (ci.sector_count && !error) { 153718a25da8SNeilBrown /* 1538ed00aabdSChristoph Hellwig * Remainder must be passed to submit_bio_noacct() 153918a25da8SNeilBrown * so that it gets handled *after* bios already submitted 154018a25da8SNeilBrown * have been completely processed. 
154118a25da8SNeilBrown * We take a clone of the original to store in 1542745dc570SMike Snitzer * ci.io->orig_bio to be used by end_io_acct() and 154318a25da8SNeilBrown * for dm_io_dec_pending to use for completion handling. 154418a25da8SNeilBrown */ 1545f21c601aSMike Snitzer struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count, 1546f21c601aSMike Snitzer GFP_NOIO, &md->queue->bio_split); 1547745dc570SMike Snitzer ci.io->orig_bio = b; 1548a1e1cb72SMike Snitzer 1549a1e1cb72SMike Snitzer /* 1550a1e1cb72SMike Snitzer * Adjust IO stats for each split, otherwise upon queue 1551a1e1cb72SMike Snitzer * reentry there will be redundant IO accounting. 1552a1e1cb72SMike Snitzer * NOTE: this is a stop-gap fix; a proper fix involves 1553a1e1cb72SMike Snitzer * significant refactoring of DM core's bio splitting 1554a1e1cb72SMike Snitzer * (by eliminating DM's splitting and just using bio_split) 1555a1e1cb72SMike Snitzer */ 1556a1e1cb72SMike Snitzer part_stat_lock(); 15578446fe92SChristoph Hellwig __dm_part_stat_sub(dm_disk(md)->part0, 1558a1e1cb72SMike Snitzer sectors[op_stat_group(bio_op(bio))], ci.sector_count); 1559a1e1cb72SMike Snitzer part_stat_unlock(); 1560a1e1cb72SMike Snitzer 156118a25da8SNeilBrown bio_chain(b, bio); 1562eb6f7f7cSChristoph Hellwig trace_block_split(b, bio->bi_iter.bi_sector); 1563ed00aabdSChristoph Hellwig ret = submit_bio_noacct(bio); 156418a25da8SNeilBrown } 1565d87f4c14STejun Heo } 15661da177e4SLinus Torvalds 15671da177e4SLinus Torvalds /* drop the extra reference count */ 1568e2118b3cSDamien Le Moal dm_io_dec_pending(ci.io, errno_to_blk_status(error)); 1569978e51baSMike Snitzer return ret; 15701da177e4SLinus Torvalds } 15711da177e4SLinus Torvalds 1572c62b37d9SChristoph Hellwig static blk_qc_t dm_submit_bio(struct bio *bio) 15731da177e4SLinus Torvalds { 1574309dca30SChristoph Hellwig struct mapped_device *md = bio->bi_bdev->bd_disk->private_data; 1575978e51baSMike Snitzer blk_qc_t ret = BLK_QC_T_NONE; 157683d5e5b0SMikulas Patocka int srcu_idx; 157783d5e5b0SMikulas Patocka struct dm_table *map; 15781da177e4SLinus Torvalds 157983d5e5b0SMikulas Patocka map = dm_get_live_table(md, &srcu_idx); 1580b2abdb1bSMike Snitzer if (unlikely(!map)) { 1581b2abdb1bSMike Snitzer DMERR_LIMIT("%s: mapping table unavailable, erroring io", 1582b2abdb1bSMike Snitzer dm_device_name(md)); 15836a8736d1STejun Heo bio_io_error(bio); 1584b2abdb1bSMike Snitzer goto out; 15851da177e4SLinus Torvalds } 158692c63902SMikulas Patocka 1587b2abdb1bSMike Snitzer /* If suspended, queue this IO for later */ 15881da177e4SLinus Torvalds if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { 15896abc4946SKonstantin Khlebnikov if (bio->bi_opf & REQ_NOWAIT) 15906abc4946SKonstantin Khlebnikov bio_wouldblock_error(bio); 1591b2abdb1bSMike Snitzer else if (bio->bi_opf & REQ_RAHEAD) 15921da177e4SLinus Torvalds bio_io_error(bio); 1593b2abdb1bSMike Snitzer else 1594b2abdb1bSMike Snitzer queue_io(md, bio); 1595b2abdb1bSMike Snitzer goto out; 15961da177e4SLinus Torvalds } 15971da177e4SLinus Torvalds 1598b2abdb1bSMike Snitzer /* 1599b2abdb1bSMike Snitzer * Use blk_queue_split() for abnormal IO (e.g. discard, write same, etc.), 1600b2abdb1bSMike Snitzer * otherwise the associated queue_limits won't be imposed.
1601b2abdb1bSMike Snitzer */ 1602b2abdb1bSMike Snitzer if (is_abnormal_io(bio)) 1603b2abdb1bSMike Snitzer blk_queue_split(&bio); 1604978e51baSMike Snitzer 1605b2abdb1bSMike Snitzer ret = __split_and_process_bio(md, map, bio); 1606b2abdb1bSMike Snitzer out: 160783d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 1608978e51baSMike Snitzer return ret; 1609978e51baSMike Snitzer } 1610978e51baSMike Snitzer 16111da177e4SLinus Torvalds /*----------------------------------------------------------------- 16121da177e4SLinus Torvalds * An IDR is used to keep track of allocated minor numbers. 16131da177e4SLinus Torvalds *---------------------------------------------------------------*/ 16142b06cfffSAlasdair G Kergon static void free_minor(int minor) 16151da177e4SLinus Torvalds { 1616f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 16171da177e4SLinus Torvalds idr_remove(&_minor_idr, minor); 1618f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 16191da177e4SLinus Torvalds } 16201da177e4SLinus Torvalds 16211da177e4SLinus Torvalds /* 16221da177e4SLinus Torvalds * See if the device with a specific minor # is free. 16231da177e4SLinus Torvalds */ 1624cf13ab8eSFrederik Deweerdt static int specific_minor(int minor) 16251da177e4SLinus Torvalds { 1626c9d76be6STejun Heo int r; 16271da177e4SLinus Torvalds 16281da177e4SLinus Torvalds if (minor >= (1 << MINORBITS)) 16291da177e4SLinus Torvalds return -EINVAL; 16301da177e4SLinus Torvalds 1631c9d76be6STejun Heo idr_preload(GFP_KERNEL); 1632f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 16331da177e4SLinus Torvalds 1634c9d76be6STejun Heo r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT); 16351da177e4SLinus Torvalds 1636f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 1637c9d76be6STejun Heo idr_preload_end(); 1638c9d76be6STejun Heo if (r < 0) 1639c9d76be6STejun Heo return r == -ENOSPC ? 
-EBUSY : r; 1640c9d76be6STejun Heo return 0; 16411da177e4SLinus Torvalds } 16421da177e4SLinus Torvalds 1643cf13ab8eSFrederik Deweerdt static int next_free_minor(int *minor) 16441da177e4SLinus Torvalds { 1645c9d76be6STejun Heo int r; 16461da177e4SLinus Torvalds 1647c9d76be6STejun Heo idr_preload(GFP_KERNEL); 1648f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 16491da177e4SLinus Torvalds 1650c9d76be6STejun Heo r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT); 16511da177e4SLinus Torvalds 1652f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 1653c9d76be6STejun Heo idr_preload_end(); 1654c9d76be6STejun Heo if (r < 0) 16551da177e4SLinus Torvalds return r; 1656c9d76be6STejun Heo *minor = r; 1657c9d76be6STejun Heo return 0; 16581da177e4SLinus Torvalds } 16591da177e4SLinus Torvalds 166083d5cde4SAlexey Dobriyan static const struct block_device_operations dm_blk_dops; 1661681cc5e8SMike Snitzer static const struct block_device_operations dm_rq_blk_dops; 1662f26c5719SDan Williams static const struct dax_operations dm_dax_ops; 16631da177e4SLinus Torvalds 166453d5914fSMikulas Patocka static void dm_wq_work(struct work_struct *work); 166553d5914fSMikulas Patocka 1666aa6ce87aSSatya Tangirala #ifdef CONFIG_BLK_INLINE_ENCRYPTION 1667aa6ce87aSSatya Tangirala static void dm_queue_destroy_keyslot_manager(struct request_queue *q) 1668aa6ce87aSSatya Tangirala { 1669aa6ce87aSSatya Tangirala dm_destroy_keyslot_manager(q->ksm); 1670aa6ce87aSSatya Tangirala } 1671aa6ce87aSSatya Tangirala 1672aa6ce87aSSatya Tangirala #else /* CONFIG_BLK_INLINE_ENCRYPTION */ 1673aa6ce87aSSatya Tangirala 1674aa6ce87aSSatya Tangirala static inline void dm_queue_destroy_keyslot_manager(struct request_queue *q) 1675aa6ce87aSSatya Tangirala { 1676aa6ce87aSSatya Tangirala } 1677aa6ce87aSSatya Tangirala #endif /* !CONFIG_BLK_INLINE_ENCRYPTION */ 1678aa6ce87aSSatya Tangirala 16790f20972fSMike Snitzer static void cleanup_mapped_device(struct mapped_device *md) 16800f20972fSMike Snitzer { 16810f20972fSMike Snitzer if (md->wq) 16820f20972fSMike Snitzer destroy_workqueue(md->wq); 16836f1c819cSKent Overstreet bioset_exit(&md->bs); 16846f1c819cSKent Overstreet bioset_exit(&md->io_bs); 16850f20972fSMike Snitzer 1686f26c5719SDan Williams if (md->dax_dev) { 1687f26c5719SDan Williams kill_dax(md->dax_dev); 1688f26c5719SDan Williams put_dax(md->dax_dev); 1689f26c5719SDan Williams md->dax_dev = NULL; 1690f26c5719SDan Williams } 1691f26c5719SDan Williams 16920f20972fSMike Snitzer if (md->disk) { 16930f20972fSMike Snitzer spin_lock(&_minor_lock); 16940f20972fSMike Snitzer md->disk->private_data = NULL; 16950f20972fSMike Snitzer spin_unlock(&_minor_lock); 16960f20972fSMike Snitzer del_gendisk(md->disk); 16970f20972fSMike Snitzer } 16980f20972fSMike Snitzer 169974fe6ba9SChristoph Hellwig if (md->queue) 1700aa6ce87aSSatya Tangirala dm_queue_destroy_keyslot_manager(md->queue); 170174fe6ba9SChristoph Hellwig 170274fe6ba9SChristoph Hellwig if (md->disk) 170374fe6ba9SChristoph Hellwig blk_cleanup_disk(md->disk); 17040f20972fSMike Snitzer 1705d09960b0STahsin Erdogan cleanup_srcu_struct(&md->io_barrier); 1706d09960b0STahsin Erdogan 1707d5ffebddSMike Snitzer mutex_destroy(&md->suspend_lock); 1708d5ffebddSMike Snitzer mutex_destroy(&md->type_lock); 1709d5ffebddSMike Snitzer mutex_destroy(&md->table_devices_lock); 1710a666e5c0SMikulas Patocka mutex_destroy(&md->swap_bios_lock); 1711d5ffebddSMike Snitzer 17124cc96131SMike Snitzer dm_mq_cleanup_mapped_device(md); 1713bb37d772SDamien Le Moal dm_cleanup_zoned_dev(md); 17140f20972fSMike Snitzer } 
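/*
 * Editor's sketch (hypothetical helper, not part of dm.c): the minor
 * allocation scheme earlier in this file in miniature -- reserve a
 * specific minor via the IDR, then release it.  specific_minor() and
 * free_minor() are the real helpers defined above.
 */
static int __maybe_unused example_minor_roundtrip(int minor)
{
	int r = specific_minor(minor);	/* -EBUSY if the minor is taken */

	if (r)
		return r;
	free_minor(minor);
	return 0;
}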
17150f20972fSMike Snitzer 17161da177e4SLinus Torvalds /* 17171da177e4SLinus Torvalds * Allocate and initialise a blank device with a given minor. 17181da177e4SLinus Torvalds */ 17192b06cfffSAlasdair G Kergon static struct mapped_device *alloc_dev(int minor) 17201da177e4SLinus Torvalds { 1721115485e8SMike Snitzer int r, numa_node_id = dm_get_numa_node(); 1722115485e8SMike Snitzer struct mapped_device *md; 1723ba61fdd1SJeff Mahoney void *old_md; 17241da177e4SLinus Torvalds 1725856eb091SMikulas Patocka md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id); 17261da177e4SLinus Torvalds if (!md) { 17271da177e4SLinus Torvalds DMWARN("unable to allocate device, out of memory."); 17281da177e4SLinus Torvalds return NULL; 17291da177e4SLinus Torvalds } 17301da177e4SLinus Torvalds 173110da4f79SJeff Mahoney if (!try_module_get(THIS_MODULE)) 17326ed7ade8SMilan Broz goto bad_module_get; 173310da4f79SJeff Mahoney 17341da177e4SLinus Torvalds /* get a minor number for the dev */ 17352b06cfffSAlasdair G Kergon if (minor == DM_ANY_MINOR) 1736cf13ab8eSFrederik Deweerdt r = next_free_minor(&minor); 17372b06cfffSAlasdair G Kergon else 1738cf13ab8eSFrederik Deweerdt r = specific_minor(minor); 17391da177e4SLinus Torvalds if (r < 0) 17406ed7ade8SMilan Broz goto bad_minor; 17411da177e4SLinus Torvalds 174283d5e5b0SMikulas Patocka r = init_srcu_struct(&md->io_barrier); 174383d5e5b0SMikulas Patocka if (r < 0) 174483d5e5b0SMikulas Patocka goto bad_io_barrier; 174583d5e5b0SMikulas Patocka 1746115485e8SMike Snitzer md->numa_node_id = numa_node_id; 1747591ddcfcSMike Snitzer md->init_tio_pdu = false; 1748a5664dadSMike Snitzer md->type = DM_TYPE_NONE; 1749e61290a4SDaniel Walker mutex_init(&md->suspend_lock); 1750a5664dadSMike Snitzer mutex_init(&md->type_lock); 175186f1152bSBenjamin Marzinski mutex_init(&md->table_devices_lock); 1752022c2611SMikulas Patocka spin_lock_init(&md->deferred_lock); 17531da177e4SLinus Torvalds atomic_set(&md->holders, 1); 17545c6bd75dSAlasdair G Kergon atomic_set(&md->open_count, 0); 17551da177e4SLinus Torvalds atomic_set(&md->event_nr, 0); 17567a8c3d3bSMike Anderson atomic_set(&md->uevent_seq, 0); 17577a8c3d3bSMike Anderson INIT_LIST_HEAD(&md->uevent_list); 175886f1152bSBenjamin Marzinski INIT_LIST_HEAD(&md->table_devices); 17597a8c3d3bSMike Anderson spin_lock_init(&md->uevent_lock); 17601da177e4SLinus Torvalds 176147ace7e0SMike Snitzer /* 1762c62b37d9SChristoph Hellwig * default to bio-based until the DM table is loaded and md->type is 1763c62b37d9SChristoph Hellwig * established. If a request-based table is loaded, blk-mq will 1764c62b37d9SChristoph Hellwig * override accordingly.
176547ace7e0SMike Snitzer */ 176674fe6ba9SChristoph Hellwig md->disk = blk_alloc_disk(md->numa_node_id); 17671da177e4SLinus Torvalds if (!md->disk) 17680f20972fSMike Snitzer goto bad; 176974fe6ba9SChristoph Hellwig md->queue = md->disk->queue; 17701da177e4SLinus Torvalds 1771f0b04115SJeff Mahoney init_waitqueue_head(&md->wait); 177253d5914fSMikulas Patocka INIT_WORK(&md->work, dm_wq_work); 1773f0b04115SJeff Mahoney init_waitqueue_head(&md->eventq); 17742995fa78SMikulas Patocka init_completion(&md->kobj_holder.completion); 1775f0b04115SJeff Mahoney 1776a666e5c0SMikulas Patocka md->swap_bios = get_swap_bios(); 1777a666e5c0SMikulas Patocka sema_init(&md->swap_bios_semaphore, md->swap_bios); 1778a666e5c0SMikulas Patocka mutex_init(&md->swap_bios_lock); 1779a666e5c0SMikulas Patocka 17801da177e4SLinus Torvalds md->disk->major = _major; 17811da177e4SLinus Torvalds md->disk->first_minor = minor; 178274fe6ba9SChristoph Hellwig md->disk->minors = 1; 17831da177e4SLinus Torvalds md->disk->fops = &dm_blk_dops; 17841da177e4SLinus Torvalds md->disk->queue = md->queue; 17851da177e4SLinus Torvalds md->disk->private_data = md; 17861da177e4SLinus Torvalds sprintf(md->disk->disk_name, "dm-%d", minor); 1787f26c5719SDan Williams 1788976431b0SDan Williams if (IS_ENABLED(CONFIG_DAX_DRIVER)) { 1789fefc1d97SPankaj Gupta md->dax_dev = alloc_dax(md, md->disk->disk_name, 1790fefc1d97SPankaj Gupta &dm_dax_ops, 0); 17914e4ced93SVivek Goyal if (IS_ERR(md->dax_dev)) 1792f26c5719SDan Williams goto bad; 1793976431b0SDan Williams } 1794f26c5719SDan Williams 1795c100ec49SMike Snitzer add_disk_no_queue_reg(md->disk); 17967e51f257SMike Anderson format_dev_t(md->name, MKDEV(_major, minor)); 17971da177e4SLinus Torvalds 1798670368a8STejun Heo md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0); 1799304f3f6aSMilan Broz if (!md->wq) 18000f20972fSMike Snitzer goto bad; 1801304f3f6aSMilan Broz 1802fd2ed4d2SMikulas Patocka dm_stats_init(&md->stats); 1803fd2ed4d2SMikulas Patocka 1804ba61fdd1SJeff Mahoney /* Populate the mapping, nobody knows we exist yet */ 1805f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 1806ba61fdd1SJeff Mahoney old_md = idr_replace(&_minor_idr, md, minor); 1807f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 1808ba61fdd1SJeff Mahoney 1809ba61fdd1SJeff Mahoney BUG_ON(old_md != MINOR_ALLOCED); 1810ba61fdd1SJeff Mahoney 18111da177e4SLinus Torvalds return md; 18121da177e4SLinus Torvalds 18130f20972fSMike Snitzer bad: 18140f20972fSMike Snitzer cleanup_mapped_device(md); 181583d5e5b0SMikulas Patocka bad_io_barrier: 18161da177e4SLinus Torvalds free_minor(minor); 18176ed7ade8SMilan Broz bad_minor: 181810da4f79SJeff Mahoney module_put(THIS_MODULE); 18196ed7ade8SMilan Broz bad_module_get: 1820856eb091SMikulas Patocka kvfree(md); 18211da177e4SLinus Torvalds return NULL; 18221da177e4SLinus Torvalds } 18231da177e4SLinus Torvalds 1824ae9da83fSJun'ichi Nomura static void unlock_fs(struct mapped_device *md); 1825ae9da83fSJun'ichi Nomura 18261da177e4SLinus Torvalds static void free_dev(struct mapped_device *md) 18271da177e4SLinus Torvalds { 1828f331c029STejun Heo int minor = MINOR(disk_devt(md->disk)); 182963d94e48SJun'ichi Nomura 1830ae9da83fSJun'ichi Nomura unlock_fs(md); 18312eb6e1e3SKeith Busch 18320f20972fSMike Snitzer cleanup_mapped_device(md); 18330f20972fSMike Snitzer 18340f20972fSMike Snitzer free_table_devices(&md->table_devices); 18350f20972fSMike Snitzer dm_stats_cleanup(&md->stats); 183663a4f065SMike Snitzer free_minor(minor); 183763a4f065SMike Snitzer 183810da4f79SJeff Mahoney module_put(THIS_MODULE); 
1839856eb091SMikulas Patocka kvfree(md); 18401da177e4SLinus Torvalds } 18411da177e4SLinus Torvalds 18422a2a4c51SJens Axboe static int __bind_mempools(struct mapped_device *md, struct dm_table *t) 1843e6ee8c0bSKiyoshi Ueda { 1844c0820cf5SMikulas Patocka struct dm_md_mempools *p = dm_table_get_md_mempools(t); 18452a2a4c51SJens Axboe int ret = 0; 1846e6ee8c0bSKiyoshi Ueda 1847545ed20eSToshi Kani if (dm_table_bio_based(t)) { 1848c0820cf5SMikulas Patocka /* 184964f52b0eSMike Snitzer * The md may already have mempools that need changing. 185064f52b0eSMike Snitzer * If so, reload the bioset because front_pad may have changed 185116245bdcSJun'ichi Nomura * when a different table was loaded. 1852c0820cf5SMikulas Patocka */ 18536f1c819cSKent Overstreet bioset_exit(&md->bs); 18546f1c819cSKent Overstreet bioset_exit(&md->io_bs); 18550776aa0eSMike Snitzer 18566f1c819cSKent Overstreet } else if (bioset_initialized(&md->bs)) { 1857cbc4e3c1SMike Snitzer /* 18584e6e36c3SMike Snitzer * There's no need to reload with request-based dm 18594e6e36c3SMike Snitzer * because the size of front_pad doesn't change. 18604e6e36c3SMike Snitzer * Note for the future: if you reload the bioset, 18614e6e36c3SMike Snitzer * prepped requests in the queue may still refer 18624e6e36c3SMike Snitzer * to bios from the old bioset, so you must walk 18634e6e36c3SMike Snitzer * through the queue and unprep them. 1864cbc4e3c1SMike Snitzer */ 1865cbc4e3c1SMike Snitzer goto out; 1866cbc4e3c1SMike Snitzer } 1867cbc4e3c1SMike Snitzer 18686f1c819cSKent Overstreet BUG_ON(!p || 18696f1c819cSKent Overstreet bioset_initialized(&md->bs) || 18706f1c819cSKent Overstreet bioset_initialized(&md->io_bs)); 1871e6ee8c0bSKiyoshi Ueda 18722a2a4c51SJens Axboe ret = bioset_init_from_src(&md->bs, &p->bs); 18732a2a4c51SJens Axboe if (ret) 18742a2a4c51SJens Axboe goto out; 18752a2a4c51SJens Axboe ret = bioset_init_from_src(&md->io_bs, &p->io_bs); 18762a2a4c51SJens Axboe if (ret) 18772a2a4c51SJens Axboe bioset_exit(&md->bs); 1878e6ee8c0bSKiyoshi Ueda out: 187902233342SMike Snitzer /* mempool bind completed, no longer need any mempools in the table */ 1880e6ee8c0bSKiyoshi Ueda dm_table_free_md_mempools(t); 18812a2a4c51SJens Axboe return ret; 1882e6ee8c0bSKiyoshi Ueda } 1883e6ee8c0bSKiyoshi Ueda 18841da177e4SLinus Torvalds /* 18851da177e4SLinus Torvalds * Bind a table to the device. 18861da177e4SLinus Torvalds */ 18871da177e4SLinus Torvalds static void event_callback(void *context) 18881da177e4SLinus Torvalds { 18897a8c3d3bSMike Anderson unsigned long flags; 18907a8c3d3bSMike Anderson LIST_HEAD(uevents); 18911da177e4SLinus Torvalds struct mapped_device *md = (struct mapped_device *) context; 18921da177e4SLinus Torvalds 18937a8c3d3bSMike Anderson spin_lock_irqsave(&md->uevent_lock, flags); 18947a8c3d3bSMike Anderson list_splice_init(&md->uevent_list, &uevents); 18957a8c3d3bSMike Anderson spin_unlock_irqrestore(&md->uevent_lock, flags); 18967a8c3d3bSMike Anderson 1897ed9e1982STejun Heo dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); 18987a8c3d3bSMike Anderson 18991da177e4SLinus Torvalds atomic_inc(&md->event_nr); 19001da177e4SLinus Torvalds wake_up(&md->eventq); 190162e08243SMikulas Patocka dm_issue_global_event(); 19021da177e4SLinus Torvalds } 19031da177e4SLinus Torvalds 1904c217649bSMike Snitzer /* 1905042d2a9bSAlasdair G Kergon * Returns the old map, which the caller must destroy.
1906042d2a9bSAlasdair G Kergon */ 1907042d2a9bSAlasdair G Kergon static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, 1908754c5fc7SMike Snitzer struct queue_limits *limits) 19091da177e4SLinus Torvalds { 1910042d2a9bSAlasdair G Kergon struct dm_table *old_map; 1911165125e1SJens Axboe struct request_queue *q = md->queue; 1912978e51baSMike Snitzer bool request_based = dm_table_request_based(t); 19131da177e4SLinus Torvalds sector_t size; 19142a2a4c51SJens Axboe int ret; 19151da177e4SLinus Torvalds 19165a8f1f80SBart Van Assche lockdep_assert_held(&md->suspend_lock); 19175a8f1f80SBart Van Assche 19181da177e4SLinus Torvalds size = dm_table_get_size(t); 19193ac51e74SDarrick J. Wong 19203ac51e74SDarrick J. Wong /* 19213ac51e74SDarrick J. Wong * Wipe any geometry if the size of the table changed. 19223ac51e74SDarrick J. Wong */ 1923fd2ed4d2SMikulas Patocka if (size != dm_get_size(md)) 19243ac51e74SDarrick J. Wong memset(&md->geometry, 0, sizeof(md->geometry)); 19253ac51e74SDarrick J. Wong 19265424a0b8SMikulas Patocka if (!get_capacity(md->disk)) 19275424a0b8SMikulas Patocka set_capacity(md->disk, size); 19285424a0b8SMikulas Patocka else 1929f64d9b2eSChristoph Hellwig set_capacity_and_notify(md->disk, size); 19301da177e4SLinus Torvalds 1931cf222b37SAlasdair G Kergon dm_table_event_callback(t, event_callback, md); 19322ca3310eSAlasdair G Kergon 1933e6ee8c0bSKiyoshi Ueda /* 1934e6ee8c0bSKiyoshi Ueda * The queue hasn't been stopped yet, if the old table type wasn't 1935e6ee8c0bSKiyoshi Ueda * for request-based during suspension. So stop it to prevent 1936e6ee8c0bSKiyoshi Ueda * I/O mapping before resume. 1937e6ee8c0bSKiyoshi Ueda * This must be done before setting the queue restrictions, 1938e6ee8c0bSKiyoshi Ueda * because request-based dm may be run just after the setting. 1939e6ee8c0bSKiyoshi Ueda */ 1940978e51baSMike Snitzer if (request_based) 1941eca7ee6dSMike Snitzer dm_stop_queue(q); 1942978e51baSMike Snitzer 19439c37de29SMike Snitzer if (request_based) { 194416f12266SMike Snitzer /* 19459c37de29SMike Snitzer * Leverage the fact that request-based DM targets are 19469c37de29SMike Snitzer * immutable singletons - used to optimize dm_mq_queue_rq. 194716f12266SMike Snitzer */ 194816f12266SMike Snitzer md->immutable_target = dm_table_get_immutable_target(t); 194916f12266SMike Snitzer } 1950e6ee8c0bSKiyoshi Ueda 19512a2a4c51SJens Axboe ret = __bind_mempools(md, t); 19522a2a4c51SJens Axboe if (ret) { 19532a2a4c51SJens Axboe old_map = ERR_PTR(ret); 19542a2a4c51SJens Axboe goto out; 19552a2a4c51SJens Axboe } 1956e6ee8c0bSKiyoshi Ueda 1957bb37d772SDamien Le Moal ret = dm_table_set_restrictions(t, q, limits); 1958bb37d772SDamien Le Moal if (ret) { 1959bb37d772SDamien Le Moal old_map = ERR_PTR(ret); 1960bb37d772SDamien Le Moal goto out; 1961bb37d772SDamien Le Moal } 1962bb37d772SDamien Le Moal 1963a12f5d48SEric Dumazet old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 19641d3aa6f6SMike Snitzer rcu_assign_pointer(md->map, (void *)t); 196536a0456fSAlasdair G Kergon md->immutable_target_type = dm_table_get_immutable_target_type(t); 196636a0456fSAlasdair G Kergon 196741abc4e1SHannes Reinecke if (old_map) 196883d5e5b0SMikulas Patocka dm_sync_table(md); 19692ca3310eSAlasdair G Kergon 19702a2a4c51SJens Axboe out: 1971042d2a9bSAlasdair G Kergon return old_map; 19721da177e4SLinus Torvalds } 19731da177e4SLinus Torvalds 1974a7940155SAlasdair G Kergon /* 1975a7940155SAlasdair G Kergon * Returns unbound table for the caller to free. 
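 * (As __dm_destroy does below: dm_table_destroy(__unbind(md)).)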
1976a7940155SAlasdair G Kergon */ 1977a7940155SAlasdair G Kergon static struct dm_table *__unbind(struct mapped_device *md) 19781da177e4SLinus Torvalds { 1979a12f5d48SEric Dumazet struct dm_table *map = rcu_dereference_protected(md->map, 1); 19801da177e4SLinus Torvalds 19811da177e4SLinus Torvalds if (!map) 1982a7940155SAlasdair G Kergon return NULL; 19831da177e4SLinus Torvalds 19841da177e4SLinus Torvalds dm_table_event_callback(map, NULL, NULL); 19859cdb8520SMonam Agarwal RCU_INIT_POINTER(md->map, NULL); 198683d5e5b0SMikulas Patocka dm_sync_table(md); 1987a7940155SAlasdair G Kergon 1988a7940155SAlasdair G Kergon return map; 19891da177e4SLinus Torvalds } 19901da177e4SLinus Torvalds 19911da177e4SLinus Torvalds /* 19921da177e4SLinus Torvalds * Constructor for a new device. 19931da177e4SLinus Torvalds */ 19942b06cfffSAlasdair G Kergon int dm_create(int minor, struct mapped_device **result) 19951da177e4SLinus Torvalds { 1996c12c9a3cSMike Snitzer int r; 19971da177e4SLinus Torvalds struct mapped_device *md; 19981da177e4SLinus Torvalds 19992b06cfffSAlasdair G Kergon md = alloc_dev(minor); 20001da177e4SLinus Torvalds if (!md) 20011da177e4SLinus Torvalds return -ENXIO; 20021da177e4SLinus Torvalds 2003c12c9a3cSMike Snitzer r = dm_sysfs_init(md); 2004c12c9a3cSMike Snitzer if (r) { 2005c12c9a3cSMike Snitzer free_dev(md); 2006c12c9a3cSMike Snitzer return r; 2007c12c9a3cSMike Snitzer } 2008784aae73SMilan Broz 20091da177e4SLinus Torvalds *result = md; 20101da177e4SLinus Torvalds return 0; 20111da177e4SLinus Torvalds } 20121da177e4SLinus Torvalds 2013a5664dadSMike Snitzer /* 2014a5664dadSMike Snitzer * Functions to manage md->type. 2015a5664dadSMike Snitzer * All are required to hold md->type_lock. 2016a5664dadSMike Snitzer */ 2017a5664dadSMike Snitzer void dm_lock_md_type(struct mapped_device *md) 2018a5664dadSMike Snitzer { 2019a5664dadSMike Snitzer mutex_lock(&md->type_lock); 2020a5664dadSMike Snitzer } 2021a5664dadSMike Snitzer 2022a5664dadSMike Snitzer void dm_unlock_md_type(struct mapped_device *md) 2023a5664dadSMike Snitzer { 2024a5664dadSMike Snitzer mutex_unlock(&md->type_lock); 2025a5664dadSMike Snitzer } 2026a5664dadSMike Snitzer 20277e0d574fSBart Van Assche void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type) 2028a5664dadSMike Snitzer { 202900c4fc3bSMike Snitzer BUG_ON(!mutex_is_locked(&md->type_lock)); 2030a5664dadSMike Snitzer md->type = type; 2031a5664dadSMike Snitzer } 2032a5664dadSMike Snitzer 20337e0d574fSBart Van Assche enum dm_queue_mode dm_get_md_type(struct mapped_device *md) 2034a5664dadSMike Snitzer { 2035a5664dadSMike Snitzer return md->type; 2036a5664dadSMike Snitzer } 2037a5664dadSMike Snitzer 203836a0456fSAlasdair G Kergon struct target_type *dm_get_immutable_target_type(struct mapped_device *md) 203936a0456fSAlasdair G Kergon { 204036a0456fSAlasdair G Kergon return md->immutable_target_type; 204136a0456fSAlasdair G Kergon } 204236a0456fSAlasdair G Kergon 20434a0b4ddfSMike Snitzer /* 2044f84cb8a4SMike Snitzer * The queue_limits are only valid as long as you have a reference 2045f84cb8a4SMike Snitzer * count on 'md'. 
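 * A hedged example (editor's sketch, hypothetical caller):
 *	dm_get(md);
 *	limits = dm_get_queue_limits(md);
 *	(... use limits ...)
 *	dm_put(md);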
2046f84cb8a4SMike Snitzer */ 2047f84cb8a4SMike Snitzer struct queue_limits *dm_get_queue_limits(struct mapped_device *md) 2048f84cb8a4SMike Snitzer { 2049f84cb8a4SMike Snitzer BUG_ON(!atomic_read(&md->holders)); 2050f84cb8a4SMike Snitzer return &md->queue->limits; 2051f84cb8a4SMike Snitzer } 2052f84cb8a4SMike Snitzer EXPORT_SYMBOL_GPL(dm_get_queue_limits); 2053f84cb8a4SMike Snitzer 20544a0b4ddfSMike Snitzer /* 20554a0b4ddfSMike Snitzer * Setup the DM device's queue based on md's type 20564a0b4ddfSMike Snitzer */ 2057591ddcfcSMike Snitzer int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) 20584a0b4ddfSMike Snitzer { 2059bfebd1cdSMike Snitzer int r; 2060c100ec49SMike Snitzer struct queue_limits limits; 20617e0d574fSBart Van Assche enum dm_queue_mode type = dm_get_md_type(md); 2062bfebd1cdSMike Snitzer 2063545ed20eSToshi Kani switch (type) { 2064bfebd1cdSMike Snitzer case DM_TYPE_REQUEST_BASED: 2065681cc5e8SMike Snitzer md->disk->fops = &dm_rq_blk_dops; 2066e83068a5SMike Snitzer r = dm_mq_init_request_queue(md, t); 2067bfebd1cdSMike Snitzer if (r) { 2068681cc5e8SMike Snitzer DMERR("Cannot initialize queue for request-based dm mapped device"); 2069bfebd1cdSMike Snitzer return r; 2070bfebd1cdSMike Snitzer } 2071bfebd1cdSMike Snitzer break; 2072bfebd1cdSMike Snitzer case DM_TYPE_BIO_BASED: 2073545ed20eSToshi Kani case DM_TYPE_DAX_BIO_BASED: 2074bfebd1cdSMike Snitzer break; 20757e0d574fSBart Van Assche case DM_TYPE_NONE: 20767e0d574fSBart Van Assche WARN_ON_ONCE(true); 20777e0d574fSBart Van Assche break; 2078ff36ab34SMike Snitzer } 20794a0b4ddfSMike Snitzer 2080c100ec49SMike Snitzer r = dm_calculate_queue_limits(t, &limits); 2081c100ec49SMike Snitzer if (r) { 2082c100ec49SMike Snitzer DMERR("Cannot calculate initial queue limits"); 2083c100ec49SMike Snitzer return r; 2084c100ec49SMike Snitzer } 2085bb37d772SDamien Le Moal r = dm_table_set_restrictions(t, md->queue, &limits); 2086bb37d772SDamien Le Moal if (r) 2087bb37d772SDamien Le Moal return r; 2088bb37d772SDamien Le Moal 2089c100ec49SMike Snitzer blk_register_queue(md->disk); 2090c100ec49SMike Snitzer 20914a0b4ddfSMike Snitzer return 0; 20924a0b4ddfSMike Snitzer } 20934a0b4ddfSMike Snitzer 20942bec1f4aSMikulas Patocka struct mapped_device *dm_get_md(dev_t dev) 20951da177e4SLinus Torvalds { 20961da177e4SLinus Torvalds struct mapped_device *md; 20971da177e4SLinus Torvalds unsigned minor = MINOR(dev); 20981da177e4SLinus Torvalds 20991da177e4SLinus Torvalds if (MAJOR(dev) != _major || minor >= (1 << MINORBITS)) 21001da177e4SLinus Torvalds return NULL; 21011da177e4SLinus Torvalds 2102f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 21031da177e4SLinus Torvalds 21041da177e4SLinus Torvalds md = idr_find(&_minor_idr, minor); 210549de5769SMike Snitzer if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) || 210649de5769SMike Snitzer test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { 2107637842cfSDavid Teigland md = NULL; 2108fba9f90eSJeff Mahoney goto out; 2109fba9f90eSJeff Mahoney } 21102bec1f4aSMikulas Patocka dm_get(md); 2111fba9f90eSJeff Mahoney out: 2112f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 21131da177e4SLinus Torvalds 2114637842cfSDavid Teigland return md; 2115637842cfSDavid Teigland } 21163cf2e4baSAlasdair G Kergon EXPORT_SYMBOL_GPL(dm_get_md); 2117d229a958SDavid Teigland 21189ade92a9SAlasdair G Kergon void *dm_get_mdptr(struct mapped_device *md) 2119637842cfSDavid Teigland { 21209ade92a9SAlasdair G Kergon return md->interface_ptr; 21211da177e4SLinus Torvalds } 21221da177e4SLinus Torvalds 
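/*
 * Editor's sketch (hypothetical caller, not part of dm.c): dm_get_md()
 * returns the device with a reference held, which must be dropped with
 * dm_put() once the caller is done with it.
 */
static int __maybe_unused example_lookup_by_devt(dev_t dev)
{
	struct mapped_device *md = dm_get_md(dev);

	if (!md)
		return -ENXIO;	/* no such mapped device, or it is being freed */
	DMINFO("%s: device found", dm_device_name(md));
	dm_put(md);
	return 0;
}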
21231da177e4SLinus Torvalds void dm_set_mdptr(struct mapped_device *md, void *ptr) 21241da177e4SLinus Torvalds { 21251da177e4SLinus Torvalds md->interface_ptr = ptr; 21261da177e4SLinus Torvalds } 21271da177e4SLinus Torvalds 21281da177e4SLinus Torvalds void dm_get(struct mapped_device *md) 21291da177e4SLinus Torvalds { 21301da177e4SLinus Torvalds atomic_inc(&md->holders); 21313f77316dSKiyoshi Ueda BUG_ON(test_bit(DMF_FREEING, &md->flags)); 21321da177e4SLinus Torvalds } 21331da177e4SLinus Torvalds 213409ee96b2SMikulas Patocka int dm_hold(struct mapped_device *md) 213509ee96b2SMikulas Patocka { 213609ee96b2SMikulas Patocka spin_lock(&_minor_lock); 213709ee96b2SMikulas Patocka if (test_bit(DMF_FREEING, &md->flags)) { 213809ee96b2SMikulas Patocka spin_unlock(&_minor_lock); 213909ee96b2SMikulas Patocka return -EBUSY; 214009ee96b2SMikulas Patocka } 214109ee96b2SMikulas Patocka dm_get(md); 214209ee96b2SMikulas Patocka spin_unlock(&_minor_lock); 214309ee96b2SMikulas Patocka return 0; 214409ee96b2SMikulas Patocka } 214509ee96b2SMikulas Patocka EXPORT_SYMBOL_GPL(dm_hold); 214609ee96b2SMikulas Patocka 214772d94861SAlasdair G Kergon const char *dm_device_name(struct mapped_device *md) 214872d94861SAlasdair G Kergon { 214972d94861SAlasdair G Kergon return md->name; 215072d94861SAlasdair G Kergon } 215172d94861SAlasdair G Kergon EXPORT_SYMBOL_GPL(dm_device_name); 215272d94861SAlasdair G Kergon 21533f77316dSKiyoshi Ueda static void __dm_destroy(struct mapped_device *md, bool wait) 21541da177e4SLinus Torvalds { 21551134e5aeSMike Anderson struct dm_table *map; 215683d5e5b0SMikulas Patocka int srcu_idx; 21571da177e4SLinus Torvalds 21583f77316dSKiyoshi Ueda might_sleep(); 2159fba9f90eSJeff Mahoney 216063a4f065SMike Snitzer spin_lock(&_minor_lock); 21613f77316dSKiyoshi Ueda idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); 2162fba9f90eSJeff Mahoney set_bit(DMF_FREEING, &md->flags); 2163f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 21643f77316dSKiyoshi Ueda 2165c12c9a3cSMike Snitzer blk_set_queue_dying(md->queue); 21663b785fbcSBart Van Assche 2167ab7c7bb6SMikulas Patocka /* 2168ab7c7bb6SMikulas Patocka * Take suspend_lock so that presuspend and postsuspend methods 2169ab7c7bb6SMikulas Patocka * do not race with internal suspend. 2170ab7c7bb6SMikulas Patocka */ 2171ab7c7bb6SMikulas Patocka mutex_lock(&md->suspend_lock); 21722a708cffSJunichi Nomura map = dm_get_live_table(md, &srcu_idx); 21734f186f8bSKiyoshi Ueda if (!dm_suspended_md(md)) { 21741da177e4SLinus Torvalds dm_table_presuspend_targets(map); 2175adc0daadSMikulas Patocka set_bit(DMF_SUSPENDED, &md->flags); 21765df96f2bSMikulas Patocka set_bit(DMF_POST_SUSPENDING, &md->flags); 21771da177e4SLinus Torvalds dm_table_postsuspend_targets(map); 21781da177e4SLinus Torvalds } 217983d5e5b0SMikulas Patocka /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ 218083d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 21812a708cffSJunichi Nomura mutex_unlock(&md->suspend_lock); 218283d5e5b0SMikulas Patocka 21833f77316dSKiyoshi Ueda /* 21843f77316dSKiyoshi Ueda * Rare, but there may still be in-flight I/O requests that have 21853f77316dSKiyoshi Ueda * yet to complete. Wait for all references to disappear. 21863f77316dSKiyoshi Ueda * No one should increment the reference count of the mapped_device 21873f77316dSKiyoshi Ueda * after its state becomes DMF_FREEING.
21883f77316dSKiyoshi Ueda */ 21893f77316dSKiyoshi Ueda if (wait) 21903f77316dSKiyoshi Ueda while (atomic_read(&md->holders)) 21913f77316dSKiyoshi Ueda msleep(1); 21923f77316dSKiyoshi Ueda else if (atomic_read(&md->holders)) 21933f77316dSKiyoshi Ueda DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)", 21943f77316dSKiyoshi Ueda dm_device_name(md), atomic_read(&md->holders)); 21953f77316dSKiyoshi Ueda 2196784aae73SMilan Broz dm_sysfs_exit(md); 2197a7940155SAlasdair G Kergon dm_table_destroy(__unbind(md)); 21981da177e4SLinus Torvalds free_dev(md); 21991da177e4SLinus Torvalds } 22003f77316dSKiyoshi Ueda 22013f77316dSKiyoshi Ueda void dm_destroy(struct mapped_device *md) 22023f77316dSKiyoshi Ueda { 22033f77316dSKiyoshi Ueda __dm_destroy(md, true); 22043f77316dSKiyoshi Ueda } 22053f77316dSKiyoshi Ueda 22063f77316dSKiyoshi Ueda void dm_destroy_immediate(struct mapped_device *md) 22073f77316dSKiyoshi Ueda { 22083f77316dSKiyoshi Ueda __dm_destroy(md, false); 22093f77316dSKiyoshi Ueda } 22103f77316dSKiyoshi Ueda 22113f77316dSKiyoshi Ueda void dm_put(struct mapped_device *md) 22123f77316dSKiyoshi Ueda { 22133f77316dSKiyoshi Ueda atomic_dec(&md->holders); 22141da177e4SLinus Torvalds } 221579eb885cSEdward Goggin EXPORT_SYMBOL_GPL(dm_put); 22161da177e4SLinus Torvalds 221785067747SMing Lei static bool md_in_flight_bios(struct mapped_device *md) 221885067747SMing Lei { 221985067747SMing Lei int cpu; 22208446fe92SChristoph Hellwig struct block_device *part = dm_disk(md)->part0; 222185067747SMing Lei long sum = 0; 222285067747SMing Lei 222385067747SMing Lei for_each_possible_cpu(cpu) { 222485067747SMing Lei sum += part_stat_local_read_cpu(part, in_flight[0], cpu); 222585067747SMing Lei sum += part_stat_local_read_cpu(part, in_flight[1], cpu); 222685067747SMing Lei } 222785067747SMing Lei 222885067747SMing Lei return sum != 0; 222985067747SMing Lei } 223085067747SMing Lei 22312f064a59SPeter Zijlstra static int dm_wait_for_bios_completion(struct mapped_device *md, unsigned int task_state) 223246125c1cSMilan Broz { 223346125c1cSMilan Broz int r = 0; 22349f4c3f87SBart Van Assche DEFINE_WAIT(wait); 223546125c1cSMilan Broz 223685067747SMing Lei while (true) { 22379f4c3f87SBart Van Assche prepare_to_wait(&md->wait, &wait, task_state); 223846125c1cSMilan Broz 223985067747SMing Lei if (!md_in_flight_bios(md)) 224046125c1cSMilan Broz break; 224146125c1cSMilan Broz 2242e3fabdfdSBart Van Assche if (signal_pending_state(task_state, current)) { 224346125c1cSMilan Broz r = -EINTR; 224446125c1cSMilan Broz break; 224546125c1cSMilan Broz } 224646125c1cSMilan Broz 224746125c1cSMilan Broz io_schedule(); 224846125c1cSMilan Broz } 22499f4c3f87SBart Van Assche finish_wait(&md->wait, &wait); 2250b44ebeb0SMikulas Patocka 225146125c1cSMilan Broz return r; 225246125c1cSMilan Broz } 225346125c1cSMilan Broz 22542f064a59SPeter Zijlstra static int dm_wait_for_completion(struct mapped_device *md, unsigned int task_state) 225585067747SMing Lei { 225685067747SMing Lei int r = 0; 225785067747SMing Lei 225885067747SMing Lei if (!queue_is_mq(md->queue)) 225985067747SMing Lei return dm_wait_for_bios_completion(md, task_state); 226085067747SMing Lei 226185067747SMing Lei while (true) { 226285067747SMing Lei if (!blk_mq_queue_inflight(md->queue)) 226385067747SMing Lei break; 226485067747SMing Lei 226585067747SMing Lei if (signal_pending_state(task_state, current)) { 226685067747SMing Lei r = -EINTR; 226785067747SMing Lei break; 226885067747SMing Lei } 226985067747SMing Lei 227085067747SMing Lei msleep(5); 227185067747SMing Lei } 
227285067747SMing Lei 227385067747SMing Lei return r; 227485067747SMing Lei } 227585067747SMing Lei 22761da177e4SLinus Torvalds /* 22771da177e4SLinus Torvalds * Process the deferred bios 22781da177e4SLinus Torvalds */ 2279ef208587SMikulas Patocka static void dm_wq_work(struct work_struct *work) 22801da177e4SLinus Torvalds { 22810c2915b8SMike Snitzer struct mapped_device *md = container_of(work, struct mapped_device, work); 22820c2915b8SMike Snitzer struct bio *bio; 2283ef208587SMikulas Patocka 22843b00b203SMikulas Patocka while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 2285022c2611SMikulas Patocka spin_lock_irq(&md->deferred_lock); 22860c2915b8SMike Snitzer bio = bio_list_pop(&md->deferred); 2287022c2611SMikulas Patocka spin_unlock_irq(&md->deferred_lock); 2288022c2611SMikulas Patocka 22890c2915b8SMike Snitzer if (!bio) 2290df12ee99SAlasdair G Kergon break; 229173d410c0SMilan Broz 22920c2915b8SMike Snitzer submit_bio_noacct(bio); 2293e6ee8c0bSKiyoshi Ueda } 22941da177e4SLinus Torvalds } 22951da177e4SLinus Torvalds 22969a1fb464SMikulas Patocka static void dm_queue_flush(struct mapped_device *md) 2297304f3f6aSMilan Broz { 22983b00b203SMikulas Patocka clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 22994e857c58SPeter Zijlstra smp_mb__after_atomic(); 230053d5914fSMikulas Patocka queue_work(md->wq, &md->work); 2301304f3f6aSMilan Broz } 2302304f3f6aSMilan Broz 23031da177e4SLinus Torvalds /* 2304042d2a9bSAlasdair G Kergon * Swap in a new table, returning the old one for the caller to destroy. 23051da177e4SLinus Torvalds */ 2306042d2a9bSAlasdair G Kergon struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) 23071da177e4SLinus Torvalds { 230887eb5b21SMike Christie struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL); 2309754c5fc7SMike Snitzer struct queue_limits limits; 2310042d2a9bSAlasdair G Kergon int r; 23111da177e4SLinus Torvalds 2312e61290a4SDaniel Walker mutex_lock(&md->suspend_lock); 23131da177e4SLinus Torvalds 23141da177e4SLinus Torvalds /* device must be suspended */ 23154f186f8bSKiyoshi Ueda if (!dm_suspended_md(md)) 231693c534aeSAlasdair G Kergon goto out; 23171da177e4SLinus Torvalds 23183ae70656SMike Snitzer /* 23193ae70656SMike Snitzer * If the new table has no data devices, retain the existing limits. 23203ae70656SMike Snitzer * This helps multipath with queue_if_no_path: if all paths disappear, 23213ae70656SMike Snitzer * new I/O is queued based on these limits until some paths 23223ae70656SMike Snitzer * reappear.
23233ae70656SMike Snitzer */ 23243ae70656SMike Snitzer if (dm_table_has_no_data_devices(table)) { 232583d5e5b0SMikulas Patocka live_map = dm_get_live_table_fast(md); 23263ae70656SMike Snitzer if (live_map) 23273ae70656SMike Snitzer limits = md->queue->limits; 232883d5e5b0SMikulas Patocka dm_put_live_table_fast(md); 23293ae70656SMike Snitzer } 23303ae70656SMike Snitzer 233187eb5b21SMike Christie if (!live_map) { 2332754c5fc7SMike Snitzer r = dm_calculate_queue_limits(table, &limits); 2333042d2a9bSAlasdair G Kergon if (r) { 2334042d2a9bSAlasdair G Kergon map = ERR_PTR(r); 2335754c5fc7SMike Snitzer goto out; 2336042d2a9bSAlasdair G Kergon } 233787eb5b21SMike Christie } 2338754c5fc7SMike Snitzer 2339042d2a9bSAlasdair G Kergon map = __bind(md, table, &limits); 234062e08243SMikulas Patocka dm_issue_global_event(); 23411da177e4SLinus Torvalds 234293c534aeSAlasdair G Kergon out: 2343e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock); 2344042d2a9bSAlasdair G Kergon return map; 23451da177e4SLinus Torvalds } 23461da177e4SLinus Torvalds 23471da177e4SLinus Torvalds /* 23481da177e4SLinus Torvalds * Functions to lock and unlock any filesystem running on the 23491da177e4SLinus Torvalds * device. 23501da177e4SLinus Torvalds */ 23512ca3310eSAlasdair G Kergon static int lock_fs(struct mapped_device *md) 23521da177e4SLinus Torvalds { 2353e39e2e95SAlasdair G Kergon int r; 23541da177e4SLinus Torvalds 2355040f04bdSChristoph Hellwig WARN_ON(test_bit(DMF_FROZEN, &md->flags)); 2356dfbe03f6SAlasdair G Kergon 2357977115c0SChristoph Hellwig r = freeze_bdev(md->disk->part0); 2358040f04bdSChristoph Hellwig if (!r) 2359aa8d7c2fSAlasdair G Kergon set_bit(DMF_FROZEN, &md->flags); 2360040f04bdSChristoph Hellwig return r; 23611da177e4SLinus Torvalds } 23621da177e4SLinus Torvalds 23632ca3310eSAlasdair G Kergon static void unlock_fs(struct mapped_device *md) 23641da177e4SLinus Torvalds { 2365aa8d7c2fSAlasdair G Kergon if (!test_bit(DMF_FROZEN, &md->flags)) 2366aa8d7c2fSAlasdair G Kergon return; 2367977115c0SChristoph Hellwig thaw_bdev(md->disk->part0); 2368aa8d7c2fSAlasdair G Kergon clear_bit(DMF_FROZEN, &md->flags); 23691da177e4SLinus Torvalds } 23701da177e4SLinus Torvalds 23711da177e4SLinus Torvalds /* 2372b48633f8SBart Van Assche * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG 2373b48633f8SBart Van Assche * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE 2374b48633f8SBart Van Assche * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY 2375b48633f8SBart Van Assche * 2376ffcc3936SMike Snitzer * If __dm_suspend returns 0, the device is completely quiescent 2377ffcc3936SMike Snitzer * now. There is no request-processing activity. All new requests 2378ffcc3936SMike Snitzer * are being added to md->deferred list. 2379cec47e3dSKiyoshi Ueda */ 2380ffcc3936SMike Snitzer static int __dm_suspend(struct mapped_device *md, struct dm_table *map, 23812f064a59SPeter Zijlstra unsigned suspend_flags, unsigned int task_state, 2382eaf9a736SMike Snitzer int dmf_suspended_flag) 23831da177e4SLinus Torvalds { 2384ffcc3936SMike Snitzer bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG; 2385ffcc3936SMike Snitzer bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG; 2386ffcc3936SMike Snitzer int r; 2387cf222b37SAlasdair G Kergon 23885a8f1f80SBart Van Assche lockdep_assert_held(&md->suspend_lock); 23895a8f1f80SBart Van Assche 23902e93ccc1SKiyoshi Ueda /* 23912e93ccc1SKiyoshi Ueda * DMF_NOFLUSH_SUSPENDING must be set before presuspend. 
23922e93ccc1SKiyoshi Ueda * This flag is cleared before dm_suspend returns. 23932e93ccc1SKiyoshi Ueda */ 23942e93ccc1SKiyoshi Ueda if (noflush) 23952e93ccc1SKiyoshi Ueda set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 239686331f39SBart Van Assche else 2397ac75b09fSMike Snitzer DMDEBUG("%s: suspending with flush", dm_device_name(md)); 23982e93ccc1SKiyoshi Ueda 2399d67ee213SMike Snitzer /* 2400d67ee213SMike Snitzer * This gets reverted if there's an error later and the targets 2401d67ee213SMike Snitzer * provide the .presuspend_undo hook. 2402d67ee213SMike Snitzer */ 24031da177e4SLinus Torvalds dm_table_presuspend_targets(map); 24041da177e4SLinus Torvalds 24052e93ccc1SKiyoshi Ueda /* 24069f518b27SKiyoshi Ueda * Flush I/O to the device. 24079f518b27SKiyoshi Ueda * Any I/O submitted after lock_fs() may not be flushed. 24089f518b27SKiyoshi Ueda * noflush takes precedence over do_lockfs. 24099f518b27SKiyoshi Ueda * (lock_fs() flushes I/Os and waits for them to complete.) 24102e93ccc1SKiyoshi Ueda */ 241132a926daSMikulas Patocka if (!noflush && do_lockfs) { 24122ca3310eSAlasdair G Kergon r = lock_fs(md); 2413d67ee213SMike Snitzer if (r) { 2414d67ee213SMike Snitzer dm_table_presuspend_undo_targets(map); 2415ffcc3936SMike Snitzer return r; 2416aa8d7c2fSAlasdair G Kergon } 2417d67ee213SMike Snitzer } 24181da177e4SLinus Torvalds 24191da177e4SLinus Torvalds /* 24203b00b203SMikulas Patocka * Here we must make sure that no processes are submitting requests 24213b00b203SMikulas Patocka * to target drivers i.e. no one may be executing 24220cede372SMike Snitzer * __split_and_process_bio from dm_submit_bio. 24233b00b203SMikulas Patocka * 24240cede372SMike Snitzer * To get all processes out of __split_and_process_bio in dm_submit_bio, 24253b00b203SMikulas Patocka * we take the write lock. To prevent any process from reentering 24260cede372SMike Snitzer * __split_and_process_bio from dm_submit_bio and quiesce the thread 24270cede372SMike Snitzer * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call 24286a8736d1STejun Heo * flush_workqueue(md->wq). 24291da177e4SLinus Torvalds */ 24301eb787ecSAlasdair G Kergon set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 243141abc4e1SHannes Reinecke if (map) 243283d5e5b0SMikulas Patocka synchronize_srcu(&md->io_barrier); 24331da177e4SLinus Torvalds 2434d0bcb878SKiyoshi Ueda /* 243529e4013dSTejun Heo * Stop md->queue before flushing md->wq in case request-based 243629e4013dSTejun Heo * dm defers requests to md->wq from md->queue. 2437d0bcb878SKiyoshi Ueda */ 24386a23e05cSJens Axboe if (dm_request_based(md)) 2439eca7ee6dSMike Snitzer dm_stop_queue(md->queue); 2440cec47e3dSKiyoshi Ueda 2441d0bcb878SKiyoshi Ueda flush_workqueue(md->wq); 2442d0bcb878SKiyoshi Ueda 24431da177e4SLinus Torvalds /* 24443b00b203SMikulas Patocka * At this point no more requests are entering target request routines. 24453b00b203SMikulas Patocka * We call dm_wait_for_completion to wait for all existing requests 24463b00b203SMikulas Patocka * to finish. 24471da177e4SLinus Torvalds */ 2448b48633f8SBart Van Assche r = dm_wait_for_completion(md, task_state); 2449eaf9a736SMike Snitzer if (!r) 2450eaf9a736SMike Snitzer set_bit(dmf_suspended_flag, &md->flags); 24511da177e4SLinus Torvalds 24526d6f10dfSMilan Broz if (noflush) 2453022c2611SMikulas Patocka clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 245441abc4e1SHannes Reinecke if (map) 245583d5e5b0SMikulas Patocka synchronize_srcu(&md->io_barrier); 24562e93ccc1SKiyoshi Ueda 24571da177e4SLinus Torvalds /* were we interrupted ? 
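 *
 * A negative r here normally means dm_wait_for_completion() bailed out
 * with -EINTR because a signal arrived while task_state was
 * TASK_INTERRUPTIBLE; the code below rolls the partial suspend back so
 * the device keeps servicing I/O.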
*/ 245846125c1cSMilan Broz if (r < 0) { 24599a1fb464SMikulas Patocka dm_queue_flush(md); 246073d410c0SMilan Broz 2461cec47e3dSKiyoshi Ueda if (dm_request_based(md)) 2462eca7ee6dSMike Snitzer dm_start_queue(md->queue); 2463cec47e3dSKiyoshi Ueda 24642ca3310eSAlasdair G Kergon unlock_fs(md); 2465d67ee213SMike Snitzer dm_table_presuspend_undo_targets(map); 2466ffcc3936SMike Snitzer /* pushback list is already flushed, so skip flush */ 2467ffcc3936SMike Snitzer } 2468ffcc3936SMike Snitzer 2469ffcc3936SMike Snitzer return r; 24702ca3310eSAlasdair G Kergon } 24712ca3310eSAlasdair G Kergon 24723b00b203SMikulas Patocka /* 2473ffcc3936SMike Snitzer * We need to be able to change a mapping table under a mounted 2474ffcc3936SMike Snitzer * filesystem. For example we might want to move some data in 2475ffcc3936SMike Snitzer * the background. Before the table can be swapped with 2476ffcc3936SMike Snitzer * dm_bind_table, dm_suspend must be called to flush any in 2477ffcc3936SMike Snitzer * flight bios and ensure that any further io gets deferred. 24783b00b203SMikulas Patocka */ 2479ffcc3936SMike Snitzer /* 2480ffcc3936SMike Snitzer * Suspend mechanism in request-based dm. 2481ffcc3936SMike Snitzer * 2482ffcc3936SMike Snitzer * 1. Flush all I/Os by lock_fs() if needed. 2483ffcc3936SMike Snitzer * 2. Stop dispatching any I/O by stopping the request_queue. 2484ffcc3936SMike Snitzer * 3. Wait for all in-flight I/Os to be completed or requeued. 2485ffcc3936SMike Snitzer * 2486ffcc3936SMike Snitzer * To abort suspend, start the request_queue. 2487ffcc3936SMike Snitzer */ 2488ffcc3936SMike Snitzer int dm_suspend(struct mapped_device *md, unsigned suspend_flags) 2489ffcc3936SMike Snitzer { 2490ffcc3936SMike Snitzer struct dm_table *map = NULL; 2491ffcc3936SMike Snitzer int r = 0; 2492ffcc3936SMike Snitzer 2493ffcc3936SMike Snitzer retry: 2494ffcc3936SMike Snitzer mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); 2495ffcc3936SMike Snitzer 2496ffcc3936SMike Snitzer if (dm_suspended_md(md)) { 2497ffcc3936SMike Snitzer r = -EINVAL; 2498ffcc3936SMike Snitzer goto out_unlock; 2499ffcc3936SMike Snitzer } 2500ffcc3936SMike Snitzer 2501ffcc3936SMike Snitzer if (dm_suspended_internally_md(md)) { 2502ffcc3936SMike Snitzer /* already internally suspended, wait for internal resume */ 2503ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 2504ffcc3936SMike Snitzer r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); 2505ffcc3936SMike Snitzer if (r) 2506ffcc3936SMike Snitzer return r; 2507ffcc3936SMike Snitzer goto retry; 2508ffcc3936SMike Snitzer } 2509ffcc3936SMike Snitzer 2510a12f5d48SEric Dumazet map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2511ffcc3936SMike Snitzer 2512eaf9a736SMike Snitzer r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED); 2513ffcc3936SMike Snitzer if (r) 2514ffcc3936SMike Snitzer goto out_unlock; 25153b00b203SMikulas Patocka 25165df96f2bSMikulas Patocka set_bit(DMF_POST_SUSPENDING, &md->flags); 25174d4471cbSKiyoshi Ueda dm_table_postsuspend_targets(map); 25185df96f2bSMikulas Patocka clear_bit(DMF_POST_SUSPENDING, &md->flags); 25194d4471cbSKiyoshi Ueda 2520d287483dSAlasdair G Kergon out_unlock: 2521e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock); 2522cf222b37SAlasdair G Kergon return r; 25231da177e4SLinus Torvalds } 25241da177e4SLinus Torvalds 2525ffcc3936SMike Snitzer static int __dm_resume(struct mapped_device *md, struct dm_table *map) 25261da177e4SLinus Torvalds { 2527ffcc3936SMike Snitzer if (map) 
{ 2528ffcc3936SMike Snitzer int r = dm_table_resume_targets(map); 25298757b776SMilan Broz if (r) 2530ffcc3936SMike Snitzer return r; 2531ffcc3936SMike Snitzer } 25322ca3310eSAlasdair G Kergon 25339a1fb464SMikulas Patocka dm_queue_flush(md); 25342ca3310eSAlasdair G Kergon 2535cec47e3dSKiyoshi Ueda /* 2536cec47e3dSKiyoshi Ueda * Flushing deferred I/Os must be done after targets are resumed 2537cec47e3dSKiyoshi Ueda * so that mapping of targets can work correctly. 2538cec47e3dSKiyoshi Ueda * Request-based dm is queueing the deferred I/Os in its request_queue. 2539cec47e3dSKiyoshi Ueda */ 2540cec47e3dSKiyoshi Ueda if (dm_request_based(md)) 2541eca7ee6dSMike Snitzer dm_start_queue(md->queue); 2542cec47e3dSKiyoshi Ueda 25432ca3310eSAlasdair G Kergon unlock_fs(md); 25442ca3310eSAlasdair G Kergon 2545ffcc3936SMike Snitzer return 0; 2546ffcc3936SMike Snitzer } 2547ffcc3936SMike Snitzer 2548ffcc3936SMike Snitzer int dm_resume(struct mapped_device *md) 2549ffcc3936SMike Snitzer { 25508dc23658SMinfei Huang int r; 2551ffcc3936SMike Snitzer struct dm_table *map = NULL; 2552ffcc3936SMike Snitzer 2553ffcc3936SMike Snitzer retry: 25548dc23658SMinfei Huang r = -EINVAL; 2555ffcc3936SMike Snitzer mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); 2556ffcc3936SMike Snitzer 2557ffcc3936SMike Snitzer if (!dm_suspended_md(md)) 2558ffcc3936SMike Snitzer goto out; 2559ffcc3936SMike Snitzer 2560ffcc3936SMike Snitzer if (dm_suspended_internally_md(md)) { 2561ffcc3936SMike Snitzer /* already internally suspended, wait for internal resume */ 2562ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 2563ffcc3936SMike Snitzer r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); 2564ffcc3936SMike Snitzer if (r) 2565ffcc3936SMike Snitzer return r; 2566ffcc3936SMike Snitzer goto retry; 2567ffcc3936SMike Snitzer } 2568ffcc3936SMike Snitzer 2569a12f5d48SEric Dumazet map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2570ffcc3936SMike Snitzer if (!map || !dm_table_get_size(map)) 2571ffcc3936SMike Snitzer goto out; 2572ffcc3936SMike Snitzer 2573ffcc3936SMike Snitzer r = __dm_resume(md, map); 2574ffcc3936SMike Snitzer if (r) 2575ffcc3936SMike Snitzer goto out; 2576ffcc3936SMike Snitzer 25772ca3310eSAlasdair G Kergon clear_bit(DMF_SUSPENDED, &md->flags); 2578cf222b37SAlasdair G Kergon out: 2579e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock); 25802ca3310eSAlasdair G Kergon 2581cf222b37SAlasdair G Kergon return r; 25821da177e4SLinus Torvalds } 25831da177e4SLinus Torvalds 2584fd2ed4d2SMikulas Patocka /* 2585fd2ed4d2SMikulas Patocka * Internal suspend/resume works like userspace-driven suspend. It waits 2586fd2ed4d2SMikulas Patocka * until all bios finish and prevents issuing new bios to the target drivers. 2587fd2ed4d2SMikulas Patocka * It may be used only from the kernel. 
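 *
 * Hedged usage sketch (illustrative; the elided work is whatever a
 * kernel-side caller must do with no new bios in flight):
 *
 *	dm_internal_suspend_noflush(md);
 *	...work that must not race with I/O...
 *	dm_internal_resume(md);
 *
 * The calls nest: only the outermost pair actually quiesces and restarts
 * the device (see internal_suspend_count below).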
2588fd2ed4d2SMikulas Patocka */ 2589fd2ed4d2SMikulas Patocka 2590ffcc3936SMike Snitzer static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags) 2591ffcc3936SMike Snitzer { 2592ffcc3936SMike Snitzer struct dm_table *map = NULL; 2593ffcc3936SMike Snitzer 25941ea0654eSBart Van Assche lockdep_assert_held(&md->suspend_lock); 25951ea0654eSBart Van Assche 259696b26c8cSMikulas Patocka if (md->internal_suspend_count++) 2597ffcc3936SMike Snitzer return; /* nested internal suspend */ 2598ffcc3936SMike Snitzer 2599ffcc3936SMike Snitzer if (dm_suspended_md(md)) { 2600ffcc3936SMike Snitzer set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 2601ffcc3936SMike Snitzer return; /* nest suspend */ 2602ffcc3936SMike Snitzer } 2603ffcc3936SMike Snitzer 2604a12f5d48SEric Dumazet map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2605ffcc3936SMike Snitzer 2606ffcc3936SMike Snitzer /* 2607ffcc3936SMike Snitzer * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is 2608ffcc3936SMike Snitzer * supported. Properly supporting a TASK_INTERRUPTIBLE internal suspend 2609ffcc3936SMike Snitzer * would require changing .presuspend to return an error -- avoid this 2610ffcc3936SMike Snitzer * until there is a need for more elaborate variants of internal suspend. 2611ffcc3936SMike Snitzer */ 2612eaf9a736SMike Snitzer (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE, 2613eaf9a736SMike Snitzer DMF_SUSPENDED_INTERNALLY); 2614ffcc3936SMike Snitzer 26155df96f2bSMikulas Patocka set_bit(DMF_POST_SUSPENDING, &md->flags); 2616ffcc3936SMike Snitzer dm_table_postsuspend_targets(map); 26175df96f2bSMikulas Patocka clear_bit(DMF_POST_SUSPENDING, &md->flags); 2618ffcc3936SMike Snitzer } 2619ffcc3936SMike Snitzer 2620ffcc3936SMike Snitzer static void __dm_internal_resume(struct mapped_device *md) 2621ffcc3936SMike Snitzer { 262296b26c8cSMikulas Patocka BUG_ON(!md->internal_suspend_count); 262396b26c8cSMikulas Patocka 262496b26c8cSMikulas Patocka if (--md->internal_suspend_count) 2625ffcc3936SMike Snitzer return; /* resume from nested internal suspend */ 2626ffcc3936SMike Snitzer 2627ffcc3936SMike Snitzer if (dm_suspended_md(md)) 2628ffcc3936SMike Snitzer goto done; /* resume from nested suspend */ 2629ffcc3936SMike Snitzer 2630ffcc3936SMike Snitzer /* 2631ffcc3936SMike Snitzer * NOTE: existing callers don't need to call dm_table_resume_targets 2632ffcc3936SMike Snitzer * (which may fail -- so best to avoid it for now by passing NULL map) 2633ffcc3936SMike Snitzer */ 2634ffcc3936SMike Snitzer (void) __dm_resume(md, NULL); 2635ffcc3936SMike Snitzer 2636ffcc3936SMike Snitzer done: 2637ffcc3936SMike Snitzer clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 2638ffcc3936SMike Snitzer smp_mb__after_atomic(); 2639ffcc3936SMike Snitzer wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY); 2640ffcc3936SMike Snitzer } 2641ffcc3936SMike Snitzer 2642ffcc3936SMike Snitzer void dm_internal_suspend_noflush(struct mapped_device *md) 2643fd2ed4d2SMikulas Patocka { 2644fd2ed4d2SMikulas Patocka mutex_lock(&md->suspend_lock); 2645ffcc3936SMike Snitzer __dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG); 2646ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 2647ffcc3936SMike Snitzer } 2648ffcc3936SMike Snitzer EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush); 2649ffcc3936SMike Snitzer 2650ffcc3936SMike Snitzer void dm_internal_resume(struct mapped_device *md) 2651ffcc3936SMike Snitzer { 2652ffcc3936SMike Snitzer mutex_lock(&md->suspend_lock); 2653ffcc3936SMike Snitzer 
__dm_internal_resume(md); 2654ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 2655ffcc3936SMike Snitzer } 2656ffcc3936SMike Snitzer EXPORT_SYMBOL_GPL(dm_internal_resume); 2657ffcc3936SMike Snitzer 2658ffcc3936SMike Snitzer /* 2659ffcc3936SMike Snitzer * Fast variants of internal suspend/resume hold md->suspend_lock, 2660ffcc3936SMike Snitzer * which prevents interaction with userspace-driven suspend. 2661ffcc3936SMike Snitzer */ 2662ffcc3936SMike Snitzer 2663ffcc3936SMike Snitzer void dm_internal_suspend_fast(struct mapped_device *md) 2664ffcc3936SMike Snitzer { 2665ffcc3936SMike Snitzer mutex_lock(&md->suspend_lock); 2666ffcc3936SMike Snitzer if (dm_suspended_md(md) || dm_suspended_internally_md(md)) 2667fd2ed4d2SMikulas Patocka return; 2668fd2ed4d2SMikulas Patocka 2669fd2ed4d2SMikulas Patocka set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 2670fd2ed4d2SMikulas Patocka synchronize_srcu(&md->io_barrier); 2671fd2ed4d2SMikulas Patocka flush_workqueue(md->wq); 2672fd2ed4d2SMikulas Patocka dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); 2673fd2ed4d2SMikulas Patocka } 2674b735fedeSMikulas Patocka EXPORT_SYMBOL_GPL(dm_internal_suspend_fast); 2675fd2ed4d2SMikulas Patocka 2676ffcc3936SMike Snitzer void dm_internal_resume_fast(struct mapped_device *md) 2677fd2ed4d2SMikulas Patocka { 2678ffcc3936SMike Snitzer if (dm_suspended_md(md) || dm_suspended_internally_md(md)) 2679fd2ed4d2SMikulas Patocka goto done; 2680fd2ed4d2SMikulas Patocka 2681fd2ed4d2SMikulas Patocka dm_queue_flush(md); 2682fd2ed4d2SMikulas Patocka 2683fd2ed4d2SMikulas Patocka done: 2684fd2ed4d2SMikulas Patocka mutex_unlock(&md->suspend_lock); 2685fd2ed4d2SMikulas Patocka } 2686b735fedeSMikulas Patocka EXPORT_SYMBOL_GPL(dm_internal_resume_fast); 2687fd2ed4d2SMikulas Patocka 26881da177e4SLinus Torvalds /*----------------------------------------------------------------- 26891da177e4SLinus Torvalds * Event notification. 
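 *
 * Hedged in-kernel sketch of the event-counter handshake (illustrative;
 * userspace normally gets the same effect through the DM_DEV_WAIT ioctl):
 *
 *	uint32_t event_nr = dm_get_event_nr(md);
 *	...kick off the change being watched...
 *	if (dm_wait_event(md, event_nr))
 *		return -ERESTARTSYS;	(interrupted before a new event)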
26901da177e4SLinus Torvalds *---------------------------------------------------------------*/ 26913abf85b5SPeter Rajnoha int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, 269260935eb2SMilan Broz unsigned cookie) 269369267a30SAlasdair G Kergon { 26946958c1c6SMikulas Patocka int r; 26956958c1c6SMikulas Patocka unsigned noio_flag; 269660935eb2SMilan Broz char udev_cookie[DM_COOKIE_LENGTH]; 269760935eb2SMilan Broz char *envp[] = { udev_cookie, NULL }; 269860935eb2SMilan Broz 26996958c1c6SMikulas Patocka noio_flag = memalloc_noio_save(); 27006958c1c6SMikulas Patocka 270160935eb2SMilan Broz if (!cookie) 27026958c1c6SMikulas Patocka r = kobject_uevent(&disk_to_dev(md->disk)->kobj, action); 270360935eb2SMilan Broz else { 270460935eb2SMilan Broz snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u", 270560935eb2SMilan Broz DM_COOKIE_ENV_VAR_NAME, cookie); 27066958c1c6SMikulas Patocka r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj, 27073abf85b5SPeter Rajnoha action, envp); 270860935eb2SMilan Broz } 27096958c1c6SMikulas Patocka 27106958c1c6SMikulas Patocka memalloc_noio_restore(noio_flag); 27116958c1c6SMikulas Patocka 27126958c1c6SMikulas Patocka return r; 271369267a30SAlasdair G Kergon } 271469267a30SAlasdair G Kergon 27157a8c3d3bSMike Anderson uint32_t dm_next_uevent_seq(struct mapped_device *md) 27167a8c3d3bSMike Anderson { 27177a8c3d3bSMike Anderson return atomic_add_return(1, &md->uevent_seq); 27187a8c3d3bSMike Anderson } 27197a8c3d3bSMike Anderson 27201da177e4SLinus Torvalds uint32_t dm_get_event_nr(struct mapped_device *md) 27211da177e4SLinus Torvalds { 27221da177e4SLinus Torvalds return atomic_read(&md->event_nr); 27231da177e4SLinus Torvalds } 27241da177e4SLinus Torvalds 27251da177e4SLinus Torvalds int dm_wait_event(struct mapped_device *md, int event_nr) 27261da177e4SLinus Torvalds { 27271da177e4SLinus Torvalds return wait_event_interruptible(md->eventq, 27281da177e4SLinus Torvalds (event_nr != atomic_read(&md->event_nr))); 27291da177e4SLinus Torvalds } 27301da177e4SLinus Torvalds 27317a8c3d3bSMike Anderson void dm_uevent_add(struct mapped_device *md, struct list_head *elist) 27327a8c3d3bSMike Anderson { 27337a8c3d3bSMike Anderson unsigned long flags; 27347a8c3d3bSMike Anderson 27357a8c3d3bSMike Anderson spin_lock_irqsave(&md->uevent_lock, flags); 27367a8c3d3bSMike Anderson list_add(elist, &md->uevent_list); 27377a8c3d3bSMike Anderson spin_unlock_irqrestore(&md->uevent_lock, flags); 27387a8c3d3bSMike Anderson } 27397a8c3d3bSMike Anderson 27401da177e4SLinus Torvalds /* 27411da177e4SLinus Torvalds * The gendisk is only valid as long as you have a reference 27421da177e4SLinus Torvalds * count on 'md'. 
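 *
 * Hedged sketch (illustrative): pin 'md' for as long as the gendisk is
 * dereferenced:
 *
 *	dm_get(md);
 *	disk = dm_disk(md);
 *	...use disk...
 *	dm_put(md);	(disk must not be touched after this)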
27431da177e4SLinus Torvalds */ 27441da177e4SLinus Torvalds struct gendisk *dm_disk(struct mapped_device *md) 27451da177e4SLinus Torvalds { 27461da177e4SLinus Torvalds return md->disk; 27471da177e4SLinus Torvalds } 274865ff5b7dSSami Tolvanen EXPORT_SYMBOL_GPL(dm_disk); 27491da177e4SLinus Torvalds 2750784aae73SMilan Broz struct kobject *dm_kobject(struct mapped_device *md) 2751784aae73SMilan Broz { 27522995fa78SMikulas Patocka return &md->kobj_holder.kobj; 2753784aae73SMilan Broz } 2754784aae73SMilan Broz 2755784aae73SMilan Broz struct mapped_device *dm_get_from_kobject(struct kobject *kobj) 2756784aae73SMilan Broz { 2757784aae73SMilan Broz struct mapped_device *md; 2758784aae73SMilan Broz 27592995fa78SMikulas Patocka md = container_of(kobj, struct mapped_device, kobj_holder.kobj); 2760784aae73SMilan Broz 2761b9a41d21SHou Tao spin_lock(&_minor_lock); 2762b9a41d21SHou Tao if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { 2763b9a41d21SHou Tao md = NULL; 2764b9a41d21SHou Tao goto out; 2765b9a41d21SHou Tao } 2766784aae73SMilan Broz dm_get(md); 2767b9a41d21SHou Tao out: 2768b9a41d21SHou Tao spin_unlock(&_minor_lock); 2769b9a41d21SHou Tao 2770784aae73SMilan Broz return md; 2771784aae73SMilan Broz } 2772784aae73SMilan Broz 27734f186f8bSKiyoshi Ueda int dm_suspended_md(struct mapped_device *md) 27741da177e4SLinus Torvalds { 27751da177e4SLinus Torvalds return test_bit(DMF_SUSPENDED, &md->flags); 27761da177e4SLinus Torvalds } 27771da177e4SLinus Torvalds 27785df96f2bSMikulas Patocka static int dm_post_suspending_md(struct mapped_device *md) 27795df96f2bSMikulas Patocka { 27805df96f2bSMikulas Patocka return test_bit(DMF_POST_SUSPENDING, &md->flags); 27815df96f2bSMikulas Patocka } 27825df96f2bSMikulas Patocka 2783ffcc3936SMike Snitzer int dm_suspended_internally_md(struct mapped_device *md) 2784ffcc3936SMike Snitzer { 2785ffcc3936SMike Snitzer return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); 2786ffcc3936SMike Snitzer } 2787ffcc3936SMike Snitzer 27882c140a24SMikulas Patocka int dm_test_deferred_remove_flag(struct mapped_device *md) 27892c140a24SMikulas Patocka { 27902c140a24SMikulas Patocka return test_bit(DMF_DEFERRED_REMOVE, &md->flags); 27912c140a24SMikulas Patocka } 27922c140a24SMikulas Patocka 279364dbce58SKiyoshi Ueda int dm_suspended(struct dm_target *ti) 279464dbce58SKiyoshi Ueda { 279533bd6f06SMike Snitzer return dm_suspended_md(ti->table->md); 279664dbce58SKiyoshi Ueda } 279764dbce58SKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_suspended); 279864dbce58SKiyoshi Ueda 27995df96f2bSMikulas Patocka int dm_post_suspending(struct dm_target *ti) 28005df96f2bSMikulas Patocka { 280133bd6f06SMike Snitzer return dm_post_suspending_md(ti->table->md); 28025df96f2bSMikulas Patocka } 28035df96f2bSMikulas Patocka EXPORT_SYMBOL_GPL(dm_post_suspending); 28045df96f2bSMikulas Patocka 28052e93ccc1SKiyoshi Ueda int dm_noflush_suspending(struct dm_target *ti) 28062e93ccc1SKiyoshi Ueda { 280733bd6f06SMike Snitzer return __noflush_suspending(ti->table->md); 28082e93ccc1SKiyoshi Ueda } 28092e93ccc1SKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_noflush_suspending); 28102e93ccc1SKiyoshi Ueda 28117e0d574fSBart Van Assche struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type, 28120776aa0eSMike Snitzer unsigned integrity, unsigned per_io_data_size, 28130776aa0eSMike Snitzer unsigned min_pool_size) 2814e6ee8c0bSKiyoshi Ueda { 2815115485e8SMike Snitzer struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id); 281678d8e58aSMike Snitzer unsigned int pool_size = 
0; 281764f52b0eSMike Snitzer unsigned int front_pad, io_front_pad; 28186f1c819cSKent Overstreet int ret; 2819e6ee8c0bSKiyoshi Ueda 2820e6ee8c0bSKiyoshi Ueda if (!pools) 28214e6e36c3SMike Snitzer return NULL; 2822e6ee8c0bSKiyoshi Ueda 282378d8e58aSMike Snitzer switch (type) { 282478d8e58aSMike Snitzer case DM_TYPE_BIO_BASED: 2825545ed20eSToshi Kani case DM_TYPE_DAX_BIO_BASED: 28260776aa0eSMike Snitzer pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size); 282762f26317SJeffle Xu front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + DM_TARGET_IO_BIO_OFFSET; 282862f26317SJeffle Xu io_front_pad = roundup(per_io_data_size, __alignof__(struct dm_io)) + DM_IO_BIO_OFFSET; 28296f1c819cSKent Overstreet ret = bioset_init(&pools->io_bs, pool_size, io_front_pad, 0); 28306f1c819cSKent Overstreet if (ret) 283164f52b0eSMike Snitzer goto out; 28326f1c819cSKent Overstreet if (integrity && bioset_integrity_create(&pools->io_bs, pool_size)) 2833eb8db831SChristoph Hellwig goto out; 283478d8e58aSMike Snitzer break; 283578d8e58aSMike Snitzer case DM_TYPE_REQUEST_BASED: 28360776aa0eSMike Snitzer pool_size = max(dm_get_reserved_rq_based_ios(), min_pool_size); 283778d8e58aSMike Snitzer front_pad = offsetof(struct dm_rq_clone_bio_info, clone); 2838591ddcfcSMike Snitzer /* per_io_data_size is used for blk-mq pdu at queue allocation */ 283978d8e58aSMike Snitzer break; 284078d8e58aSMike Snitzer default: 284178d8e58aSMike Snitzer BUG(); 284278d8e58aSMike Snitzer } 284378d8e58aSMike Snitzer 28446f1c819cSKent Overstreet ret = bioset_init(&pools->bs, pool_size, front_pad, 0); 28456f1c819cSKent Overstreet if (ret) 28465f015204SJun'ichi Nomura goto out; 2847e6ee8c0bSKiyoshi Ueda 28486f1c819cSKent Overstreet if (integrity && bioset_integrity_create(&pools->bs, pool_size)) 28495f015204SJun'ichi Nomura goto out; 2850a91a2785SMartin K. 
Petersen 2851e6ee8c0bSKiyoshi Ueda return pools; 285278d8e58aSMike Snitzer 28535f015204SJun'ichi Nomura out: 28545f015204SJun'ichi Nomura dm_free_md_mempools(pools); 2855e6ee8c0bSKiyoshi Ueda 28564e6e36c3SMike Snitzer return NULL; 2857e6ee8c0bSKiyoshi Ueda } 2858e6ee8c0bSKiyoshi Ueda 2859e6ee8c0bSKiyoshi Ueda void dm_free_md_mempools(struct dm_md_mempools *pools) 2860e6ee8c0bSKiyoshi Ueda { 2861e6ee8c0bSKiyoshi Ueda if (!pools) 2862e6ee8c0bSKiyoshi Ueda return; 2863e6ee8c0bSKiyoshi Ueda 28646f1c819cSKent Overstreet bioset_exit(&pools->bs); 28656f1c819cSKent Overstreet bioset_exit(&pools->io_bs); 2866e6ee8c0bSKiyoshi Ueda 2867e6ee8c0bSKiyoshi Ueda kfree(pools); 2868e6ee8c0bSKiyoshi Ueda } 2869e6ee8c0bSKiyoshi Ueda 28709c72bad1SChristoph Hellwig struct dm_pr { 28719c72bad1SChristoph Hellwig u64 old_key; 28729c72bad1SChristoph Hellwig u64 new_key; 28739c72bad1SChristoph Hellwig u32 flags; 28749c72bad1SChristoph Hellwig bool fail_early; 28759c72bad1SChristoph Hellwig }; 28769c72bad1SChristoph Hellwig 28779c72bad1SChristoph Hellwig static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn, 28789c72bad1SChristoph Hellwig void *data) 28799c72bad1SChristoph Hellwig { 28809c72bad1SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data; 28819c72bad1SChristoph Hellwig struct dm_table *table; 28829c72bad1SChristoph Hellwig struct dm_target *ti; 28839c72bad1SChristoph Hellwig int ret = -ENOTTY, srcu_idx; 28849c72bad1SChristoph Hellwig 28859c72bad1SChristoph Hellwig table = dm_get_live_table(md, &srcu_idx); 28869c72bad1SChristoph Hellwig if (!table || !dm_table_get_size(table)) 28879c72bad1SChristoph Hellwig goto out; 28889c72bad1SChristoph Hellwig 28899c72bad1SChristoph Hellwig /* We only support devices that have a single target */ 28909c72bad1SChristoph Hellwig if (dm_table_get_num_targets(table) != 1) 28919c72bad1SChristoph Hellwig goto out; 28929c72bad1SChristoph Hellwig ti = dm_table_get_target(table, 0); 28939c72bad1SChristoph Hellwig 28949c72bad1SChristoph Hellwig ret = -EINVAL; 28959c72bad1SChristoph Hellwig if (!ti->type->iterate_devices) 28969c72bad1SChristoph Hellwig goto out; 28979c72bad1SChristoph Hellwig 28989c72bad1SChristoph Hellwig ret = ti->type->iterate_devices(ti, fn, data); 28999c72bad1SChristoph Hellwig out: 29009c72bad1SChristoph Hellwig dm_put_live_table(md, srcu_idx); 29019c72bad1SChristoph Hellwig return ret; 29029c72bad1SChristoph Hellwig } 29039c72bad1SChristoph Hellwig 29049c72bad1SChristoph Hellwig /* 29059c72bad1SChristoph Hellwig * For register / unregister we need to manually call out to every path. 
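 *
 * Hedged sketch of the fan-out (it mirrors dm_pr_register() below; "key"
 * stands in for a caller-supplied value):
 *
 *	struct dm_pr pr = { .new_key = key, .fail_early = true };
 *	int ret = dm_call_pr(bdev, __dm_pr_register, &pr);
 *
 * Every underlying path receives the same struct dm_pr through the
 * "data" argument of the iterate_devices callout.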
29069c72bad1SChristoph Hellwig */ 29079c72bad1SChristoph Hellwig static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev, 29089c72bad1SChristoph Hellwig sector_t start, sector_t len, void *data) 29099c72bad1SChristoph Hellwig { 29109c72bad1SChristoph Hellwig struct dm_pr *pr = data; 29119c72bad1SChristoph Hellwig const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; 29129c72bad1SChristoph Hellwig 29139c72bad1SChristoph Hellwig if (!ops || !ops->pr_register) 29149c72bad1SChristoph Hellwig return -EOPNOTSUPP; 29159c72bad1SChristoph Hellwig return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags); 29169c72bad1SChristoph Hellwig } 29179c72bad1SChristoph Hellwig 291871cdb697SChristoph Hellwig static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key, 291971cdb697SChristoph Hellwig u32 flags) 292071cdb697SChristoph Hellwig { 29219c72bad1SChristoph Hellwig struct dm_pr pr = { 29229c72bad1SChristoph Hellwig .old_key = old_key, 29239c72bad1SChristoph Hellwig .new_key = new_key, 29249c72bad1SChristoph Hellwig .flags = flags, 29259c72bad1SChristoph Hellwig .fail_early = true, 29269c72bad1SChristoph Hellwig }; 29279c72bad1SChristoph Hellwig int ret; 292871cdb697SChristoph Hellwig 29299c72bad1SChristoph Hellwig ret = dm_call_pr(bdev, __dm_pr_register, &pr); 29309c72bad1SChristoph Hellwig if (ret && new_key) { 29319c72bad1SChristoph Hellwig /* unregister all paths if we failed to register any path */ 29329c72bad1SChristoph Hellwig pr.old_key = new_key; 29339c72bad1SChristoph Hellwig pr.new_key = 0; 29349c72bad1SChristoph Hellwig pr.flags = 0; 29359c72bad1SChristoph Hellwig pr.fail_early = false; 29369c72bad1SChristoph Hellwig dm_call_pr(bdev, __dm_pr_register, &pr); 29379c72bad1SChristoph Hellwig } 293871cdb697SChristoph Hellwig 29399c72bad1SChristoph Hellwig return ret; 294071cdb697SChristoph Hellwig } 294171cdb697SChristoph Hellwig 294271cdb697SChristoph Hellwig static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type, 294371cdb697SChristoph Hellwig u32 flags) 294471cdb697SChristoph Hellwig { 294571cdb697SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data; 294671cdb697SChristoph Hellwig const struct pr_ops *ops; 2947971888c4SMike Snitzer int r, srcu_idx; 294871cdb697SChristoph Hellwig 29495bd5e8d8SMike Snitzer r = dm_prepare_ioctl(md, &srcu_idx, &bdev); 295071cdb697SChristoph Hellwig if (r < 0) 2951971888c4SMike Snitzer goto out; 295271cdb697SChristoph Hellwig 295371cdb697SChristoph Hellwig ops = bdev->bd_disk->fops->pr_ops; 295471cdb697SChristoph Hellwig if (ops && ops->pr_reserve) 295571cdb697SChristoph Hellwig r = ops->pr_reserve(bdev, key, type, flags); 295671cdb697SChristoph Hellwig else 295771cdb697SChristoph Hellwig r = -EOPNOTSUPP; 2958971888c4SMike Snitzer out: 2959971888c4SMike Snitzer dm_unprepare_ioctl(md, srcu_idx); 296071cdb697SChristoph Hellwig return r; 296171cdb697SChristoph Hellwig } 296271cdb697SChristoph Hellwig 296371cdb697SChristoph Hellwig static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type) 296471cdb697SChristoph Hellwig { 296571cdb697SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data; 296671cdb697SChristoph Hellwig const struct pr_ops *ops; 2967971888c4SMike Snitzer int r, srcu_idx; 296871cdb697SChristoph Hellwig 29695bd5e8d8SMike Snitzer r = dm_prepare_ioctl(md, &srcu_idx, &bdev); 297071cdb697SChristoph Hellwig if (r < 0) 2971971888c4SMike Snitzer goto out; 297271cdb697SChristoph Hellwig 297371cdb697SChristoph Hellwig ops = 
bdev->bd_disk->fops->pr_ops; 297471cdb697SChristoph Hellwig if (ops && ops->pr_release) 297571cdb697SChristoph Hellwig r = ops->pr_release(bdev, key, type); 297671cdb697SChristoph Hellwig else 297771cdb697SChristoph Hellwig r = -EOPNOTSUPP; 2978971888c4SMike Snitzer out: 2979971888c4SMike Snitzer dm_unprepare_ioctl(md, srcu_idx); 298071cdb697SChristoph Hellwig return r; 298171cdb697SChristoph Hellwig } 298271cdb697SChristoph Hellwig 298371cdb697SChristoph Hellwig static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key, 298471cdb697SChristoph Hellwig enum pr_type type, bool abort) 298571cdb697SChristoph Hellwig { 298671cdb697SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data; 298771cdb697SChristoph Hellwig const struct pr_ops *ops; 2988971888c4SMike Snitzer int r, srcu_idx; 298971cdb697SChristoph Hellwig 29905bd5e8d8SMike Snitzer r = dm_prepare_ioctl(md, &srcu_idx, &bdev); 299171cdb697SChristoph Hellwig if (r < 0) 2992971888c4SMike Snitzer goto out; 299371cdb697SChristoph Hellwig 299471cdb697SChristoph Hellwig ops = bdev->bd_disk->fops->pr_ops; 299571cdb697SChristoph Hellwig if (ops && ops->pr_preempt) 299671cdb697SChristoph Hellwig r = ops->pr_preempt(bdev, old_key, new_key, type, abort); 299771cdb697SChristoph Hellwig else 299871cdb697SChristoph Hellwig r = -EOPNOTSUPP; 2999971888c4SMike Snitzer out: 3000971888c4SMike Snitzer dm_unprepare_ioctl(md, srcu_idx); 300171cdb697SChristoph Hellwig return r; 300271cdb697SChristoph Hellwig } 300371cdb697SChristoph Hellwig 300471cdb697SChristoph Hellwig static int dm_pr_clear(struct block_device *bdev, u64 key) 300571cdb697SChristoph Hellwig { 300671cdb697SChristoph Hellwig struct mapped_device *md = bdev->bd_disk->private_data; 300771cdb697SChristoph Hellwig const struct pr_ops *ops; 3008971888c4SMike Snitzer int r, srcu_idx; 300971cdb697SChristoph Hellwig 30105bd5e8d8SMike Snitzer r = dm_prepare_ioctl(md, &srcu_idx, &bdev); 301171cdb697SChristoph Hellwig if (r < 0) 3012971888c4SMike Snitzer goto out; 301371cdb697SChristoph Hellwig 301471cdb697SChristoph Hellwig ops = bdev->bd_disk->fops->pr_ops; 301571cdb697SChristoph Hellwig if (ops && ops->pr_clear) 301671cdb697SChristoph Hellwig r = ops->pr_clear(bdev, key); 301771cdb697SChristoph Hellwig else 301871cdb697SChristoph Hellwig r = -EOPNOTSUPP; 3019971888c4SMike Snitzer out: 3020971888c4SMike Snitzer dm_unprepare_ioctl(md, srcu_idx); 302171cdb697SChristoph Hellwig return r; 302271cdb697SChristoph Hellwig } 302371cdb697SChristoph Hellwig 302471cdb697SChristoph Hellwig static const struct pr_ops dm_pr_ops = { 302571cdb697SChristoph Hellwig .pr_register = dm_pr_register, 302671cdb697SChristoph Hellwig .pr_reserve = dm_pr_reserve, 302771cdb697SChristoph Hellwig .pr_release = dm_pr_release, 302871cdb697SChristoph Hellwig .pr_preempt = dm_pr_preempt, 302971cdb697SChristoph Hellwig .pr_clear = dm_pr_clear, 303071cdb697SChristoph Hellwig }; 303171cdb697SChristoph Hellwig 303283d5cde4SAlexey Dobriyan static const struct block_device_operations dm_blk_dops = { 3033c62b37d9SChristoph Hellwig .submit_bio = dm_submit_bio, 30341da177e4SLinus Torvalds .open = dm_blk_open, 30351da177e4SLinus Torvalds .release = dm_blk_close, 3036aa129a22SMilan Broz .ioctl = dm_blk_ioctl, 30373ac51e74SDarrick J. 
Wong .getgeo = dm_blk_getgeo, 3038e76239a3SChristoph Hellwig .report_zones = dm_blk_report_zones, 303971cdb697SChristoph Hellwig .pr_ops = &dm_pr_ops, 30401da177e4SLinus Torvalds .owner = THIS_MODULE 30411da177e4SLinus Torvalds }; 30421da177e4SLinus Torvalds 3043681cc5e8SMike Snitzer static const struct block_device_operations dm_rq_blk_dops = { 3044681cc5e8SMike Snitzer .open = dm_blk_open, 3045681cc5e8SMike Snitzer .release = dm_blk_close, 3046681cc5e8SMike Snitzer .ioctl = dm_blk_ioctl, 3047681cc5e8SMike Snitzer .getgeo = dm_blk_getgeo, 3048681cc5e8SMike Snitzer .pr_ops = &dm_pr_ops, 3049681cc5e8SMike Snitzer .owner = THIS_MODULE 3050681cc5e8SMike Snitzer }; 3051681cc5e8SMike Snitzer 3052f26c5719SDan Williams static const struct dax_operations dm_dax_ops = { 3053f26c5719SDan Williams .direct_access = dm_dax_direct_access, 30547bf7eac8SDan Williams .dax_supported = dm_dax_supported, 30557e026c8cSDan Williams .copy_from_iter = dm_dax_copy_from_iter, 3056b3a9a0c3SDan Williams .copy_to_iter = dm_dax_copy_to_iter, 3057cdf6cdcdSVivek Goyal .zero_page_range = dm_dax_zero_page_range, 3058f26c5719SDan Williams }; 3059f26c5719SDan Williams 30601da177e4SLinus Torvalds /* 30611da177e4SLinus Torvalds * module hooks 30621da177e4SLinus Torvalds */ 30631da177e4SLinus Torvalds module_init(dm_init); 30641da177e4SLinus Torvalds module_exit(dm_exit); 30651da177e4SLinus Torvalds 30661da177e4SLinus Torvalds module_param(major, uint, 0); 30671da177e4SLinus Torvalds MODULE_PARM_DESC(major, "The major number of the device mapper"); 3068f4790826SMike Snitzer 3069e8603136SMike Snitzer module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR); 3070e8603136SMike Snitzer MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools"); 3071e8603136SMike Snitzer 3072115485e8SMike Snitzer module_param(dm_numa_node, int, S_IRUGO | S_IWUSR); 3073115485e8SMike Snitzer MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations"); 3074115485e8SMike Snitzer 3075a666e5c0SMikulas Patocka module_param(swap_bios, int, S_IRUGO | S_IWUSR); 3076a666e5c0SMikulas Patocka MODULE_PARM_DESC(swap_bios, "Maximum allowed inflight swap IOs"); 3077a666e5c0SMikulas Patocka 30781da177e4SLinus Torvalds MODULE_DESCRIPTION(DM_NAME " driver"); 30791da177e4SLinus Torvalds MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); 30801da177e4SLinus Torvalds MODULE_LICENSE("GPL");
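/*
 * Hedged usage notes for the module parameters above (illustrative values;
 * the sysfs paths assume this file is built as the usual dm_mod module):
 *
 *	modprobe dm_mod major=240
 *	echo 16 > /sys/module/dm_mod/parameters/reserved_bio_based_ios
 *	echo 1024 > /sys/module/dm_mod/parameters/swap_bios
 */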