/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"
#include "dm-uevent.h"
#include "dm-ima.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/uio.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/pr.h>
#include <linux/refcount.h>
#include <linux/part_stat.h>
#include <linux/blk-crypto.h>
#include <linux/blk-crypto-profile.h>

#define DM_MSG_PREFIX "core"

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_IDR(_minor_idr);

static DEFINE_SPINLOCK(_minor_lock);

static void do_deferred_remove(struct work_struct *w);

static DECLARE_WORK(deferred_remove_work, do_deferred_remove);

static struct workqueue_struct *deferred_remove_workqueue;

atomic_t dm_global_event_nr = ATOMIC_INIT(0);
DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);

void dm_issue_global_event(void)
{
	atomic_inc(&dm_global_event_nr);
	wake_up(&dm_global_eventq);
}

/*
 * One of these is allocated (on-stack) per original bio.
 */
struct clone_info {
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	unsigned sector_count;
};

#define DM_TARGET_IO_BIO_OFFSET (offsetof(struct dm_target_io, clone))
#define DM_IO_BIO_OFFSET \
	(offsetof(struct dm_target_io, clone) + offsetof(struct dm_io, tio))

static inline struct dm_target_io *clone_to_tio(struct bio *clone)
{
	return container_of(clone, struct dm_target_io, clone);
}

void *dm_per_bio_data(struct bio *bio, size_t data_size)
{
	if (!clone_to_tio(bio)->inside_dm_io)
		return (char *)bio - DM_TARGET_IO_BIO_OFFSET - data_size;
	return (char *)bio - DM_IO_BIO_OFFSET - data_size;
}
EXPORT_SYMBOL_GPL(dm_per_bio_data);

struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
{
	struct dm_io *io = (struct dm_io *)((char *)data + data_size);
	if (io->magic == DM_IO_MAGIC)
		return (struct bio *)((char *)io + DM_IO_BIO_OFFSET);
	BUG_ON(io->magic != DM_TIO_MAGIC);
	return (struct bio *)((char *)io + DM_TARGET_IO_BIO_OFFSET);
}
EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data);
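/*
 * Memory layout behind the two offset macros above, for a clone carrying
 * data_size bytes of target-private per-bio-data:
 *
 *   first clone, embedded in a struct dm_io (inside_dm_io):
 *	[data_size][struct dm_io{..., struct dm_target_io tio{..., struct bio clone}}]
 *	per-bio-data = (char *)&clone - DM_IO_BIO_OFFSET - data_size
 *
 *   additional clones allocated from md->bs (!inside_dm_io):
 *	[data_size][struct dm_target_io{..., struct bio clone}]
 *	per-bio-data = (char *)&clone - DM_TARGET_IO_BIO_OFFSET - data_size
 *
 * dm_bio_from_per_bio_data() walks the same arithmetic in reverse, using the
 * magic field (DM_IO_MAGIC vs DM_TIO_MAGIC) to tell the two cases apart.
 */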
unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
{
	return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
}
EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);

#define MINOR_ALLOCED ((void *)-1)

#define DM_NUMA_NODE NUMA_NO_NODE
static int dm_numa_node = DM_NUMA_NODE;

#define DEFAULT_SWAP_BIOS	(8 * 1048576 / PAGE_SIZE)
static int swap_bios = DEFAULT_SWAP_BIOS;
static int get_swap_bios(void)
{
	int latch = READ_ONCE(swap_bios);
	if (unlikely(latch <= 0))
		latch = DEFAULT_SWAP_BIOS;
	return latch;
}

/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
	struct bio_set bs;
	struct bio_set io_bs;
};

struct table_device {
	struct list_head list;
	refcount_t count;
	struct dm_dev dm_dev;
};

/*
 * Bio-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_BIO_BASED_IOS		16
static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;

static int __dm_get_module_param_int(int *module_param, int min, int max)
{
	int param = READ_ONCE(*module_param);
	int modified_param = 0;
	bool modified = true;

	if (param < min)
		modified_param = min;
	else if (param > max)
		modified_param = max;
	else
		modified = false;

	if (modified) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned __dm_get_module_param(unsigned *module_param,
			       unsigned def, unsigned max)
{
	unsigned param = READ_ONCE(*module_param);
	unsigned modified_param = 0;

	if (!param)
		modified_param = def;
	else if (param > max)
		modified_param = max;

	if (modified_param) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}
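/*
 * Both helpers above read a user-settable module parameter, clamp it to its
 * valid range, and publish the clamped value back with a single cmpxchg();
 * a concurrent writer can at worst force the next reader to re-clamp, so no
 * locking is needed.
 */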
unsigned dm_get_reserved_bio_based_ios(void)
{
	return __dm_get_module_param(&reserved_bio_based_ios,
				     RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);

static unsigned dm_get_numa_node(void)
{
	return __dm_get_module_param_int(&dm_numa_node,
					 DM_NUMA_NODE, num_online_nodes() - 1);
}

static int __init local_init(void)
{
	int r;

	r = dm_uevent_init();
	if (r)
		return r;

	deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
	if (!deferred_remove_workqueue) {
		r = -ENOMEM;
		goto out_uevent_exit;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_free_workqueue;

	if (!_major)
		_major = r;

	return 0;

out_free_workqueue:
	destroy_workqueue(deferred_remove_workqueue);
out_uevent_exit:
	dm_uevent_exit();

	return r;
}

static void local_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(deferred_remove_workqueue);

	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_io_init,
	dm_kcopyd_init,
	dm_interface_init,
	dm_statistics_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_io_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
	dm_statistics_exit,
};
static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);
	int r, i;

#if (IS_ENABLED(CONFIG_IMA) && !IS_ENABLED(CONFIG_IMA_DISABLE_HTABLE))
	DMWARN("CONFIG_IMA_DISABLE_HTABLE is disabled."
	       " Duplicate IMA measurements will not be recorded in the IMA log.");
#endif

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;
bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();

	/*
	 * Should be empty by this point.
	 */
	idr_destroy(&_minor_idr);
}

/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{
	return test_bit(DMF_DELETING, &md->flags);
}

static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);
out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}

static void dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = disk->private_data;
	if (WARN_ON(!md))
		goto out;

	if (atomic_dec_and_test(&md->open_count) &&
	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
		queue_work(deferred_remove_workqueue, &deferred_remove_work);

	dm_put(md);
out:
	spin_unlock(&_minor_lock);
}
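/*
 * Opens are refused once DMF_FREEING or DMF_DELETING is set, and the last
 * close of a device flagged DMF_DEFERRED_REMOVE is what actually schedules
 * the deferred removal work.
 */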
int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md)) {
		r = -EBUSY;
		if (mark_deferred)
			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
		r = -EEXIST;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

int dm_cancel_deferred_remove(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (test_bit(DMF_DELETING, &md->flags))
		r = -EBUSY;
	else
		clear_bit(DMF_DEFERRED_REMOVE, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static void do_deferred_remove(struct work_struct *w)
{
	dm_deferred_remove();
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
			    struct block_device **bdev)
{
	struct dm_target *tgt;
	struct dm_table *map;
	int r;

retry:
	r = -ENOTTY;
	map = dm_get_live_table(md, srcu_idx);
	if (!map || !dm_table_get_size(map))
		return r;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		return r;

	tgt = dm_table_get_target(map, 0);
	if (!tgt->type->prepare_ioctl)
		return r;

	if (dm_suspended_md(md))
		return -EAGAIN;

	r = tgt->type->prepare_ioctl(tgt, bdev);
	if (r == -ENOTCONN && !fatal_signal_pending(current)) {
		dm_put_live_table(md, *srcu_idx);
		msleep(10);
		goto retry;
	}

	return r;
}

static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
{
	dm_put_live_table(md, srcu_idx);
}
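/*
 * On success dm_prepare_ioctl() returns with the live-table SRCU reference
 * still held (keeping *bdev valid), so every caller must pair it with
 * dm_unprepare_ioctl(). The -ENOTCONN loop retries while the target cannot
 * currently provide a device (e.g. multipath with no usable path yet),
 * unless a fatal signal is pending.
 */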
static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	if (r > 0) {
		/*
		 * Target determined this ioctl is being issued against a
		 * subset of the parent bdev; require extra privileges.
		 */
		if (!capable(CAP_SYS_RAWIO)) {
			DMDEBUG_LIMIT(
				"%s: sending ioctl %x to DM device without required privilege.",
				current->comm, cmd);
			r = -ENOIOCTLCMD;
			goto out;
		}
	}

	if (!bdev->bd_disk->fops->ioctl)
		r = -ENOTTY;
	else
		r = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}

u64 dm_start_time_ns_from_clone(struct bio *bio)
{
	return jiffies_to_nsecs(clone_to_tio(bio)->io->start_time);
}
EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone);

static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->orig_bio;

	bio_start_io_acct_time(bio, io->start_time);
	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio_data_dir(bio),
				    bio->bi_iter.bi_sector, bio_sectors(bio),
				    false, 0, &io->stats_aux);
}

static void end_io_acct(struct mapped_device *md, struct bio *bio,
			unsigned long start_time, struct dm_stats_aux *stats_aux)
{
	unsigned long duration = jiffies - start_time;

	bio_end_io_acct(bio, start_time);

	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio_data_dir(bio),
				    bio->bi_iter.bi_sector, bio_sectors(bio),
				    true, duration, stats_aux);

	/* nudge anyone waiting on suspend queue */
	if (unlikely(wq_has_sleeper(&md->wait)))
		wake_up(&md->wait);
}
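/*
 * start_io_acct()/end_io_acct() bracket the lifetime of the original bio for
 * both generic block-layer accounting and (when enabled) dm-stats; the final
 * wake_up(&md->wait) lets a suspend that is waiting for in-flight I/O to
 * drain make progress.
 */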
static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
{
	struct dm_io *io;
	struct dm_target_io *tio;
	struct bio *clone;

	clone = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO, &md->io_bs);

	tio = clone_to_tio(clone);
	tio->inside_dm_io = true;
	tio->io = NULL;

	io = container_of(tio, struct dm_io, tio);
	io->magic = DM_IO_MAGIC;
	io->status = 0;
	atomic_set(&io->io_count, 1);
	io->orig_bio = bio;
	io->md = md;
	spin_lock_init(&io->endio_lock);

	io->start_time = jiffies;

	return io;
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	bio_put(&io->tio.clone);
}

static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti,
		unsigned target_bio_nr, unsigned *len, gfp_t gfp_mask)
{
	struct dm_target_io *tio;

	if (!ci->io->tio.io) {
		/* the dm_target_io embedded in ci->io is available */
		tio = &ci->io->tio;
	} else {
		struct bio *clone = bio_alloc_clone(ci->bio->bi_bdev, ci->bio,
						    gfp_mask, &ci->io->md->bs);
		if (!clone)
			return NULL;

		tio = clone_to_tio(clone);
		tio->inside_dm_io = false;
	}

	tio->magic = DM_TIO_MAGIC;
	tio->io = ci->io;
	tio->ti = ti;
	tio->target_bio_nr = target_bio_nr;
	tio->len_ptr = len;

	return &tio->clone;
}

static void free_tio(struct bio *clone)
{
	if (clone_to_tio(clone)->inside_dm_io)
		return;
	bio_put(clone);
}
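/*
 * Clone allocation is asymmetric: the first clone of an original bio is
 * front-padded with the whole struct dm_io (allocated from md->io_bs) and
 * reuses the dm_target_io embedded there, while additional clones come from
 * md->bs. free_tio() therefore only puts the non-embedded clones; the
 * embedded one is released when free_io() puts the dm_io.
 */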
/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&md->deferred_lock, flags);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irqrestore(&md->deferred_lock, flags);
	queue_work(md->wq, &md->work);
}

/*
 * Everyone (including functions in this file) should use this function to
 * access the md->map field, and make sure they call dm_put_live_table()
 * when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
{
	*srcu_idx = srcu_read_lock(&md->io_barrier);

	return srcu_dereference(md->map, &md->io_barrier);
}

void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
{
	srcu_read_unlock(&md->io_barrier, srcu_idx);
}

void dm_sync_table(struct mapped_device *md)
{
	synchronize_srcu(&md->io_barrier);
	synchronize_rcu_expedited();
}

/*
 * A fast alternative to dm_get_live_table/dm_put_live_table.
 * The caller must not block between these two functions.
 */
static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
{
	rcu_read_lock();
	return rcu_dereference(md->map);
}

static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
{
	rcu_read_unlock();
}
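/*
 * Typical reader pattern (sketch):
 *
 *	int srcu_idx;
 *	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 *
 *	if (map) {
 *		... use map; it cannot be swapped or freed underneath us ...
 *	}
 *	dm_put_live_table(md, srcu_idx);
 *
 * dm_sync_table() is the writer-side barrier: after publishing a new
 * md->map it waits for all SRCU and RCU-fast readers to drain.
 */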
static char *_dm_claim_ptr = "I belong to device-mapper";

/*
 * Open a table device so we can use it as a map destination.
 */
static int open_table_device(struct table_device *td, dev_t dev,
			     struct mapped_device *md)
{
	struct block_device *bdev;
	u64 part_off;
	int r;

	BUG_ON(td->dm_dev.bdev);

	bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _dm_claim_ptr);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	r = bd_link_disk_holder(bdev, dm_disk(md));
	if (r) {
		blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
		return r;
	}

	td->dm_dev.bdev = bdev;
	td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev, &part_off);
	return 0;
}
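/*
 * The bdev is claimed exclusively (FMODE_EXCL) against _dm_claim_ptr, so
 * nothing else can claim it while a table references it, and
 * bd_link_disk_holder() records the holder relationship for sysfs
 * (the holders/slaves links).
 */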
/*
 * Close a table device that we've been using.
 */
static void close_table_device(struct table_device *td, struct mapped_device *md)
{
	if (!td->dm_dev.bdev)
		return;

	bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
	blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
	put_dax(td->dm_dev.dax_dev);
	td->dm_dev.bdev = NULL;
	td->dm_dev.dax_dev = NULL;
}

static struct table_device *find_table_device(struct list_head *l, dev_t dev,
					      fmode_t mode)
{
	struct table_device *td;

	list_for_each_entry(td, l, list)
		if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
			return td;

	return NULL;
}

int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
			struct dm_dev **result)
{
	int r;
	struct table_device *td;

	mutex_lock(&md->table_devices_lock);
	td = find_table_device(&md->table_devices, dev, mode);
	if (!td) {
		td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
		if (!td) {
			mutex_unlock(&md->table_devices_lock);
			return -ENOMEM;
		}

		td->dm_dev.mode = mode;
		td->dm_dev.bdev = NULL;

		if ((r = open_table_device(td, dev, md))) {
			mutex_unlock(&md->table_devices_lock);
			kfree(td);
			return r;
		}

		format_dev_t(td->dm_dev.name, dev);

		refcount_set(&td->count, 1);
		list_add(&td->list, &md->table_devices);
	} else {
		refcount_inc(&td->count);
	}
	mutex_unlock(&md->table_devices_lock);

	*result = &td->dm_dev;
	return 0;
}

void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
{
	struct table_device *td = container_of(d, struct table_device, dm_dev);

	mutex_lock(&md->table_devices_lock);
	if (refcount_dec_and_test(&td->count)) {
		close_table_device(td, md);
		list_del(&td->list);
		kfree(td);
	}
	mutex_unlock(&md->table_devices_lock);
}
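/*
 * table_devices are shared: repeated dm_get_table_device() calls for the
 * same dev_t/mode pair return the one entry and bump its refcount, so the
 * underlying block device is opened once and only closed on the final
 * dm_put_table_device().
 */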
static void free_table_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct table_device *td = list_entry(tmp, struct table_device, list);

		DMWARN("dm_destroy: %s still exists with %d references",
		       td->dm_dev.name, refcount_read(&td->count));
		kfree(td);
	}
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}
/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
void dm_io_dec_pending(struct dm_io *io, blk_status_t error)
{
	unsigned long flags;
	blk_status_t io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;
	unsigned long start_time = 0;
	struct dm_stats_aux stats_aux;

	/* Push-back supersedes any I/O errors */
	if (unlikely(error)) {
		spin_lock_irqsave(&io->endio_lock, flags);
		if (!(io->status == BLK_STS_DM_REQUEUE && __noflush_suspending(md)))
			io->status = error;
		spin_unlock_irqrestore(&io->endio_lock, flags);
	}

	if (atomic_dec_and_test(&io->io_count)) {
		bio = io->orig_bio;
		if (io->status == BLK_STS_DM_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md) &&
			    !WARN_ON_ONCE(dm_is_zone_write(md, bio))) {
				/* NOTE early return due to BLK_STS_DM_REQUEUE below */
				bio_list_add_head(&md->deferred, bio);
			} else {
				/*
				 * noflush suspend was interrupted or this is
				 * a write to a zoned target.
				 */
				io->status = BLK_STS_IOERR;
			}
			spin_unlock_irqrestore(&md->deferred_lock, flags);
		}

		io_error = io->status;
		start_time = io->start_time;
		stats_aux = io->stats_aux;
		free_io(md, io);
		end_io_acct(md, bio, start_time, &stats_aux);

		if (io_error == BLK_STS_DM_REQUEUE)
			return;

		if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
			/*
			 * Preflush done for flush with data, reissue
			 * without REQ_PREFLUSH.
			 */
			bio->bi_opf &= ~REQ_PREFLUSH;
			queue_io(md, bio);
		} else {
			/* done with normal IO or empty flush */
			if (io_error)
				bio->bi_status = io_error;
			bio_endio(bio);
		}
	}
}
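/*
 * A completed original bio thus takes one of three exits above: pushed back
 * onto md->deferred for resubmission after a noflush suspend, reissued
 * without REQ_PREFLUSH once the preflush of a flush-with-data has finished,
 * or ended for good via bio_endio().
 */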
void disable_discard(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support DISCARD, disable it */
	limits->max_discard_sectors = 0;
	blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue);
}

void disable_write_same(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE SAME, disable it */
	limits->max_write_same_sectors = 0;
}

void disable_write_zeroes(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE ZEROES, disable it */
	limits->max_write_zeroes_sectors = 0;
}

static bool swap_bios_limit(struct dm_target *ti, struct bio *bio)
{
	return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios);
}

static void clone_endio(struct bio *bio)
{
	blk_status_t error = bio->bi_status;
	struct dm_target_io *tio = clone_to_tio(bio);
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;
	struct request_queue *q = bio->bi_bdev->bd_disk->queue;

	if (unlikely(error == BLK_STS_TARGET)) {
		if (bio_op(bio) == REQ_OP_DISCARD &&
		    !q->limits.max_discard_sectors)
			disable_discard(md);
		else if (bio_op(bio) == REQ_OP_WRITE_SAME &&
			 !q->limits.max_write_same_sectors)
			disable_write_same(md);
		else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
			 !q->limits.max_write_zeroes_sectors)
			disable_write_zeroes(md);
	}

	if (blk_queue_is_zoned(q))
		dm_zone_endio(io, bio);

	if (endio) {
		int r = endio(tio->ti, bio, &error);
		switch (r) {
		case DM_ENDIO_REQUEUE:
			/*
			 * Requeuing writes to a sequential zone of a zoned
			 * target will break the sequential write pattern:
			 * fail such IO.
			 */
			if (WARN_ON_ONCE(dm_is_zone_write(md, bio)))
				error = BLK_STS_IOERR;
			else
				error = BLK_STS_DM_REQUEUE;
			fallthrough;
		case DM_ENDIO_DONE:
			break;
		case DM_ENDIO_INCOMPLETE:
			/* The target will handle the io */
			return;
		default:
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	if (unlikely(swap_bios_limit(tio->ti, bio))) {
		struct mapped_device *md = io->md;
		up(&md->swap_bios_semaphore);
	}

	free_tio(bio);
	dm_io_dec_pending(io, error);
}
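/*
 * Two layers of completion policy meet above: BLK_STS_TARGET is taken to
 * mean the operation type itself (discard, write-same, write-zeroes) is not
 * really supported, so it is disabled queue-wide via the helpers; the
 * target's ->end_io return value may then turn any error into a requeue
 * (DM_ENDIO_REQUEUE), defer completion entirely (DM_ENDIO_INCOMPLETE), or
 * accept the status as-is (DM_ENDIO_DONE).
 */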
/*
 * Return maximum size of I/O possible at the supplied sector up to the current
 * target boundary.
 */
static inline sector_t max_io_len_target_boundary(struct dm_target *ti,
						  sector_t target_offset)
{
	return ti->len - target_offset;
}

static sector_t max_io_len(struct dm_target *ti, sector_t sector)
{
	sector_t target_offset = dm_target_offset(ti, sector);
	sector_t len = max_io_len_target_boundary(ti, target_offset);
	sector_t max_len;

	/*
	 * Does the target need to split IO even further?
	 * - varied (per target) IO splitting is a tenet of DM; this
	 *   explains why stacked chunk_sectors based splitting via
	 *   blk_max_size_offset() isn't possible here. So pass in
	 *   ti->max_io_len to override stacked chunk_sectors.
	 */
	if (ti->max_io_len) {
		max_len = blk_max_size_offset(ti->table->md->queue,
					      target_offset, ti->max_io_len);
		if (len > max_len)
			len = max_len;
	}

	return len;
}
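/*
 * Example, assuming a power-of-two ti->max_io_len: for ti->len = 1000 and
 * ti->max_io_len = 128, an I/O arriving at target_offset 130 may span at
 * most 126 sectors. The target boundary would allow 870, but
 * blk_max_size_offset() caps the I/O at the next 128-sector chunk boundary
 * (offset 256), keeping subsequent splits aligned to max_io_len.
 */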
int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
{
	if (len > UINT_MAX) {
		DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
		      (unsigned long long)len, UINT_MAX);
		ti->error = "Maximum size of target IO is too large";
		return -EINVAL;
	}

	ti->max_io_len = (uint32_t) len;

	return 0;
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);

static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
						sector_t sector, int *srcu_idx)
	__acquires(md->io_barrier)
{
	struct dm_table *map;
	struct dm_target *ti;

	map = dm_get_live_table(md, srcu_idx);
	if (!map)
		return NULL;

	ti = dm_table_find_target(map, sector);
	if (!ti)
		return NULL;

	return ti;
}

static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
				 long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long len, ret = -EIO;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->direct_access)
		goto out;
	len = max_io_len(ti, sector) / PAGE_SECTORS;
	if (len < 1)
		goto out;
	nr_pages = min(len, nr_pages);
	ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);

 out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
				  size_t nr_pages)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	int ret = -EIO;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (WARN_ON(!ti->type->dax_zero_page_range)) {
		/*
		 * ->zero_page_range() is a mandatory dax operation. If we are
		 * here, something is wrong.
		 */
		goto out;
	}
	ret = ti->type->dax_zero_page_range(ti, pgoff, nr_pages);
 out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}
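/*
 * Both DAX entry points above follow the same shape: resolve the pgoff to a
 * target under the live-table SRCU reference, forward to the target's DAX
 * callback, then drop the table reference.
 */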
/*
 * A target may call dm_accept_partial_bio only from the map routine. It is
 * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_* zone management
 * operations and REQ_OP_ZONE_APPEND (zone append writes).
 *
 * dm_accept_partial_bio informs the dm that the target only wants to process
 * additional n_sectors sectors of the bio and the rest of the data should be
 * sent in a next bio.
 *
 * A diagram that explains the arithmetic:
 * +--------------------+---------------+-------+
 * |         1          |       2       |   3   |
 * +--------------------+---------------+-------+
 *
 * <-------------- *tio->len_ptr --------------->
 *                      <------- bi_size ------->
 *                      <-- n_sectors -->
 *
 * Region 1 was already iterated over with bio_advance or similar function.
 *	(it may be empty if the target doesn't use bio_advance)
 * Region 2 is the remaining bio size that the target wants to process.
 *	(it may be empty if region 1 is non-empty, although there is no reason
 *	 to make it empty)
 * The target requires that region 3 is to be sent in the next bio.
 *
 * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
 * the partially processed part (the sum of regions 1+2) must be the same for all
 * copies of the bio.
 */
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
{
	struct dm_target_io *tio = clone_to_tio(bio);
	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;

	BUG_ON(bio->bi_opf & REQ_PREFLUSH);
	BUG_ON(op_is_zone_mgmt(bio_op(bio)));
	BUG_ON(bio_op(bio) == REQ_OP_ZONE_APPEND);
	BUG_ON(bi_size > *tio->len_ptr);
	BUG_ON(n_sectors > bi_size);

	*tio->len_ptr -= bi_size - n_sectors;
	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
}
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
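/*
 * Usage sketch from a hypothetical target's ->map routine that can only
 * handle I/O up to some per-region limit (all example_* names are
 * illustrative, not real API):
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		unsigned max = example_sectors_left_in_region(ti, bio);
 *
 *		if (bio_sectors(bio) > max)
 *			dm_accept_partial_bio(bio, max);
 *		bio_set_dev(bio, example_dev(ti)->bdev);
 *		return DM_MAPIO_REMAPPED;
 *	}
 *
 * DM core then resubmits the remainder (region 3 above) as a new bio.
 */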
static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch)
{
	mutex_lock(&md->swap_bios_lock);
	while (latch < md->swap_bios) {
		cond_resched();
		down(&md->swap_bios_semaphore);
		md->swap_bios--;
	}
	while (latch > md->swap_bios) {
		cond_resched();
		up(&md->swap_bios_semaphore);
		md->swap_bios++;
	}
	mutex_unlock(&md->swap_bios_lock);
}
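/*
 * md->swap_bios_semaphore holds one token per allowed in-flight swap bio;
 * when the swap_bios module parameter changes, the helper above resizes the
 * semaphore one token at a time until it matches the new limit.
 */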
static void __map_bio(struct bio *clone)
{
	struct dm_target_io *tio = clone_to_tio(clone);
	int r;
	sector_t sector;
	struct dm_io *io = tio->io;
	struct dm_target *ti = tio->ti;

	clone->bi_end_io = clone_endio;

	/*
	 * Map the clone. If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	dm_io_inc_pending(io);
	sector = clone->bi_iter.bi_sector;

	if (unlikely(swap_bios_limit(ti, clone))) {
		struct mapped_device *md = io->md;
		int latch = get_swap_bios();
		if (unlikely(latch != md->swap_bios))
			__set_swap_bios_limit(md, latch);
		down(&md->swap_bios_semaphore);
	}

	/*
	 * Check if the IO needs a special mapping due to zone append emulation
	 * on zoned target. In this case, dm_zone_map_bio() calls the target
	 * map operation.
	 */
	if (dm_emulate_zone_append(io->md))
		r = dm_zone_map_bio(tio);
	else
		r = ti->type->map(ti, clone);

	switch (r) {
	case DM_MAPIO_SUBMITTED:
		break;
	case DM_MAPIO_REMAPPED:
		/* the bio has been remapped so dispatch it */
		trace_block_bio_remap(clone, bio_dev(io->orig_bio), sector);
		submit_bio_noacct(clone);
		break;
	case DM_MAPIO_KILL:
		if (unlikely(swap_bios_limit(ti, clone))) {
			struct mapped_device *md = io->md;
			up(&md->swap_bios_semaphore);
		}
		free_tio(clone);
		dm_io_dec_pending(io, BLK_STS_IOERR);
		break;
	case DM_MAPIO_REQUEUE:
		if (unlikely(swap_bios_limit(ti, clone))) {
			struct mapped_device *md = io->md;
			up(&md->swap_bios_semaphore);
		}
		free_tio(clone);
		dm_io_dec_pending(io, BLK_STS_DM_REQUEUE);
		break;
	default:
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}
}
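/*
 * A target's ->map therefore has four possible answers: DM_MAPIO_SUBMITTED
 * (the target took ownership of the clone), DM_MAPIO_REMAPPED (dispatched
 * above via submit_bio_noacct()), and DM_MAPIO_KILL or DM_MAPIO_REQUEUE
 * (clone freed and the dm_io completed with BLK_STS_IOERR or
 * BLK_STS_DM_REQUEUE respectively).
 */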
static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
{
	bio->bi_iter.bi_sector = sector;
	bio->bi_iter.bi_size = to_bytes(len);
}

/*
 * Creates a bio that consists of a range of complete bvecs.
 */
static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
				    sector_t sector, unsigned *len)
{
	struct bio *bio = ci->bio, *clone;

	clone = alloc_tio(ci, ti, 0, len, GFP_NOIO);
	bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
	clone->bi_iter.bi_size = to_bytes(*len);

	if (bio_integrity(bio))
		bio_integrity_trim(clone);

	__map_bio(clone);
	return 0;
}
11931da177e4SLinus Torvalds */ 1194b1bee792SChristoph Hellwig static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti, 1195b1bee792SChristoph Hellwig sector_t sector, unsigned *len) 11961da177e4SLinus Torvalds { 1197b1bee792SChristoph Hellwig struct bio *bio = ci->bio, *clone; 11981da177e4SLinus Torvalds 11991d1068ceSChristoph Hellwig clone = alloc_tio(ci, ti, 0, len, GFP_NOIO); 1200fa8db494SMike Snitzer bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector)); 1201b1bee792SChristoph Hellwig clone->bi_iter.bi_size = to_bytes(*len); 1202fa8db494SMike Snitzer 1203fa8db494SMike Snitzer if (bio_integrity(bio)) 1204fa8db494SMike Snitzer bio_integrity_trim(clone); 1205c80914e8SMike Snitzer 12061561b396SChristoph Hellwig __map_bio(clone); 1207c80914e8SMike Snitzer return 0; 12081da177e4SLinus Torvalds } 12091da177e4SLinus Torvalds 1210318716ddSMike Snitzer static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci, 1211dc8e2021SChristoph Hellwig struct dm_target *ti, unsigned num_bios, 1212dc8e2021SChristoph Hellwig unsigned *len) 1213f9ab94ceSMikulas Patocka { 12141d1068ceSChristoph Hellwig struct bio *bio; 1215318716ddSMike Snitzer int try; 1216dba14160SMikulas Patocka 1217318716ddSMike Snitzer for (try = 0; try < 2; try++) { 1218318716ddSMike Snitzer int bio_nr; 1219318716ddSMike Snitzer 1220318716ddSMike Snitzer if (try) 1221bc02cdbeSMike Snitzer mutex_lock(&ci->io->md->table_devices_lock); 1222318716ddSMike Snitzer for (bio_nr = 0; bio_nr < num_bios; bio_nr++) { 12231d1068ceSChristoph Hellwig bio = alloc_tio(ci, ti, bio_nr, len, 1224dc8e2021SChristoph Hellwig try ? GFP_NOIO : GFP_NOWAIT); 12251d1068ceSChristoph Hellwig if (!bio) 1226318716ddSMike Snitzer break; 1227318716ddSMike Snitzer 12281d1068ceSChristoph Hellwig bio_list_add(blist, bio); 1229318716ddSMike Snitzer } 1230318716ddSMike Snitzer if (try) 1231bc02cdbeSMike Snitzer mutex_unlock(&ci->io->md->table_devices_lock); 1232318716ddSMike Snitzer if (bio_nr == num_bios) 1233318716ddSMike Snitzer return; 1234318716ddSMike Snitzer 12356c23f0bdSChristoph Hellwig while ((bio = bio_list_pop(blist))) 12361d1068ceSChristoph Hellwig free_tio(bio); 1237318716ddSMike Snitzer } 1238318716ddSMike Snitzer } 1239318716ddSMike Snitzer 12408eabf5d0SChristoph Hellwig static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti, 12418eabf5d0SChristoph Hellwig unsigned num_bios, unsigned *len) 12429015df24SAlasdair G Kergon { 12438eabf5d0SChristoph Hellwig struct bio_list blist = BIO_EMPTY_LIST; 12448eabf5d0SChristoph Hellwig struct bio *clone; 12458eabf5d0SChristoph Hellwig 1246891fced6SChristoph Hellwig switch (num_bios) { 1247891fced6SChristoph Hellwig case 0: 1248891fced6SChristoph Hellwig break; 1249891fced6SChristoph Hellwig case 1: 1250891fced6SChristoph Hellwig clone = alloc_tio(ci, ti, 0, len, GFP_NOIO); 1251891fced6SChristoph Hellwig if (len) 1252891fced6SChristoph Hellwig bio_setup_sector(clone, ci->sector, *len); 1253891fced6SChristoph Hellwig __map_bio(clone); 1254891fced6SChristoph Hellwig break; 1255891fced6SChristoph Hellwig default: 1256dc8e2021SChristoph Hellwig alloc_multiple_bios(&blist, ci, ti, num_bios, len); 12578eabf5d0SChristoph Hellwig while ((clone = bio_list_pop(&blist))) { 1258bd2a49b8SAlasdair G Kergon if (len) 12591dd40c3eSMikulas Patocka bio_setup_sector(clone, ci->sector, *len); 12601561b396SChristoph Hellwig __map_bio(clone); 1261f9ab94ceSMikulas Patocka } 1262891fced6SChristoph Hellwig break; 1263891fced6SChristoph Hellwig } 126406a426ceSMike Snitzer } 
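/*
 * A sketch of how a target opts in to the duplication above. The
 * constructor shown is hypothetical ("example_ctr" is not a target in
 * this file), but the fields it sets are exactly the ones that
 * __send_duplicate_bios() and __send_changing_extent_only() consume:
 *
 *	static int example_ctr(struct dm_target *ti, unsigned argc,
 *			       char **argv)
 *	{
 *		ti->num_flush_bios = 1;
 *		ti->num_discard_bios = 1;
 *		ti->num_secure_erase_bios = 0;
 *		return 0;
 *	}
 *
 * A striping target would instead set num_flush_bios to its stripe
 * count, so that __send_empty_flush() below clones the flush once per
 * stripe.
 */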
126506a426ceSMike Snitzer 126614fe594dSAlasdair G Kergon static int __send_empty_flush(struct clone_info *ci) 1267f9ab94ceSMikulas Patocka { 126806a426ceSMike Snitzer unsigned target_nr = 0; 1269f9ab94ceSMikulas Patocka struct dm_target *ti; 1270828678b8SMike Snitzer struct bio flush_bio; 1271828678b8SMike Snitzer 1272828678b8SMike Snitzer /* 1273828678b8SMike Snitzer * Use an on-stack bio for this, it's safe since we don't 1274828678b8SMike Snitzer * need to reference it after submit. It's just used as 1275828678b8SMike Snitzer * the basis for the clone(s). 1276828678b8SMike Snitzer */ 127749add496SChristoph Hellwig bio_init(&flush_bio, ci->io->md->disk->part0, NULL, 0, 127849add496SChristoph Hellwig REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC); 127947d95102SChristoph Hellwig 1280828678b8SMike Snitzer ci->bio = &flush_bio; 1281828678b8SMike Snitzer ci->sector_count = 0; 1282f9ab94ceSMikulas Patocka 1283b372d360SMike Snitzer BUG_ON(bio_has_data(ci->bio)); 1284f9ab94ceSMikulas Patocka while ((ti = dm_table_get_target(ci->map, target_nr++))) 12851dd40c3eSMikulas Patocka __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL); 1286828678b8SMike Snitzer 1287828678b8SMike Snitzer bio_uninit(ci->bio); 1288f9ab94ceSMikulas Patocka return 0; 1289f9ab94ceSMikulas Patocka } 1290f9ab94ceSMikulas Patocka 12913d7f4562SMike Snitzer static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti, 129261697a6aSMike Snitzer unsigned num_bios) 12935ae89a87SMike Snitzer { 129451b86f9aSMichael Lass unsigned len; 12955ae89a87SMike Snitzer 12965ae89a87SMike Snitzer /* 129723508a96SMike Snitzer * Even though the device advertised support for this type of 129823508a96SMike Snitzer * request, that does not mean every target supports it, and 1299936688d7SMike Snitzer * reconfiguration might also have changed that since the 13005ae89a87SMike Snitzer * check was performed. 
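 * A num_bios value of zero therefore means "this target does not
 * support the operation", and the bio is failed with -EOPNOTSUPP
 * below instead of being cloned.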
13015ae89a87SMike Snitzer */ 130255a62eefSAlasdair G Kergon if (!num_bios) 13035ae89a87SMike Snitzer return -EOPNOTSUPP; 13045ae89a87SMike Snitzer 13053720281dSMike Snitzer len = min_t(sector_t, ci->sector_count, 13063720281dSMike Snitzer max_io_len_target_boundary(ti, dm_target_offset(ti, ci->sector))); 130751b86f9aSMichael Lass 13081dd40c3eSMikulas Patocka __send_duplicate_bios(ci, ti, num_bios, &len); 13095ae89a87SMike Snitzer 1310a79245b3SMike Snitzer ci->sector += len; 13113d7f4562SMike Snitzer ci->sector_count -= len; 13125ae89a87SMike Snitzer 13135ae89a87SMike Snitzer return 0; 13145ae89a87SMike Snitzer } 13155ae89a87SMike Snitzer 1316568c73a3SMike Snitzer static bool is_abnormal_io(struct bio *bio) 1317568c73a3SMike Snitzer { 1318568c73a3SMike Snitzer bool r = false; 1319568c73a3SMike Snitzer 1320568c73a3SMike Snitzer switch (bio_op(bio)) { 1321568c73a3SMike Snitzer case REQ_OP_DISCARD: 1322568c73a3SMike Snitzer case REQ_OP_SECURE_ERASE: 1323568c73a3SMike Snitzer case REQ_OP_WRITE_SAME: 1324568c73a3SMike Snitzer case REQ_OP_WRITE_ZEROES: 1325568c73a3SMike Snitzer r = true; 1326568c73a3SMike Snitzer break; 1327568c73a3SMike Snitzer } 1328568c73a3SMike Snitzer 1329568c73a3SMike Snitzer return r; 1330568c73a3SMike Snitzer } 1331568c73a3SMike Snitzer 13320519c71eSMike Snitzer static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti, 13330519c71eSMike Snitzer int *result) 13340519c71eSMike Snitzer { 13350519c71eSMike Snitzer struct bio *bio = ci->bio; 13369679b5a7SMike Snitzer unsigned num_bios = 0; 13370519c71eSMike Snitzer 13389679b5a7SMike Snitzer switch (bio_op(bio)) { 13399679b5a7SMike Snitzer case REQ_OP_DISCARD: 13409679b5a7SMike Snitzer num_bios = ti->num_discard_bios; 13419679b5a7SMike Snitzer break; 13429679b5a7SMike Snitzer case REQ_OP_SECURE_ERASE: 13439679b5a7SMike Snitzer num_bios = ti->num_secure_erase_bios; 13449679b5a7SMike Snitzer break; 13459679b5a7SMike Snitzer case REQ_OP_WRITE_SAME: 13469679b5a7SMike Snitzer num_bios = ti->num_write_same_bios; 13479679b5a7SMike Snitzer break; 13489679b5a7SMike Snitzer case REQ_OP_WRITE_ZEROES: 13499679b5a7SMike Snitzer num_bios = ti->num_write_zeroes_bios; 13509679b5a7SMike Snitzer break; 13519679b5a7SMike Snitzer default: 13520519c71eSMike Snitzer return false; 13539679b5a7SMike Snitzer } 13540519c71eSMike Snitzer 13559679b5a7SMike Snitzer *result = __send_changing_extent_only(ci, ti, num_bios); 13560519c71eSMike Snitzer return true; 13570519c71eSMike Snitzer } 13580519c71eSMike Snitzer 1359e4c93811SAlasdair G Kergon /* 1360e4c93811SAlasdair G Kergon * Select the correct strategy for processing a non-flush bio. 
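 *
 * In outline: find the target for ci->sector, hand abnormal bios
 * (discard, secure erase, write same, write zeroes) to
 * __process_abnormal_io(), and otherwise clone at most
 * max_io_len(ti, ci->sector) sectors and map the clone; the caller
 * loops on whatever remains.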
1361e4c93811SAlasdair G Kergon */ 1362e4c93811SAlasdair G Kergon static int __split_and_process_non_flush(struct clone_info *ci) 1363e4c93811SAlasdair G Kergon { 1364e4c93811SAlasdair G Kergon struct dm_target *ti; 13651c3b13e6SKent Overstreet unsigned len; 1366c80914e8SMike Snitzer int r; 1367e4c93811SAlasdair G Kergon 1368e4c93811SAlasdair G Kergon ti = dm_table_find_target(ci->map, ci->sector); 1369123d87d5SMikulas Patocka if (!ti) 1370e4c93811SAlasdair G Kergon return -EIO; 1371e4c93811SAlasdair G Kergon 1372568c73a3SMike Snitzer if (__process_abnormal_io(ci, ti, &r)) 13730519c71eSMike Snitzer return r; 13743d7f4562SMike Snitzer 13753720281dSMike Snitzer len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count); 1376e4c93811SAlasdair G Kergon 1377c80914e8SMike Snitzer r = __clone_and_map_data_bio(ci, ti, ci->sector, &len); 1378c80914e8SMike Snitzer if (r < 0) 1379c80914e8SMike Snitzer return r; 1380e4c93811SAlasdair G Kergon 1381e4c93811SAlasdair G Kergon ci->sector += len; 1382e4c93811SAlasdair G Kergon ci->sector_count -= len; 1383e4c93811SAlasdair G Kergon 1384e4c93811SAlasdair G Kergon return 0; 1385e4c93811SAlasdair G Kergon } 1386e4c93811SAlasdair G Kergon 1387978e51baSMike Snitzer static void init_clone_info(struct clone_info *ci, struct mapped_device *md, 1388978e51baSMike Snitzer struct dm_table *map, struct bio *bio) 1389978e51baSMike Snitzer { 1390978e51baSMike Snitzer ci->map = map; 1391978e51baSMike Snitzer ci->io = alloc_io(md, bio); 1392978e51baSMike Snitzer ci->sector = bio->bi_iter.bi_sector; 1393978e51baSMike Snitzer } 1394978e51baSMike Snitzer 1395e4c93811SAlasdair G Kergon /* 139614fe594dSAlasdair G Kergon * Entry point to split a bio into clones and submit them to the targets. 13971da177e4SLinus Torvalds */ 13983e08773cSChristoph Hellwig static void __split_and_process_bio(struct mapped_device *md, 139983d5e5b0SMikulas Patocka struct dm_table *map, struct bio *bio) 14001da177e4SLinus Torvalds { 14011da177e4SLinus Torvalds struct clone_info ci; 1402512875bdSJun'ichi Nomura int error = 0; 14031da177e4SLinus Torvalds 1404978e51baSMike Snitzer init_clone_info(&ci, md, map, bio); 1405bd2a49b8SAlasdair G Kergon 14061eff9d32SJens Axboe if (bio->bi_opf & REQ_PREFLUSH) { 140714fe594dSAlasdair G Kergon error = __send_empty_flush(&ci); 1408e2118b3cSDamien Le Moal /* dm_io_dec_pending submits any data associated with flush */ 14092e2d6f7eSAjay Joshi } else if (op_is_zone_mgmt(bio_op(bio))) { 1410a4aa5e56SDamien Le Moal ci.bio = bio; 1411a4aa5e56SDamien Le Moal ci.sector_count = 0; 1412a4aa5e56SDamien Le Moal error = __split_and_process_non_flush(&ci); 1413b372d360SMike Snitzer } else { 14146a8736d1STejun Heo ci.bio = bio; 14151da177e4SLinus Torvalds ci.sector_count = bio_sectors(bio); 141614fe594dSAlasdair G Kergon error = __split_and_process_non_flush(&ci); 1417985eabdcSJeffle Xu if (ci.sector_count && !error) { 141818a25da8SNeilBrown /* 1419ed00aabdSChristoph Hellwig * Remainder must be passed to submit_bio_noacct() 142018a25da8SNeilBrown * so that it gets handled *after* bios already submitted 142118a25da8SNeilBrown * have been completely processed. 142218a25da8SNeilBrown * We take a clone of the original to store in 1423745dc570SMike Snitzer * ci.io->orig_bio to be used by end_io_acct() and 142418a25da8SNeilBrown * for dec_pending to use for completion handling. 
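 *
 * Worked example (sizes invented for this comment): for a 1024-sector
 * bio of which only the first 512 sectors could be mapped,
 * ci.sector_count is left at 512. bio_split() below carves the
 * already-mapped 512 sectors into 'b' (saved as ci.io->orig_bio),
 * bio_chain() makes the remainder the parent of 'b', and the
 * remainder is resubmitted for a later pass.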
142518a25da8SNeilBrown */ 1426f21c601aSMike Snitzer struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count, 1427f21c601aSMike Snitzer GFP_NOIO, &md->queue->bio_split); 1428745dc570SMike Snitzer ci.io->orig_bio = b; 1429a1e1cb72SMike Snitzer 143018a25da8SNeilBrown bio_chain(b, bio); 1431eb6f7f7cSChristoph Hellwig trace_block_split(b, bio->bi_iter.bi_sector); 14323e08773cSChristoph Hellwig submit_bio_noacct(bio); 143318a25da8SNeilBrown } 1434d87f4c14STejun Heo } 1435b879f915SMike Snitzer start_io_acct(ci.io); 14361da177e4SLinus Torvalds 14371da177e4SLinus Torvalds /* drop the extra reference count */ 1438e2118b3cSDamien Le Moal dm_io_dec_pending(ci.io, errno_to_blk_status(error)); 14391da177e4SLinus Torvalds } 14401da177e4SLinus Torvalds 14413e08773cSChristoph Hellwig static void dm_submit_bio(struct bio *bio) 14421da177e4SLinus Torvalds { 1443309dca30SChristoph Hellwig struct mapped_device *md = bio->bi_bdev->bd_disk->private_data; 144483d5e5b0SMikulas Patocka int srcu_idx; 144583d5e5b0SMikulas Patocka struct dm_table *map; 14461da177e4SLinus Torvalds 144783d5e5b0SMikulas Patocka map = dm_get_live_table(md, &srcu_idx); 1448b2abdb1bSMike Snitzer if (unlikely(!map)) { 1449b2abdb1bSMike Snitzer DMERR_LIMIT("%s: mapping table unavailable, erroring io", 1450b2abdb1bSMike Snitzer dm_device_name(md)); 14516a8736d1STejun Heo bio_io_error(bio); 1452b2abdb1bSMike Snitzer goto out; 14531da177e4SLinus Torvalds } 145492c63902SMikulas Patocka 1455b2abdb1bSMike Snitzer /* If suspended, queue this IO for later */ 14561da177e4SLinus Torvalds if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { 14576abc4946SKonstantin Khlebnikov if (bio->bi_opf & REQ_NOWAIT) 14586abc4946SKonstantin Khlebnikov bio_wouldblock_error(bio); 1459b2abdb1bSMike Snitzer else if (bio->bi_opf & REQ_RAHEAD) 14601da177e4SLinus Torvalds bio_io_error(bio); 1461b2abdb1bSMike Snitzer else 1462b2abdb1bSMike Snitzer queue_io(md, bio); 1463b2abdb1bSMike Snitzer goto out; 14641da177e4SLinus Torvalds } 14651da177e4SLinus Torvalds 1466b2abdb1bSMike Snitzer /* 1467b2abdb1bSMike Snitzer * Use blk_queue_split() for abnormal IO (e.g. discard, writesame, etc) 1468b2abdb1bSMike Snitzer * otherwise associated queue_limits won't be imposed. 1469b2abdb1bSMike Snitzer */ 1470b2abdb1bSMike Snitzer if (is_abnormal_io(bio)) 1471b2abdb1bSMike Snitzer blk_queue_split(&bio); 1472978e51baSMike Snitzer 14733e08773cSChristoph Hellwig __split_and_process_bio(md, map, bio); 1474b2abdb1bSMike Snitzer out: 147583d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 1476978e51baSMike Snitzer } 1477978e51baSMike Snitzer 14781da177e4SLinus Torvalds /*----------------------------------------------------------------- 14791da177e4SLinus Torvalds * An IDR is used to keep track of allocated minor numbers. 14801da177e4SLinus Torvalds *---------------------------------------------------------------*/ 14812b06cfffSAlasdair G Kergon static void free_minor(int minor) 14821da177e4SLinus Torvalds { 1483f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 14841da177e4SLinus Torvalds idr_remove(&_minor_idr, minor); 1485f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 14861da177e4SLinus Torvalds } 14871da177e4SLinus Torvalds 14881da177e4SLinus Torvalds /* 14891da177e4SLinus Torvalds * See if the device with a specific minor # is free. 
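 *
 * (idr_preload() is used in the helpers below because _minor_lock is
 * a spinlock: memory is pre-charged with GFP_KERNEL outside the lock,
 * and idr_alloc() then runs with GFP_NOWAIT while the lock is held.)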
14901da177e4SLinus Torvalds */ 1491cf13ab8eSFrederik Deweerdt static int specific_minor(int minor) 14921da177e4SLinus Torvalds { 1493c9d76be6STejun Heo int r; 14941da177e4SLinus Torvalds 14951da177e4SLinus Torvalds if (minor >= (1 << MINORBITS)) 14961da177e4SLinus Torvalds return -EINVAL; 14971da177e4SLinus Torvalds 1498c9d76be6STejun Heo idr_preload(GFP_KERNEL); 1499f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 15001da177e4SLinus Torvalds 1501c9d76be6STejun Heo r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT); 15021da177e4SLinus Torvalds 1503f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 1504c9d76be6STejun Heo idr_preload_end(); 1505c9d76be6STejun Heo if (r < 0) 1506c9d76be6STejun Heo return r == -ENOSPC ? -EBUSY : r; 1507c9d76be6STejun Heo return 0; 15081da177e4SLinus Torvalds } 15091da177e4SLinus Torvalds 1510cf13ab8eSFrederik Deweerdt static int next_free_minor(int *minor) 15111da177e4SLinus Torvalds { 1512c9d76be6STejun Heo int r; 15131da177e4SLinus Torvalds 1514c9d76be6STejun Heo idr_preload(GFP_KERNEL); 1515f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 15161da177e4SLinus Torvalds 1517c9d76be6STejun Heo r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT); 15181da177e4SLinus Torvalds 1519f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 1520c9d76be6STejun Heo idr_preload_end(); 1521c9d76be6STejun Heo if (r < 0) 15221da177e4SLinus Torvalds return r; 1523c9d76be6STejun Heo *minor = r; 1524c9d76be6STejun Heo return 0; 15251da177e4SLinus Torvalds } 15261da177e4SLinus Torvalds 152783d5cde4SAlexey Dobriyan static const struct block_device_operations dm_blk_dops; 1528681cc5e8SMike Snitzer static const struct block_device_operations dm_rq_blk_dops; 1529f26c5719SDan Williams static const struct dax_operations dm_dax_ops; 15301da177e4SLinus Torvalds 153153d5914fSMikulas Patocka static void dm_wq_work(struct work_struct *work); 153253d5914fSMikulas Patocka 1533aa6ce87aSSatya Tangirala #ifdef CONFIG_BLK_INLINE_ENCRYPTION 1534cb77cb5aSEric Biggers static void dm_queue_destroy_crypto_profile(struct request_queue *q) 1535aa6ce87aSSatya Tangirala { 1536cb77cb5aSEric Biggers dm_destroy_crypto_profile(q->crypto_profile); 1537aa6ce87aSSatya Tangirala } 1538aa6ce87aSSatya Tangirala 1539aa6ce87aSSatya Tangirala #else /* CONFIG_BLK_INLINE_ENCRYPTION */ 1540aa6ce87aSSatya Tangirala 1541cb77cb5aSEric Biggers static inline void dm_queue_destroy_crypto_profile(struct request_queue *q) 1542aa6ce87aSSatya Tangirala { 1543aa6ce87aSSatya Tangirala } 1544aa6ce87aSSatya Tangirala #endif /* !CONFIG_BLK_INLINE_ENCRYPTION */ 1545aa6ce87aSSatya Tangirala 15460f20972fSMike Snitzer static void cleanup_mapped_device(struct mapped_device *md) 15470f20972fSMike Snitzer { 15480f20972fSMike Snitzer if (md->wq) 15490f20972fSMike Snitzer destroy_workqueue(md->wq); 15506f1c819cSKent Overstreet bioset_exit(&md->bs); 15516f1c819cSKent Overstreet bioset_exit(&md->io_bs); 15520f20972fSMike Snitzer 1553f26c5719SDan Williams if (md->dax_dev) { 1554fb08a190SChristoph Hellwig dax_remove_host(md->disk); 1555f26c5719SDan Williams kill_dax(md->dax_dev); 1556f26c5719SDan Williams put_dax(md->dax_dev); 1557f26c5719SDan Williams md->dax_dev = NULL; 1558f26c5719SDan Williams } 1559f26c5719SDan Williams 15600f20972fSMike Snitzer if (md->disk) { 15610f20972fSMike Snitzer spin_lock(&_minor_lock); 15620f20972fSMike Snitzer md->disk->private_data = NULL; 15630f20972fSMike Snitzer spin_unlock(&_minor_lock); 156489f871afSChristoph Hellwig if (dm_get_md_type(md) != DM_TYPE_NONE) { 156589f871afSChristoph 
Hellwig dm_sysfs_exit(md); 15660f20972fSMike Snitzer del_gendisk(md->disk); 156789f871afSChristoph Hellwig } 1568cb77cb5aSEric Biggers dm_queue_destroy_crypto_profile(md->queue); 156974fe6ba9SChristoph Hellwig blk_cleanup_disk(md->disk); 157074a2b6ecSChristoph Hellwig } 15710f20972fSMike Snitzer 1572d09960b0STahsin Erdogan cleanup_srcu_struct(&md->io_barrier); 1573d09960b0STahsin Erdogan 1574d5ffebddSMike Snitzer mutex_destroy(&md->suspend_lock); 1575d5ffebddSMike Snitzer mutex_destroy(&md->type_lock); 1576d5ffebddSMike Snitzer mutex_destroy(&md->table_devices_lock); 1577a666e5c0SMikulas Patocka mutex_destroy(&md->swap_bios_lock); 1578d5ffebddSMike Snitzer 15794cc96131SMike Snitzer dm_mq_cleanup_mapped_device(md); 1580bb37d772SDamien Le Moal dm_cleanup_zoned_dev(md); 15810f20972fSMike Snitzer } 15820f20972fSMike Snitzer 15831da177e4SLinus Torvalds /* 15841da177e4SLinus Torvalds * Allocate and initialise a blank device with a given minor. 15851da177e4SLinus Torvalds */ 15862b06cfffSAlasdair G Kergon static struct mapped_device *alloc_dev(int minor) 15871da177e4SLinus Torvalds { 1588115485e8SMike Snitzer int r, numa_node_id = dm_get_numa_node(); 1589115485e8SMike Snitzer struct mapped_device *md; 1590ba61fdd1SJeff Mahoney void *old_md; 15911da177e4SLinus Torvalds 1592856eb091SMikulas Patocka md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id); 15931da177e4SLinus Torvalds if (!md) { 15941da177e4SLinus Torvalds DMWARN("unable to allocate device, out of memory."); 15951da177e4SLinus Torvalds return NULL; 15961da177e4SLinus Torvalds } 15971da177e4SLinus Torvalds 159810da4f79SJeff Mahoney if (!try_module_get(THIS_MODULE)) 15996ed7ade8SMilan Broz goto bad_module_get; 160010da4f79SJeff Mahoney 16011da177e4SLinus Torvalds /* get a minor number for the dev */ 16022b06cfffSAlasdair G Kergon if (minor == DM_ANY_MINOR) 1603cf13ab8eSFrederik Deweerdt r = next_free_minor(&minor); 16042b06cfffSAlasdair G Kergon else 1605cf13ab8eSFrederik Deweerdt r = specific_minor(minor); 16061da177e4SLinus Torvalds if (r < 0) 16076ed7ade8SMilan Broz goto bad_minor; 16081da177e4SLinus Torvalds 160983d5e5b0SMikulas Patocka r = init_srcu_struct(&md->io_barrier); 161083d5e5b0SMikulas Patocka if (r < 0) 161183d5e5b0SMikulas Patocka goto bad_io_barrier; 161283d5e5b0SMikulas Patocka 1613115485e8SMike Snitzer md->numa_node_id = numa_node_id; 1614591ddcfcSMike Snitzer md->init_tio_pdu = false; 1615a5664dadSMike Snitzer md->type = DM_TYPE_NONE; 1616e61290a4SDaniel Walker mutex_init(&md->suspend_lock); 1617a5664dadSMike Snitzer mutex_init(&md->type_lock); 161886f1152bSBenjamin Marzinski mutex_init(&md->table_devices_lock); 1619022c2611SMikulas Patocka spin_lock_init(&md->deferred_lock); 16201da177e4SLinus Torvalds atomic_set(&md->holders, 1); 16215c6bd75dSAlasdair G Kergon atomic_set(&md->open_count, 0); 16221da177e4SLinus Torvalds atomic_set(&md->event_nr, 0); 16237a8c3d3bSMike Anderson atomic_set(&md->uevent_seq, 0); 16247a8c3d3bSMike Anderson INIT_LIST_HEAD(&md->uevent_list); 162586f1152bSBenjamin Marzinski INIT_LIST_HEAD(&md->table_devices); 16267a8c3d3bSMike Anderson spin_lock_init(&md->uevent_lock); 16271da177e4SLinus Torvalds 162847ace7e0SMike Snitzer /* 1629c62b37d9SChristoph Hellwig * default to bio-based until DM table is loaded and md->type 1630c62b37d9SChristoph Hellwig * established. If request-based table is loaded: blk-mq will 1631c62b37d9SChristoph Hellwig * override accordingly. 
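 * (blk_alloc_disk() creates a bio-based queue here; dm_setup_md_queue()
 * later swaps md->disk->fops to dm_rq_blk_dops and initializes blk-mq
 * when a request-based table is bound.)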
163247ace7e0SMike Snitzer */ 163374fe6ba9SChristoph Hellwig md->disk = blk_alloc_disk(md->numa_node_id); 16341da177e4SLinus Torvalds if (!md->disk) 16350f20972fSMike Snitzer goto bad; 163674fe6ba9SChristoph Hellwig md->queue = md->disk->queue; 16371da177e4SLinus Torvalds 1638f0b04115SJeff Mahoney init_waitqueue_head(&md->wait); 163953d5914fSMikulas Patocka INIT_WORK(&md->work, dm_wq_work); 1640f0b04115SJeff Mahoney init_waitqueue_head(&md->eventq); 16412995fa78SMikulas Patocka init_completion(&md->kobj_holder.completion); 1642f0b04115SJeff Mahoney 1643a666e5c0SMikulas Patocka md->swap_bios = get_swap_bios(); 1644a666e5c0SMikulas Patocka sema_init(&md->swap_bios_semaphore, md->swap_bios); 1645a666e5c0SMikulas Patocka mutex_init(&md->swap_bios_lock); 1646a666e5c0SMikulas Patocka 16471da177e4SLinus Torvalds md->disk->major = _major; 16481da177e4SLinus Torvalds md->disk->first_minor = minor; 164974fe6ba9SChristoph Hellwig md->disk->minors = 1; 16501ebe2e5fSChristoph Hellwig md->disk->flags |= GENHD_FL_NO_PART; 16511da177e4SLinus Torvalds md->disk->fops = &dm_blk_dops; 16521da177e4SLinus Torvalds md->disk->queue = md->queue; 16531da177e4SLinus Torvalds md->disk->private_data = md; 16541da177e4SLinus Torvalds sprintf(md->disk->disk_name, "dm-%d", minor); 1655f26c5719SDan Williams 16565d2a228bSChristoph Hellwig if (IS_ENABLED(CONFIG_FS_DAX)) { 165730c6828aSChristoph Hellwig md->dax_dev = alloc_dax(md, &dm_dax_ops); 1658d7519392SChristoph Hellwig if (IS_ERR(md->dax_dev)) { 1659d7519392SChristoph Hellwig md->dax_dev = NULL; 1660f26c5719SDan Williams goto bad; 1661976431b0SDan Williams } 16627ac5360cSChristoph Hellwig set_dax_nocache(md->dax_dev); 16637ac5360cSChristoph Hellwig set_dax_nomc(md->dax_dev); 1664fb08a190SChristoph Hellwig if (dax_add_host(md->dax_dev, md->disk)) 1665f26c5719SDan Williams goto bad; 1666f26c5719SDan Williams } 16671da177e4SLinus Torvalds 16687e51f257SMike Anderson format_dev_t(md->name, MKDEV(_major, minor)); 16691da177e4SLinus Torvalds 1670c7c879eeSMichał Mirosław md->wq = alloc_workqueue("kdmflush/%s", WQ_MEM_RECLAIM, 0, md->name); 1671304f3f6aSMilan Broz if (!md->wq) 16720f20972fSMike Snitzer goto bad; 1673304f3f6aSMilan Broz 1674fd2ed4d2SMikulas Patocka dm_stats_init(&md->stats); 1675fd2ed4d2SMikulas Patocka 1676ba61fdd1SJeff Mahoney /* Populate the mapping, nobody knows we exist yet */ 1677f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 1678ba61fdd1SJeff Mahoney old_md = idr_replace(&_minor_idr, md, minor); 1679f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 1680ba61fdd1SJeff Mahoney 1681ba61fdd1SJeff Mahoney BUG_ON(old_md != MINOR_ALLOCED); 1682ba61fdd1SJeff Mahoney 16831da177e4SLinus Torvalds return md; 16841da177e4SLinus Torvalds 16850f20972fSMike Snitzer bad: 16860f20972fSMike Snitzer cleanup_mapped_device(md); 168783d5e5b0SMikulas Patocka bad_io_barrier: 16881da177e4SLinus Torvalds free_minor(minor); 16896ed7ade8SMilan Broz bad_minor: 169010da4f79SJeff Mahoney module_put(THIS_MODULE); 16916ed7ade8SMilan Broz bad_module_get: 1692856eb091SMikulas Patocka kvfree(md); 16931da177e4SLinus Torvalds return NULL; 16941da177e4SLinus Torvalds } 16951da177e4SLinus Torvalds 1696ae9da83fSJun'ichi Nomura static void unlock_fs(struct mapped_device *md); 1697ae9da83fSJun'ichi Nomura 16981da177e4SLinus Torvalds static void free_dev(struct mapped_device *md) 16991da177e4SLinus Torvalds { 1700f331c029STejun Heo int minor = MINOR(disk_devt(md->disk)); 170163d94e48SJun'ichi Nomura 1702ae9da83fSJun'ichi Nomura unlock_fs(md); 17032eb6e1e3SKeith Busch 17040f20972fSMike Snitzer 
cleanup_mapped_device(md); 17050f20972fSMike Snitzer 17060f20972fSMike Snitzer free_table_devices(&md->table_devices); 17070f20972fSMike Snitzer dm_stats_cleanup(&md->stats); 170863a4f065SMike Snitzer free_minor(minor); 170963a4f065SMike Snitzer 171010da4f79SJeff Mahoney module_put(THIS_MODULE); 1711856eb091SMikulas Patocka kvfree(md); 17121da177e4SLinus Torvalds } 17131da177e4SLinus Torvalds 17142a2a4c51SJens Axboe static int __bind_mempools(struct mapped_device *md, struct dm_table *t) 1715e6ee8c0bSKiyoshi Ueda { 1716c0820cf5SMikulas Patocka struct dm_md_mempools *p = dm_table_get_md_mempools(t); 17172a2a4c51SJens Axboe int ret = 0; 1718e6ee8c0bSKiyoshi Ueda 1719545ed20eSToshi Kani if (dm_table_bio_based(t)) { 1720c0820cf5SMikulas Patocka /* 172164f52b0eSMike Snitzer * The md may already have mempools that need changing. 172264f52b0eSMike Snitzer * If so, reload bioset because front_pad may have changed 172316245bdcSJun'ichi Nomura * because a different table was loaded. 1724c0820cf5SMikulas Patocka */ 17256f1c819cSKent Overstreet bioset_exit(&md->bs); 17266f1c819cSKent Overstreet bioset_exit(&md->io_bs); 17270776aa0eSMike Snitzer 17286f1c819cSKent Overstreet } else if (bioset_initialized(&md->bs)) { 1729cbc4e3c1SMike Snitzer /* 17304e6e36c3SMike Snitzer * There's no need to reload with request-based dm 17314e6e36c3SMike Snitzer * because the size of front_pad doesn't change. 17324e6e36c3SMike Snitzer * Note for future: If you are to reload bioset, 17334e6e36c3SMike Snitzer * prep-ed requests in the queue may refer 17344e6e36c3SMike Snitzer * to bio from the old bioset, so you must walk 17354e6e36c3SMike Snitzer * through the queue to unprep. 1736cbc4e3c1SMike Snitzer */ 1737cbc4e3c1SMike Snitzer goto out; 1738cbc4e3c1SMike Snitzer } 1739cbc4e3c1SMike Snitzer 17406f1c819cSKent Overstreet BUG_ON(!p || 17416f1c819cSKent Overstreet bioset_initialized(&md->bs) || 17426f1c819cSKent Overstreet bioset_initialized(&md->io_bs)); 1743e6ee8c0bSKiyoshi Ueda 17442a2a4c51SJens Axboe ret = bioset_init_from_src(&md->bs, &p->bs); 17452a2a4c51SJens Axboe if (ret) 17462a2a4c51SJens Axboe goto out; 17472a2a4c51SJens Axboe ret = bioset_init_from_src(&md->io_bs, &p->io_bs); 17482a2a4c51SJens Axboe if (ret) 17492a2a4c51SJens Axboe bioset_exit(&md->bs); 1750e6ee8c0bSKiyoshi Ueda out: 175102233342SMike Snitzer /* mempool bind completed, no longer need any mempools in the table */ 1752e6ee8c0bSKiyoshi Ueda dm_table_free_md_mempools(t); 17532a2a4c51SJens Axboe return ret; 1754e6ee8c0bSKiyoshi Ueda } 1755e6ee8c0bSKiyoshi Ueda 17561da177e4SLinus Torvalds /* 17571da177e4SLinus Torvalds * Bind a table to the device. 
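 * (event_callback() below is hooked up at bind time via
 * dm_table_event_callback() in __bind(), so table events raise
 * uevents and wake md->eventq.)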
17581da177e4SLinus Torvalds */ 17591da177e4SLinus Torvalds static void event_callback(void *context) 17601da177e4SLinus Torvalds { 17617a8c3d3bSMike Anderson unsigned long flags; 17627a8c3d3bSMike Anderson LIST_HEAD(uevents); 17631da177e4SLinus Torvalds struct mapped_device *md = (struct mapped_device *) context; 17641da177e4SLinus Torvalds 17657a8c3d3bSMike Anderson spin_lock_irqsave(&md->uevent_lock, flags); 17667a8c3d3bSMike Anderson list_splice_init(&md->uevent_list, &uevents); 17677a8c3d3bSMike Anderson spin_unlock_irqrestore(&md->uevent_lock, flags); 17687a8c3d3bSMike Anderson 1769ed9e1982STejun Heo dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); 17707a8c3d3bSMike Anderson 17711da177e4SLinus Torvalds atomic_inc(&md->event_nr); 17721da177e4SLinus Torvalds wake_up(&md->eventq); 177362e08243SMikulas Patocka dm_issue_global_event(); 17741da177e4SLinus Torvalds } 17751da177e4SLinus Torvalds 1776c217649bSMike Snitzer /* 1777042d2a9bSAlasdair G Kergon * Returns old map, which caller must destroy. 1778042d2a9bSAlasdair G Kergon */ 1779042d2a9bSAlasdair G Kergon static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, 1780754c5fc7SMike Snitzer struct queue_limits *limits) 17811da177e4SLinus Torvalds { 1782042d2a9bSAlasdair G Kergon struct dm_table *old_map; 1783165125e1SJens Axboe struct request_queue *q = md->queue; 1784978e51baSMike Snitzer bool request_based = dm_table_request_based(t); 17851da177e4SLinus Torvalds sector_t size; 17862a2a4c51SJens Axboe int ret; 17871da177e4SLinus Torvalds 17885a8f1f80SBart Van Assche lockdep_assert_held(&md->suspend_lock); 17895a8f1f80SBart Van Assche 17901da177e4SLinus Torvalds size = dm_table_get_size(t); 17913ac51e74SDarrick J. Wong 17923ac51e74SDarrick J. Wong /* 17933ac51e74SDarrick J. Wong * Wipe any geometry if the size of the table changed. 17943ac51e74SDarrick J. Wong */ 1795fd2ed4d2SMikulas Patocka if (size != dm_get_size(md)) 17963ac51e74SDarrick J. Wong memset(&md->geometry, 0, sizeof(md->geometry)); 17973ac51e74SDarrick J. Wong 17985424a0b8SMikulas Patocka if (!get_capacity(md->disk)) 17995424a0b8SMikulas Patocka set_capacity(md->disk, size); 18005424a0b8SMikulas Patocka else 1801f64d9b2eSChristoph Hellwig set_capacity_and_notify(md->disk, size); 18021da177e4SLinus Torvalds 1803cf222b37SAlasdair G Kergon dm_table_event_callback(t, event_callback, md); 18042ca3310eSAlasdair G Kergon 18059c37de29SMike Snitzer if (request_based) { 180616f12266SMike Snitzer /* 18079c37de29SMike Snitzer * Leverage the fact that request-based DM targets are 18089c37de29SMike Snitzer * immutable singletons - used to optimize dm_mq_queue_rq. 
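 * Caching the sole target in md->immutable_target lets the request
 * fast path avoid a table lookup on every request.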
180916f12266SMike Snitzer */ 181016f12266SMike Snitzer md->immutable_target = dm_table_get_immutable_target(t); 181116f12266SMike Snitzer } 1812e6ee8c0bSKiyoshi Ueda 18132a2a4c51SJens Axboe ret = __bind_mempools(md, t); 18142a2a4c51SJens Axboe if (ret) { 18152a2a4c51SJens Axboe old_map = ERR_PTR(ret); 18162a2a4c51SJens Axboe goto out; 18172a2a4c51SJens Axboe } 1818e6ee8c0bSKiyoshi Ueda 1819bb37d772SDamien Le Moal ret = dm_table_set_restrictions(t, q, limits); 1820bb37d772SDamien Le Moal if (ret) { 1821bb37d772SDamien Le Moal old_map = ERR_PTR(ret); 1822bb37d772SDamien Le Moal goto out; 1823bb37d772SDamien Le Moal } 1824bb37d772SDamien Le Moal 1825a12f5d48SEric Dumazet old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 18261d3aa6f6SMike Snitzer rcu_assign_pointer(md->map, (void *)t); 182736a0456fSAlasdair G Kergon md->immutable_target_type = dm_table_get_immutable_target_type(t); 182836a0456fSAlasdair G Kergon 182941abc4e1SHannes Reinecke if (old_map) 183083d5e5b0SMikulas Patocka dm_sync_table(md); 18312ca3310eSAlasdair G Kergon 18322a2a4c51SJens Axboe out: 1833042d2a9bSAlasdair G Kergon return old_map; 18341da177e4SLinus Torvalds } 18351da177e4SLinus Torvalds 1836a7940155SAlasdair G Kergon /* 1837a7940155SAlasdair G Kergon * Returns unbound table for the caller to free. 1838a7940155SAlasdair G Kergon */ 1839a7940155SAlasdair G Kergon static struct dm_table *__unbind(struct mapped_device *md) 18401da177e4SLinus Torvalds { 1841a12f5d48SEric Dumazet struct dm_table *map = rcu_dereference_protected(md->map, 1); 18421da177e4SLinus Torvalds 18431da177e4SLinus Torvalds if (!map) 1844a7940155SAlasdair G Kergon return NULL; 18451da177e4SLinus Torvalds 18461da177e4SLinus Torvalds dm_table_event_callback(map, NULL, NULL); 18479cdb8520SMonam Agarwal RCU_INIT_POINTER(md->map, NULL); 184883d5e5b0SMikulas Patocka dm_sync_table(md); 1849a7940155SAlasdair G Kergon 1850a7940155SAlasdair G Kergon return map; 18511da177e4SLinus Torvalds } 18521da177e4SLinus Torvalds 18531da177e4SLinus Torvalds /* 18541da177e4SLinus Torvalds * Constructor for a new device. 18551da177e4SLinus Torvalds */ 18562b06cfffSAlasdair G Kergon int dm_create(int minor, struct mapped_device **result) 18571da177e4SLinus Torvalds { 18581da177e4SLinus Torvalds struct mapped_device *md; 18591da177e4SLinus Torvalds 18602b06cfffSAlasdair G Kergon md = alloc_dev(minor); 18611da177e4SLinus Torvalds if (!md) 18621da177e4SLinus Torvalds return -ENXIO; 18631da177e4SLinus Torvalds 186491ccbbacSTushar Sugandhi dm_ima_reset_data(md); 186591ccbbacSTushar Sugandhi 18661da177e4SLinus Torvalds *result = md; 18671da177e4SLinus Torvalds return 0; 18681da177e4SLinus Torvalds } 18691da177e4SLinus Torvalds 1870a5664dadSMike Snitzer /* 1871a5664dadSMike Snitzer * Functions to manage md->type. 1872a5664dadSMike Snitzer * All are required to hold md->type_lock. 
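 *
 * A sketch of the expected calling pattern (illustrative; not quoted
 * from an actual caller):
 *
 *	dm_lock_md_type(md);
 *	if (dm_get_md_type(md) == DM_TYPE_NONE)
 *		dm_set_md_type(md, dm_table_get_type(t));
 *	dm_unlock_md_type(md);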
1873a5664dadSMike Snitzer */ 1874a5664dadSMike Snitzer void dm_lock_md_type(struct mapped_device *md) 1875a5664dadSMike Snitzer { 1876a5664dadSMike Snitzer mutex_lock(&md->type_lock); 1877a5664dadSMike Snitzer } 1878a5664dadSMike Snitzer 1879a5664dadSMike Snitzer void dm_unlock_md_type(struct mapped_device *md) 1880a5664dadSMike Snitzer { 1881a5664dadSMike Snitzer mutex_unlock(&md->type_lock); 1882a5664dadSMike Snitzer } 1883a5664dadSMike Snitzer 18847e0d574fSBart Van Assche void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type) 1885a5664dadSMike Snitzer { 188600c4fc3bSMike Snitzer BUG_ON(!mutex_is_locked(&md->type_lock)); 1887a5664dadSMike Snitzer md->type = type; 1888a5664dadSMike Snitzer } 1889a5664dadSMike Snitzer 18907e0d574fSBart Van Assche enum dm_queue_mode dm_get_md_type(struct mapped_device *md) 1891a5664dadSMike Snitzer { 1892a5664dadSMike Snitzer return md->type; 1893a5664dadSMike Snitzer } 1894a5664dadSMike Snitzer 189536a0456fSAlasdair G Kergon struct target_type *dm_get_immutable_target_type(struct mapped_device *md) 189636a0456fSAlasdair G Kergon { 189736a0456fSAlasdair G Kergon return md->immutable_target_type; 189836a0456fSAlasdair G Kergon } 189936a0456fSAlasdair G Kergon 19004a0b4ddfSMike Snitzer /* 1901f84cb8a4SMike Snitzer * The queue_limits are only valid as long as you have a reference 1902f84cb8a4SMike Snitzer * count on 'md'. 1903f84cb8a4SMike Snitzer */ 1904f84cb8a4SMike Snitzer struct queue_limits *dm_get_queue_limits(struct mapped_device *md) 1905f84cb8a4SMike Snitzer { 1906f84cb8a4SMike Snitzer BUG_ON(!atomic_read(&md->holders)); 1907f84cb8a4SMike Snitzer return &md->queue->limits; 1908f84cb8a4SMike Snitzer } 1909f84cb8a4SMike Snitzer EXPORT_SYMBOL_GPL(dm_get_queue_limits); 1910f84cb8a4SMike Snitzer 19114a0b4ddfSMike Snitzer /* 19124a0b4ddfSMike Snitzer * Setup the DM device's queue based on md's type 19134a0b4ddfSMike Snitzer */ 1914591ddcfcSMike Snitzer int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) 19154a0b4ddfSMike Snitzer { 1916ba305859SChristoph Hellwig enum dm_queue_mode type = dm_table_get_type(t); 1917c100ec49SMike Snitzer struct queue_limits limits; 1918ba305859SChristoph Hellwig int r; 1919bfebd1cdSMike Snitzer 1920545ed20eSToshi Kani switch (type) { 1921bfebd1cdSMike Snitzer case DM_TYPE_REQUEST_BASED: 1922681cc5e8SMike Snitzer md->disk->fops = &dm_rq_blk_dops; 1923e83068a5SMike Snitzer r = dm_mq_init_request_queue(md, t); 1924bfebd1cdSMike Snitzer if (r) { 1925681cc5e8SMike Snitzer DMERR("Cannot initialize queue for request-based dm mapped device"); 1926bfebd1cdSMike Snitzer return r; 1927bfebd1cdSMike Snitzer } 1928bfebd1cdSMike Snitzer break; 1929bfebd1cdSMike Snitzer case DM_TYPE_BIO_BASED: 1930545ed20eSToshi Kani case DM_TYPE_DAX_BIO_BASED: 1931bfebd1cdSMike Snitzer break; 19327e0d574fSBart Van Assche case DM_TYPE_NONE: 19337e0d574fSBart Van Assche WARN_ON_ONCE(true); 19347e0d574fSBart Van Assche break; 1935ff36ab34SMike Snitzer } 19364a0b4ddfSMike Snitzer 1937c100ec49SMike Snitzer r = dm_calculate_queue_limits(t, &limits); 1938c100ec49SMike Snitzer if (r) { 1939c100ec49SMike Snitzer DMERR("Cannot calculate initial queue limits"); 1940c100ec49SMike Snitzer return r; 1941c100ec49SMike Snitzer } 1942bb37d772SDamien Le Moal r = dm_table_set_restrictions(t, md->queue, &limits); 1943bb37d772SDamien Le Moal if (r) 1944bb37d772SDamien Le Moal return r; 194589f871afSChristoph Hellwig 1946e7089f65SLuis Chamberlain r = add_disk(md->disk); 1947e7089f65SLuis Chamberlain if (r) 1948e7089f65SLuis Chamberlain return r; 
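	/*
	 * Register sysfs attributes only once add_disk() has succeeded;
	 * the error path below must then unwind with del_gendisk().
	 */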
194989f871afSChristoph Hellwig 195089f871afSChristoph Hellwig r = dm_sysfs_init(md); 195189f871afSChristoph Hellwig if (r) { 195289f871afSChristoph Hellwig del_gendisk(md->disk); 195389f871afSChristoph Hellwig return r; 195489f871afSChristoph Hellwig } 1955ba305859SChristoph Hellwig md->type = type; 19564a0b4ddfSMike Snitzer return 0; 19574a0b4ddfSMike Snitzer } 19584a0b4ddfSMike Snitzer 19592bec1f4aSMikulas Patocka struct mapped_device *dm_get_md(dev_t dev) 19601da177e4SLinus Torvalds { 19611da177e4SLinus Torvalds struct mapped_device *md; 19621da177e4SLinus Torvalds unsigned minor = MINOR(dev); 19631da177e4SLinus Torvalds 19641da177e4SLinus Torvalds if (MAJOR(dev) != _major || minor >= (1 << MINORBITS)) 19651da177e4SLinus Torvalds return NULL; 19661da177e4SLinus Torvalds 1967f32c10b0SJeff Mahoney spin_lock(&_minor_lock); 19681da177e4SLinus Torvalds 19691da177e4SLinus Torvalds md = idr_find(&_minor_idr, minor); 197049de5769SMike Snitzer if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) || 197149de5769SMike Snitzer test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { 1972637842cfSDavid Teigland md = NULL; 1973fba9f90eSJeff Mahoney goto out; 1974fba9f90eSJeff Mahoney } 19752bec1f4aSMikulas Patocka dm_get(md); 1976fba9f90eSJeff Mahoney out: 1977f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 19781da177e4SLinus Torvalds 1979637842cfSDavid Teigland return md; 1980637842cfSDavid Teigland } 19813cf2e4baSAlasdair G Kergon EXPORT_SYMBOL_GPL(dm_get_md); 1982d229a958SDavid Teigland 19839ade92a9SAlasdair G Kergon void *dm_get_mdptr(struct mapped_device *md) 1984637842cfSDavid Teigland { 19859ade92a9SAlasdair G Kergon return md->interface_ptr; 19861da177e4SLinus Torvalds } 19871da177e4SLinus Torvalds 19881da177e4SLinus Torvalds void dm_set_mdptr(struct mapped_device *md, void *ptr) 19891da177e4SLinus Torvalds { 19901da177e4SLinus Torvalds md->interface_ptr = ptr; 19911da177e4SLinus Torvalds } 19921da177e4SLinus Torvalds 19931da177e4SLinus Torvalds void dm_get(struct mapped_device *md) 19941da177e4SLinus Torvalds { 19951da177e4SLinus Torvalds atomic_inc(&md->holders); 19963f77316dSKiyoshi Ueda BUG_ON(test_bit(DMF_FREEING, &md->flags)); 19971da177e4SLinus Torvalds } 19981da177e4SLinus Torvalds 199909ee96b2SMikulas Patocka int dm_hold(struct mapped_device *md) 200009ee96b2SMikulas Patocka { 200109ee96b2SMikulas Patocka spin_lock(&_minor_lock); 200209ee96b2SMikulas Patocka if (test_bit(DMF_FREEING, &md->flags)) { 200309ee96b2SMikulas Patocka spin_unlock(&_minor_lock); 200409ee96b2SMikulas Patocka return -EBUSY; 200509ee96b2SMikulas Patocka } 200609ee96b2SMikulas Patocka dm_get(md); 200709ee96b2SMikulas Patocka spin_unlock(&_minor_lock); 200809ee96b2SMikulas Patocka return 0; 200909ee96b2SMikulas Patocka } 201009ee96b2SMikulas Patocka EXPORT_SYMBOL_GPL(dm_hold); 201109ee96b2SMikulas Patocka 201272d94861SAlasdair G Kergon const char *dm_device_name(struct mapped_device *md) 201372d94861SAlasdair G Kergon { 201472d94861SAlasdair G Kergon return md->name; 201572d94861SAlasdair G Kergon } 201672d94861SAlasdair G Kergon EXPORT_SYMBOL_GPL(dm_device_name); 201772d94861SAlasdair G Kergon 20183f77316dSKiyoshi Ueda static void __dm_destroy(struct mapped_device *md, bool wait) 20191da177e4SLinus Torvalds { 20201134e5aeSMike Anderson struct dm_table *map; 202183d5e5b0SMikulas Patocka int srcu_idx; 20221da177e4SLinus Torvalds 20233f77316dSKiyoshi Ueda might_sleep(); 2024fba9f90eSJeff Mahoney 202563a4f065SMike Snitzer spin_lock(&_minor_lock); 20263f77316dSKiyoshi Ueda 
idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); 2027fba9f90eSJeff Mahoney set_bit(DMF_FREEING, &md->flags); 2028f32c10b0SJeff Mahoney spin_unlock(&_minor_lock); 20293f77316dSKiyoshi Ueda 2030c12c9a3cSMike Snitzer blk_set_queue_dying(md->queue); 20313b785fbcSBart Van Assche 2032ab7c7bb6SMikulas Patocka /* 2033ab7c7bb6SMikulas Patocka * Take suspend_lock so that presuspend and postsuspend methods 2034ab7c7bb6SMikulas Patocka * do not race with internal suspend. 2035ab7c7bb6SMikulas Patocka */ 2036ab7c7bb6SMikulas Patocka mutex_lock(&md->suspend_lock); 20372a708cffSJunichi Nomura map = dm_get_live_table(md, &srcu_idx); 20384f186f8bSKiyoshi Ueda if (!dm_suspended_md(md)) { 20391da177e4SLinus Torvalds dm_table_presuspend_targets(map); 2040adc0daadSMikulas Patocka set_bit(DMF_SUSPENDED, &md->flags); 20415df96f2bSMikulas Patocka set_bit(DMF_POST_SUSPENDING, &md->flags); 20421da177e4SLinus Torvalds dm_table_postsuspend_targets(map); 20431da177e4SLinus Torvalds } 204483d5e5b0SMikulas Patocka /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ 204583d5e5b0SMikulas Patocka dm_put_live_table(md, srcu_idx); 20462a708cffSJunichi Nomura mutex_unlock(&md->suspend_lock); 204783d5e5b0SMikulas Patocka 20483f77316dSKiyoshi Ueda /* 20493f77316dSKiyoshi Ueda * Rare, but there may be I/O requests still going to complete, 20503f77316dSKiyoshi Ueda * for example. Wait for all references to disappear. 20513f77316dSKiyoshi Ueda * No one should increment the reference count of the mapped_device, 20523f77316dSKiyoshi Ueda * after the mapped_device state becomes DMF_FREEING. 20533f77316dSKiyoshi Ueda */ 20543f77316dSKiyoshi Ueda if (wait) 20553f77316dSKiyoshi Ueda while (atomic_read(&md->holders)) 20563f77316dSKiyoshi Ueda msleep(1); 20573f77316dSKiyoshi Ueda else if (atomic_read(&md->holders)) 20583f77316dSKiyoshi Ueda DMWARN("%s: Forcibly removing mapped_device still in use! 
(%d users)", 20593f77316dSKiyoshi Ueda dm_device_name(md), atomic_read(&md->holders)); 20603f77316dSKiyoshi Ueda 2061a7940155SAlasdair G Kergon dm_table_destroy(__unbind(md)); 20621da177e4SLinus Torvalds free_dev(md); 20631da177e4SLinus Torvalds } 20643f77316dSKiyoshi Ueda 20653f77316dSKiyoshi Ueda void dm_destroy(struct mapped_device *md) 20663f77316dSKiyoshi Ueda { 20673f77316dSKiyoshi Ueda __dm_destroy(md, true); 20683f77316dSKiyoshi Ueda } 20693f77316dSKiyoshi Ueda 20703f77316dSKiyoshi Ueda void dm_destroy_immediate(struct mapped_device *md) 20713f77316dSKiyoshi Ueda { 20723f77316dSKiyoshi Ueda __dm_destroy(md, false); 20733f77316dSKiyoshi Ueda } 20743f77316dSKiyoshi Ueda 20753f77316dSKiyoshi Ueda void dm_put(struct mapped_device *md) 20763f77316dSKiyoshi Ueda { 20773f77316dSKiyoshi Ueda atomic_dec(&md->holders); 20781da177e4SLinus Torvalds } 207979eb885cSEdward Goggin EXPORT_SYMBOL_GPL(dm_put); 20801da177e4SLinus Torvalds 208185067747SMing Lei static bool md_in_flight_bios(struct mapped_device *md) 208285067747SMing Lei { 208385067747SMing Lei int cpu; 20848446fe92SChristoph Hellwig struct block_device *part = dm_disk(md)->part0; 208585067747SMing Lei long sum = 0; 208685067747SMing Lei 208785067747SMing Lei for_each_possible_cpu(cpu) { 208885067747SMing Lei sum += part_stat_local_read_cpu(part, in_flight[0], cpu); 208985067747SMing Lei sum += part_stat_local_read_cpu(part, in_flight[1], cpu); 209085067747SMing Lei } 209185067747SMing Lei 209285067747SMing Lei return sum != 0; 209385067747SMing Lei } 209485067747SMing Lei 20952f064a59SPeter Zijlstra static int dm_wait_for_bios_completion(struct mapped_device *md, unsigned int task_state) 209646125c1cSMilan Broz { 209746125c1cSMilan Broz int r = 0; 20989f4c3f87SBart Van Assche DEFINE_WAIT(wait); 209946125c1cSMilan Broz 210085067747SMing Lei while (true) { 21019f4c3f87SBart Van Assche prepare_to_wait(&md->wait, &wait, task_state); 210246125c1cSMilan Broz 210385067747SMing Lei if (!md_in_flight_bios(md)) 210446125c1cSMilan Broz break; 210546125c1cSMilan Broz 2106e3fabdfdSBart Van Assche if (signal_pending_state(task_state, current)) { 210746125c1cSMilan Broz r = -EINTR; 210846125c1cSMilan Broz break; 210946125c1cSMilan Broz } 211046125c1cSMilan Broz 211146125c1cSMilan Broz io_schedule(); 211246125c1cSMilan Broz } 21139f4c3f87SBart Van Assche finish_wait(&md->wait, &wait); 2114b44ebeb0SMikulas Patocka 211546125c1cSMilan Broz return r; 211646125c1cSMilan Broz } 211746125c1cSMilan Broz 21182f064a59SPeter Zijlstra static int dm_wait_for_completion(struct mapped_device *md, unsigned int task_state) 211985067747SMing Lei { 212085067747SMing Lei int r = 0; 212185067747SMing Lei 212285067747SMing Lei if (!queue_is_mq(md->queue)) 212385067747SMing Lei return dm_wait_for_bios_completion(md, task_state); 212485067747SMing Lei 212585067747SMing Lei while (true) { 212685067747SMing Lei if (!blk_mq_queue_inflight(md->queue)) 212785067747SMing Lei break; 212885067747SMing Lei 212985067747SMing Lei if (signal_pending_state(task_state, current)) { 213085067747SMing Lei r = -EINTR; 213185067747SMing Lei break; 213285067747SMing Lei } 213385067747SMing Lei 213485067747SMing Lei msleep(5); 213585067747SMing Lei } 213685067747SMing Lei 213785067747SMing Lei return r; 213885067747SMing Lei } 213985067747SMing Lei 21401da177e4SLinus Torvalds /* 21411da177e4SLinus Torvalds * Process the deferred bios 21421da177e4SLinus Torvalds */ 2143ef208587SMikulas Patocka static void dm_wq_work(struct work_struct *work) 21441da177e4SLinus Torvalds { 21450c2915b8SMike 
Snitzer struct mapped_device *md = container_of(work, struct mapped_device, work); 21460c2915b8SMike Snitzer struct bio *bio; 2147ef208587SMikulas Patocka 21483b00b203SMikulas Patocka while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 2149022c2611SMikulas Patocka spin_lock_irq(&md->deferred_lock); 21500c2915b8SMike Snitzer bio = bio_list_pop(&md->deferred); 2151022c2611SMikulas Patocka spin_unlock_irq(&md->deferred_lock); 2152022c2611SMikulas Patocka 21530c2915b8SMike Snitzer if (!bio) 2154df12ee99SAlasdair G Kergon break; 215573d410c0SMilan Broz 21560c2915b8SMike Snitzer submit_bio_noacct(bio); 2157e6ee8c0bSKiyoshi Ueda } 21581da177e4SLinus Torvalds } 21591da177e4SLinus Torvalds 21609a1fb464SMikulas Patocka static void dm_queue_flush(struct mapped_device *md) 2161304f3f6aSMilan Broz { 21623b00b203SMikulas Patocka clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 21634e857c58SPeter Zijlstra smp_mb__after_atomic(); 216453d5914fSMikulas Patocka queue_work(md->wq, &md->work); 2165304f3f6aSMilan Broz } 2166304f3f6aSMilan Broz 21671da177e4SLinus Torvalds /* 2168042d2a9bSAlasdair G Kergon * Swap in a new table, returning the old one for the caller to destroy. 21691da177e4SLinus Torvalds */ 2170042d2a9bSAlasdair G Kergon struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) 21711da177e4SLinus Torvalds { 217287eb5b21SMike Christie struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL); 2173754c5fc7SMike Snitzer struct queue_limits limits; 2174042d2a9bSAlasdair G Kergon int r; 21751da177e4SLinus Torvalds 2176e61290a4SDaniel Walker mutex_lock(&md->suspend_lock); 21771da177e4SLinus Torvalds 21781da177e4SLinus Torvalds /* device must be suspended */ 21794f186f8bSKiyoshi Ueda if (!dm_suspended_md(md)) 218093c534aeSAlasdair G Kergon goto out; 21811da177e4SLinus Torvalds 21823ae70656SMike Snitzer /* 21833ae70656SMike Snitzer * If the new table has no data devices, retain the existing limits. 21843ae70656SMike Snitzer * This helps multipath with queue_if_no_path if all paths disappear, 21853ae70656SMike Snitzer * then new I/O is queued based on these limits, and then some paths 21863ae70656SMike Snitzer * reappear. 21873ae70656SMike Snitzer */ 21883ae70656SMike Snitzer if (dm_table_has_no_data_devices(table)) { 218983d5e5b0SMikulas Patocka live_map = dm_get_live_table_fast(md); 21903ae70656SMike Snitzer if (live_map) 21913ae70656SMike Snitzer limits = md->queue->limits; 219283d5e5b0SMikulas Patocka dm_put_live_table_fast(md); 21933ae70656SMike Snitzer } 21943ae70656SMike Snitzer 219587eb5b21SMike Christie if (!live_map) { 2196754c5fc7SMike Snitzer r = dm_calculate_queue_limits(table, &limits); 2197042d2a9bSAlasdair G Kergon if (r) { 2198042d2a9bSAlasdair G Kergon map = ERR_PTR(r); 2199754c5fc7SMike Snitzer goto out; 2200042d2a9bSAlasdair G Kergon } 220187eb5b21SMike Christie } 2202754c5fc7SMike Snitzer 2203042d2a9bSAlasdair G Kergon map = __bind(md, table, &limits); 220462e08243SMikulas Patocka dm_issue_global_event(); 22051da177e4SLinus Torvalds 220693c534aeSAlasdair G Kergon out: 2207e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock); 2208042d2a9bSAlasdair G Kergon return map; 22091da177e4SLinus Torvalds } 22101da177e4SLinus Torvalds 22111da177e4SLinus Torvalds /* 22121da177e4SLinus Torvalds * Functions to lock and unlock any filesystem running on the 22131da177e4SLinus Torvalds * device. 
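 *
 * lock_fs() freezes the filesystem with freeze_bdev() and records the
 * fact in DMF_FROZEN, so that unlock_fs() only calls thaw_bdev() when
 * a freeze actually happened.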
22141da177e4SLinus Torvalds */ 22152ca3310eSAlasdair G Kergon static int lock_fs(struct mapped_device *md) 22161da177e4SLinus Torvalds { 2217e39e2e95SAlasdair G Kergon int r; 22181da177e4SLinus Torvalds 2219040f04bdSChristoph Hellwig WARN_ON(test_bit(DMF_FROZEN, &md->flags)); 2220dfbe03f6SAlasdair G Kergon 2221977115c0SChristoph Hellwig r = freeze_bdev(md->disk->part0); 2222040f04bdSChristoph Hellwig if (!r) 2223aa8d7c2fSAlasdair G Kergon set_bit(DMF_FROZEN, &md->flags); 2224040f04bdSChristoph Hellwig return r; 22251da177e4SLinus Torvalds } 22261da177e4SLinus Torvalds 22272ca3310eSAlasdair G Kergon static void unlock_fs(struct mapped_device *md) 22281da177e4SLinus Torvalds { 2229aa8d7c2fSAlasdair G Kergon if (!test_bit(DMF_FROZEN, &md->flags)) 2230aa8d7c2fSAlasdair G Kergon return; 2231977115c0SChristoph Hellwig thaw_bdev(md->disk->part0); 2232aa8d7c2fSAlasdair G Kergon clear_bit(DMF_FROZEN, &md->flags); 22331da177e4SLinus Torvalds } 22341da177e4SLinus Torvalds 22351da177e4SLinus Torvalds /* 2236b48633f8SBart Van Assche * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG 2237b48633f8SBart Van Assche * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE 2238b48633f8SBart Van Assche * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY 2239b48633f8SBart Van Assche * 2240ffcc3936SMike Snitzer * If __dm_suspend returns 0, the device is completely quiescent 2241ffcc3936SMike Snitzer * now. There is no request-processing activity. All new requests 2242ffcc3936SMike Snitzer * are being added to md->deferred list. 2243cec47e3dSKiyoshi Ueda */ 2244ffcc3936SMike Snitzer static int __dm_suspend(struct mapped_device *md, struct dm_table *map, 22452f064a59SPeter Zijlstra unsigned suspend_flags, unsigned int task_state, 2246eaf9a736SMike Snitzer int dmf_suspended_flag) 22471da177e4SLinus Torvalds { 2248ffcc3936SMike Snitzer bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG; 2249ffcc3936SMike Snitzer bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG; 2250ffcc3936SMike Snitzer int r; 2251cf222b37SAlasdair G Kergon 22525a8f1f80SBart Van Assche lockdep_assert_held(&md->suspend_lock); 22535a8f1f80SBart Van Assche 22542e93ccc1SKiyoshi Ueda /* 22552e93ccc1SKiyoshi Ueda * DMF_NOFLUSH_SUSPENDING must be set before presuspend. 22562e93ccc1SKiyoshi Ueda * This flag is cleared before dm_suspend returns. 22572e93ccc1SKiyoshi Ueda */ 22582e93ccc1SKiyoshi Ueda if (noflush) 22592e93ccc1SKiyoshi Ueda set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 226086331f39SBart Van Assche else 2261ac75b09fSMike Snitzer DMDEBUG("%s: suspending with flush", dm_device_name(md)); 22622e93ccc1SKiyoshi Ueda 2263d67ee213SMike Snitzer /* 2264d67ee213SMike Snitzer * This gets reverted if there's an error later and the targets 2265d67ee213SMike Snitzer * provide the .presuspend_undo hook. 2266d67ee213SMike Snitzer */ 22671da177e4SLinus Torvalds dm_table_presuspend_targets(map); 22681da177e4SLinus Torvalds 22692e93ccc1SKiyoshi Ueda /* 22709f518b27SKiyoshi Ueda * Flush I/O to the device. 22719f518b27SKiyoshi Ueda * Any I/O submitted after lock_fs() may not be flushed. 22729f518b27SKiyoshi Ueda * noflush takes precedence over do_lockfs. 22739f518b27SKiyoshi Ueda * (lock_fs() flushes I/Os and waits for them to complete.) 
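 * In other words, with DM_SUSPEND_NOFLUSH_FLAG set the filesystem is
 * never frozen, even if DM_SUSPEND_LOCKFS_FLAG was also passed.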
22742e93ccc1SKiyoshi Ueda */ 227532a926daSMikulas Patocka if (!noflush && do_lockfs) { 22762ca3310eSAlasdair G Kergon r = lock_fs(md); 2277d67ee213SMike Snitzer if (r) { 2278d67ee213SMike Snitzer dm_table_presuspend_undo_targets(map); 2279ffcc3936SMike Snitzer return r; 2280aa8d7c2fSAlasdair G Kergon } 2281d67ee213SMike Snitzer } 22821da177e4SLinus Torvalds 22831da177e4SLinus Torvalds /* 22843b00b203SMikulas Patocka * Here we must make sure that no processes are submitting requests 22853b00b203SMikulas Patocka * to target drivers i.e. no one may be executing 22860cede372SMike Snitzer * __split_and_process_bio from dm_submit_bio. 22873b00b203SMikulas Patocka * 22880cede372SMike Snitzer * To get all processes out of __split_and_process_bio in dm_submit_bio, 22893b00b203SMikulas Patocka * we take the write lock. To prevent any process from reentering 22900cede372SMike Snitzer * __split_and_process_bio from dm_submit_bio and quiesce the thread 22910cede372SMike Snitzer * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call 22926a8736d1STejun Heo * flush_workqueue(md->wq). 22931da177e4SLinus Torvalds */ 22941eb787ecSAlasdair G Kergon set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 229541abc4e1SHannes Reinecke if (map) 229683d5e5b0SMikulas Patocka synchronize_srcu(&md->io_barrier); 22971da177e4SLinus Torvalds 2298d0bcb878SKiyoshi Ueda /* 229929e4013dSTejun Heo * Stop md->queue before flushing md->wq in case request-based 230029e4013dSTejun Heo * dm defers requests to md->wq from md->queue. 2301d0bcb878SKiyoshi Ueda */ 23026a23e05cSJens Axboe if (dm_request_based(md)) 2303eca7ee6dSMike Snitzer dm_stop_queue(md->queue); 2304cec47e3dSKiyoshi Ueda 2305d0bcb878SKiyoshi Ueda flush_workqueue(md->wq); 2306d0bcb878SKiyoshi Ueda 23071da177e4SLinus Torvalds /* 23083b00b203SMikulas Patocka * At this point no more requests are entering target request routines. 23093b00b203SMikulas Patocka * We call dm_wait_for_completion to wait for all existing requests 23103b00b203SMikulas Patocka * to finish. 23111da177e4SLinus Torvalds */ 2312b48633f8SBart Van Assche r = dm_wait_for_completion(md, task_state); 2313eaf9a736SMike Snitzer if (!r) 2314eaf9a736SMike Snitzer set_bit(dmf_suspended_flag, &md->flags); 23151da177e4SLinus Torvalds 23166d6f10dfSMilan Broz if (noflush) 2317022c2611SMikulas Patocka clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 231841abc4e1SHannes Reinecke if (map) 231983d5e5b0SMikulas Patocka synchronize_srcu(&md->io_barrier); 23202e93ccc1SKiyoshi Ueda 23211da177e4SLinus Torvalds /* were we interrupted ? */ 232246125c1cSMilan Broz if (r < 0) { 23239a1fb464SMikulas Patocka dm_queue_flush(md); 232473d410c0SMilan Broz 2325cec47e3dSKiyoshi Ueda if (dm_request_based(md)) 2326eca7ee6dSMike Snitzer dm_start_queue(md->queue); 2327cec47e3dSKiyoshi Ueda 23282ca3310eSAlasdair G Kergon unlock_fs(md); 2329d67ee213SMike Snitzer dm_table_presuspend_undo_targets(map); 2330ffcc3936SMike Snitzer /* pushback list is already flushed, so skip flush */ 2331ffcc3936SMike Snitzer } 2332ffcc3936SMike Snitzer 2333ffcc3936SMike Snitzer return r; 23342ca3310eSAlasdair G Kergon } 23352ca3310eSAlasdair G Kergon 23363b00b203SMikulas Patocka /* 2337ffcc3936SMike Snitzer * We need to be able to change a mapping table under a mounted 2338ffcc3936SMike Snitzer * filesystem. For example we might want to move some data in 2339ffcc3936SMike Snitzer * the background. 
Before the table can be swapped with 2340ffcc3936SMike Snitzer * dm_bind_table, dm_suspend must be called to flush any in 2341ffcc3936SMike Snitzer * flight bios and ensure that any further io gets deferred. 23423b00b203SMikulas Patocka */ 2343ffcc3936SMike Snitzer /* 2344ffcc3936SMike Snitzer * Suspend mechanism in request-based dm. 2345ffcc3936SMike Snitzer * 2346ffcc3936SMike Snitzer * 1. Flush all I/Os by lock_fs() if needed. 2347ffcc3936SMike Snitzer * 2. Stop dispatching any I/O by stopping the request_queue. 2348ffcc3936SMike Snitzer * 3. Wait for all in-flight I/Os to be completed or requeued. 2349ffcc3936SMike Snitzer * 2350ffcc3936SMike Snitzer * To abort suspend, start the request_queue. 2351ffcc3936SMike Snitzer */ 2352ffcc3936SMike Snitzer int dm_suspend(struct mapped_device *md, unsigned suspend_flags) 2353ffcc3936SMike Snitzer { 2354ffcc3936SMike Snitzer struct dm_table *map = NULL; 2355ffcc3936SMike Snitzer int r = 0; 2356ffcc3936SMike Snitzer 2357ffcc3936SMike Snitzer retry: 2358ffcc3936SMike Snitzer mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); 2359ffcc3936SMike Snitzer 2360ffcc3936SMike Snitzer if (dm_suspended_md(md)) { 2361ffcc3936SMike Snitzer r = -EINVAL; 2362ffcc3936SMike Snitzer goto out_unlock; 2363ffcc3936SMike Snitzer } 2364ffcc3936SMike Snitzer 2365ffcc3936SMike Snitzer if (dm_suspended_internally_md(md)) { 2366ffcc3936SMike Snitzer /* already internally suspended, wait for internal resume */ 2367ffcc3936SMike Snitzer mutex_unlock(&md->suspend_lock); 2368ffcc3936SMike Snitzer r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); 2369ffcc3936SMike Snitzer if (r) 2370ffcc3936SMike Snitzer return r; 2371ffcc3936SMike Snitzer goto retry; 2372ffcc3936SMike Snitzer } 2373ffcc3936SMike Snitzer 2374a12f5d48SEric Dumazet map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); 2375ffcc3936SMike Snitzer 2376eaf9a736SMike Snitzer r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED); 2377ffcc3936SMike Snitzer if (r) 2378ffcc3936SMike Snitzer goto out_unlock; 23793b00b203SMikulas Patocka 23805df96f2bSMikulas Patocka set_bit(DMF_POST_SUSPENDING, &md->flags); 23814d4471cbSKiyoshi Ueda dm_table_postsuspend_targets(map); 23825df96f2bSMikulas Patocka clear_bit(DMF_POST_SUSPENDING, &md->flags); 23834d4471cbSKiyoshi Ueda 2384d287483dSAlasdair G Kergon out_unlock: 2385e61290a4SDaniel Walker mutex_unlock(&md->suspend_lock); 2386cf222b37SAlasdair G Kergon return r; 23871da177e4SLinus Torvalds } 23881da177e4SLinus Torvalds 2389ffcc3936SMike Snitzer static int __dm_resume(struct mapped_device *md, struct dm_table *map) 23901da177e4SLinus Torvalds { 2391ffcc3936SMike Snitzer if (map) { 2392ffcc3936SMike Snitzer int r = dm_table_resume_targets(map); 23938757b776SMilan Broz if (r) 2394ffcc3936SMike Snitzer return r; 2395ffcc3936SMike Snitzer } 23962ca3310eSAlasdair G Kergon 23979a1fb464SMikulas Patocka dm_queue_flush(md); 23982ca3310eSAlasdair G Kergon 2399cec47e3dSKiyoshi Ueda /* 2400cec47e3dSKiyoshi Ueda * Flushing deferred I/Os must be done after targets are resumed 2401cec47e3dSKiyoshi Ueda * so that mapping of targets can work correctly. 2402cec47e3dSKiyoshi Ueda * Request-based dm is queueing the deferred I/Os in its request_queue. 
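 * (Bio-based dm instead replays md->deferred through dm_wq_work()
 * once dm_queue_flush() above has cleared DMF_BLOCK_IO_FOR_SUSPEND.)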
static int __dm_resume(struct mapped_device *md, struct dm_table *map)
{
	if (map) {
		int r = dm_table_resume_targets(map);
		if (r)
			return r;
	}

	dm_queue_flush(md);

	/*
	 * Flushing deferred I/Os must be done after targets are resumed
	 * so that mapping of targets can work correctly.
	 * Request-based dm is queueing the deferred I/Os in its request_queue.
	 */
	if (dm_request_based(md))
		dm_start_queue(md->queue);

	unlock_fs(md);

	return 0;
}

int dm_resume(struct mapped_device *md)
{
	int r;
	struct dm_table *map = NULL;

retry:
	r = -EINVAL;
	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);

	if (!dm_suspended_md(md))
		goto out;

	if (dm_suspended_internally_md(md)) {
		/* already internally suspended, wait for internal resume */
		mutex_unlock(&md->suspend_lock);
		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
		if (r)
			return r;
		goto retry;
	}

	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
	if (!map || !dm_table_get_size(map))
		goto out;

	r = __dm_resume(md, map);
	if (r)
		goto out;

	clear_bit(DMF_SUSPENDED, &md->flags);
out:
	mutex_unlock(&md->suspend_lock);

	return r;
}
/*
 * Internal suspend/resume works like userspace-driven suspend. It waits
 * until all bios finish and prevents issuing new bios to the target drivers.
 * It may be used only from the kernel.
 */

static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;

	lockdep_assert_held(&md->suspend_lock);

	if (md->internal_suspend_count++)
		return; /* nested internal suspend */

	if (dm_suspended_md(md)) {
		set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
		return; /* nest suspend */
	}

	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));

	/*
	 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is
	 * supported. Properly supporting a TASK_INTERRUPTIBLE internal suspend
	 * would require changing .presuspend to return an error -- avoid this
	 * until there is a need for more elaborate variants of internal suspend.
	 */
	(void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
			    DMF_SUSPENDED_INTERNALLY);

	set_bit(DMF_POST_SUSPENDING, &md->flags);
	dm_table_postsuspend_targets(map);
	clear_bit(DMF_POST_SUSPENDING, &md->flags);
}

static void __dm_internal_resume(struct mapped_device *md)
{
	BUG_ON(!md->internal_suspend_count);

	if (--md->internal_suspend_count)
		return; /* resume from nested internal suspend */

	if (dm_suspended_md(md))
		goto done; /* resume from nested suspend */

	/*
	 * NOTE: existing callers don't need to call dm_table_resume_targets
	 * (which may fail -- so best to avoid it for now by passing NULL map)
	 */
	(void) __dm_resume(md, NULL);

done:
	clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
	smp_mb__after_atomic();
	wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY);
}

void dm_internal_suspend_noflush(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	__dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush);

void dm_internal_resume(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	__dm_internal_resume(md);
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_resume);
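/*
 * Nesting sketch of internal_suspend_count (illustrative; dm-thin is a
 * real in-kernel user of the noflush variant):
 *
 *	dm_internal_suspend_noflush(md);  -- count 0 -> 1: device suspends
 *	dm_internal_suspend_noflush(md);  -- count 1 -> 2: no-op, nested
 *	dm_internal_resume(md);           -- count 2 -> 1: still suspended
 *	dm_internal_resume(md);           -- count 1 -> 0: device resumes
 */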
/*
 * Fast variants of internal suspend/resume hold md->suspend_lock,
 * which prevents interaction with userspace-driven suspend.
 */

void dm_internal_suspend_fast(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
		return; /* already quiesced; suspend_lock stays held until resume_fast */

	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	synchronize_srcu(&md->io_barrier);
	flush_workqueue(md->wq);
	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);

void dm_internal_resume_fast(struct mapped_device *md)
{
	/* md->suspend_lock was taken by dm_internal_suspend_fast() */
	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
		goto done;

	dm_queue_flush(md);

done:
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_resume_fast);

/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
		      unsigned cookie)
{
	int r;
	unsigned noio_flag;
	char udev_cookie[DM_COOKIE_LENGTH];
	char *envp[] = { udev_cookie, NULL };

	noio_flag = memalloc_noio_save();

	if (!cookie)
		r = kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
	else {
		snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
			 DM_COOKIE_ENV_VAR_NAME, cookie);
		r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
				       action, envp);
	}

	memalloc_noio_restore(noio_flag);

	return r;
}

uint32_t dm_next_uevent_seq(struct mapped_device *md)
{
	return atomic_add_return(1, &md->uevent_seq);
}

uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}
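/*
 * Event-wait sketch (the in-kernel consumer is the DM_DEV_WAIT ioctl
 * path in dm-ioctl.c; details there may differ):
 *
 *	uint32_t seen = dm_get_event_nr(md);
 *
 *	... report current status ...
 *
 *	if (dm_wait_event(md, seen))
 *		return -ERESTARTSYS;	-- interrupted by a signal
 *	... a new event has been raised since 'seen' was sampled ...
 */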
void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
{
	unsigned long flags;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_add(elist, &md->uevent_list);
	spin_unlock_irqrestore(&md->uevent_lock, flags);
}

/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}
EXPORT_SYMBOL_GPL(dm_disk);

struct kobject *dm_kobject(struct mapped_device *md)
{
	return &md->kobj_holder.kobj;
}

struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{
	struct mapped_device *md;

	md = container_of(kobj, struct mapped_device, kobj_holder.kobj);

	spin_lock(&_minor_lock);
	if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}
	dm_get(md);
out:
	spin_unlock(&_minor_lock);

	return md;
}

int dm_suspended_md(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}

static int dm_post_suspending_md(struct mapped_device *md)
{
	return test_bit(DMF_POST_SUSPENDING, &md->flags);
}

int dm_suspended_internally_md(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
}

int dm_test_deferred_remove_flag(struct mapped_device *md)
{
	return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
}

int dm_suspended(struct dm_target *ti)
{
	return dm_suspended_md(ti->table->md);
}
EXPORT_SYMBOL_GPL(dm_suspended);

int dm_post_suspending(struct dm_target *ti)
{
	return dm_post_suspending_md(ti->table->md);
}
EXPORT_SYMBOL_GPL(dm_post_suspending);

int dm_noflush_suspending(struct dm_target *ti)
{
	return __noflush_suspending(ti->table->md);
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);
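/*
 * How targets consume these predicates: an illustrative sketch of an
 * end-of-I/O hook choosing requeue over hard failure during a noflush
 * suspend (compare dm-mpath's end_io; 'my_target_end_io' is
 * hypothetical and details vary per target):
 *
 *	static int my_target_end_io(struct dm_target *ti, struct bio *bio,
 *				    blk_status_t *error)
 *	{
 *		if (*error && dm_noflush_suspending(ti))
 *			return DM_ENDIO_REQUEUE;	-- retry after resume
 *		return DM_ENDIO_DONE;
 *	}
 */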
struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
					    unsigned integrity, unsigned per_io_data_size,
					    unsigned min_pool_size)
{
	struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
	unsigned int pool_size = 0;
	unsigned int front_pad, io_front_pad;
	int ret;

	if (!pools)
		return NULL;

	switch (type) {
	case DM_TYPE_BIO_BASED:
	case DM_TYPE_DAX_BIO_BASED:
		pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
		front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + DM_TARGET_IO_BIO_OFFSET;
		io_front_pad = roundup(per_io_data_size, __alignof__(struct dm_io)) + DM_IO_BIO_OFFSET;
		ret = bioset_init(&pools->io_bs, pool_size, io_front_pad, 0);
		if (ret)
			goto out;
		if (integrity && bioset_integrity_create(&pools->io_bs, pool_size))
			goto out;
		break;
	case DM_TYPE_REQUEST_BASED:
		pool_size = max(dm_get_reserved_rq_based_ios(), min_pool_size);
		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
		/* per_io_data_size is used for blk-mq pdu at queue allocation */
		break;
	default:
		BUG();
	}

	ret = bioset_init(&pools->bs, pool_size, front_pad, 0);
	if (ret)
		goto out;

	if (integrity && bioset_integrity_create(&pools->bs, pool_size))
		goto out;

	return pools;

out:
	dm_free_md_mempools(pools);

	return NULL;
}
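/*
 * Resulting front-pad layout for bio-based targets (sketch):
 *
 *	pools->io_bs bios:  [ per-bio data | struct dm_io (ends in tio.clone) ]
 *	pools->bs bios:     [ per-bio data | struct dm_target_io (ends in clone) ]
 *
 * The roundup() above keeps the embedded structs correctly aligned,
 * and this layout is what lets dm_per_bio_data() step back from a
 * clone bio by DM_IO_BIO_OFFSET or DM_TARGET_IO_BIO_OFFSET (plus
 * data_size) to reach the target's per-bio data.
 */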
void dm_free_md_mempools(struct dm_md_mempools *pools)
{
	if (!pools)
		return;

	bioset_exit(&pools->bs);
	bioset_exit(&pools->io_bs);

	kfree(pools);
}

struct dm_pr {
	u64	old_key;
	u64	new_key;
	u32	flags;
	bool	fail_early;
};

static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn,
		      void *data)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	struct dm_table *table;
	struct dm_target *ti;
	int ret = -ENOTTY, srcu_idx;

	table = dm_get_live_table(md, &srcu_idx);
	if (!table || !dm_table_get_size(table))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(table) != 1)
		goto out;
	ti = dm_table_get_target(table, 0);

	ret = -EINVAL;
	if (!ti->type->iterate_devices)
		goto out;

	ret = ti->type->iterate_devices(ti, fn, data);
out:
	dm_put_live_table(md, srcu_idx);
	return ret;
}
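/*
 * dm_call_pr() usage sketch: wrap the per-device work in an
 * iterate_devices_callout_fn and pass state through 'data'.  A
 * hypothetical operation ('__dm_pr_frob' is not a real helper) would
 * mirror __dm_pr_register() below:
 *
 *	static int __dm_pr_frob(struct dm_target *ti, struct dm_dev *dev,
 *				sector_t start, sector_t len, void *data)
 *	{
 *		struct dm_pr *pr = data;
 *		... call the relevant pr_ops hook on dev->bdev using *pr ...
 *	}
 *
 *	ret = dm_call_pr(bdev, __dm_pr_frob, &pr);
 */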
/*
 * For register / unregister we need to manually call out to every path.
 */
static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev,
			    sector_t start, sector_t len, void *data)
{
	struct dm_pr *pr = data;
	const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;

	if (!ops || !ops->pr_register)
		return -EOPNOTSUPP;
	return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags);
}

static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
			  u32 flags)
{
	struct dm_pr pr = {
		.old_key	= old_key,
		.new_key	= new_key,
		.flags		= flags,
		.fail_early	= true,
	};
	int ret;

	ret = dm_call_pr(bdev, __dm_pr_register, &pr);
	if (ret && new_key) {
		/* unregister all paths if we failed to register any path */
		pr.old_key = new_key;
		pr.new_key = 0;
		pr.flags = 0;
		pr.fail_early = false;
		dm_call_pr(bdev, __dm_pr_register, &pr);
	}

	return ret;
}

static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
			 u32 flags)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_reserve)
		r = ops->pr_reserve(bdev, key, type, flags);
	else
		r = -EOPNOTSUPP;
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}
static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_release)
		r = ops->pr_release(bdev, key, type);
	else
		r = -EOPNOTSUPP;
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}

static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
			 enum pr_type type, bool abort)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_preempt)
		r = ops->pr_preempt(bdev, old_key, new_key, type, abort);
	else
		r = -EOPNOTSUPP;
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}

static int dm_pr_clear(struct block_device *bdev, u64 key)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_clear)
		r = ops->pr_clear(bdev, key);
	else
		r = -EOPNOTSUPP;
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}

static const struct pr_ops dm_pr_ops = {
	.pr_register	= dm_pr_register,
	.pr_reserve	= dm_pr_reserve,
	.pr_release	= dm_pr_release,
	.pr_preempt	= dm_pr_preempt,
	.pr_clear	= dm_pr_clear,
};
static const struct block_device_operations dm_blk_dops = {
	.submit_bio = dm_submit_bio,
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
	.report_zones = dm_blk_report_zones,
	.pr_ops = &dm_pr_ops,
	.owner = THIS_MODULE
};

static const struct block_device_operations dm_rq_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
	.pr_ops = &dm_pr_ops,
	.owner = THIS_MODULE
};

static const struct dax_operations dm_dax_ops = {
	.direct_access = dm_dax_direct_access,
	.zero_page_range = dm_dax_zero_page_range,
};

/*
 * module hooks
 */
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");

module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");

module_param(dm_numa_node, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");

module_param(swap_bios, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(swap_bios, "Maximum allowed inflight swap IOs");

MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
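/*
 * The parameters above can be set at module load time or at runtime,
 * e.g. (illustrative shell usage; values are examples only):
 *
 *	modprobe dm_mod dm_numa_node=0
 *	echo 64 > /sys/module/dm_mod/parameters/swap_bios
 */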