xref: /openbmc/linux/drivers/md/dm.c (revision 444fe04f7a5a7991daa1a8fc3680670ac87fc2ce)
/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"
#include "dm-uevent.h"
#include "dm-ima.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/uio.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/pr.h>
#include <linux/refcount.h>
#include <linux/part_stat.h>
#include <linux/blk-crypto.h>
#include <linux/blk-crypto-profile.h>

#define DM_MSG_PREFIX "core"

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

/*
 * For a REQ_POLLED fs bio, this flag is set once the mapped underlying
 * dm_io instances have been linked into one list, with bio->bi_private
 * reused as the list head. The original ->bi_private is restored before
 * the fs bio is ended.
 */
#define REQ_DM_POLL_LIST	REQ_DRV

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_IDR(_minor_idr);

static DEFINE_SPINLOCK(_minor_lock);

static void do_deferred_remove(struct work_struct *w);

static DECLARE_WORK(deferred_remove_work, do_deferred_remove);

static struct workqueue_struct *deferred_remove_workqueue;

atomic_t dm_global_event_nr = ATOMIC_INIT(0);
DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);

void dm_issue_global_event(void)
{
	atomic_inc(&dm_global_event_nr);
	wake_up(&dm_global_eventq);
}
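
/*
 * A hedged sketch (not code from this file) of how a consumer such as the
 * dm-ioctl event-wait path is expected to pair with dm_issue_global_event():
 * snapshot the counter, then sleep on the global waitqueue until the counter
 * moves past the snapshot ("seen" is a hypothetical local):
 *
 *	u32 seen = atomic_read(&dm_global_event_nr);
 *	...
 *	if (wait_event_interruptible(dm_global_eventq,
 *				     atomic_read(&dm_global_event_nr) != seen))
 *		return -ERESTARTSYS;
 *
 * The atomic_inc() before wake_up() above is what makes this re-check
 * pattern race-free.
 */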

DEFINE_STATIC_KEY_FALSE(stats_enabled);
DEFINE_STATIC_KEY_FALSE(swap_bios_enabled);
DEFINE_STATIC_KEY_FALSE(zoned_enabled);

/*
 * One of these is allocated (on-stack) per original bio.
 */
struct clone_info {
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	unsigned sector_count;
	bool is_abnormal_io:1;
	bool submit_as_polled:1;
};

static inline struct dm_target_io *clone_to_tio(struct bio *clone)
{
	return container_of(clone, struct dm_target_io, clone);
}

void *dm_per_bio_data(struct bio *bio, size_t data_size)
{
	if (!dm_tio_flagged(clone_to_tio(bio), DM_TIO_INSIDE_DM_IO))
		return (char *)bio - DM_TARGET_IO_BIO_OFFSET - data_size;
	return (char *)bio - DM_IO_BIO_OFFSET - data_size;
}
EXPORT_SYMBOL_GPL(dm_per_bio_data);

struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
{
	struct dm_io *io = (struct dm_io *)((char *)data + data_size);

	if (io->magic == DM_IO_MAGIC)
		return (struct bio *)((char *)io + DM_IO_BIO_OFFSET);
	BUG_ON(io->magic != DM_TIO_MAGIC);
	return (struct bio *)((char *)io + DM_TARGET_IO_BIO_OFFSET);
}
EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data);

unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
{
	return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
}
EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);
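
/*
 * A hedged usage sketch for the exports above (illustrative, not from this
 * file): a bio-based target that set ti->per_io_data_size in its ctr can hop
 * between a clone bio and its per-bio data in both directions, e.g. in
 * ->map() and ->end_io(). "struct my_per_bio_data" is a hypothetical type:
 *
 *	struct my_per_bio_data {
 *		sector_t saved_sector;
 *	};
 *
 *	struct my_per_bio_data *pb =
 *		dm_per_bio_data(bio, sizeof(struct my_per_bio_data));
 *	pb->saved_sector = bio->bi_iter.bi_sector;
 *	...
 *	struct bio *clone =
 *		dm_bio_from_per_bio_data(pb, sizeof(struct my_per_bio_data));
 *
 * data_size must match the per_io_data_size the target registered, since
 * the data sits at a fixed offset from the clone bio.
 */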

#define MINOR_ALLOCED ((void *)-1)

#define DM_NUMA_NODE NUMA_NO_NODE
static int dm_numa_node = DM_NUMA_NODE;

#define DEFAULT_SWAP_BIOS	(8 * 1048576 / PAGE_SIZE)
static int swap_bios = DEFAULT_SWAP_BIOS;
static int get_swap_bios(void)
{
	int latch = READ_ONCE(swap_bios);

	if (unlikely(latch <= 0))
		latch = DEFAULT_SWAP_BIOS;
	return latch;
}

struct table_device {
	struct list_head list;
	refcount_t count;
	struct dm_dev dm_dev;
};

/*
 * Number of reserved IOs in bio-based DM's mempools, as set by the user.
 */
#define RESERVED_BIO_BASED_IOS		16
static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;

static int __dm_get_module_param_int(int *module_param, int min, int max)
{
	int param = READ_ONCE(*module_param);
	int modified_param = 0;
	bool modified = true;

	if (param < min)
		modified_param = min;
	else if (param > max)
		modified_param = max;
	else
		modified = false;

	if (modified) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned __dm_get_module_param(unsigned *module_param,
			       unsigned def, unsigned max)
{
	unsigned param = READ_ONCE(*module_param);
	unsigned modified_param = 0;

	if (!param)
		modified_param = def;
	else if (param > max)
		modified_param = max;

	if (modified_param) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned dm_get_reserved_bio_based_ios(void)
{
	return __dm_get_module_param(&reserved_bio_based_ios,
				     RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);

static unsigned dm_get_numa_node(void)
{
	return __dm_get_module_param_int(&dm_numa_node,
					 DM_NUMA_NODE, num_online_nodes() - 1);
}
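
/*
 * The two helpers above implement lazy, lock-free clamping of module
 * parameters: the raw variable is user-writable via sysfs at any time, so
 * readers validate on every access and opportunistically write the clamped
 * value back with cmpxchg() (losing that race is harmless; the next reader
 * re-validates). A hedged sketch of wiring a new parameter through this
 * pattern ("my_param" and its bounds are hypothetical):
 *
 *	static unsigned my_param = 32;
 *	module_param(my_param, uint, 0644);
 *
 *	static unsigned get_my_param(void)
 *	{
 *		return __dm_get_module_param(&my_param, 32, 1024);
 *	}
 */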

static int __init local_init(void)
{
	int r;

	r = dm_uevent_init();
	if (r)
		return r;

	deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
	if (!deferred_remove_workqueue) {
		r = -ENOMEM;
		goto out_uevent_exit;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_free_workqueue;

	if (!_major)
		_major = r;

	return 0;

out_free_workqueue:
	destroy_workqueue(deferred_remove_workqueue);
out_uevent_exit:
	dm_uevent_exit();

	return r;
}

static void local_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(deferred_remove_workqueue);

	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_io_init,
	dm_kcopyd_init,
	dm_interface_init,
	dm_statistics_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_io_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
	dm_statistics_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);
	int r, i;

#if (IS_ENABLED(CONFIG_IMA) && !IS_ENABLED(CONFIG_IMA_DISABLE_HTABLE))
	DMWARN("CONFIG_IMA_DISABLE_HTABLE is disabled."
	       " Duplicate IMA measurements will not be recorded in the IMA log.");
#endif

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;
bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();

	/*
	 * Should be empty by this point.
	 */
	idr_destroy(&_minor_idr);
}

/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{
	return test_bit(DMF_DELETING, &md->flags);
}

static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);
out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}

static void dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = disk->private_data;
	if (WARN_ON(!md))
		goto out;

	if (atomic_dec_and_test(&md->open_count) &&
	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
		queue_work(deferred_remove_workqueue, &deferred_remove_work);

	dm_put(md);
out:
	spin_unlock(&_minor_lock);
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md)) {
		r = -EBUSY;
		if (mark_deferred)
			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
		r = -EEXIST;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

int dm_cancel_deferred_remove(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (test_bit(DMF_DELETING, &md->flags))
		r = -EBUSY;
	else
		clear_bit(DMF_DEFERRED_REMOVE, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static void do_deferred_remove(struct work_struct *w)
{
	dm_deferred_remove();
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
			    struct block_device **bdev)
{
	struct dm_target *tgt;
	struct dm_table *map;
	int r;

retry:
	r = -ENOTTY;
	map = dm_get_live_table(md, srcu_idx);
	if (!map || !dm_table_get_size(map))
		return r;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		return r;

	tgt = dm_table_get_target(map, 0);
	if (!tgt->type->prepare_ioctl)
		return r;

	if (dm_suspended_md(md))
		return -EAGAIN;

	r = tgt->type->prepare_ioctl(tgt, bdev);
	if (r == -ENOTCONN && !fatal_signal_pending(current)) {
		dm_put_live_table(md, *srcu_idx);
		msleep(10);
		goto retry;
	}

	return r;
}

static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
{
	dm_put_live_table(md, srcu_idx);
}

static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	if (r > 0) {
		/*
		 * Target determined this ioctl is being issued against a
		 * subset of the parent bdev; require extra privileges.
		 */
		if (!capable(CAP_SYS_RAWIO)) {
			DMDEBUG_LIMIT(
	"%s: sending ioctl %x to DM device without required privilege.",
				current->comm, cmd);
			r = -ENOIOCTLCMD;
			goto out;
		}
	}

	if (!bdev->bd_disk->fops->ioctl)
		r = -ENOTTY;
	else
		r = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}

u64 dm_start_time_ns_from_clone(struct bio *bio)
{
	return jiffies_to_nsecs(clone_to_tio(bio)->io->start_time);
}
EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone);
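
/*
 * A hedged usage sketch (illustrative, not from this file): a target's
 * ->end_io() hook can derive per-bio latency from the clone it was handed,
 * since the start time lives in the backing dm_io. "my_end_io" and the
 * latency bookkeeping are hypothetical; note the start time is
 * jiffies-based, so it is compared against a jiffies-derived "now":
 *
 *	static int my_end_io(struct dm_target *ti, struct bio *bio,
 *			     blk_status_t *error)
 *	{
 *		u64 lat_ns = jiffies_to_nsecs(jiffies) -
 *			     dm_start_time_ns_from_clone(bio);
 *		// ... record lat_ns in target-private stats ...
 *		return DM_ENDIO_DONE;
 *	}
 */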

static bool bio_is_flush_with_data(struct bio *bio)
{
	return ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size);
}

static void dm_io_acct(struct dm_io *io, bool end)
{
	struct dm_stats_aux *stats_aux = &io->stats_aux;
	unsigned long start_time = io->start_time;
	struct mapped_device *md = io->md;
	struct bio *bio = io->orig_bio;
	unsigned int sectors;

	/*
	 * If REQ_PREFLUSH is set, don't account the payload; it will be
	 * submitted (and accounted) after this flush completes.
	 */
	if (bio_is_flush_with_data(bio))
		sectors = 0;
	else if (likely(!(dm_io_flagged(io, DM_IO_WAS_SPLIT))))
		sectors = bio_sectors(bio);
	else
		sectors = io->sectors;

	if (!end)
		bdev_start_io_acct(bio->bi_bdev, sectors, bio_op(bio),
				   start_time);
	else
		bdev_end_io_acct(bio->bi_bdev, bio_op(bio), start_time);

	if (static_branch_unlikely(&stats_enabled) &&
	    unlikely(dm_stats_used(&md->stats))) {
		sector_t sector;

		if (likely(!dm_io_flagged(io, DM_IO_WAS_SPLIT)))
			sector = bio->bi_iter.bi_sector;
		else
			sector = bio_end_sector(bio) - io->sector_offset;

		dm_stats_account_io(&md->stats, bio_data_dir(bio),
				    sector, sectors,
				    end, start_time, stats_aux);
	}
}

static void __dm_start_io_acct(struct dm_io *io)
{
	dm_io_acct(io, false);
}

static void dm_start_io_acct(struct dm_io *io, struct bio *clone)
{
	/*
	 * Ensure IO accounting is only ever started once.
	 */
	if (dm_io_flagged(io, DM_IO_ACCOUNTED))
		return;

	/* Expect no possibility for race unless DM_TIO_IS_DUPLICATE_BIO. */
	if (!clone || likely(dm_tio_is_normal(clone_to_tio(clone)))) {
		dm_io_set_flag(io, DM_IO_ACCOUNTED);
	} else {
		unsigned long flags;

		/* Can afford locking given DM_TIO_IS_DUPLICATE_BIO */
		spin_lock_irqsave(&io->lock, flags);
		if (dm_io_flagged(io, DM_IO_ACCOUNTED)) {
			spin_unlock_irqrestore(&io->lock, flags);
			return;
		}
		dm_io_set_flag(io, DM_IO_ACCOUNTED);
		spin_unlock_irqrestore(&io->lock, flags);
	}

	__dm_start_io_acct(io);
}

static void dm_end_io_acct(struct dm_io *io)
{
	dm_io_acct(io, true);
}

static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
{
	struct dm_io *io;
	struct dm_target_io *tio;
	struct bio *clone;

	clone = bio_alloc_clone(NULL, bio, GFP_NOIO, &md->mempools->io_bs);
	/* Set default bdev, but target must bio_set_dev() before issuing IO */
	clone->bi_bdev = md->disk->part0;

	tio = clone_to_tio(clone);
	tio->flags = 0;
	dm_tio_set_flag(tio, DM_TIO_INSIDE_DM_IO);
	tio->io = NULL;

	io = container_of(tio, struct dm_io, tio);
	io->magic = DM_IO_MAGIC;
	io->status = BLK_STS_OK;

	/* one ref is for submission, the other is for completion */
	atomic_set(&io->io_count, 2);
	this_cpu_inc(*md->pending_io);
	io->orig_bio = bio;
	io->split_bio = NULL;
	io->md = md;
	spin_lock_init(&io->lock);
	io->start_time = jiffies;
	io->flags = 0;

	if (static_branch_unlikely(&stats_enabled))
		dm_stats_record_start(&md->stats, &io->stats_aux);

	return io;
}

static void free_io(struct dm_io *io)
{
	bio_put(&io->tio.clone);
}

static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti,
			     unsigned target_bio_nr, unsigned *len, gfp_t gfp_mask)
{
	struct dm_target_io *tio;
	struct bio *clone;

	if (!ci->io->tio.io) {
		/* the dm_target_io embedded in ci->io is available */
		tio = &ci->io->tio;
		/* alloc_io() already initialized embedded clone */
		clone = &tio->clone;
	} else {
		struct mapped_device *md = ci->io->md;

		clone = bio_alloc_clone(NULL, ci->bio, gfp_mask,
					&md->mempools->bs);
		if (!clone)
			return NULL;
		/* Set default bdev, but target must bio_set_dev() before issuing IO */
		clone->bi_bdev = md->disk->part0;

		/* REQ_DM_POLL_LIST shouldn't be inherited */
		clone->bi_opf &= ~REQ_DM_POLL_LIST;

		tio = clone_to_tio(clone);
		tio->flags = 0; /* also clears DM_TIO_INSIDE_DM_IO */
	}

	tio->magic = DM_TIO_MAGIC;
	tio->io = ci->io;
	tio->ti = ti;
	tio->target_bio_nr = target_bio_nr;
	tio->len_ptr = len;
	tio->old_sector = 0;

	if (len) {
		clone->bi_iter.bi_size = to_bytes(*len);
		if (bio_integrity(clone))
			bio_integrity_trim(clone);
	}

	return clone;
}

static void free_tio(struct bio *clone)
{
	if (dm_tio_flagged(clone_to_tio(clone), DM_TIO_INSIDE_DM_IO))
		return;
	bio_put(clone);
}

/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&md->deferred_lock, flags);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irqrestore(&md->deferred_lock, flags);
	queue_work(md->wq, &md->work);
}

/*
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_put_live_table() when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md,
				   int *srcu_idx) __acquires(md->io_barrier)
{
	*srcu_idx = srcu_read_lock(&md->io_barrier);

	return srcu_dereference(md->map, &md->io_barrier);
}

void dm_put_live_table(struct mapped_device *md,
		       int srcu_idx) __releases(md->io_barrier)
{
	srcu_read_unlock(&md->io_barrier, srcu_idx);
}

void dm_sync_table(struct mapped_device *md)
{
	synchronize_srcu(&md->io_barrier);
	synchronize_rcu_expedited();
}

/*
 * A fast alternative to dm_get_live_table/dm_put_live_table.
 * The caller must not block between these two functions.
 */
static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
{
	rcu_read_lock();
	return rcu_dereference(md->map);
}

static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
{
	rcu_read_unlock();
}

static inline struct dm_table *dm_get_live_table_bio(struct mapped_device *md,
						     int *srcu_idx, unsigned bio_opf)
{
	if (bio_opf & REQ_NOWAIT)
		return dm_get_live_table_fast(md);
	else
		return dm_get_live_table(md, srcu_idx);
}

static inline void dm_put_live_table_bio(struct mapped_device *md, int srcu_idx,
					 unsigned bio_opf)
{
	if (bio_opf & REQ_NOWAIT)
		dm_put_live_table_fast(md);
	else
		dm_put_live_table(md, srcu_idx);
}
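
/*
 * A hedged sketch of the canonical access pattern (illustrative, not from
 * this file): every reader brackets its use of the live table with get/put
 * so that table swaps can wait out in-flight readers via SRCU:
 *
 *	int srcu_idx;
 *	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 *
 *	if (map) {
 *		// ... look up targets, remap IO, etc.; may block ...
 *	}
 *	dm_put_live_table(md, srcu_idx);
 *
 * The _fast variants trade SRCU for plain RCU, so they are only safe when
 * the caller cannot block, e.g. the REQ_NOWAIT path above.
 */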

static char *_dm_claim_ptr = "I belong to device-mapper";

/*
 * Open a table device so we can use it as a map destination.
 */
static int open_table_device(struct table_device *td, dev_t dev,
			     struct mapped_device *md)
{
	struct block_device *bdev;
	u64 part_off;
	int r;

	BUG_ON(td->dm_dev.bdev);

	bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _dm_claim_ptr);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	r = bd_link_disk_holder(bdev, dm_disk(md));
	if (r) {
		blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
		return r;
	}

	td->dm_dev.bdev = bdev;
	td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev, &part_off);
	return 0;
}

/*
 * Close a table device that we've been using.
 */
static void close_table_device(struct table_device *td, struct mapped_device *md)
{
	if (!td->dm_dev.bdev)
		return;

	bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
	blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
	put_dax(td->dm_dev.dax_dev);
	td->dm_dev.bdev = NULL;
	td->dm_dev.dax_dev = NULL;
}

static struct table_device *find_table_device(struct list_head *l, dev_t dev,
					      fmode_t mode)
{
	struct table_device *td;

	list_for_each_entry(td, l, list)
		if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
			return td;

	return NULL;
}

int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
			struct dm_dev **result)
{
	int r;
	struct table_device *td;

	mutex_lock(&md->table_devices_lock);
	td = find_table_device(&md->table_devices, dev, mode);
	if (!td) {
		td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
		if (!td) {
			mutex_unlock(&md->table_devices_lock);
			return -ENOMEM;
		}

		td->dm_dev.mode = mode;
		td->dm_dev.bdev = NULL;

		r = open_table_device(td, dev, md);
		if (r) {
			mutex_unlock(&md->table_devices_lock);
			kfree(td);
			return r;
		}

		format_dev_t(td->dm_dev.name, dev);

		refcount_set(&td->count, 1);
		list_add(&td->list, &md->table_devices);
	} else {
		refcount_inc(&td->count);
	}
	mutex_unlock(&md->table_devices_lock);

	*result = &td->dm_dev;
	return 0;
}

void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
{
	struct table_device *td = container_of(d, struct table_device, dm_dev);

	mutex_lock(&md->table_devices_lock);
	if (refcount_dec_and_test(&td->count)) {
		close_table_device(td, md);
		list_del(&td->list);
		kfree(td);
	}
	mutex_unlock(&md->table_devices_lock);
}
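
/*
 * A hedged usage sketch (illustrative, not from this file): table code
 * resolving a device reference is expected to pair these calls, with
 * repeated gets on the same (dev, mode) sharing one refcounted open
 * (the error handling below is hypothetical):
 *
 *	struct dm_dev *dd;
 *
 *	if (dm_get_table_device(md, dev, FMODE_READ | FMODE_WRITE, &dd))
 *		return -ENODEV;
 *	// ... use dd->bdev / dd->dax_dev ...
 *	dm_put_table_device(md, dd);
 *
 * The final put closes the underlying block device.
 */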

static void free_table_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct table_device *td = list_entry(tmp, struct table_device, list);

		DMWARN("dm_destroy: %s still exists with %d references",
		       td->dm_dev.name, refcount_read(&td->count));
		kfree(td);
	}
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Return true if the dm_io's original bio is requeued.
 * io->status is updated with an error if requeue is disallowed.
 */
static bool dm_handle_requeue(struct dm_io *io)
{
	struct bio *bio = io->split_bio ? io->split_bio : io->orig_bio;
	bool handle_requeue = (io->status == BLK_STS_DM_REQUEUE);
	bool handle_polled_eagain = ((io->status == BLK_STS_AGAIN) &&
				     (bio->bi_opf & REQ_POLLED));
	struct mapped_device *md = io->md;
	bool requeued = false;

	if (handle_requeue || handle_polled_eagain) {
		unsigned long flags;

		if (bio->bi_opf & REQ_POLLED) {
			/*
			 * Upper layer won't help us poll a split bio
			 * (io->orig_bio may only reflect a subset of the
			 * pre-split original) so clear REQ_POLLED.
			 */
			bio_clear_polled(bio);
		}

		/*
		 * Target requested pushing back the I/O or
		 * polled IO hit BLK_STS_AGAIN.
		 */
		spin_lock_irqsave(&md->deferred_lock, flags);
		if ((__noflush_suspending(md) &&
		     !WARN_ON_ONCE(dm_is_zone_write(md, bio))) ||
		    handle_polled_eagain) {
			bio_list_add_head(&md->deferred, bio);
			requeued = true;
		} else {
			/*
			 * noflush suspend was interrupted or this is
			 * a write to a zoned target.
			 */
			io->status = BLK_STS_IOERR;
		}
		spin_unlock_irqrestore(&md->deferred_lock, flags);
	}

	if (requeued)
		queue_work(md->wq, &md->work);

	return requeued;
}

static void dm_io_complete(struct dm_io *io)
{
	struct bio *bio = io->split_bio ? io->split_bio : io->orig_bio;
	struct mapped_device *md = io->md;
	blk_status_t io_error;
	bool requeued;

	requeued = dm_handle_requeue(io);

	io_error = io->status;
	if (dm_io_flagged(io, DM_IO_ACCOUNTED))
		dm_end_io_acct(io);
	else if (!io_error) {
		/*
		 * Must handle a target that returned DM_MAPIO_SUBMITTED
		 * only to then call bio_endio() itself rather than use
		 * dm_submit_bio_remap().
		 */
		__dm_start_io_acct(io);
		dm_end_io_acct(io);
	}
	free_io(io);
	smp_wmb();
	this_cpu_dec(*md->pending_io);

	/* nudge anyone waiting on suspend queue */
	if (unlikely(wq_has_sleeper(&md->wait)))
		wake_up(&md->wait);

	/* Return early if the original bio was requeued */
	if (requeued)
		return;

	if (bio_is_flush_with_data(bio)) {
		/*
		 * Preflush done for flush with data, reissue
		 * without REQ_PREFLUSH.
		 */
		bio->bi_opf &= ~REQ_PREFLUSH;
		queue_io(md, bio);
	} else {
		/* done with normal IO or empty flush */
		if (io_error)
			bio->bi_status = io_error;
		bio_endio(bio);
	}
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static inline void __dm_io_dec_pending(struct dm_io *io)
{
	if (atomic_dec_and_test(&io->io_count))
		dm_io_complete(io);
}

static void dm_io_set_error(struct dm_io *io, blk_status_t error)
{
	unsigned long flags;

	/* Push-back supersedes any I/O errors */
	spin_lock_irqsave(&io->lock, flags);
	if (!(io->status == BLK_STS_DM_REQUEUE &&
	      __noflush_suspending(io->md))) {
		io->status = error;
	}
	spin_unlock_irqrestore(&io->lock, flags);
}

static void dm_io_dec_pending(struct dm_io *io, blk_status_t error)
{
	if (unlikely(error))
		dm_io_set_error(io, error);

	__dm_io_dec_pending(io);
}

void disable_discard(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support DISCARD, disable it */
	limits->max_discard_sectors = 0;
}

void disable_write_zeroes(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE ZEROES, disable it */
	limits->max_write_zeroes_sectors = 0;
}

static bool swap_bios_limit(struct dm_target *ti, struct bio *bio)
{
	return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios);
}

static void clone_endio(struct bio *bio)
{
	blk_status_t error = bio->bi_status;
	struct dm_target_io *tio = clone_to_tio(bio);
	struct dm_target *ti = tio->ti;
	dm_endio_fn endio = ti->type->end_io;
	struct dm_io *io = tio->io;
	struct mapped_device *md = io->md;

	if (unlikely(error == BLK_STS_TARGET)) {
		if (bio_op(bio) == REQ_OP_DISCARD &&
		    !bdev_max_discard_sectors(bio->bi_bdev))
			disable_discard(md);
		else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
			 !bdev_write_zeroes_sectors(bio->bi_bdev))
			disable_write_zeroes(md);
	}

	if (static_branch_unlikely(&zoned_enabled) &&
	    unlikely(blk_queue_is_zoned(bdev_get_queue(bio->bi_bdev))))
		dm_zone_endio(io, bio);

	if (endio) {
		int r = endio(ti, bio, &error);

		switch (r) {
		case DM_ENDIO_REQUEUE:
			if (static_branch_unlikely(&zoned_enabled)) {
				/*
				 * Requeuing writes to a sequential zone of a zoned
				 * target will break the sequential write pattern:
				 * fail such IO.
				 */
				if (WARN_ON_ONCE(dm_is_zone_write(md, bio)))
					error = BLK_STS_IOERR;
				else
					error = BLK_STS_DM_REQUEUE;
			} else
				error = BLK_STS_DM_REQUEUE;
			fallthrough;
		case DM_ENDIO_DONE:
			break;
		case DM_ENDIO_INCOMPLETE:
			/* The target will handle the io */
			return;
		default:
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	if (static_branch_unlikely(&swap_bios_enabled) &&
	    unlikely(swap_bios_limit(ti, bio)))
		up(&md->swap_bios_semaphore);

	free_tio(bio);
	dm_io_dec_pending(io, error);
}

/*
 * Return maximum size of I/O possible at the supplied sector up to the current
 * target boundary.
 */
static inline sector_t max_io_len_target_boundary(struct dm_target *ti,
						  sector_t target_offset)
{
	return ti->len - target_offset;
}

static sector_t max_io_len(struct dm_target *ti, sector_t sector)
{
	sector_t target_offset = dm_target_offset(ti, sector);
	sector_t len = max_io_len_target_boundary(ti, target_offset);

	/*
	 * Does the target need to split IO even further?
	 * - varied (per target) IO splitting is a tenet of DM; this
	 *   explains why stacked chunk_sectors based splitting via
	 *   blk_queue_split() isn't possible here.
	 */
	if (!ti->max_io_len)
		return len;
	return min_t(sector_t, len,
		min(queue_max_sectors(ti->table->md->queue),
		    blk_chunk_sectors_left(target_offset, ti->max_io_len)));
}

int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
{
	if (len > UINT_MAX) {
		DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
		      (unsigned long long)len, UINT_MAX);
		ti->error = "Maximum size of target IO is too large";
		return -EINVAL;
	}

	ti->max_io_len = (uint32_t) len;

	return 0;
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
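
/*
 * A hedged usage sketch (illustrative, not from this file): a chunked
 * target's ->ctr() typically caps per-target IO to its chunk size so that
 * max_io_len() splits accordingly. "my_ctr" and "chunk_size" are
 * hypothetical:
 *
 *	static int my_ctr(struct dm_target *ti, unsigned argc, char **argv)
 *	{
 *		...
 *		int r = dm_set_target_max_io_len(ti, chunk_size);
 *		if (r)
 *			return r;	// ti->error is already set on failure
 *		...
 *	}
 */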

static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
						sector_t sector, int *srcu_idx)
	__acquires(md->io_barrier)
{
	struct dm_table *map;
	struct dm_target *ti;

	map = dm_get_live_table(md, srcu_idx);
	if (!map)
		return NULL;

	ti = dm_table_find_target(map, sector);
	if (!ti)
		return NULL;

	return ti;
}

static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
		long nr_pages, enum dax_access_mode mode, void **kaddr,
		pfn_t *pfn)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long len, ret = -EIO;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->direct_access)
		goto out;
	len = max_io_len(ti, sector) / PAGE_SECTORS;
	if (len < 1)
		goto out;
	nr_pages = min(len, nr_pages);
	ret = ti->type->direct_access(ti, pgoff, nr_pages, mode, kaddr, pfn);

 out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
				  size_t nr_pages)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	int ret = -EIO;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (WARN_ON(!ti->type->dax_zero_page_range)) {
1194cdf6cdcdSVivek Goyal 		/*
1195cdf6cdcdSVivek Goyal 		 * ->zero_page_range() is a mandatory dax operation. If we are
1196cdf6cdcdSVivek Goyal 		 * here, something is wrong.
1197cdf6cdcdSVivek Goyal 		 */
1198cdf6cdcdSVivek Goyal 		goto out;
1199cdf6cdcdSVivek Goyal 	}
1200cdf6cdcdSVivek Goyal 	ret = ti->type->dax_zero_page_range(ti, pgoff, nr_pages);
1201cdf6cdcdSVivek Goyal  out:
1202cdf6cdcdSVivek Goyal 	dm_put_live_table(md, srcu_idx);
1203cdf6cdcdSVivek Goyal 
1204cdf6cdcdSVivek Goyal 	return ret;
1205cdf6cdcdSVivek Goyal }
1206cdf6cdcdSVivek Goyal 
1207047218ecSJane Chu static size_t dm_dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
1208047218ecSJane Chu 		void *addr, size_t bytes, struct iov_iter *i)
1209047218ecSJane Chu {
1210047218ecSJane Chu 	struct mapped_device *md = dax_get_private(dax_dev);
1211047218ecSJane Chu 	sector_t sector = pgoff * PAGE_SECTORS;
1212047218ecSJane Chu 	struct dm_target *ti;
1213047218ecSJane Chu 	int srcu_idx;
1214047218ecSJane Chu 	long ret = 0;
1215047218ecSJane Chu 
1216047218ecSJane Chu 	ti = dm_dax_get_live_target(md, sector, &srcu_idx);
1217047218ecSJane Chu 	if (!ti || !ti->type->dax_recovery_write)
1218047218ecSJane Chu 		goto out;
1219047218ecSJane Chu 
1220047218ecSJane Chu 	ret = ti->type->dax_recovery_write(ti, pgoff, addr, bytes, i);
1221047218ecSJane Chu out:
1222047218ecSJane Chu 	dm_put_live_table(md, srcu_idx);
1223047218ecSJane Chu 	return ret;
1224047218ecSJane Chu }
1225047218ecSJane Chu 
12261dd40c3eSMikulas Patocka /*
12271dd40c3eSMikulas Patocka  * A target may call dm_accept_partial_bio only from the map routine.  It is
12286842d264SDamien Le Moal  * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_* zone management
1229e6fc9f62SMike Snitzer  * operations, REQ_OP_ZONE_APPEND (zone append writes) and any bio serviced by
1230e6fc9f62SMike Snitzer  * __send_duplicate_bios().
12311dd40c3eSMikulas Patocka  *
12321dd40c3eSMikulas Patocka  * dm_accept_partial_bio informs DM that the target wants to process only an
12331dd40c3eSMikulas Patocka  * additional n_sectors sectors of the bio, and that the rest of the data
12341dd40c3eSMikulas Patocka  * should be sent in the next bio.
12351dd40c3eSMikulas Patocka  *
12361dd40c3eSMikulas Patocka  * A diagram that explains the arithmetic:
12371dd40c3eSMikulas Patocka  * +--------------------+---------------+-------+
12381dd40c3eSMikulas Patocka  * |         1          |       2       |   3   |
12391dd40c3eSMikulas Patocka  * +--------------------+---------------+-------+
12401dd40c3eSMikulas Patocka  *
12411dd40c3eSMikulas Patocka  * <-------------- *tio->len_ptr --------------->
1242bdb34759SMike Snitzer  *                      <----- bio_sectors ----->
12431dd40c3eSMikulas Patocka  *                      <-- n_sectors -->
12441dd40c3eSMikulas Patocka  *
12451dd40c3eSMikulas Patocka  * Region 1 has already been iterated over with bio_advance or a similar function.
12461dd40c3eSMikulas Patocka  *	(it may be empty if the target doesn't use bio_advance)
12471dd40c3eSMikulas Patocka  * Region 2 is the remaining bio size that the target wants to process.
12481dd40c3eSMikulas Patocka  *	(it may be empty if region 1 is non-empty, although there is no reason
12491dd40c3eSMikulas Patocka  *	 to make it empty)
12501dd40c3eSMikulas Patocka  * The target requires that region 3 be sent in the next bio.
12511dd40c3eSMikulas Patocka  *
12521dd40c3eSMikulas Patocka  * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
12531dd40c3eSMikulas Patocka  * the partially processed part (the sum of regions 1+2) must be the same for all
12541dd40c3eSMikulas Patocka  * copies of the bio.
12551dd40c3eSMikulas Patocka  */
12561dd40c3eSMikulas Patocka void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
12571dd40c3eSMikulas Patocka {
12586c23f0bdSChristoph Hellwig 	struct dm_target_io *tio = clone_to_tio(bio);
1259bdb34759SMike Snitzer 	unsigned bio_sectors = bio_sectors(bio);
12606842d264SDamien Le Moal 
1261655f3aadSMike Snitzer 	BUG_ON(dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO));
12626842d264SDamien Le Moal 	BUG_ON(op_is_zone_mgmt(bio_op(bio)));
12636842d264SDamien Le Moal 	BUG_ON(bio_op(bio) == REQ_OP_ZONE_APPEND);
1264bdb34759SMike Snitzer 	BUG_ON(bio_sectors > *tio->len_ptr);
1265bdb34759SMike Snitzer 	BUG_ON(n_sectors > bio_sectors);
12666842d264SDamien Le Moal 
1267bdb34759SMike Snitzer 	*tio->len_ptr -= bio_sectors - n_sectors;
12681dd40c3eSMikulas Patocka 	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
12697dd76d1fSMing Lei 
12707dd76d1fSMing Lei 	/*
12717dd76d1fSMing Lei 	 * __split_and_process_bio() may have already saved mapped part
12727dd76d1fSMing Lei 	 * for accounting but it is being reduced so update accordingly.
12737dd76d1fSMing Lei 	 */
12747dd76d1fSMing Lei 	dm_io_set_flag(tio->io, DM_IO_WAS_SPLIT);
12757dd76d1fSMing Lei 	tio->io->sectors = n_sectors;
12761dd40c3eSMikulas Patocka }
12771dd40c3eSMikulas Patocka EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
12781dd40c3eSMikulas Patocka 
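/*
 * Editorial sketch (hypothetical target): a .map method that can only
 * handle IO up to some boundary accepts the first part and lets DM core
 * resubmit the remainder as a new bio:
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		unsigned limit = example_boundary(ti, bio);	// hypothetical helper
 *
 *		if (bio_sectors(bio) > limit)
 *			dm_accept_partial_bio(bio, limit);
 *		// region 3 (everything past 'limit') arrives later in a new bio
 *		bio_set_dev(bio, example_bdev(ti));		// hypothetical helper
 *		return DM_MAPIO_REMAPPED;
 *	}
 */
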
12790fbb4d93SMike Snitzer /*
12800fbb4d93SMike Snitzer  * @clone: clone bio that DM core passed to target's .map function
12810fbb4d93SMike Snitzer  * @tgt_clone: clone of @clone bio that target needs submitted
12820fbb4d93SMike Snitzer  *
12830fbb4d93SMike Snitzer  * Targets should use this interface to submit bios they take
12840fbb4d93SMike Snitzer  * ownership of when returning DM_MAPIO_SUBMITTED.
12850fbb4d93SMike Snitzer  *
12860fbb4d93SMike Snitzer  * The target should also set ti->accounts_remapped_io.
12870fbb4d93SMike Snitzer  */
1288b7f8dff0SMike Snitzer void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone)
12890fbb4d93SMike Snitzer {
12900fbb4d93SMike Snitzer 	struct dm_target_io *tio = clone_to_tio(clone);
12910fbb4d93SMike Snitzer 	struct dm_io *io = tio->io;
12920fbb4d93SMike Snitzer 
12930fbb4d93SMike Snitzer 	/* establish bio that will get submitted */
12940fbb4d93SMike Snitzer 	if (!tgt_clone)
12950fbb4d93SMike Snitzer 		tgt_clone = clone;
12960fbb4d93SMike Snitzer 
12970fbb4d93SMike Snitzer 	/*
12980fbb4d93SMike Snitzer 	 * Account io->origin_bio to DM dev on behalf of target
12990fbb4d93SMike Snitzer 	 * that took ownership of IO with DM_MAPIO_SUBMITTED.
13000fbb4d93SMike Snitzer 	 */
13010fbb4d93SMike Snitzer 	dm_start_io_acct(io, clone);
13020fbb4d93SMike Snitzer 
13039d20653fSMike Snitzer 	trace_block_bio_remap(tgt_clone, disk_devt(io->md->disk),
13040fbb4d93SMike Snitzer 			      tio->old_sector);
13059d20653fSMike Snitzer 	submit_bio_noacct(tgt_clone);
13060fbb4d93SMike Snitzer }
13070fbb4d93SMike Snitzer EXPORT_SYMBOL_GPL(dm_submit_bio_remap);
13080fbb4d93SMike Snitzer 
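/*
 * Editorial sketch (hypothetical target): deferring a clone with
 * DM_MAPIO_SUBMITTED and submitting it later via dm_submit_bio_remap(),
 * which also starts the IO accounting that was skipped at map time:
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		bio_set_dev(bio, example_bdev(ti));	// hypothetical helper
 *		example_defer(ti, bio);			// hypothetical queueing
 *		return DM_MAPIO_SUBMITTED;
 *	}
 *
 *	// later, from the target's worker:
 *	dm_submit_bio_remap(bio, NULL);
 *
 * The constructor must also set ti->accounts_remapped_io = true, per the
 * comment above, so DM core does not start accounting at map time.
 */
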
1309a666e5c0SMikulas Patocka static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch)
1310a666e5c0SMikulas Patocka {
1311a666e5c0SMikulas Patocka 	mutex_lock(&md->swap_bios_lock);
1312a666e5c0SMikulas Patocka 	while (latch < md->swap_bios) {
1313a666e5c0SMikulas Patocka 		cond_resched();
1314a666e5c0SMikulas Patocka 		down(&md->swap_bios_semaphore);
1315a666e5c0SMikulas Patocka 		md->swap_bios--;
1316a666e5c0SMikulas Patocka 	}
1317a666e5c0SMikulas Patocka 	while (latch > md->swap_bios) {
1318a666e5c0SMikulas Patocka 		cond_resched();
1319a666e5c0SMikulas Patocka 		up(&md->swap_bios_semaphore);
1320a666e5c0SMikulas Patocka 		md->swap_bios++;
1321a666e5c0SMikulas Patocka 	}
1322a666e5c0SMikulas Patocka 	mutex_unlock(&md->swap_bios_lock);
1323a666e5c0SMikulas Patocka }
1324a666e5c0SMikulas Patocka 
13251561b396SChristoph Hellwig static void __map_bio(struct bio *clone)
13261da177e4SLinus Torvalds {
13271561b396SChristoph Hellwig 	struct dm_target_io *tio = clone_to_tio(clone);
1328bd2a49b8SAlasdair G Kergon 	struct dm_target *ti = tio->ti;
13296cbce280SMike Snitzer 	struct dm_io *io = tio->io;
13306cbce280SMike Snitzer 	struct mapped_device *md = io->md;
13316cbce280SMike Snitzer 	int r;
13321da177e4SLinus Torvalds 
13331da177e4SLinus Torvalds 	clone->bi_end_io = clone_endio;
13341da177e4SLinus Torvalds 
13351da177e4SLinus Torvalds 	/*
13360fbb4d93SMike Snitzer 	 * Map the clone.
13371da177e4SLinus Torvalds 	 */
1338743598f0SMike Snitzer 	tio->old_sector = clone->bi_iter.bi_sector;
1339d67a5f4bSMikulas Patocka 
1340442761fdSMike Snitzer 	if (static_branch_unlikely(&swap_bios_enabled) &&
1341442761fdSMike Snitzer 	    unlikely(swap_bios_limit(ti, clone))) {
1342a666e5c0SMikulas Patocka 		int latch = get_swap_bios();
1343a666e5c0SMikulas Patocka 		if (unlikely(latch != md->swap_bios))
1344a666e5c0SMikulas Patocka 			__set_swap_bios_limit(md, latch);
1345a666e5c0SMikulas Patocka 		down(&md->swap_bios_semaphore);
1346a666e5c0SMikulas Patocka 	}
1347a666e5c0SMikulas Patocka 
1348442761fdSMike Snitzer 	if (static_branch_unlikely(&zoned_enabled)) {
1349bb37d772SDamien Le Moal 		/*
1350442761fdSMike Snitzer 		 * Check if the IO needs a special mapping due to zone append
1351442761fdSMike Snitzer 		 * emulation on zoned target. In this case, dm_zone_map_bio()
1352442761fdSMike Snitzer 		 * calls the target map operation.
1353bb37d772SDamien Le Moal 		 */
13546cbce280SMike Snitzer 		if (unlikely(dm_emulate_zone_append(md)))
1355bb37d772SDamien Le Moal 			r = dm_zone_map_bio(tio);
1356bb37d772SDamien Le Moal 		else
13577de3ee57SMikulas Patocka 			r = ti->type->map(ti, clone);
1358442761fdSMike Snitzer 	} else {
1359442761fdSMike Snitzer 		r = ti->type->map(ti, clone);
1359442761fdSMike Snitzer 	}
1360bb37d772SDamien Le Moal 
1361846785e6SChristoph Hellwig 	switch (r) {
1362846785e6SChristoph Hellwig 	case DM_MAPIO_SUBMITTED:
13630fbb4d93SMike Snitzer 		/* target has assumed ownership of this io */
13640fbb4d93SMike Snitzer 		if (!ti->accounts_remapped_io)
13659d20653fSMike Snitzer 			dm_start_io_acct(io, clone);
1366846785e6SChristoph Hellwig 		break;
1367846785e6SChristoph Hellwig 	case DM_MAPIO_REMAPPED:
13689d20653fSMike Snitzer 		dm_submit_bio_remap(clone, NULL);
1369846785e6SChristoph Hellwig 		break;
1370846785e6SChristoph Hellwig 	case DM_MAPIO_KILL:
1371846785e6SChristoph Hellwig 	case DM_MAPIO_REQUEUE:
1372442761fdSMike Snitzer 		if (static_branch_unlikely(&swap_bios_enabled) &&
1373442761fdSMike Snitzer 		    unlikely(swap_bios_limit(ti, clone)))
13746cbce280SMike Snitzer 			up(&md->swap_bios_semaphore);
13751d1068ceSChristoph Hellwig 		free_tio(clone);
137690a2326eSMike Snitzer 		if (r == DM_MAPIO_KILL)
137790a2326eSMike Snitzer 			dm_io_dec_pending(io, BLK_STS_IOERR);
137890a2326eSMike Snitzer 		else
1379e2118b3cSDamien Le Moal 			dm_io_dec_pending(io, BLK_STS_DM_REQUEUE);
1380846785e6SChristoph Hellwig 		break;
1381846785e6SChristoph Hellwig 	default:
138245cbcd79SKiyoshi Ueda 		DMWARN("unimplemented target map return value: %d", r);
138345cbcd79SKiyoshi Ueda 		BUG();
13841da177e4SLinus Torvalds 	}
13851da177e4SLinus Torvalds }
13861da177e4SLinus Torvalds 
13877dd76d1fSMing Lei static void setup_split_accounting(struct clone_info *ci, unsigned len)
13887dd76d1fSMing Lei {
13897dd76d1fSMing Lei 	struct dm_io *io = ci->io;
13907dd76d1fSMing Lei 
13917dd76d1fSMing Lei 	if (ci->sector_count > len) {
13927dd76d1fSMing Lei 		/*
13937dd76d1fSMing Lei 		 * Split needed, save the mapped part for accounting.
13947dd76d1fSMing Lei 		 * NOTE: dm_accept_partial_bio() will update accordingly.
13957dd76d1fSMing Lei 		 */
13967dd76d1fSMing Lei 		dm_io_set_flag(io, DM_IO_WAS_SPLIT);
13977dd76d1fSMing Lei 		io->sectors = len;
13987dd76d1fSMing Lei 	}
13997dd76d1fSMing Lei 
14007dd76d1fSMing Lei 	if (static_branch_unlikely(&stats_enabled) &&
14017dd76d1fSMing Lei 	    unlikely(dm_stats_used(&io->md->stats))) {
14027dd76d1fSMing Lei 		/*
14037dd76d1fSMing Lei 		 * Save bi_sector in terms of its offset from end of
14047dd76d1fSMing Lei 		 * original bio, only needed for DM-stats' benefit.
14057dd76d1fSMing Lei 		 * - saved regardless of whether split needed so that
14067dd76d1fSMing Lei 		 * - saved regardless of whether a split is needed so that
14077dd76d1fSMing Lei 		 */
14087dd76d1fSMing Lei 		io->sector_offset = bio_end_sector(ci->bio) - ci->sector;
14097dd76d1fSMing Lei 	}
14107dd76d1fSMing Lei }
14117dd76d1fSMing Lei 
1412318716ddSMike Snitzer static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
14137dd06a25SMike Snitzer 				struct dm_target *ti, unsigned num_bios)
1414f9ab94ceSMikulas Patocka {
14151d1068ceSChristoph Hellwig 	struct bio *bio;
1416318716ddSMike Snitzer 	int try;
1417dba14160SMikulas Patocka 
1418318716ddSMike Snitzer 	for (try = 0; try < 2; try++) {
1419318716ddSMike Snitzer 		int bio_nr;
1420318716ddSMike Snitzer 
1421318716ddSMike Snitzer 		if (try)
1422bc02cdbeSMike Snitzer 			mutex_lock(&ci->io->md->table_devices_lock);
1423318716ddSMike Snitzer 		for (bio_nr = 0; bio_nr < num_bios; bio_nr++) {
14247dd06a25SMike Snitzer 			bio = alloc_tio(ci, ti, bio_nr, NULL,
1425dc8e2021SChristoph Hellwig 					try ? GFP_NOIO : GFP_NOWAIT);
14261d1068ceSChristoph Hellwig 			if (!bio)
1427318716ddSMike Snitzer 				break;
1428318716ddSMike Snitzer 
14291d1068ceSChristoph Hellwig 			bio_list_add(blist, bio);
1430318716ddSMike Snitzer 		}
1431318716ddSMike Snitzer 		if (try)
1432bc02cdbeSMike Snitzer 			mutex_unlock(&ci->io->md->table_devices_lock);
1433318716ddSMike Snitzer 		if (bio_nr == num_bios)
1434318716ddSMike Snitzer 			return;
1435318716ddSMike Snitzer 
14366c23f0bdSChristoph Hellwig 		while ((bio = bio_list_pop(blist)))
14371d1068ceSChristoph Hellwig 			free_tio(bio);
1438318716ddSMike Snitzer 	}
1439318716ddSMike Snitzer }
1440f9ab94ceSMikulas Patocka 
14410f14d60aSMing Lei static int __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
14421dd40c3eSMikulas Patocka 				  unsigned num_bios, unsigned *len)
144306a426ceSMike Snitzer {
1444318716ddSMike Snitzer 	struct bio_list blist = BIO_EMPTY_LIST;
14458eabf5d0SChristoph Hellwig 	struct bio *clone;
14460f14d60aSMing Lei 	int ret = 0;
144706a426ceSMike Snitzer 
1448891fced6SChristoph Hellwig 	switch (num_bios) {
1449891fced6SChristoph Hellwig 	case 0:
1450891fced6SChristoph Hellwig 		break;
1451891fced6SChristoph Hellwig 	case 1:
14527dd76d1fSMing Lei 		if (len)
14537dd76d1fSMing Lei 			setup_split_accounting(ci, *len);
1454891fced6SChristoph Hellwig 		clone = alloc_tio(ci, ti, 0, len, GFP_NOIO);
1455891fced6SChristoph Hellwig 		__map_bio(clone);
14560f14d60aSMing Lei 		ret = 1;
1457891fced6SChristoph Hellwig 		break;
1458891fced6SChristoph Hellwig 	default:
14597dd06a25SMike Snitzer 		/* dm_accept_partial_bio() is not supported with shared tio->len_ptr */
14607dd06a25SMike Snitzer 		alloc_multiple_bios(&blist, ci, ti, num_bios);
14618eabf5d0SChristoph Hellwig 		while ((clone = bio_list_pop(&blist))) {
1462655f3aadSMike Snitzer 			dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO);
14631561b396SChristoph Hellwig 			__map_bio(clone);
14640f14d60aSMing Lei 			ret += 1;
1465f9ab94ceSMikulas Patocka 		}
1466891fced6SChristoph Hellwig 		break;
1467318716ddSMike Snitzer 	}
14680f14d60aSMing Lei 
14690f14d60aSMing Lei 	return ret;
147006a426ceSMike Snitzer }
147106a426ceSMike Snitzer 
1472332f2b1eSMike Snitzer static void __send_empty_flush(struct clone_info *ci)
1473f9ab94ceSMikulas Patocka {
147406a426ceSMike Snitzer 	unsigned target_nr = 0;
1475f9ab94ceSMikulas Patocka 	struct dm_target *ti;
1476828678b8SMike Snitzer 	struct bio flush_bio;
1477828678b8SMike Snitzer 
1478828678b8SMike Snitzer 	/*
1479828678b8SMike Snitzer 	 * Use an on-stack bio for this; it's safe since we don't
1480828678b8SMike Snitzer 	 * need to reference it after submit. It's just used as
1481828678b8SMike Snitzer 	 * the basis for the clone(s).
1482828678b8SMike Snitzer 	 */
148349add496SChristoph Hellwig 	bio_init(&flush_bio, ci->io->md->disk->part0, NULL, 0,
148449add496SChristoph Hellwig 		 REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC);
148547d95102SChristoph Hellwig 
1486828678b8SMike Snitzer 	ci->bio = &flush_bio;
1487828678b8SMike Snitzer 	ci->sector_count = 0;
148892b914e2SShin'ichiro Kawasaki 	ci->io->tio.clone.bi_iter.bi_size = 0;
1489f9ab94ceSMikulas Patocka 
14900f14d60aSMing Lei 	while ((ti = dm_table_get_target(ci->map, target_nr++))) {
14910f14d60aSMing Lei 		int bios;
14920f14d60aSMing Lei 
14930f14d60aSMing Lei 		atomic_add(ti->num_flush_bios, &ci->io->io_count);
14940f14d60aSMing Lei 		bios = __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
14950f14d60aSMing Lei 		atomic_sub(ti->num_flush_bios - bios, &ci->io->io_count);
14960f14d60aSMing Lei 	}
14970f14d60aSMing Lei 
14980f14d60aSMing Lei 	/*
14990f14d60aSMing Lei 	 * alloc_io() takes one extra reference for submission, so the
15000f14d60aSMing Lei 	 * reference won't reach 0 without the following subtraction
15010f14d60aSMing Lei 	 */
15020f14d60aSMing Lei 	atomic_sub(1, &ci->io->io_count);
1503828678b8SMike Snitzer 
1504828678b8SMike Snitzer 	bio_uninit(ci->bio);
1505f9ab94ceSMikulas Patocka }
1506f9ab94ceSMikulas Patocka 
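/*
 * Editorial note on the flush reference arithmetic above (a hedged
 * reading, no new behaviour): with two targets advertising
 * num_flush_bios = 2 and 1, and every clone issued:
 *
 *	io_count: +2 (target A) +1 (target B) -0 (all clones were sent)
 *	          -1 (extra submission reference from alloc_io)
 *
 * leaving exactly one reference per clone in flight, so the flush
 * completes when the last clone drops its reference.
 */
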
1507e6fc9f62SMike Snitzer static void __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
150861697a6aSMike Snitzer 					unsigned num_bios)
15095ae89a87SMike Snitzer {
151051b86f9aSMichael Lass 	unsigned len;
15110f14d60aSMing Lei 	int bios;
15125ae89a87SMike Snitzer 
15133720281dSMike Snitzer 	len = min_t(sector_t, ci->sector_count,
15143720281dSMike Snitzer 		    max_io_len_target_boundary(ti, dm_target_offset(ti, ci->sector)));
151551b86f9aSMichael Lass 
15160f14d60aSMing Lei 	atomic_add(num_bios, &ci->io->io_count);
15170f14d60aSMing Lei 	bios = __send_duplicate_bios(ci, ti, num_bios, &len);
15180f14d60aSMing Lei 	/*
15190f14d60aSMing Lei 	 * alloc_io() takes one extra reference for submission, so the
15200f14d60aSMing Lei 	 * reference won't reach 0 without the following (+1) subtraction
15210f14d60aSMing Lei 	 */
15220f14d60aSMing Lei 	atomic_sub(num_bios - bios + 1, &ci->io->io_count);
15237dd06a25SMike Snitzer 
1524a79245b3SMike Snitzer 	ci->sector += len;
15253d7f4562SMike Snitzer 	ci->sector_count -= len;
15265ae89a87SMike Snitzer }
15275ae89a87SMike Snitzer 
1528568c73a3SMike Snitzer static bool is_abnormal_io(struct bio *bio)
1529568c73a3SMike Snitzer {
15304edadf6dSMike Snitzer 	unsigned int op = bio_op(bio);
1531568c73a3SMike Snitzer 
15324edadf6dSMike Snitzer 	if (op != REQ_OP_READ && op != REQ_OP_WRITE && op != REQ_OP_FLUSH) {
15334edadf6dSMike Snitzer 		switch (op) {
1534568c73a3SMike Snitzer 		case REQ_OP_DISCARD:
1535568c73a3SMike Snitzer 		case REQ_OP_SECURE_ERASE:
1536568c73a3SMike Snitzer 		case REQ_OP_WRITE_ZEROES:
15374edadf6dSMike Snitzer 			return true;
15384edadf6dSMike Snitzer 		default:
1539568c73a3SMike Snitzer 			break;
1540568c73a3SMike Snitzer 		}
1541568c73a3SMike Snitzer 	}
1542568c73a3SMike Snitzer 
15434edadf6dSMike Snitzer 	return false;
15444edadf6dSMike Snitzer }
15454edadf6dSMike Snitzer 
15464edadf6dSMike Snitzer static blk_status_t __process_abnormal_io(struct clone_info *ci,
15474edadf6dSMike Snitzer 					  struct dm_target *ti)
15480519c71eSMike Snitzer {
15499679b5a7SMike Snitzer 	unsigned num_bios = 0;
15500519c71eSMike Snitzer 
1551e6fc9f62SMike Snitzer 	switch (bio_op(ci->bio)) {
15529679b5a7SMike Snitzer 	case REQ_OP_DISCARD:
15539679b5a7SMike Snitzer 		num_bios = ti->num_discard_bios;
15549679b5a7SMike Snitzer 		break;
15559679b5a7SMike Snitzer 	case REQ_OP_SECURE_ERASE:
15569679b5a7SMike Snitzer 		num_bios = ti->num_secure_erase_bios;
15579679b5a7SMike Snitzer 		break;
15589679b5a7SMike Snitzer 	case REQ_OP_WRITE_ZEROES:
15599679b5a7SMike Snitzer 		num_bios = ti->num_write_zeroes_bios;
15609679b5a7SMike Snitzer 		break;
15619679b5a7SMike Snitzer 	}
15620519c71eSMike Snitzer 
1563e6fc9f62SMike Snitzer 	/*
1564e6fc9f62SMike Snitzer 	 * Even though the device advertised support for this type of
1565e6fc9f62SMike Snitzer 	 * request, that does not mean every target supports it, and
1566e6fc9f62SMike Snitzer 	 * reconfiguration might also have changed that since the
1567e6fc9f62SMike Snitzer 	 * check was performed.
1568e6fc9f62SMike Snitzer 	 */
156984b98f4cSMike Snitzer 	if (unlikely(!num_bios))
15704edadf6dSMike Snitzer 		return BLK_STS_NOTSUPP;
15714edadf6dSMike Snitzer 
1572e6fc9f62SMike Snitzer 	__send_changing_extent_only(ci, ti, num_bios);
15734edadf6dSMike Snitzer 	return BLK_STS_OK;
15740519c71eSMike Snitzer }
15750519c71eSMike Snitzer 
1576e4c93811SAlasdair G Kergon /*
1577ec211631SMing Lei  * Reuse ->bi_private as dm_io list head for storing all dm_io instances
1578b99fdcdcSMing Lei  * associated with this bio, and this bio's bi_private needs to be
1579b99fdcdcSMing Lei  * stored in dm_io->data before the reuse.
1580b99fdcdcSMing Lei  *
1581b99fdcdcSMing Lei  * bio->bi_private is owned by the fs or upper layer, so the block layer
1582b99fdcdcSMing Lei  * won't touch it after splitting. Meanwhile, it won't be changed by anyone
1583b99fdcdcSMing Lei  * after the bio is submitted. So this reuse is safe.
1584b99fdcdcSMing Lei  */
1585ec211631SMing Lei static inline struct dm_io **dm_poll_list_head(struct bio *bio)
1586b99fdcdcSMing Lei {
1587ec211631SMing Lei 	return (struct dm_io **)&bio->bi_private;
1588b99fdcdcSMing Lei }
1589b99fdcdcSMing Lei 
1590b99fdcdcSMing Lei static void dm_queue_poll_io(struct bio *bio, struct dm_io *io)
1591b99fdcdcSMing Lei {
1592ec211631SMing Lei 	struct dm_io **head = dm_poll_list_head(bio);
1593b99fdcdcSMing Lei 
1594b99fdcdcSMing Lei 	if (!(bio->bi_opf & REQ_DM_POLL_LIST)) {
1595b99fdcdcSMing Lei 		bio->bi_opf |= REQ_DM_POLL_LIST;
1596b99fdcdcSMing Lei 		/*
1597b99fdcdcSMing Lei 		 * Save .bi_private into dm_io, so that we can reuse
1598ec211631SMing Lei 		 * .bi_private as the dm_io list head for storing the dm_io list.
1599b99fdcdcSMing Lei 		 */
1600b99fdcdcSMing Lei 		io->data = bio->bi_private;
1601b99fdcdcSMing Lei 
1602b99fdcdcSMing Lei 		/* tell block layer to poll for completion */
1603b99fdcdcSMing Lei 		bio->bi_cookie = ~BLK_QC_T_NONE;
1604ec211631SMing Lei 
1605ec211631SMing Lei 		io->next = NULL;
1606b99fdcdcSMing Lei 	} else {
1607b99fdcdcSMing Lei 		/*
1608b99fdcdcSMing Lei 		 * bio recursed due to split, reuse original poll list,
1609b99fdcdcSMing Lei 		 * and save bio->bi_private too.
1610b99fdcdcSMing Lei 		 */
1611ec211631SMing Lei 		io->data = (*head)->data;
1612ec211631SMing Lei 		io->next = *head;
1613b99fdcdcSMing Lei 	}
1614b99fdcdcSMing Lei 
1615ec211631SMing Lei 	*head = io;
1616b99fdcdcSMing Lei }
1617b99fdcdcSMing Lei 
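/*
 * Editorial note: the result is a singly linked stack threaded through
 * bio->bi_private.  After one split, for example:
 *
 *	bio->bi_private -> io2 -> io1 -> NULL
 *	io2->data == io1->data == the original ->bi_private
 *
 * dm_poll_bio() below walks this list and restores the saved
 * ->bi_private before the bio can complete.
 */
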
1618b99fdcdcSMing Lei /*
1619e4c93811SAlasdair G Kergon  * Select the correct strategy for processing a non-flush bio.
1620e4c93811SAlasdair G Kergon  */
162184b98f4cSMike Snitzer static blk_status_t __split_and_process_bio(struct clone_info *ci)
1622e4c93811SAlasdair G Kergon {
162366bdaa43SMike Snitzer 	struct bio *clone;
1624e4c93811SAlasdair G Kergon 	struct dm_target *ti;
16251c3b13e6SKent Overstreet 	unsigned len;
1626e4c93811SAlasdair G Kergon 
1627e4c93811SAlasdair G Kergon 	ti = dm_table_find_target(ci->map, ci->sector);
16284edadf6dSMike Snitzer 	if (unlikely(!ti))
16294edadf6dSMike Snitzer 		return BLK_STS_IOERR;
16301ee88de3SMikulas Patocka 
16311ee88de3SMikulas Patocka 	if (unlikely((ci->bio->bi_opf & REQ_NOWAIT) != 0) &&
16321ee88de3SMikulas Patocka 	    unlikely(!dm_target_supports_nowait(ti->type)))
16331ee88de3SMikulas Patocka 		return BLK_STS_NOTSUPP;
16341ee88de3SMikulas Patocka 
16351ee88de3SMikulas Patocka 	if (unlikely(ci->is_abnormal_io))
16364edadf6dSMike Snitzer 		return __process_abnormal_io(ci, ti);
16373d7f4562SMike Snitzer 
1638b99fdcdcSMing Lei 	/*
1639b99fdcdcSMing Lei 	 * Only support bio polling for normal IO, where the target io is
1640b99fdcdcSMing Lei 	 * exactly inside the dm_io instance (verified in dm_poll_dm_io).
1641b99fdcdcSMing Lei 	 */
1642b99fdcdcSMing Lei 	ci->submit_as_polled = ci->bio->bi_opf & REQ_POLLED;
1643e4c93811SAlasdair G Kergon 
1644e4c93811SAlasdair G Kergon 	len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count);
16457dd76d1fSMing Lei 	setup_split_accounting(ci, len);
164666bdaa43SMike Snitzer 	clone = alloc_tio(ci, ti, 0, &len, GFP_NOIO);
164766bdaa43SMike Snitzer 	__map_bio(clone);
1648e4c93811SAlasdair G Kergon 
1649e4c93811SAlasdair G Kergon 	ci->sector += len;
1650e4c93811SAlasdair G Kergon 	ci->sector_count -= len;
1651e4c93811SAlasdair G Kergon 
165284b98f4cSMike Snitzer 	return BLK_STS_OK;
1653e4c93811SAlasdair G Kergon }
1654e4c93811SAlasdair G Kergon 
1655978e51baSMike Snitzer static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
16564edadf6dSMike Snitzer 			    struct dm_table *map, struct bio *bio, bool is_abnormal)
1657978e51baSMike Snitzer {
1658978e51baSMike Snitzer 	ci->map = map;
1659978e51baSMike Snitzer 	ci->io = alloc_io(md, bio);
1660d41e077aSMike Snitzer 	ci->bio = bio;
16614edadf6dSMike Snitzer 	ci->is_abnormal_io = is_abnormal;
1662b99fdcdcSMing Lei 	ci->submit_as_polled = false;
1663978e51baSMike Snitzer 	ci->sector = bio->bi_iter.bi_sector;
1664d41e077aSMike Snitzer 	ci->sector_count = bio_sectors(bio);
1665d41e077aSMike Snitzer 
1666d41e077aSMike Snitzer 	/* Shouldn't happen but sector_count was being set to 0 so... */
1667442761fdSMike Snitzer 	if (static_branch_unlikely(&zoned_enabled) &&
1668442761fdSMike Snitzer 	    WARN_ON_ONCE(op_is_zone_mgmt(bio_op(bio)) && ci->sector_count))
1669d41e077aSMike Snitzer 		ci->sector_count = 0;
1670978e51baSMike Snitzer }
1671978e51baSMike Snitzer 
1672e4c93811SAlasdair G Kergon /*
167314fe594dSAlasdair G Kergon  * Entry point to split a bio into clones and submit them to the targets.
16741da177e4SLinus Torvalds  */
167596c9865cSMike Snitzer static void dm_split_and_process_bio(struct mapped_device *md,
167683d5e5b0SMikulas Patocka 				     struct dm_table *map, struct bio *bio)
16771da177e4SLinus Torvalds {
16781da177e4SLinus Torvalds 	struct clone_info ci;
16794857abf6SMike Snitzer 	struct dm_io *io;
168084b98f4cSMike Snitzer 	blk_status_t error = BLK_STS_OK;
16814edadf6dSMike Snitzer 	bool is_abnormal;
16821da177e4SLinus Torvalds 
16834edadf6dSMike Snitzer 	is_abnormal = is_abnormal_io(bio);
16844edadf6dSMike Snitzer 	if (unlikely(is_abnormal)) {
16854edadf6dSMike Snitzer 		/*
16864edadf6dSMike Snitzer 		 * Use blk_queue_split() for abnormal IO (e.g. discard);
16874edadf6dSMike Snitzer 		 * otherwise the associated queue_limits won't be imposed.
16884edadf6dSMike Snitzer 		 */
16894edadf6dSMike Snitzer 		blk_queue_split(&bio);
16904edadf6dSMike Snitzer 	}
16914edadf6dSMike Snitzer 
16924edadf6dSMike Snitzer 	init_clone_info(&ci, md, map, bio, is_abnormal);
16934857abf6SMike Snitzer 	io = ci.io;
1694bd2a49b8SAlasdair G Kergon 
16951eff9d32SJens Axboe 	if (bio->bi_opf & REQ_PREFLUSH) {
1696332f2b1eSMike Snitzer 		__send_empty_flush(&ci);
1697e2736347SMike Snitzer 		/* dm_io_complete submits any data associated with flush */
1698d41e077aSMike Snitzer 		goto out;
1699d41e077aSMike Snitzer 	}
1700d41e077aSMike Snitzer 
170196c9865cSMike Snitzer 	error = __split_and_process_bio(&ci);
1702d41e077aSMike Snitzer 	if (error || !ci.sector_count)
1703d41e077aSMike Snitzer 		goto out;
170418a25da8SNeilBrown 	/*
1705d41e077aSMike Snitzer 	 * Remainder must be passed to submit_bio_noacct() so it gets handled
1706d41e077aSMike Snitzer 	 * *after* bios already submitted have been completely processed.
170718a25da8SNeilBrown 	 */
170861b6e2e5SMing Lei 	WARN_ON_ONCE(!dm_io_flagged(io, DM_IO_WAS_SPLIT));
170961b6e2e5SMing Lei 	io->split_bio = bio_split(bio, io->sectors, GFP_NOIO,
171061b6e2e5SMing Lei 				  &md->queue->bio_split);
171161b6e2e5SMing Lei 	bio_chain(io->split_bio, bio);
171261b6e2e5SMing Lei 	trace_block_split(io->split_bio, bio->bi_iter.bi_sector);
17133e08773cSChristoph Hellwig 	submit_bio_noacct(bio);
1714d41e077aSMike Snitzer out:
1715b99fdcdcSMing Lei 	/*
1716b99fdcdcSMing Lei 	 * Drop the extra reference count for non-POLLED bio, and hold one
1717b99fdcdcSMing Lei 	 * reference for POLLED bio, which will be released in dm_poll_bio
1718b99fdcdcSMing Lei 	 *
1719ec211631SMing Lei 	 * Add every dm_io instance into the dm_io list head which is stored
1720ec211631SMing Lei 	 * in bio->bi_private, so that dm_poll_bio can poll them all.
1721b99fdcdcSMing Lei 	 */
17220f14d60aSMing Lei 	if (error || !ci.submit_as_polled) {
17230f14d60aSMing Lei 		/*
17240f14d60aSMing Lei 		 * In case of submission failure, the extra reference for
17250f14d60aSMing Lei 		 * submitting io isn't consumed yet
17260f14d60aSMing Lei 		 */
17270f14d60aSMing Lei 		if (error)
17280f14d60aSMing Lei 			atomic_dec(&io->io_count);
17290f14d60aSMing Lei 		dm_io_dec_pending(io, error);
17300f14d60aSMing Lei 	} else {
17314857abf6SMike Snitzer 		dm_queue_poll_io(bio, io);
17314857abf6SMike Snitzer 	}
17321da177e4SLinus Torvalds }
17331da177e4SLinus Torvalds 
17343e08773cSChristoph Hellwig static void dm_submit_bio(struct bio *bio)
17351da177e4SLinus Torvalds {
1736309dca30SChristoph Hellwig 	struct mapped_device *md = bio->bi_bdev->bd_disk->private_data;
173783d5e5b0SMikulas Patocka 	int srcu_idx;
173883d5e5b0SMikulas Patocka 	struct dm_table *map;
17395d7362d0SMikulas Patocka 	unsigned bio_opf = bio->bi_opf;
17401da177e4SLinus Torvalds 
17415d7362d0SMikulas Patocka 	map = dm_get_live_table_bio(md, &srcu_idx, bio_opf);
17428cf7961dSChristoph Hellwig 
1743fa247089SMike Snitzer 	/* If suspended, or map not yet available, queue this IO for later */
1744fa247089SMike Snitzer 	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) ||
1745fa247089SMike Snitzer 	    unlikely(!map)) {
17466abc4946SKonstantin Khlebnikov 		if (bio->bi_opf & REQ_NOWAIT)
17476abc4946SKonstantin Khlebnikov 			bio_wouldblock_error(bio);
1748b2abdb1bSMike Snitzer 		else if (bio->bi_opf & REQ_RAHEAD)
17496a8736d1STejun Heo 			bio_io_error(bio);
1750b2abdb1bSMike Snitzer 		else
1751b2abdb1bSMike Snitzer 			queue_io(md, bio);
1752b2abdb1bSMike Snitzer 		goto out;
17531da177e4SLinus Torvalds 	}
17541da177e4SLinus Torvalds 
175596c9865cSMike Snitzer 	dm_split_and_process_bio(md, map, bio);
1756b2abdb1bSMike Snitzer out:
17575d7362d0SMikulas Patocka 	dm_put_live_table_bio(md, srcu_idx, bio_opf);
1758978e51baSMike Snitzer }
1759978e51baSMike Snitzer 
1760b99fdcdcSMing Lei static bool dm_poll_dm_io(struct dm_io *io, struct io_comp_batch *iob,
1761b99fdcdcSMing Lei 			  unsigned int flags)
1762b99fdcdcSMing Lei {
1763655f3aadSMike Snitzer 	WARN_ON_ONCE(!dm_tio_is_normal(&io->tio));
1764b99fdcdcSMing Lei 
1765b99fdcdcSMing Lei 	/* don't poll if the mapped io is done */
1766b99fdcdcSMing Lei 	if (atomic_read(&io->io_count) > 1)
1767b99fdcdcSMing Lei 		bio_poll(&io->tio.clone, iob, flags);
1768b99fdcdcSMing Lei 
1769b99fdcdcSMing Lei 	/* bio_poll holds the last reference */
1770b99fdcdcSMing Lei 	return atomic_read(&io->io_count) == 1;
1771b99fdcdcSMing Lei }
1772b99fdcdcSMing Lei 
1773b99fdcdcSMing Lei static int dm_poll_bio(struct bio *bio, struct io_comp_batch *iob,
1774b99fdcdcSMing Lei 		       unsigned int flags)
1775b99fdcdcSMing Lei {
1776ec211631SMing Lei 	struct dm_io **head = dm_poll_list_head(bio);
1777ec211631SMing Lei 	struct dm_io *list = *head;
1778ec211631SMing Lei 	struct dm_io *tmp = NULL;
1779ec211631SMing Lei 	struct dm_io *curr, *next;
1780b99fdcdcSMing Lei 
1781b99fdcdcSMing Lei 	/* Only poll normal bio which was marked as REQ_DM_POLL_LIST */
1782b99fdcdcSMing Lei 	if (!(bio->bi_opf & REQ_DM_POLL_LIST))
1783b99fdcdcSMing Lei 		return 0;
1784b99fdcdcSMing Lei 
1785ec211631SMing Lei 	WARN_ON_ONCE(!list);
1786b99fdcdcSMing Lei 
1787b99fdcdcSMing Lei 	/*
1788b99fdcdcSMing Lei 	 * Restore .bi_private before possibly completing dm_io.
1789b99fdcdcSMing Lei 	 *
1790b99fdcdcSMing Lei 	 * bio_poll() is only possible once @bio has been completely
1791b99fdcdcSMing Lei 	 * submitted via submit_bio_noacct()'s depth-first submission.
1792b99fdcdcSMing Lei 	 * So there is no dm_queue_poll_io() race associated with
1793b99fdcdcSMing Lei 	 * clearing REQ_DM_POLL_LIST here.
1794b99fdcdcSMing Lei 	 */
1795b99fdcdcSMing Lei 	bio->bi_opf &= ~REQ_DM_POLL_LIST;
1796ec211631SMing Lei 	bio->bi_private = list->data;
1797b99fdcdcSMing Lei 
1798ec211631SMing Lei 	for (curr = list, next = curr->next; curr; curr = next, next =
1799ec211631SMing Lei 			curr ? curr->next : NULL) {
1800ec211631SMing Lei 		if (dm_poll_dm_io(curr, iob, flags)) {
1801b99fdcdcSMing Lei 			/*
180284b98f4cSMike Snitzer 			 * clone_endio() has already occurred, so no
180384b98f4cSMike Snitzer 			 * error handling is needed here.
1804b99fdcdcSMing Lei 			 */
1805ec211631SMing Lei 			__dm_io_dec_pending(curr);
1806ec211631SMing Lei 		} else {
1807ec211631SMing Lei 			curr->next = tmp;
1808ec211631SMing Lei 			tmp = curr;
1809b99fdcdcSMing Lei 		}
1810b99fdcdcSMing Lei 	}
1811b99fdcdcSMing Lei 
1812b99fdcdcSMing Lei 	/* Not done? */
1813ec211631SMing Lei 	if (tmp) {
1814b99fdcdcSMing Lei 		bio->bi_opf |= REQ_DM_POLL_LIST;
1815b99fdcdcSMing Lei 		/* Reset bio->bi_private to dm_io list head */
1816ec211631SMing Lei 		*head = tmp;
1817b99fdcdcSMing Lei 		return 0;
1818b99fdcdcSMing Lei 	}
1819b99fdcdcSMing Lei 	return 1;
1820b99fdcdcSMing Lei }
1821b99fdcdcSMing Lei 
18221da177e4SLinus Torvalds /*-----------------------------------------------------------------
18231da177e4SLinus Torvalds  * An IDR is used to keep track of allocated minor numbers.
18241da177e4SLinus Torvalds  *---------------------------------------------------------------*/
18252b06cfffSAlasdair G Kergon static void free_minor(int minor)
18261da177e4SLinus Torvalds {
1827f32c10b0SJeff Mahoney 	spin_lock(&_minor_lock);
18281da177e4SLinus Torvalds 	idr_remove(&_minor_idr, minor);
1829f32c10b0SJeff Mahoney 	spin_unlock(&_minor_lock);
18301da177e4SLinus Torvalds }
18311da177e4SLinus Torvalds 
18321da177e4SLinus Torvalds /*
18331da177e4SLinus Torvalds  * See if the device with a specific minor # is free.
18341da177e4SLinus Torvalds  */
1835cf13ab8eSFrederik Deweerdt static int specific_minor(int minor)
18361da177e4SLinus Torvalds {
1837c9d76be6STejun Heo 	int r;
18381da177e4SLinus Torvalds 
18391da177e4SLinus Torvalds 	if (minor >= (1 << MINORBITS))
18401da177e4SLinus Torvalds 		return -EINVAL;
18411da177e4SLinus Torvalds 
1842c9d76be6STejun Heo 	idr_preload(GFP_KERNEL);
1843f32c10b0SJeff Mahoney 	spin_lock(&_minor_lock);
18441da177e4SLinus Torvalds 
1845c9d76be6STejun Heo 	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);
18461da177e4SLinus Torvalds 
1847f32c10b0SJeff Mahoney 	spin_unlock(&_minor_lock);
1848c9d76be6STejun Heo 	idr_preload_end();
1849c9d76be6STejun Heo 	if (r < 0)
1850c9d76be6STejun Heo 		return r == -ENOSPC ? -EBUSY : r;
1851c9d76be6STejun Heo 	return 0;
18521da177e4SLinus Torvalds }
18531da177e4SLinus Torvalds 
1854cf13ab8eSFrederik Deweerdt static int next_free_minor(int *minor)
18551da177e4SLinus Torvalds {
1856c9d76be6STejun Heo 	int r;
18571da177e4SLinus Torvalds 
1858c9d76be6STejun Heo 	idr_preload(GFP_KERNEL);
1859f32c10b0SJeff Mahoney 	spin_lock(&_minor_lock);
18601da177e4SLinus Torvalds 
1861c9d76be6STejun Heo 	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);
18621da177e4SLinus Torvalds 
1863f32c10b0SJeff Mahoney 	spin_unlock(&_minor_lock);
1864c9d76be6STejun Heo 	idr_preload_end();
1865c9d76be6STejun Heo 	if (r < 0)
18661da177e4SLinus Torvalds 		return r;
1867c9d76be6STejun Heo 	*minor = r;
1868c9d76be6STejun Heo 	return 0;
18691da177e4SLinus Torvalds }
18701da177e4SLinus Torvalds 
187183d5cde4SAlexey Dobriyan static const struct block_device_operations dm_blk_dops;
1872681cc5e8SMike Snitzer static const struct block_device_operations dm_rq_blk_dops;
1873f26c5719SDan Williams static const struct dax_operations dm_dax_ops;
18741da177e4SLinus Torvalds 
187553d5914fSMikulas Patocka static void dm_wq_work(struct work_struct *work);
187653d5914fSMikulas Patocka 
1877aa6ce87aSSatya Tangirala #ifdef CONFIG_BLK_INLINE_ENCRYPTION
1878cb77cb5aSEric Biggers static void dm_queue_destroy_crypto_profile(struct request_queue *q)
1879aa6ce87aSSatya Tangirala {
1880cb77cb5aSEric Biggers 	dm_destroy_crypto_profile(q->crypto_profile);
1881aa6ce87aSSatya Tangirala }
1882aa6ce87aSSatya Tangirala 
1883aa6ce87aSSatya Tangirala #else /* CONFIG_BLK_INLINE_ENCRYPTION */
1884aa6ce87aSSatya Tangirala 
1885cb77cb5aSEric Biggers static inline void dm_queue_destroy_crypto_profile(struct request_queue *q)
1886aa6ce87aSSatya Tangirala {
1887aa6ce87aSSatya Tangirala }
1888aa6ce87aSSatya Tangirala #endif /* !CONFIG_BLK_INLINE_ENCRYPTION */
1889aa6ce87aSSatya Tangirala 
18900f20972fSMike Snitzer static void cleanup_mapped_device(struct mapped_device *md)
18910f20972fSMike Snitzer {
18920f20972fSMike Snitzer 	if (md->wq)
18930f20972fSMike Snitzer 		destroy_workqueue(md->wq);
189429dec90aSChristoph Hellwig 	dm_free_md_mempools(md->mempools);
18950f20972fSMike Snitzer 
1896f26c5719SDan Williams 	if (md->dax_dev) {
1897fb08a190SChristoph Hellwig 		dax_remove_host(md->disk);
1898f26c5719SDan Williams 		kill_dax(md->dax_dev);
1899f26c5719SDan Williams 		put_dax(md->dax_dev);
1900f26c5719SDan Williams 		md->dax_dev = NULL;
1901f26c5719SDan Williams 	}
1902f26c5719SDan Williams 
1903588b7f5dSKirill Tkhai 	dm_cleanup_zoned_dev(md);
19040f20972fSMike Snitzer 	if (md->disk) {
19050f20972fSMike Snitzer 		spin_lock(&_minor_lock);
19060f20972fSMike Snitzer 		md->disk->private_data = NULL;
19070f20972fSMike Snitzer 		spin_unlock(&_minor_lock);
190889f871afSChristoph Hellwig 		if (dm_get_md_type(md) != DM_TYPE_NONE) {
190989f871afSChristoph Hellwig 			dm_sysfs_exit(md);
19100f20972fSMike Snitzer 			del_gendisk(md->disk);
191189f871afSChristoph Hellwig 		}
1912cb77cb5aSEric Biggers 		dm_queue_destroy_crypto_profile(md->queue);
19138b9ab626SChristoph Hellwig 		put_disk(md->disk);
191474a2b6ecSChristoph Hellwig 	}
19150f20972fSMike Snitzer 
19169f6dc633SMike Snitzer 	if (md->pending_io) {
19179f6dc633SMike Snitzer 		free_percpu(md->pending_io);
19189f6dc633SMike Snitzer 		md->pending_io = NULL;
19199f6dc633SMike Snitzer 	}
19209f6dc633SMike Snitzer 
1921d09960b0STahsin Erdogan 	cleanup_srcu_struct(&md->io_barrier);
1922d09960b0STahsin Erdogan 
1923d5ffebddSMike Snitzer 	mutex_destroy(&md->suspend_lock);
1924d5ffebddSMike Snitzer 	mutex_destroy(&md->type_lock);
1925d5ffebddSMike Snitzer 	mutex_destroy(&md->table_devices_lock);
1926a666e5c0SMikulas Patocka 	mutex_destroy(&md->swap_bios_lock);
1927d5ffebddSMike Snitzer 
19284cc96131SMike Snitzer 	dm_mq_cleanup_mapped_device(md);
19290f20972fSMike Snitzer }
19300f20972fSMike Snitzer 
19311da177e4SLinus Torvalds /*
19321da177e4SLinus Torvalds  * Allocate and initialise a blank device with a given minor.
19331da177e4SLinus Torvalds  */
19342b06cfffSAlasdair G Kergon static struct mapped_device *alloc_dev(int minor)
19351da177e4SLinus Torvalds {
1936115485e8SMike Snitzer 	int r, numa_node_id = dm_get_numa_node();
1937115485e8SMike Snitzer 	struct mapped_device *md;
1938ba61fdd1SJeff Mahoney 	void *old_md;
19391da177e4SLinus Torvalds 
1940856eb091SMikulas Patocka 	md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
19411da177e4SLinus Torvalds 	if (!md) {
19421da177e4SLinus Torvalds 		DMWARN("unable to allocate device, out of memory.");
19431da177e4SLinus Torvalds 		return NULL;
19441da177e4SLinus Torvalds 	}
19451da177e4SLinus Torvalds 
194610da4f79SJeff Mahoney 	if (!try_module_get(THIS_MODULE))
19476ed7ade8SMilan Broz 		goto bad_module_get;
194810da4f79SJeff Mahoney 
19491da177e4SLinus Torvalds 	/* get a minor number for the dev */
19502b06cfffSAlasdair G Kergon 	if (minor == DM_ANY_MINOR)
1951cf13ab8eSFrederik Deweerdt 		r = next_free_minor(&minor);
19522b06cfffSAlasdair G Kergon 	else
1953cf13ab8eSFrederik Deweerdt 		r = specific_minor(minor);
19541da177e4SLinus Torvalds 	if (r < 0)
19556ed7ade8SMilan Broz 		goto bad_minor;
19561da177e4SLinus Torvalds 
195783d5e5b0SMikulas Patocka 	r = init_srcu_struct(&md->io_barrier);
195883d5e5b0SMikulas Patocka 	if (r < 0)
195983d5e5b0SMikulas Patocka 		goto bad_io_barrier;
196083d5e5b0SMikulas Patocka 
1961115485e8SMike Snitzer 	md->numa_node_id = numa_node_id;
1962591ddcfcSMike Snitzer 	md->init_tio_pdu = false;
1963a5664dadSMike Snitzer 	md->type = DM_TYPE_NONE;
1964e61290a4SDaniel Walker 	mutex_init(&md->suspend_lock);
1965a5664dadSMike Snitzer 	mutex_init(&md->type_lock);
196686f1152bSBenjamin Marzinski 	mutex_init(&md->table_devices_lock);
1967022c2611SMikulas Patocka 	spin_lock_init(&md->deferred_lock);
19681da177e4SLinus Torvalds 	atomic_set(&md->holders, 1);
19695c6bd75dSAlasdair G Kergon 	atomic_set(&md->open_count, 0);
19701da177e4SLinus Torvalds 	atomic_set(&md->event_nr, 0);
19717a8c3d3bSMike Anderson 	atomic_set(&md->uevent_seq, 0);
19727a8c3d3bSMike Anderson 	INIT_LIST_HEAD(&md->uevent_list);
197386f1152bSBenjamin Marzinski 	INIT_LIST_HEAD(&md->table_devices);
19747a8c3d3bSMike Anderson 	spin_lock_init(&md->uevent_lock);
19751da177e4SLinus Torvalds 
197647ace7e0SMike Snitzer 	/*
1977c62b37d9SChristoph Hellwig 	 * default to bio-based until DM table is loaded and md->type
1978c62b37d9SChristoph Hellwig 	 * established. If request-based table is loaded: blk-mq will
1979c62b37d9SChristoph Hellwig 	 * override accordingly.
198047ace7e0SMike Snitzer 	 */
198174fe6ba9SChristoph Hellwig 	md->disk = blk_alloc_disk(md->numa_node_id);
19821da177e4SLinus Torvalds 	if (!md->disk)
19830f20972fSMike Snitzer 		goto bad;
198474fe6ba9SChristoph Hellwig 	md->queue = md->disk->queue;
19851da177e4SLinus Torvalds 
1986f0b04115SJeff Mahoney 	init_waitqueue_head(&md->wait);
198753d5914fSMikulas Patocka 	INIT_WORK(&md->work, dm_wq_work);
1988f0b04115SJeff Mahoney 	init_waitqueue_head(&md->eventq);
19892995fa78SMikulas Patocka 	init_completion(&md->kobj_holder.completion);
1990f0b04115SJeff Mahoney 
1991a666e5c0SMikulas Patocka 	md->swap_bios = get_swap_bios();
1992a666e5c0SMikulas Patocka 	sema_init(&md->swap_bios_semaphore, md->swap_bios);
1993a666e5c0SMikulas Patocka 	mutex_init(&md->swap_bios_lock);
1994a666e5c0SMikulas Patocka 
19951da177e4SLinus Torvalds 	md->disk->major = _major;
19961da177e4SLinus Torvalds 	md->disk->first_minor = minor;
199774fe6ba9SChristoph Hellwig 	md->disk->minors = 1;
19981ebe2e5fSChristoph Hellwig 	md->disk->flags |= GENHD_FL_NO_PART;
19991da177e4SLinus Torvalds 	md->disk->fops = &dm_blk_dops;
20001da177e4SLinus Torvalds 	md->disk->queue = md->queue;
20011da177e4SLinus Torvalds 	md->disk->private_data = md;
20021da177e4SLinus Torvalds 	sprintf(md->disk->disk_name, "dm-%d", minor);
2003f26c5719SDan Williams 
20045d2a228bSChristoph Hellwig 	if (IS_ENABLED(CONFIG_FS_DAX)) {
200530c6828aSChristoph Hellwig 		md->dax_dev = alloc_dax(md, &dm_dax_ops);
2006d7519392SChristoph Hellwig 		if (IS_ERR(md->dax_dev)) {
2007d7519392SChristoph Hellwig 			md->dax_dev = NULL;
2008f26c5719SDan Williams 			goto bad;
2009976431b0SDan Williams 		}
20107ac5360cSChristoph Hellwig 		set_dax_nocache(md->dax_dev);
20117ac5360cSChristoph Hellwig 		set_dax_nomc(md->dax_dev);
2012fb08a190SChristoph Hellwig 		if (dax_add_host(md->dax_dev, md->disk))
2013f26c5719SDan Williams 			goto bad;
2014f26c5719SDan Williams 	}
20151da177e4SLinus Torvalds 
20167e51f257SMike Anderson 	format_dev_t(md->name, MKDEV(_major, minor));
20171da177e4SLinus Torvalds 
2018c7c879eeSMichał Mirosław 	md->wq = alloc_workqueue("kdmflush/%s", WQ_MEM_RECLAIM, 0, md->name);
2019304f3f6aSMilan Broz 	if (!md->wq)
20200f20972fSMike Snitzer 		goto bad;
2021304f3f6aSMilan Broz 
20229f6dc633SMike Snitzer 	md->pending_io = alloc_percpu(unsigned long);
20239f6dc633SMike Snitzer 	if (!md->pending_io)
20249f6dc633SMike Snitzer 		goto bad;
20259f6dc633SMike Snitzer 
2026fd2ed4d2SMikulas Patocka 	dm_stats_init(&md->stats);
2027fd2ed4d2SMikulas Patocka 
2028ba61fdd1SJeff Mahoney 	/* Populate the mapping, nobody knows we exist yet */
2029f32c10b0SJeff Mahoney 	spin_lock(&_minor_lock);
2030ba61fdd1SJeff Mahoney 	old_md = idr_replace(&_minor_idr, md, minor);
2031f32c10b0SJeff Mahoney 	spin_unlock(&_minor_lock);
2032ba61fdd1SJeff Mahoney 
2033ba61fdd1SJeff Mahoney 	BUG_ON(old_md != MINOR_ALLOCED);
2034ba61fdd1SJeff Mahoney 
20351da177e4SLinus Torvalds 	return md;
20361da177e4SLinus Torvalds 
20370f20972fSMike Snitzer bad:
20380f20972fSMike Snitzer 	cleanup_mapped_device(md);
203983d5e5b0SMikulas Patocka bad_io_barrier:
20401da177e4SLinus Torvalds 	free_minor(minor);
20416ed7ade8SMilan Broz bad_minor:
204210da4f79SJeff Mahoney 	module_put(THIS_MODULE);
20436ed7ade8SMilan Broz bad_module_get:
2044856eb091SMikulas Patocka 	kvfree(md);
20451da177e4SLinus Torvalds 	return NULL;
20461da177e4SLinus Torvalds }
20471da177e4SLinus Torvalds 
2048ae9da83fSJun'ichi Nomura static void unlock_fs(struct mapped_device *md);
2049ae9da83fSJun'ichi Nomura 
20501da177e4SLinus Torvalds static void free_dev(struct mapped_device *md)
20511da177e4SLinus Torvalds {
2052f331c029STejun Heo 	int minor = MINOR(disk_devt(md->disk));
205363d94e48SJun'ichi Nomura 
2054ae9da83fSJun'ichi Nomura 	unlock_fs(md);
20552eb6e1e3SKeith Busch 
20560f20972fSMike Snitzer 	cleanup_mapped_device(md);
20570f20972fSMike Snitzer 
20580f20972fSMike Snitzer 	free_table_devices(&md->table_devices);
20590f20972fSMike Snitzer 	dm_stats_cleanup(&md->stats);
206063a4f065SMike Snitzer 	free_minor(minor);
206163a4f065SMike Snitzer 
206210da4f79SJeff Mahoney 	module_put(THIS_MODULE);
2063856eb091SMikulas Patocka 	kvfree(md);
20641da177e4SLinus Torvalds }
20651da177e4SLinus Torvalds 
20661da177e4SLinus Torvalds /*
20671da177e4SLinus Torvalds  * Bind a table to the device.
20681da177e4SLinus Torvalds  */
20691da177e4SLinus Torvalds static void event_callback(void *context)
20701da177e4SLinus Torvalds {
20717a8c3d3bSMike Anderson 	unsigned long flags;
20727a8c3d3bSMike Anderson 	LIST_HEAD(uevents);
20731da177e4SLinus Torvalds 	struct mapped_device *md = (struct mapped_device *) context;
20741da177e4SLinus Torvalds 
20757a8c3d3bSMike Anderson 	spin_lock_irqsave(&md->uevent_lock, flags);
20767a8c3d3bSMike Anderson 	list_splice_init(&md->uevent_list, &uevents);
20777a8c3d3bSMike Anderson 	spin_unlock_irqrestore(&md->uevent_lock, flags);
20787a8c3d3bSMike Anderson 
2079ed9e1982STejun Heo 	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
20807a8c3d3bSMike Anderson 
20811da177e4SLinus Torvalds 	atomic_inc(&md->event_nr);
20821da177e4SLinus Torvalds 	wake_up(&md->eventq);
208362e08243SMikulas Patocka 	dm_issue_global_event();
20841da177e4SLinus Torvalds }
20851da177e4SLinus Torvalds 
2086c217649bSMike Snitzer /*
2087042d2a9bSAlasdair G Kergon  * Returns old map, which caller must destroy.
2088042d2a9bSAlasdair G Kergon  */
2089042d2a9bSAlasdair G Kergon static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
2090754c5fc7SMike Snitzer 			       struct queue_limits *limits)
20911da177e4SLinus Torvalds {
2092042d2a9bSAlasdair G Kergon 	struct dm_table *old_map;
20931da177e4SLinus Torvalds 	sector_t size;
20942a2a4c51SJens Axboe 	int ret;
20951da177e4SLinus Torvalds 
20965a8f1f80SBart Van Assche 	lockdep_assert_held(&md->suspend_lock);
20975a8f1f80SBart Van Assche 
20981da177e4SLinus Torvalds 	size = dm_table_get_size(t);
20993ac51e74SDarrick J. Wong 
21003ac51e74SDarrick J. Wong 	/*
21013ac51e74SDarrick J. Wong 	 * Wipe any geometry if the size of the table changed.
21023ac51e74SDarrick J. Wong 	 */
2103fd2ed4d2SMikulas Patocka 	if (size != dm_get_size(md))
21043ac51e74SDarrick J. Wong 		memset(&md->geometry, 0, sizeof(md->geometry));
21053ac51e74SDarrick J. Wong 
21065424a0b8SMikulas Patocka 	if (!get_capacity(md->disk))
21075424a0b8SMikulas Patocka 		set_capacity(md->disk, size);
21085424a0b8SMikulas Patocka 	else
2109f64d9b2eSChristoph Hellwig 		set_capacity_and_notify(md->disk, size);
21101da177e4SLinus Torvalds 
2111cf222b37SAlasdair G Kergon 	dm_table_event_callback(t, event_callback, md);
21122ca3310eSAlasdair G Kergon 
2113f5b4aee1SMike Snitzer 	if (dm_table_request_based(t)) {
211416f12266SMike Snitzer 		/*
21159c37de29SMike Snitzer 		 * Leverage the fact that request-based DM targets are
21169c37de29SMike Snitzer 		 * immutable singletons - used to optimize dm_mq_queue_rq.
211716f12266SMike Snitzer 		 */
211816f12266SMike Snitzer 		md->immutable_target = dm_table_get_immutable_target(t);
2119e6ee8c0bSKiyoshi Ueda 
212029dec90aSChristoph Hellwig 		/*
212129dec90aSChristoph Hellwig 		 * There is no need to reload with request-based dm because the
212229dec90aSChristoph Hellwig 		 * size of front_pad doesn't change.
212329dec90aSChristoph Hellwig 		 *
212429dec90aSChristoph Hellwig 		 * Note for future: If you are to reload bioset, prep-ed
212529dec90aSChristoph Hellwig 		 * requests in the queue may refer to bio from the old bioset,
212629dec90aSChristoph Hellwig 		 * so you must walk through the queue to unprep.
212729dec90aSChristoph Hellwig 		 */
212829dec90aSChristoph Hellwig 		if (!md->mempools) {
212929dec90aSChristoph Hellwig 			md->mempools = t->mempools;
213029dec90aSChristoph Hellwig 			t->mempools = NULL;
213129dec90aSChristoph Hellwig 		}
213229dec90aSChristoph Hellwig 	} else {
213329dec90aSChristoph Hellwig 		/*
213429dec90aSChristoph Hellwig 		 * The md may already have mempools that need changing.
213529dec90aSChristoph Hellwig 		 * If so, reload bioset because front_pad may have changed
213629dec90aSChristoph Hellwig 		 * because a different table was loaded.
213729dec90aSChristoph Hellwig 		 */
213829dec90aSChristoph Hellwig 		dm_free_md_mempools(md->mempools);
213929dec90aSChristoph Hellwig 		md->mempools = t->mempools;
214029dec90aSChristoph Hellwig 		t->mempools = NULL;
21412a2a4c51SJens Axboe 	}
2142e6ee8c0bSKiyoshi Ueda 
2143f5b4aee1SMike Snitzer 	ret = dm_table_set_restrictions(t, md->queue, limits);
2144bb37d772SDamien Le Moal 	if (ret) {
2145bb37d772SDamien Le Moal 		old_map = ERR_PTR(ret);
2146bb37d772SDamien Le Moal 		goto out;
2147bb37d772SDamien Le Moal 	}
2148bb37d772SDamien Le Moal 
2149a12f5d48SEric Dumazet 	old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
21501d3aa6f6SMike Snitzer 	rcu_assign_pointer(md->map, (void *)t);
215136a0456fSAlasdair G Kergon 	md->immutable_target_type = dm_table_get_immutable_target_type(t);
215236a0456fSAlasdair G Kergon 
215341abc4e1SHannes Reinecke 	if (old_map)
215483d5e5b0SMikulas Patocka 		dm_sync_table(md);
21552a2a4c51SJens Axboe out:
2156042d2a9bSAlasdair G Kergon 	return old_map;
21571da177e4SLinus Torvalds }
21581da177e4SLinus Torvalds 
2159a7940155SAlasdair G Kergon /*
2160a7940155SAlasdair G Kergon  * Returns unbound table for the caller to free.
2161a7940155SAlasdair G Kergon  */
2162a7940155SAlasdair G Kergon static struct dm_table *__unbind(struct mapped_device *md)
21631da177e4SLinus Torvalds {
2164a12f5d48SEric Dumazet 	struct dm_table *map = rcu_dereference_protected(md->map, 1);
21651da177e4SLinus Torvalds 
21661da177e4SLinus Torvalds 	if (!map)
2167a7940155SAlasdair G Kergon 		return NULL;
21681da177e4SLinus Torvalds 
21691da177e4SLinus Torvalds 	dm_table_event_callback(map, NULL, NULL);
21709cdb8520SMonam Agarwal 	RCU_INIT_POINTER(md->map, NULL);
217183d5e5b0SMikulas Patocka 	dm_sync_table(md);
2172a7940155SAlasdair G Kergon 
2173a7940155SAlasdair G Kergon 	return map;
21741da177e4SLinus Torvalds }
21751da177e4SLinus Torvalds 
21761da177e4SLinus Torvalds /*
21771da177e4SLinus Torvalds  * Constructor for a new device.
21781da177e4SLinus Torvalds  */
21792b06cfffSAlasdair G Kergon int dm_create(int minor, struct mapped_device **result)
21801da177e4SLinus Torvalds {
21811da177e4SLinus Torvalds 	struct mapped_device *md;
21821da177e4SLinus Torvalds 
21832b06cfffSAlasdair G Kergon 	md = alloc_dev(minor);
21841da177e4SLinus Torvalds 	if (!md)
21851da177e4SLinus Torvalds 		return -ENXIO;
21861da177e4SLinus Torvalds 
218791ccbbacSTushar Sugandhi 	dm_ima_reset_data(md);
218891ccbbacSTushar Sugandhi 
21891da177e4SLinus Torvalds 	*result = md;
21901da177e4SLinus Torvalds 	return 0;
21911da177e4SLinus Torvalds }
21921da177e4SLinus Torvalds 
2193a5664dadSMike Snitzer /*
2194a5664dadSMike Snitzer  * Functions to manage md->type.
2195a5664dadSMike Snitzer  * All are required to hold md->type_lock.
2196a5664dadSMike Snitzer  */
2197a5664dadSMike Snitzer void dm_lock_md_type(struct mapped_device *md)
2198a5664dadSMike Snitzer {
2199a5664dadSMike Snitzer 	mutex_lock(&md->type_lock);
2200a5664dadSMike Snitzer }
2201a5664dadSMike Snitzer 
2202a5664dadSMike Snitzer void dm_unlock_md_type(struct mapped_device *md)
2203a5664dadSMike Snitzer {
2204a5664dadSMike Snitzer 	mutex_unlock(&md->type_lock);
2205a5664dadSMike Snitzer }
2206a5664dadSMike Snitzer 
22077e0d574fSBart Van Assche void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type)
2208a5664dadSMike Snitzer {
220900c4fc3bSMike Snitzer 	BUG_ON(!mutex_is_locked(&md->type_lock));
2210a5664dadSMike Snitzer 	md->type = type;
2211a5664dadSMike Snitzer }
2212a5664dadSMike Snitzer 
22137e0d574fSBart Van Assche enum dm_queue_mode dm_get_md_type(struct mapped_device *md)
2214a5664dadSMike Snitzer {
2215a5664dadSMike Snitzer 	return md->type;
2216a5664dadSMike Snitzer }
2217a5664dadSMike Snitzer 
221836a0456fSAlasdair G Kergon struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
221936a0456fSAlasdair G Kergon {
222036a0456fSAlasdair G Kergon 	return md->immutable_target_type;
222136a0456fSAlasdair G Kergon }
222236a0456fSAlasdair G Kergon 
22234a0b4ddfSMike Snitzer /*
2224f84cb8a4SMike Snitzer  * The queue_limits are only valid as long as you have a reference
2225f84cb8a4SMike Snitzer  * count on 'md'.
2226f84cb8a4SMike Snitzer  */
2227f84cb8a4SMike Snitzer struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
2228f84cb8a4SMike Snitzer {
2229f84cb8a4SMike Snitzer 	BUG_ON(!atomic_read(&md->holders));
2230f84cb8a4SMike Snitzer 	return &md->queue->limits;
2231f84cb8a4SMike Snitzer }
2232f84cb8a4SMike Snitzer EXPORT_SYMBOL_GPL(dm_get_queue_limits);
2233f84cb8a4SMike Snitzer 
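/*
 * Editorial sketch (hypothetical caller): a target inspecting its mapped
 * device's limits while holding a reference on md:
 *
 *	struct queue_limits *limits =
 *		dm_get_queue_limits(dm_table_get_md(ti->table));
 *
 *	if (limits->max_sectors < wanted)	// 'wanted' is hypothetical
 *		...
 */
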
22344a0b4ddfSMike Snitzer /*
22354a0b4ddfSMike Snitzer  * Setup the DM device's queue based on md's type
22364a0b4ddfSMike Snitzer  */
2237591ddcfcSMike Snitzer int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
22384a0b4ddfSMike Snitzer {
2239ba305859SChristoph Hellwig 	enum dm_queue_mode type = dm_table_get_type(t);
2240c100ec49SMike Snitzer 	struct queue_limits limits;
2241ba305859SChristoph Hellwig 	int r;
2242bfebd1cdSMike Snitzer 
2243545ed20eSToshi Kani 	switch (type) {
2244bfebd1cdSMike Snitzer 	case DM_TYPE_REQUEST_BASED:
2245681cc5e8SMike Snitzer 		md->disk->fops = &dm_rq_blk_dops;
2246e83068a5SMike Snitzer 		r = dm_mq_init_request_queue(md, t);
2247bfebd1cdSMike Snitzer 		if (r) {
2248681cc5e8SMike Snitzer 			DMERR("Cannot initialize queue for request-based dm mapped device");
2249bfebd1cdSMike Snitzer 			return r;
2250bfebd1cdSMike Snitzer 		}
2251bfebd1cdSMike Snitzer 		break;
2252bfebd1cdSMike Snitzer 	case DM_TYPE_BIO_BASED:
2253545ed20eSToshi Kani 	case DM_TYPE_DAX_BIO_BASED:
2254bfebd1cdSMike Snitzer 		break;
22557e0d574fSBart Van Assche 	case DM_TYPE_NONE:
22567e0d574fSBart Van Assche 		WARN_ON_ONCE(true);
22577e0d574fSBart Van Assche 		break;
2258ff36ab34SMike Snitzer 	}
22594a0b4ddfSMike Snitzer 
2260c100ec49SMike Snitzer 	r = dm_calculate_queue_limits(t, &limits);
2261c100ec49SMike Snitzer 	if (r) {
2262c100ec49SMike Snitzer 		DMERR("Cannot calculate initial queue limits");
2263c100ec49SMike Snitzer 		return r;
2264c100ec49SMike Snitzer 	}
2265bb37d772SDamien Le Moal 	r = dm_table_set_restrictions(t, md->queue, &limits);
2266bb37d772SDamien Le Moal 	if (r)
2267bb37d772SDamien Le Moal 		return r;
226889f871afSChristoph Hellwig 
2269e7089f65SLuis Chamberlain 	r = add_disk(md->disk);
2270e7089f65SLuis Chamberlain 	if (r)
2271e7089f65SLuis Chamberlain 		return r;
227289f871afSChristoph Hellwig 
227389f871afSChristoph Hellwig 	r = dm_sysfs_init(md);
227489f871afSChristoph Hellwig 	if (r) {
227589f871afSChristoph Hellwig 		del_gendisk(md->disk);
227689f871afSChristoph Hellwig 		return r;
227789f871afSChristoph Hellwig 	}
2278ba305859SChristoph Hellwig 	md->type = type;
22794a0b4ddfSMike Snitzer 	return 0;
22804a0b4ddfSMike Snitzer }
22814a0b4ddfSMike Snitzer 
22822bec1f4aSMikulas Patocka struct mapped_device *dm_get_md(dev_t dev)
22831da177e4SLinus Torvalds {
22841da177e4SLinus Torvalds 	struct mapped_device *md;
22851da177e4SLinus Torvalds 	unsigned minor = MINOR(dev);
22861da177e4SLinus Torvalds 
22871da177e4SLinus Torvalds 	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
22881da177e4SLinus Torvalds 		return NULL;
22891da177e4SLinus Torvalds 
2290f32c10b0SJeff Mahoney 	spin_lock(&_minor_lock);
22911da177e4SLinus Torvalds 
22921da177e4SLinus Torvalds 	md = idr_find(&_minor_idr, minor);
229349de5769SMike Snitzer 	if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) ||
229449de5769SMike Snitzer 	    test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
2295637842cfSDavid Teigland 		md = NULL;
2296fba9f90eSJeff Mahoney 		goto out;
2297fba9f90eSJeff Mahoney 	}
22982bec1f4aSMikulas Patocka 	dm_get(md);
2299fba9f90eSJeff Mahoney out:
2300f32c10b0SJeff Mahoney 	spin_unlock(&_minor_lock);
23011da177e4SLinus Torvalds 
2302637842cfSDavid Teigland 	return md;
2303637842cfSDavid Teigland }
23043cf2e4baSAlasdair G Kergon EXPORT_SYMBOL_GPL(dm_get_md);
2305d229a958SDavid Teigland 
23069ade92a9SAlasdair G Kergon void *dm_get_mdptr(struct mapped_device *md)
2307637842cfSDavid Teigland {
23089ade92a9SAlasdair G Kergon 	return md->interface_ptr;
23091da177e4SLinus Torvalds }
23101da177e4SLinus Torvalds 
23111da177e4SLinus Torvalds void dm_set_mdptr(struct mapped_device *md, void *ptr)
23121da177e4SLinus Torvalds {
23131da177e4SLinus Torvalds 	md->interface_ptr = ptr;
23141da177e4SLinus Torvalds }
23151da177e4SLinus Torvalds 
23161da177e4SLinus Torvalds void dm_get(struct mapped_device *md)
23171da177e4SLinus Torvalds {
23181da177e4SLinus Torvalds 	atomic_inc(&md->holders);
23193f77316dSKiyoshi Ueda 	BUG_ON(test_bit(DMF_FREEING, &md->flags));
23201da177e4SLinus Torvalds }
23211da177e4SLinus Torvalds 
232209ee96b2SMikulas Patocka int dm_hold(struct mapped_device *md)
232309ee96b2SMikulas Patocka {
232409ee96b2SMikulas Patocka 	spin_lock(&_minor_lock);
232509ee96b2SMikulas Patocka 	if (test_bit(DMF_FREEING, &md->flags)) {
232609ee96b2SMikulas Patocka 		spin_unlock(&_minor_lock);
232709ee96b2SMikulas Patocka 		return -EBUSY;
232809ee96b2SMikulas Patocka 	}
232909ee96b2SMikulas Patocka 	dm_get(md);
233009ee96b2SMikulas Patocka 	spin_unlock(&_minor_lock);
233109ee96b2SMikulas Patocka 	return 0;
233209ee96b2SMikulas Patocka }
233309ee96b2SMikulas Patocka EXPORT_SYMBOL_GPL(dm_hold);
233409ee96b2SMikulas Patocka 
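/*
 * Editor's sketch (not part of dm.c): the lookup/reference pattern served by
 * dm_get_md(), dm_hold() and dm_put(). example_with_md() is hypothetical.
 */
static int example_with_md(dev_t dev)
{
	struct mapped_device *md = dm_get_md(dev);

	if (!md)
		return -ENXIO;

	/* ... use md; dm_get_md() already took a reference ... */

	dm_put(md);	/* drop the reference taken by dm_get_md() */
	return 0;
}
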
233572d94861SAlasdair G Kergon const char *dm_device_name(struct mapped_device *md)
233672d94861SAlasdair G Kergon {
233772d94861SAlasdair G Kergon 	return md->name;
233872d94861SAlasdair G Kergon }
233972d94861SAlasdair G Kergon EXPORT_SYMBOL_GPL(dm_device_name);
234072d94861SAlasdair G Kergon 
23413f77316dSKiyoshi Ueda static void __dm_destroy(struct mapped_device *md, bool wait)
23421da177e4SLinus Torvalds {
23431134e5aeSMike Anderson 	struct dm_table *map;
234483d5e5b0SMikulas Patocka 	int srcu_idx;
23451da177e4SLinus Torvalds 
23463f77316dSKiyoshi Ueda 	might_sleep();
2347fba9f90eSJeff Mahoney 
234863a4f065SMike Snitzer 	spin_lock(&_minor_lock);
23493f77316dSKiyoshi Ueda 	idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
2350fba9f90eSJeff Mahoney 	set_bit(DMF_FREEING, &md->flags);
2351f32c10b0SJeff Mahoney 	spin_unlock(&_minor_lock);
23523f77316dSKiyoshi Ueda 
23537a5428dcSChristoph Hellwig 	blk_mark_disk_dead(md->disk);
23543b785fbcSBart Van Assche 
2355ab7c7bb6SMikulas Patocka 	/*
2356ab7c7bb6SMikulas Patocka 	 * Take suspend_lock so that presuspend and postsuspend methods
2357ab7c7bb6SMikulas Patocka 	 * do not race with internal suspend.
2358ab7c7bb6SMikulas Patocka 	 */
2359ab7c7bb6SMikulas Patocka 	mutex_lock(&md->suspend_lock);
23602a708cffSJunichi Nomura 	map = dm_get_live_table(md, &srcu_idx);
23614f186f8bSKiyoshi Ueda 	if (!dm_suspended_md(md)) {
23621da177e4SLinus Torvalds 		dm_table_presuspend_targets(map);
2363adc0daadSMikulas Patocka 		set_bit(DMF_SUSPENDED, &md->flags);
23645df96f2bSMikulas Patocka 		set_bit(DMF_POST_SUSPENDING, &md->flags);
23651da177e4SLinus Torvalds 		dm_table_postsuspend_targets(map);
23661da177e4SLinus Torvalds 	}
236783d5e5b0SMikulas Patocka 	/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
236883d5e5b0SMikulas Patocka 	dm_put_live_table(md, srcu_idx);
23692a708cffSJunichi Nomura 	mutex_unlock(&md->suspend_lock);
237083d5e5b0SMikulas Patocka 
23713f77316dSKiyoshi Ueda 	/*
23723f77316dSKiyoshi Ueda 	 * Rare, but there may be I/O requests still in flight,
23733f77316dSKiyoshi Ueda 	 * e.g. still completing.  Wait for all references to disappear.
23743f77316dSKiyoshi Ueda 	 * No one may increment the reference count of the mapped_device
23753f77316dSKiyoshi Ueda 	 * once its state becomes DMF_FREEING.
23763f77316dSKiyoshi Ueda 	 */
23773f77316dSKiyoshi Ueda 	if (wait)
23783f77316dSKiyoshi Ueda 		while (atomic_read(&md->holders))
23793f77316dSKiyoshi Ueda 			msleep(1);
23803f77316dSKiyoshi Ueda 	else if (atomic_read(&md->holders))
23813f77316dSKiyoshi Ueda 		DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
23823f77316dSKiyoshi Ueda 		       dm_device_name(md), atomic_read(&md->holders));
23833f77316dSKiyoshi Ueda 
2384a7940155SAlasdair G Kergon 	dm_table_destroy(__unbind(md));
23851da177e4SLinus Torvalds 	free_dev(md);
23861da177e4SLinus Torvalds }
23873f77316dSKiyoshi Ueda 
23883f77316dSKiyoshi Ueda void dm_destroy(struct mapped_device *md)
23893f77316dSKiyoshi Ueda {
23903f77316dSKiyoshi Ueda 	__dm_destroy(md, true);
23913f77316dSKiyoshi Ueda }
23923f77316dSKiyoshi Ueda 
23933f77316dSKiyoshi Ueda void dm_destroy_immediate(struct mapped_device *md)
23943f77316dSKiyoshi Ueda {
23953f77316dSKiyoshi Ueda 	__dm_destroy(md, false);
23963f77316dSKiyoshi Ueda }
23973f77316dSKiyoshi Ueda 
23983f77316dSKiyoshi Ueda void dm_put(struct mapped_device *md)
23993f77316dSKiyoshi Ueda {
24003f77316dSKiyoshi Ueda 	atomic_dec(&md->holders);
24011da177e4SLinus Torvalds }
240279eb885cSEdward Goggin EXPORT_SYMBOL_GPL(dm_put);
24031da177e4SLinus Torvalds 
24049f6dc633SMike Snitzer static bool dm_in_flight_bios(struct mapped_device *md)
240585067747SMing Lei {
240685067747SMing Lei 	int cpu;
24079f6dc633SMike Snitzer 	unsigned long sum = 0;
240885067747SMing Lei 
24099f6dc633SMike Snitzer 	for_each_possible_cpu(cpu)
24109f6dc633SMike Snitzer 		sum += *per_cpu_ptr(md->pending_io, cpu);
241185067747SMing Lei 
241285067747SMing Lei 	return sum != 0;
241385067747SMing Lei }
241485067747SMing Lei 
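/*
 * Editor's sketch (not part of dm.c): the per-CPU counting scheme summed by
 * dm_in_flight_bios(). Submission and completion each touch only the local
 * CPU's slot, so no lock is needed on the hot path; the waiter below
 * tolerates a slightly stale sum because it rechecks after sleeping.
 * example_io_start()/example_io_end() are hypothetical names.
 */
static inline void example_io_start(struct mapped_device *md)
{
	this_cpu_inc(*md->pending_io);
}

static inline void example_io_end(struct mapped_device *md)
{
	this_cpu_dec(*md->pending_io);
	if (unlikely(wq_has_sleeper(&md->wait)))
		wake_up(&md->wait);
}
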
24152f064a59SPeter Zijlstra static int dm_wait_for_bios_completion(struct mapped_device *md, unsigned int task_state)
241646125c1cSMilan Broz {
241746125c1cSMilan Broz 	int r = 0;
24189f4c3f87SBart Van Assche 	DEFINE_WAIT(wait);
241946125c1cSMilan Broz 
242085067747SMing Lei 	while (true) {
24219f4c3f87SBart Van Assche 		prepare_to_wait(&md->wait, &wait, task_state);
242246125c1cSMilan Broz 
24239f6dc633SMike Snitzer 		if (!dm_in_flight_bios(md))
242446125c1cSMilan Broz 			break;
242546125c1cSMilan Broz 
2426e3fabdfdSBart Van Assche 		if (signal_pending_state(task_state, current)) {
242746125c1cSMilan Broz 			r = -EINTR;
242846125c1cSMilan Broz 			break;
242946125c1cSMilan Broz 		}
243046125c1cSMilan Broz 
243146125c1cSMilan Broz 		io_schedule();
243246125c1cSMilan Broz 	}
24339f4c3f87SBart Van Assche 	finish_wait(&md->wait, &wait);
2434b44ebeb0SMikulas Patocka 
24359f6dc633SMike Snitzer 	smp_rmb();
24369f6dc633SMike Snitzer 
243746125c1cSMilan Broz 	return r;
243846125c1cSMilan Broz }
243946125c1cSMilan Broz 
24402f064a59SPeter Zijlstra static int dm_wait_for_completion(struct mapped_device *md, unsigned int task_state)
244185067747SMing Lei {
244285067747SMing Lei 	int r = 0;
244385067747SMing Lei 
244485067747SMing Lei 	if (!queue_is_mq(md->queue))
244585067747SMing Lei 		return dm_wait_for_bios_completion(md, task_state);
244685067747SMing Lei 
244785067747SMing Lei 	while (true) {
244885067747SMing Lei 		if (!blk_mq_queue_inflight(md->queue))
244985067747SMing Lei 			break;
245085067747SMing Lei 
245185067747SMing Lei 		if (signal_pending_state(task_state, current)) {
245285067747SMing Lei 			r = -EINTR;
245385067747SMing Lei 			break;
245485067747SMing Lei 		}
245585067747SMing Lei 
245685067747SMing Lei 		msleep(5);
245785067747SMing Lei 	}
245885067747SMing Lei 
245985067747SMing Lei 	return r;
246085067747SMing Lei }
246185067747SMing Lei 
24621da177e4SLinus Torvalds /*
24631da177e4SLinus Torvalds  * Process the deferred bios
24641da177e4SLinus Torvalds  */
2465ef208587SMikulas Patocka static void dm_wq_work(struct work_struct *work)
24661da177e4SLinus Torvalds {
24670c2915b8SMike Snitzer 	struct mapped_device *md = container_of(work, struct mapped_device, work);
24680c2915b8SMike Snitzer 	struct bio *bio;
2469ef208587SMikulas Patocka 
24703b00b203SMikulas Patocka 	while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
2471022c2611SMikulas Patocka 		spin_lock_irq(&md->deferred_lock);
24720c2915b8SMike Snitzer 		bio = bio_list_pop(&md->deferred);
2473022c2611SMikulas Patocka 		spin_unlock_irq(&md->deferred_lock);
2474022c2611SMikulas Patocka 
24750c2915b8SMike Snitzer 		if (!bio)
2476df12ee99SAlasdair G Kergon 			break;
247773d410c0SMilan Broz 
24780c2915b8SMike Snitzer 		submit_bio_noacct(bio);
2479e6ee8c0bSKiyoshi Ueda 	}
24801da177e4SLinus Torvalds }
24811da177e4SLinus Torvalds 
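/*
 * Editor's sketch (not part of dm.c): the producer side of the deferred list
 * drained by dm_wq_work(). The bio submission path does the equivalent while
 * DMF_BLOCK_IO_FOR_SUSPEND is set. example_defer_bio() is hypothetical.
 */
static void example_defer_bio(struct mapped_device *md, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&md->deferred_lock, flags);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irqrestore(&md->deferred_lock, flags);

	queue_work(md->wq, &md->work);
}
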
24829a1fb464SMikulas Patocka static void dm_queue_flush(struct mapped_device *md)
2483304f3f6aSMilan Broz {
24843b00b203SMikulas Patocka 	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
24854e857c58SPeter Zijlstra 	smp_mb__after_atomic();
248653d5914fSMikulas Patocka 	queue_work(md->wq, &md->work);
2487304f3f6aSMilan Broz }
2488304f3f6aSMilan Broz 
24891da177e4SLinus Torvalds /*
2490042d2a9bSAlasdair G Kergon  * Swap in a new table, returning the old one for the caller to destroy.
24911da177e4SLinus Torvalds  */
2492042d2a9bSAlasdair G Kergon struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
24931da177e4SLinus Torvalds {
249487eb5b21SMike Christie 	struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
2495754c5fc7SMike Snitzer 	struct queue_limits limits;
2496042d2a9bSAlasdair G Kergon 	int r;
24971da177e4SLinus Torvalds 
2498e61290a4SDaniel Walker 	mutex_lock(&md->suspend_lock);
24991da177e4SLinus Torvalds 
25001da177e4SLinus Torvalds 	/* device must be suspended */
25014f186f8bSKiyoshi Ueda 	if (!dm_suspended_md(md))
250293c534aeSAlasdair G Kergon 		goto out;
25031da177e4SLinus Torvalds 
25043ae70656SMike Snitzer 	/*
25053ae70656SMike Snitzer 	 * If the new table has no data devices, retain the existing limits.
25063ae70656SMike Snitzer 	 * This helps multipath with queue_if_no_path: if all paths disappear,
25073ae70656SMike Snitzer 	 * new I/O is queued based on these limits, and it can be dispatched
25083ae70656SMike Snitzer 	 * once some paths reappear.
25093ae70656SMike Snitzer 	 */
25103ae70656SMike Snitzer 	if (dm_table_has_no_data_devices(table)) {
251183d5e5b0SMikulas Patocka 		live_map = dm_get_live_table_fast(md);
25123ae70656SMike Snitzer 		if (live_map)
25133ae70656SMike Snitzer 			limits = md->queue->limits;
251483d5e5b0SMikulas Patocka 		dm_put_live_table_fast(md);
25153ae70656SMike Snitzer 	}
25163ae70656SMike Snitzer 
251787eb5b21SMike Christie 	if (!live_map) {
2518754c5fc7SMike Snitzer 		r = dm_calculate_queue_limits(table, &limits);
2519042d2a9bSAlasdair G Kergon 		if (r) {
2520042d2a9bSAlasdair G Kergon 			map = ERR_PTR(r);
2521754c5fc7SMike Snitzer 			goto out;
2522042d2a9bSAlasdair G Kergon 		}
252387eb5b21SMike Christie 	}
2524754c5fc7SMike Snitzer 
2525042d2a9bSAlasdair G Kergon 	map = __bind(md, table, &limits);
252662e08243SMikulas Patocka 	dm_issue_global_event();
25271da177e4SLinus Torvalds 
252893c534aeSAlasdair G Kergon out:
2529e61290a4SDaniel Walker 	mutex_unlock(&md->suspend_lock);
2530042d2a9bSAlasdair G Kergon 	return map;
25311da177e4SLinus Torvalds }
25321da177e4SLinus Torvalds 
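/*
 * Editor's sketch (not part of dm.c): the suspend -> swap -> resume sequence
 * dm_swap_table() expects from its caller (normally the ioctl layer).
 * example_replace_table() is hypothetical and hard-codes the lockfs flag.
 */
static int example_replace_table(struct mapped_device *md, struct dm_table *t)
{
	struct dm_table *old_map;
	int r;

	r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
	if (r)
		return r;

	old_map = dm_swap_table(md, t);
	if (IS_ERR(old_map)) {
		dm_resume(md);
		return PTR_ERR(old_map);
	}
	if (old_map)
		dm_table_destroy(old_map);	/* caller destroys the old table */

	return dm_resume(md);
}
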
25331da177e4SLinus Torvalds /*
25341da177e4SLinus Torvalds  * Functions to lock and unlock any filesystem running on the
25351da177e4SLinus Torvalds  * device.
25361da177e4SLinus Torvalds  */
25372ca3310eSAlasdair G Kergon static int lock_fs(struct mapped_device *md)
25381da177e4SLinus Torvalds {
2539e39e2e95SAlasdair G Kergon 	int r;
25401da177e4SLinus Torvalds 
2541040f04bdSChristoph Hellwig 	WARN_ON(test_bit(DMF_FROZEN, &md->flags));
2542dfbe03f6SAlasdair G Kergon 
2543977115c0SChristoph Hellwig 	r = freeze_bdev(md->disk->part0);
2544040f04bdSChristoph Hellwig 	if (!r)
2545aa8d7c2fSAlasdair G Kergon 		set_bit(DMF_FROZEN, &md->flags);
2546040f04bdSChristoph Hellwig 	return r;
25471da177e4SLinus Torvalds }
25481da177e4SLinus Torvalds 
25492ca3310eSAlasdair G Kergon static void unlock_fs(struct mapped_device *md)
25501da177e4SLinus Torvalds {
2551aa8d7c2fSAlasdair G Kergon 	if (!test_bit(DMF_FROZEN, &md->flags))
2552aa8d7c2fSAlasdair G Kergon 		return;
2553977115c0SChristoph Hellwig 	thaw_bdev(md->disk->part0);
2554aa8d7c2fSAlasdair G Kergon 	clear_bit(DMF_FROZEN, &md->flags);
25551da177e4SLinus Torvalds }
25561da177e4SLinus Torvalds 
25571da177e4SLinus Torvalds /*
2558b48633f8SBart Van Assche  * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG
2559b48633f8SBart Van Assche  * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE
2560b48633f8SBart Van Assche  * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY
2561b48633f8SBart Van Assche  *
2562ffcc3936SMike Snitzer  * If __dm_suspend returns 0, the device is completely quiescent
2563ffcc3936SMike Snitzer  * now. There is no request-processing activity. All new requests
2564ffcc3936SMike Snitzer  * are being added to the md->deferred list.
2565cec47e3dSKiyoshi Ueda  */
2566ffcc3936SMike Snitzer static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
25672f064a59SPeter Zijlstra 			unsigned suspend_flags, unsigned int task_state,
2568eaf9a736SMike Snitzer 			int dmf_suspended_flag)
25691da177e4SLinus Torvalds {
2570ffcc3936SMike Snitzer 	bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
2571ffcc3936SMike Snitzer 	bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
2572ffcc3936SMike Snitzer 	int r;
2573cf222b37SAlasdair G Kergon 
25745a8f1f80SBart Van Assche 	lockdep_assert_held(&md->suspend_lock);
25755a8f1f80SBart Van Assche 
25762e93ccc1SKiyoshi Ueda 	/*
25772e93ccc1SKiyoshi Ueda 	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
25782e93ccc1SKiyoshi Ueda 	 * This flag is cleared before dm_suspend returns.
25792e93ccc1SKiyoshi Ueda 	 */
25802e93ccc1SKiyoshi Ueda 	if (noflush)
25812e93ccc1SKiyoshi Ueda 		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
258286331f39SBart Van Assche 	else
2583ac75b09fSMike Snitzer 		DMDEBUG("%s: suspending with flush", dm_device_name(md));
25842e93ccc1SKiyoshi Ueda 
2585d67ee213SMike Snitzer 	/*
2586d67ee213SMike Snitzer 	 * This gets reverted if there's an error later and the targets
2587d67ee213SMike Snitzer 	 * provide the .presuspend_undo hook.
2588d67ee213SMike Snitzer 	 */
25891da177e4SLinus Torvalds 	dm_table_presuspend_targets(map);
25901da177e4SLinus Torvalds 
25912e93ccc1SKiyoshi Ueda 	/*
25929f518b27SKiyoshi Ueda 	 * Flush I/O to the device.
25939f518b27SKiyoshi Ueda 	 * Any I/O submitted after lock_fs() might not be flushed.
25949f518b27SKiyoshi Ueda 	 * noflush takes precedence over do_lockfs.
25959f518b27SKiyoshi Ueda 	 * (lock_fs() flushes I/Os and waits for them to complete.)
25962e93ccc1SKiyoshi Ueda 	 */
259732a926daSMikulas Patocka 	if (!noflush && do_lockfs) {
25982ca3310eSAlasdair G Kergon 		r = lock_fs(md);
2599d67ee213SMike Snitzer 		if (r) {
2600d67ee213SMike Snitzer 			dm_table_presuspend_undo_targets(map);
2601ffcc3936SMike Snitzer 			return r;
2602aa8d7c2fSAlasdair G Kergon 		}
2603d67ee213SMike Snitzer 	}
26041da177e4SLinus Torvalds 
26051da177e4SLinus Torvalds 	/*
26063b00b203SMikulas Patocka 	 * Here we must make sure that no processes are submitting requests
26073b00b203SMikulas Patocka 	 * to target drivers, i.e. no one may be executing
260896c9865cSMike Snitzer 	 * dm_split_and_process_bio from dm_submit_bio.
26093b00b203SMikulas Patocka 	 *
261096c9865cSMike Snitzer 	 * To get all processes out of dm_split_and_process_bio in dm_submit_bio,
26113b00b203SMikulas Patocka 	 * we take the write lock. To prevent any process from reentering
261296c9865cSMike Snitzer 	 * dm_split_and_process_bio from dm_submit_bio and quiesce the thread
26130cede372SMike Snitzer 	 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
26146a8736d1STejun Heo 	 * flush_workqueue(md->wq).
26151da177e4SLinus Torvalds 	 */
26161eb787ecSAlasdair G Kergon 	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
261741abc4e1SHannes Reinecke 	if (map)
261883d5e5b0SMikulas Patocka 		synchronize_srcu(&md->io_barrier);
26191da177e4SLinus Torvalds 
2620d0bcb878SKiyoshi Ueda 	/*
262129e4013dSTejun Heo 	 * Stop md->queue before flushing md->wq in case request-based
262229e4013dSTejun Heo 	 * dm defers requests to md->wq from md->queue.
2623d0bcb878SKiyoshi Ueda 	 */
26246a23e05cSJens Axboe 	if (dm_request_based(md))
2625eca7ee6dSMike Snitzer 		dm_stop_queue(md->queue);
2626cec47e3dSKiyoshi Ueda 
2627d0bcb878SKiyoshi Ueda 	flush_workqueue(md->wq);
2628d0bcb878SKiyoshi Ueda 
26291da177e4SLinus Torvalds 	/*
26303b00b203SMikulas Patocka 	 * At this point no more requests are entering target request routines.
26313b00b203SMikulas Patocka 	 * We call dm_wait_for_completion to wait for all existing requests
26323b00b203SMikulas Patocka 	 * to finish.
26331da177e4SLinus Torvalds 	 */
2634b48633f8SBart Van Assche 	r = dm_wait_for_completion(md, task_state);
2635eaf9a736SMike Snitzer 	if (!r)
2636eaf9a736SMike Snitzer 		set_bit(dmf_suspended_flag, &md->flags);
26371da177e4SLinus Torvalds 
26386d6f10dfSMilan Broz 	if (noflush)
2639022c2611SMikulas Patocka 		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
264041abc4e1SHannes Reinecke 	if (map)
264183d5e5b0SMikulas Patocka 		synchronize_srcu(&md->io_barrier);
26422e93ccc1SKiyoshi Ueda 
26431da177e4SLinus Torvalds 	/* were we interrupted ? */
264446125c1cSMilan Broz 	if (r < 0) {
26459a1fb464SMikulas Patocka 		dm_queue_flush(md);
264673d410c0SMilan Broz 
2647cec47e3dSKiyoshi Ueda 		if (dm_request_based(md))
2648eca7ee6dSMike Snitzer 			dm_start_queue(md->queue);
2649cec47e3dSKiyoshi Ueda 
26502ca3310eSAlasdair G Kergon 		unlock_fs(md);
2651d67ee213SMike Snitzer 		dm_table_presuspend_undo_targets(map);
2652ffcc3936SMike Snitzer 		/* pushback list is already flushed, so skip flush */
2653ffcc3936SMike Snitzer 	}
2654ffcc3936SMike Snitzer 
2655ffcc3936SMike Snitzer 	return r;
26562ca3310eSAlasdair G Kergon }
26572ca3310eSAlasdair G Kergon 
26583b00b203SMikulas Patocka /*
2659ffcc3936SMike Snitzer  * We need to be able to change a mapping table under a mounted
2660ffcc3936SMike Snitzer  * filesystem.  For example we might want to move some data in
2661ffcc3936SMike Snitzer  * the background.  Before the table can be swapped with
2662ffcc3936SMike Snitzer  * dm_bind_table, dm_suspend must be called to flush any in-flight
2663ffcc3936SMike Snitzer  * bios and ensure that any further I/O gets deferred.
26643b00b203SMikulas Patocka  */
2665ffcc3936SMike Snitzer /*
2666ffcc3936SMike Snitzer  * Suspend mechanism in request-based dm.
2667ffcc3936SMike Snitzer  *
2668ffcc3936SMike Snitzer  * 1. Flush all I/Os by lock_fs() if needed.
2669ffcc3936SMike Snitzer  * 2. Stop dispatching any I/O by stopping the request_queue.
2670ffcc3936SMike Snitzer  * 3. Wait for all in-flight I/Os to be completed or requeued.
2671ffcc3936SMike Snitzer  *
2672ffcc3936SMike Snitzer  * To abort suspend, start the request_queue.
2673ffcc3936SMike Snitzer  */
2674ffcc3936SMike Snitzer int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
2675ffcc3936SMike Snitzer {
2676ffcc3936SMike Snitzer 	struct dm_table *map = NULL;
2677ffcc3936SMike Snitzer 	int r = 0;
2678ffcc3936SMike Snitzer 
2679ffcc3936SMike Snitzer retry:
2680ffcc3936SMike Snitzer 	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
2681ffcc3936SMike Snitzer 
2682ffcc3936SMike Snitzer 	if (dm_suspended_md(md)) {
2683ffcc3936SMike Snitzer 		r = -EINVAL;
2684ffcc3936SMike Snitzer 		goto out_unlock;
2685ffcc3936SMike Snitzer 	}
2686ffcc3936SMike Snitzer 
2687ffcc3936SMike Snitzer 	if (dm_suspended_internally_md(md)) {
2688ffcc3936SMike Snitzer 		/* already internally suspended, wait for internal resume */
2689ffcc3936SMike Snitzer 		mutex_unlock(&md->suspend_lock);
2690ffcc3936SMike Snitzer 		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
2691ffcc3936SMike Snitzer 		if (r)
2692ffcc3936SMike Snitzer 			return r;
2693ffcc3936SMike Snitzer 		goto retry;
2694ffcc3936SMike Snitzer 	}
2695ffcc3936SMike Snitzer 
2696a12f5d48SEric Dumazet 	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2697ffcc3936SMike Snitzer 
2698eaf9a736SMike Snitzer 	r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);
2699ffcc3936SMike Snitzer 	if (r)
2700ffcc3936SMike Snitzer 		goto out_unlock;
27013b00b203SMikulas Patocka 
27025df96f2bSMikulas Patocka 	set_bit(DMF_POST_SUSPENDING, &md->flags);
27034d4471cbSKiyoshi Ueda 	dm_table_postsuspend_targets(map);
27045df96f2bSMikulas Patocka 	clear_bit(DMF_POST_SUSPENDING, &md->flags);
27054d4471cbSKiyoshi Ueda 
2706d287483dSAlasdair G Kergon out_unlock:
2707e61290a4SDaniel Walker 	mutex_unlock(&md->suspend_lock);
2708cf222b37SAlasdair G Kergon 	return r;
27091da177e4SLinus Torvalds }
27101da177e4SLinus Torvalds 
2711ffcc3936SMike Snitzer static int __dm_resume(struct mapped_device *md, struct dm_table *map)
27121da177e4SLinus Torvalds {
2713ffcc3936SMike Snitzer 	if (map) {
2714ffcc3936SMike Snitzer 		int r = dm_table_resume_targets(map);
27158757b776SMilan Broz 		if (r)
2716ffcc3936SMike Snitzer 			return r;
2717ffcc3936SMike Snitzer 	}
27182ca3310eSAlasdair G Kergon 
27199a1fb464SMikulas Patocka 	dm_queue_flush(md);
27202ca3310eSAlasdair G Kergon 
2721cec47e3dSKiyoshi Ueda 	/*
2722cec47e3dSKiyoshi Ueda 	 * Flushing deferred I/Os must be done after targets are resumed
2723cec47e3dSKiyoshi Ueda 	 * so that mapping of targets can work correctly.
2724cec47e3dSKiyoshi Ueda 	 * Request-based dm is queueing the deferred I/Os in its request_queue.
2725cec47e3dSKiyoshi Ueda 	 */
2726cec47e3dSKiyoshi Ueda 	if (dm_request_based(md))
2727eca7ee6dSMike Snitzer 		dm_start_queue(md->queue);
2728cec47e3dSKiyoshi Ueda 
27292ca3310eSAlasdair G Kergon 	unlock_fs(md);
27302ca3310eSAlasdair G Kergon 
2731ffcc3936SMike Snitzer 	return 0;
2732ffcc3936SMike Snitzer }
2733ffcc3936SMike Snitzer 
2734ffcc3936SMike Snitzer int dm_resume(struct mapped_device *md)
2735ffcc3936SMike Snitzer {
27368dc23658SMinfei Huang 	int r;
2737ffcc3936SMike Snitzer 	struct dm_table *map = NULL;
2738ffcc3936SMike Snitzer 
2739ffcc3936SMike Snitzer retry:
27408dc23658SMinfei Huang 	r = -EINVAL;
2741ffcc3936SMike Snitzer 	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
2742ffcc3936SMike Snitzer 
2743ffcc3936SMike Snitzer 	if (!dm_suspended_md(md))
2744ffcc3936SMike Snitzer 		goto out;
2745ffcc3936SMike Snitzer 
2746ffcc3936SMike Snitzer 	if (dm_suspended_internally_md(md)) {
2747ffcc3936SMike Snitzer 		/* already internally suspended, wait for internal resume */
2748ffcc3936SMike Snitzer 		mutex_unlock(&md->suspend_lock);
2749ffcc3936SMike Snitzer 		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
2750ffcc3936SMike Snitzer 		if (r)
2751ffcc3936SMike Snitzer 			return r;
2752ffcc3936SMike Snitzer 		goto retry;
2753ffcc3936SMike Snitzer 	}
2754ffcc3936SMike Snitzer 
2755a12f5d48SEric Dumazet 	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2756ffcc3936SMike Snitzer 	if (!map || !dm_table_get_size(map))
2757ffcc3936SMike Snitzer 		goto out;
2758ffcc3936SMike Snitzer 
2759ffcc3936SMike Snitzer 	r = __dm_resume(md, map);
2760ffcc3936SMike Snitzer 	if (r)
2761ffcc3936SMike Snitzer 		goto out;
2762ffcc3936SMike Snitzer 
27632ca3310eSAlasdair G Kergon 	clear_bit(DMF_SUSPENDED, &md->flags);
2764cf222b37SAlasdair G Kergon out:
2765e61290a4SDaniel Walker 	mutex_unlock(&md->suspend_lock);
27662ca3310eSAlasdair G Kergon 
2767cf222b37SAlasdair G Kergon 	return r;
27681da177e4SLinus Torvalds }
27691da177e4SLinus Torvalds 
2770fd2ed4d2SMikulas Patocka /*
2771fd2ed4d2SMikulas Patocka  * Internal suspend/resume works like userspace-driven suspend. It waits
2772fd2ed4d2SMikulas Patocka  * until all bios finish and prevents issuing new bios to the target drivers.
2773fd2ed4d2SMikulas Patocka  * It may be used only from the kernel.
2774fd2ed4d2SMikulas Patocka  */
2775fd2ed4d2SMikulas Patocka 
2776ffcc3936SMike Snitzer static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags)
2777ffcc3936SMike Snitzer {
2778ffcc3936SMike Snitzer 	struct dm_table *map = NULL;
2779ffcc3936SMike Snitzer 
27801ea0654eSBart Van Assche 	lockdep_assert_held(&md->suspend_lock);
27811ea0654eSBart Van Assche 
278296b26c8cSMikulas Patocka 	if (md->internal_suspend_count++)
2783ffcc3936SMike Snitzer 		return; /* nested internal suspend */
2784ffcc3936SMike Snitzer 
2785ffcc3936SMike Snitzer 	if (dm_suspended_md(md)) {
2786ffcc3936SMike Snitzer 		set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2787ffcc3936SMike Snitzer 		return; /* nest suspend */
2788ffcc3936SMike Snitzer 	}
2789ffcc3936SMike Snitzer 
2790a12f5d48SEric Dumazet 	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2791ffcc3936SMike Snitzer 
2792ffcc3936SMike Snitzer 	/*
2793ffcc3936SMike Snitzer 	 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is
2794ffcc3936SMike Snitzer 	 * supported.  Properly supporting a TASK_INTERRUPTIBLE internal suspend
2795ffcc3936SMike Snitzer 	 * would require changing .presuspend to return an error -- avoid this
2796ffcc3936SMike Snitzer 	 * until there is a need for more elaborate variants of internal suspend.
2797ffcc3936SMike Snitzer 	 */
2798eaf9a736SMike Snitzer 	(void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
2799eaf9a736SMike Snitzer 			    DMF_SUSPENDED_INTERNALLY);
2800ffcc3936SMike Snitzer 
28015df96f2bSMikulas Patocka 	set_bit(DMF_POST_SUSPENDING, &md->flags);
2802ffcc3936SMike Snitzer 	dm_table_postsuspend_targets(map);
28035df96f2bSMikulas Patocka 	clear_bit(DMF_POST_SUSPENDING, &md->flags);
2804ffcc3936SMike Snitzer }
2805ffcc3936SMike Snitzer 
2806ffcc3936SMike Snitzer static void __dm_internal_resume(struct mapped_device *md)
2807ffcc3936SMike Snitzer {
280896b26c8cSMikulas Patocka 	BUG_ON(!md->internal_suspend_count);
280996b26c8cSMikulas Patocka 
281096b26c8cSMikulas Patocka 	if (--md->internal_suspend_count)
2811ffcc3936SMike Snitzer 		return; /* resume from nested internal suspend */
2812ffcc3936SMike Snitzer 
2813ffcc3936SMike Snitzer 	if (dm_suspended_md(md))
2814ffcc3936SMike Snitzer 		goto done; /* resume from nested suspend */
2815ffcc3936SMike Snitzer 
2816ffcc3936SMike Snitzer 	/*
2817ffcc3936SMike Snitzer 	 * NOTE: existing callers don't need to call dm_table_resume_targets
2818ffcc3936SMike Snitzer 	 * (which may fail -- so best to avoid it for now by passing NULL map)
2819ffcc3936SMike Snitzer 	 */
2820ffcc3936SMike Snitzer 	(void) __dm_resume(md, NULL);
2821ffcc3936SMike Snitzer 
2822ffcc3936SMike Snitzer done:
2823ffcc3936SMike Snitzer 	clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2824ffcc3936SMike Snitzer 	smp_mb__after_atomic();
2825ffcc3936SMike Snitzer 	wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY);
2826ffcc3936SMike Snitzer }
2827ffcc3936SMike Snitzer 
2828ffcc3936SMike Snitzer void dm_internal_suspend_noflush(struct mapped_device *md)
2829fd2ed4d2SMikulas Patocka {
2830fd2ed4d2SMikulas Patocka 	mutex_lock(&md->suspend_lock);
2831ffcc3936SMike Snitzer 	__dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
2832ffcc3936SMike Snitzer 	mutex_unlock(&md->suspend_lock);
2833ffcc3936SMike Snitzer }
2834ffcc3936SMike Snitzer EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush);
2835ffcc3936SMike Snitzer 
2836ffcc3936SMike Snitzer void dm_internal_resume(struct mapped_device *md)
2837ffcc3936SMike Snitzer {
2838ffcc3936SMike Snitzer 	mutex_lock(&md->suspend_lock);
2839ffcc3936SMike Snitzer 	__dm_internal_resume(md);
2840ffcc3936SMike Snitzer 	mutex_unlock(&md->suspend_lock);
2841ffcc3936SMike Snitzer }
2842ffcc3936SMike Snitzer EXPORT_SYMBOL_GPL(dm_internal_resume);
2843ffcc3936SMike Snitzer 
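/*
 * Editor's sketch (not part of dm.c): a kernel-internal user quiescing I/O
 * around an operation, using the nesting-safe pair above.
 * example_quiesced_op() and do_metadata_op() are hypothetical.
 */
static int do_metadata_op(struct mapped_device *md)
{
	return 0;	/* placeholder for the real work */
}

static int example_quiesced_op(struct mapped_device *md)
{
	int r;

	dm_internal_suspend_noflush(md);	/* nests via internal_suspend_count */
	r = do_metadata_op(md);
	dm_internal_resume(md);

	return r;
}
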
2844ffcc3936SMike Snitzer /*
2845ffcc3936SMike Snitzer  * Fast variants of internal suspend/resume hold md->suspend_lock,
2846ffcc3936SMike Snitzer  * which prevents interaction with userspace-driven suspend.
2847ffcc3936SMike Snitzer  */
2848ffcc3936SMike Snitzer 
2849ffcc3936SMike Snitzer void dm_internal_suspend_fast(struct mapped_device *md)
2850ffcc3936SMike Snitzer {
2851ffcc3936SMike Snitzer 	mutex_lock(&md->suspend_lock);
2852ffcc3936SMike Snitzer 	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
2853fd2ed4d2SMikulas Patocka 		return;
2854fd2ed4d2SMikulas Patocka 
2855fd2ed4d2SMikulas Patocka 	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2856fd2ed4d2SMikulas Patocka 	synchronize_srcu(&md->io_barrier);
2857fd2ed4d2SMikulas Patocka 	flush_workqueue(md->wq);
2858fd2ed4d2SMikulas Patocka 	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
2859fd2ed4d2SMikulas Patocka }
2860b735fedeSMikulas Patocka EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);
2861fd2ed4d2SMikulas Patocka 
2862ffcc3936SMike Snitzer void dm_internal_resume_fast(struct mapped_device *md)
2863fd2ed4d2SMikulas Patocka {
2864ffcc3936SMike Snitzer 	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
2865fd2ed4d2SMikulas Patocka 		goto done;
2866fd2ed4d2SMikulas Patocka 
2867fd2ed4d2SMikulas Patocka 	dm_queue_flush(md);
2868fd2ed4d2SMikulas Patocka 
2869fd2ed4d2SMikulas Patocka done:
2870fd2ed4d2SMikulas Patocka 	mutex_unlock(&md->suspend_lock);
2871fd2ed4d2SMikulas Patocka }
2872b735fedeSMikulas Patocka EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
2873fd2ed4d2SMikulas Patocka 
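/*
 * Editor's sketch (not part of dm.c): the fast variants must be used as a
 * strictly nested pair, because dm_internal_suspend_fast() returns with
 * md->suspend_lock held and dm_internal_resume_fast() releases it.
 * example_with_quiesced_io() is hypothetical.
 */
static void example_with_quiesced_io(struct mapped_device *md)
{
	dm_internal_suspend_fast(md);	/* takes md->suspend_lock */
	/* ... no new bios reach the targets here ... */
	dm_internal_resume_fast(md);	/* releases md->suspend_lock */
}
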
28741da177e4SLinus Torvalds /*-----------------------------------------------------------------
28751da177e4SLinus Torvalds  * Event notification.
28761da177e4SLinus Torvalds  *---------------------------------------------------------------*/
28773abf85b5SPeter Rajnoha int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
287860935eb2SMilan Broz 		       unsigned cookie)
287969267a30SAlasdair G Kergon {
28806958c1c6SMikulas Patocka 	int r;
28816958c1c6SMikulas Patocka 	unsigned noio_flag;
288260935eb2SMilan Broz 	char udev_cookie[DM_COOKIE_LENGTH];
288360935eb2SMilan Broz 	char *envp[] = { udev_cookie, NULL };
288460935eb2SMilan Broz 
28856958c1c6SMikulas Patocka 	noio_flag = memalloc_noio_save();
28866958c1c6SMikulas Patocka 
288760935eb2SMilan Broz 	if (!cookie)
28886958c1c6SMikulas Patocka 		r = kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
288960935eb2SMilan Broz 	else {
289060935eb2SMilan Broz 		snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
289160935eb2SMilan Broz 			 DM_COOKIE_ENV_VAR_NAME, cookie);
28926958c1c6SMikulas Patocka 		r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
28933abf85b5SPeter Rajnoha 				       action, envp);
289460935eb2SMilan Broz 	}
28956958c1c6SMikulas Patocka 
28966958c1c6SMikulas Patocka 	memalloc_noio_restore(noio_flag);
28976958c1c6SMikulas Patocka 
28986958c1c6SMikulas Patocka 	return r;
289969267a30SAlasdair G Kergon }
290069267a30SAlasdair G Kergon 
29017a8c3d3bSMike Anderson uint32_t dm_next_uevent_seq(struct mapped_device *md)
29027a8c3d3bSMike Anderson {
29037a8c3d3bSMike Anderson 	return atomic_add_return(1, &md->uevent_seq);
29047a8c3d3bSMike Anderson }
29057a8c3d3bSMike Anderson 
29061da177e4SLinus Torvalds uint32_t dm_get_event_nr(struct mapped_device *md)
29071da177e4SLinus Torvalds {
29081da177e4SLinus Torvalds 	return atomic_read(&md->event_nr);
29091da177e4SLinus Torvalds }
29101da177e4SLinus Torvalds 
29111da177e4SLinus Torvalds int dm_wait_event(struct mapped_device *md, int event_nr)
29121da177e4SLinus Torvalds {
29131da177e4SLinus Torvalds 	return wait_event_interruptible(md->eventq,
29141da177e4SLinus Torvalds 			(event_nr != atomic_read(&md->event_nr)));
29151da177e4SLinus Torvalds }
29161da177e4SLinus Torvalds 
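/*
 * Editor's sketch (not part of dm.c): the read-then-wait loop the event
 * counter supports; userspace reaches the same logic via the DM_DEV_WAIT
 * ioctl. example_wait_for_change() is hypothetical.
 */
static int example_wait_for_change(struct mapped_device *md)
{
	int seen = dm_get_event_nr(md);

	/* Returns -ERESTARTSYS if interrupted by a signal. */
	return dm_wait_event(md, seen);
}
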
29177a8c3d3bSMike Anderson void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
29187a8c3d3bSMike Anderson {
29197a8c3d3bSMike Anderson 	unsigned long flags;
29207a8c3d3bSMike Anderson 
29217a8c3d3bSMike Anderson 	spin_lock_irqsave(&md->uevent_lock, flags);
29227a8c3d3bSMike Anderson 	list_add(elist, &md->uevent_list);
29237a8c3d3bSMike Anderson 	spin_unlock_irqrestore(&md->uevent_lock, flags);
29247a8c3d3bSMike Anderson }
29257a8c3d3bSMike Anderson 
29261da177e4SLinus Torvalds /*
29271da177e4SLinus Torvalds  * The gendisk is only valid as long as you have a reference
29281da177e4SLinus Torvalds  * count on 'md'.
29291da177e4SLinus Torvalds  */
29301da177e4SLinus Torvalds struct gendisk *dm_disk(struct mapped_device *md)
29311da177e4SLinus Torvalds {
29321da177e4SLinus Torvalds 	return md->disk;
29331da177e4SLinus Torvalds }
293465ff5b7dSSami Tolvanen EXPORT_SYMBOL_GPL(dm_disk);
29351da177e4SLinus Torvalds 
2936784aae73SMilan Broz struct kobject *dm_kobject(struct mapped_device *md)
2937784aae73SMilan Broz {
29382995fa78SMikulas Patocka 	return &md->kobj_holder.kobj;
2939784aae73SMilan Broz }
2940784aae73SMilan Broz 
2941784aae73SMilan Broz struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
2942784aae73SMilan Broz {
2943784aae73SMilan Broz 	struct mapped_device *md;
2944784aae73SMilan Broz 
29452995fa78SMikulas Patocka 	md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
2946784aae73SMilan Broz 
2947b9a41d21SHou Tao 	spin_lock(&_minor_lock);
2948b9a41d21SHou Tao 	if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
2949b9a41d21SHou Tao 		md = NULL;
2950b9a41d21SHou Tao 		goto out;
2951b9a41d21SHou Tao 	}
2952784aae73SMilan Broz 	dm_get(md);
2953b9a41d21SHou Tao out:
2954b9a41d21SHou Tao 	spin_unlock(&_minor_lock);
2955b9a41d21SHou Tao 
2956784aae73SMilan Broz 	return md;
2957784aae73SMilan Broz }
2958784aae73SMilan Broz 
29594f186f8bSKiyoshi Ueda int dm_suspended_md(struct mapped_device *md)
29601da177e4SLinus Torvalds {
29611da177e4SLinus Torvalds 	return test_bit(DMF_SUSPENDED, &md->flags);
29621da177e4SLinus Torvalds }
29631da177e4SLinus Torvalds 
29645df96f2bSMikulas Patocka static int dm_post_suspending_md(struct mapped_device *md)
29655df96f2bSMikulas Patocka {
29665df96f2bSMikulas Patocka 	return test_bit(DMF_POST_SUSPENDING, &md->flags);
29675df96f2bSMikulas Patocka }
29685df96f2bSMikulas Patocka 
2969ffcc3936SMike Snitzer int dm_suspended_internally_md(struct mapped_device *md)
2970ffcc3936SMike Snitzer {
2971ffcc3936SMike Snitzer 	return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2972ffcc3936SMike Snitzer }
2973ffcc3936SMike Snitzer 
29742c140a24SMikulas Patocka int dm_test_deferred_remove_flag(struct mapped_device *md)
29752c140a24SMikulas Patocka {
29762c140a24SMikulas Patocka 	return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
29772c140a24SMikulas Patocka }
29782c140a24SMikulas Patocka 
297964dbce58SKiyoshi Ueda int dm_suspended(struct dm_target *ti)
298064dbce58SKiyoshi Ueda {
298133bd6f06SMike Snitzer 	return dm_suspended_md(ti->table->md);
298264dbce58SKiyoshi Ueda }
298364dbce58SKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_suspended);
298464dbce58SKiyoshi Ueda 
29855df96f2bSMikulas Patocka int dm_post_suspending(struct dm_target *ti)
29865df96f2bSMikulas Patocka {
298733bd6f06SMike Snitzer 	return dm_post_suspending_md(ti->table->md);
29885df96f2bSMikulas Patocka }
29895df96f2bSMikulas Patocka EXPORT_SYMBOL_GPL(dm_post_suspending);
29905df96f2bSMikulas Patocka 
29912e93ccc1SKiyoshi Ueda int dm_noflush_suspending(struct dm_target *ti)
29922e93ccc1SKiyoshi Ueda {
299333bd6f06SMike Snitzer 	return __noflush_suspending(ti->table->md);
29942e93ccc1SKiyoshi Ueda }
29952e93ccc1SKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_noflush_suspending);
29962e93ccc1SKiyoshi Ueda 
2997e6ee8c0bSKiyoshi Ueda void dm_free_md_mempools(struct dm_md_mempools *pools)
2998e6ee8c0bSKiyoshi Ueda {
2999e6ee8c0bSKiyoshi Ueda 	if (!pools)
3000e6ee8c0bSKiyoshi Ueda 		return;
3001e6ee8c0bSKiyoshi Ueda 
30026f1c819cSKent Overstreet 	bioset_exit(&pools->bs);
30036f1c819cSKent Overstreet 	bioset_exit(&pools->io_bs);
3004e6ee8c0bSKiyoshi Ueda 
3005e6ee8c0bSKiyoshi Ueda 	kfree(pools);
3006e6ee8c0bSKiyoshi Ueda }
3007e6ee8c0bSKiyoshi Ueda 
30089c72bad1SChristoph Hellwig struct dm_pr {
30099c72bad1SChristoph Hellwig 	u64	old_key;
30109c72bad1SChristoph Hellwig 	u64	new_key;
30119c72bad1SChristoph Hellwig 	u32	flags;
30129c72bad1SChristoph Hellwig 	bool	fail_early;
30139c72bad1SChristoph Hellwig };
30149c72bad1SChristoph Hellwig 
30159c72bad1SChristoph Hellwig static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn,
30169c72bad1SChristoph Hellwig 		      void *data)
30179c72bad1SChristoph Hellwig {
30189c72bad1SChristoph Hellwig 	struct mapped_device *md = bdev->bd_disk->private_data;
30199c72bad1SChristoph Hellwig 	struct dm_table *table;
30209c72bad1SChristoph Hellwig 	struct dm_target *ti;
30219c72bad1SChristoph Hellwig 	int ret = -ENOTTY, srcu_idx;
30229c72bad1SChristoph Hellwig 
30239c72bad1SChristoph Hellwig 	table = dm_get_live_table(md, &srcu_idx);
30249c72bad1SChristoph Hellwig 	if (!table || !dm_table_get_size(table))
30259c72bad1SChristoph Hellwig 		goto out;
30269c72bad1SChristoph Hellwig 
30279c72bad1SChristoph Hellwig 	/* We only support devices that have a single target */
30289c72bad1SChristoph Hellwig 	if (dm_table_get_num_targets(table) != 1)
30299c72bad1SChristoph Hellwig 		goto out;
30309c72bad1SChristoph Hellwig 	ti = dm_table_get_target(table, 0);
30319c72bad1SChristoph Hellwig 
30329c72bad1SChristoph Hellwig 	ret = -EINVAL;
30339c72bad1SChristoph Hellwig 	if (!ti->type->iterate_devices)
30349c72bad1SChristoph Hellwig 		goto out;
30359c72bad1SChristoph Hellwig 
30369c72bad1SChristoph Hellwig 	ret = ti->type->iterate_devices(ti, fn, data);
30379c72bad1SChristoph Hellwig out:
30389c72bad1SChristoph Hellwig 	dm_put_live_table(md, srcu_idx);
30399c72bad1SChristoph Hellwig 	return ret;
30409c72bad1SChristoph Hellwig }
30419c72bad1SChristoph Hellwig 
30429c72bad1SChristoph Hellwig /*
30439c72bad1SChristoph Hellwig  * For register / unregister we need to manually call out to every path.
30449c72bad1SChristoph Hellwig  */
30459c72bad1SChristoph Hellwig static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev,
30469c72bad1SChristoph Hellwig 			    sector_t start, sector_t len, void *data)
30479c72bad1SChristoph Hellwig {
30489c72bad1SChristoph Hellwig 	struct dm_pr *pr = data;
30499c72bad1SChristoph Hellwig 	const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
30509c72bad1SChristoph Hellwig 
30519c72bad1SChristoph Hellwig 	if (!ops || !ops->pr_register)
30529c72bad1SChristoph Hellwig 		return -EOPNOTSUPP;
30539c72bad1SChristoph Hellwig 	return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags);
30549c72bad1SChristoph Hellwig }
30559c72bad1SChristoph Hellwig 
305671cdb697SChristoph Hellwig static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
305771cdb697SChristoph Hellwig 			  u32 flags)
305871cdb697SChristoph Hellwig {
30599c72bad1SChristoph Hellwig 	struct dm_pr pr = {
30609c72bad1SChristoph Hellwig 		.old_key	= old_key,
30619c72bad1SChristoph Hellwig 		.new_key	= new_key,
30629c72bad1SChristoph Hellwig 		.flags		= flags,
30639c72bad1SChristoph Hellwig 		.fail_early	= true,
30649c72bad1SChristoph Hellwig 	};
30659c72bad1SChristoph Hellwig 	int ret;
306671cdb697SChristoph Hellwig 
30679c72bad1SChristoph Hellwig 	ret = dm_call_pr(bdev, __dm_pr_register, &pr);
30689c72bad1SChristoph Hellwig 	if (ret && new_key) {
30699c72bad1SChristoph Hellwig 		/* unregister all paths if we failed to register any path */
30709c72bad1SChristoph Hellwig 		pr.old_key = new_key;
30719c72bad1SChristoph Hellwig 		pr.new_key = 0;
30729c72bad1SChristoph Hellwig 		pr.flags = 0;
30739c72bad1SChristoph Hellwig 		pr.fail_early = false;
30749c72bad1SChristoph Hellwig 		dm_call_pr(bdev, __dm_pr_register, &pr);
30759c72bad1SChristoph Hellwig 	}
307671cdb697SChristoph Hellwig 
30779c72bad1SChristoph Hellwig 	return ret;
307871cdb697SChristoph Hellwig }
307971cdb697SChristoph Hellwig 
308071cdb697SChristoph Hellwig static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
308171cdb697SChristoph Hellwig 			 u32 flags)
308271cdb697SChristoph Hellwig {
308371cdb697SChristoph Hellwig 	struct mapped_device *md = bdev->bd_disk->private_data;
308471cdb697SChristoph Hellwig 	const struct pr_ops *ops;
3085971888c4SMike Snitzer 	int r, srcu_idx;
308671cdb697SChristoph Hellwig 
30875bd5e8d8SMike Snitzer 	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
308871cdb697SChristoph Hellwig 	if (r < 0)
3089971888c4SMike Snitzer 		goto out;
309071cdb697SChristoph Hellwig 
309171cdb697SChristoph Hellwig 	ops = bdev->bd_disk->fops->pr_ops;
309271cdb697SChristoph Hellwig 	if (ops && ops->pr_reserve)
309371cdb697SChristoph Hellwig 		r = ops->pr_reserve(bdev, key, type, flags);
309471cdb697SChristoph Hellwig 	else
309571cdb697SChristoph Hellwig 		r = -EOPNOTSUPP;
3096971888c4SMike Snitzer out:
3097971888c4SMike Snitzer 	dm_unprepare_ioctl(md, srcu_idx);
309871cdb697SChristoph Hellwig 	return r;
309971cdb697SChristoph Hellwig }
310071cdb697SChristoph Hellwig 
310171cdb697SChristoph Hellwig static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
310271cdb697SChristoph Hellwig {
310371cdb697SChristoph Hellwig 	struct mapped_device *md = bdev->bd_disk->private_data;
310471cdb697SChristoph Hellwig 	const struct pr_ops *ops;
3105971888c4SMike Snitzer 	int r, srcu_idx;
310671cdb697SChristoph Hellwig 
31075bd5e8d8SMike Snitzer 	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
310871cdb697SChristoph Hellwig 	if (r < 0)
3109971888c4SMike Snitzer 		goto out;
311071cdb697SChristoph Hellwig 
311171cdb697SChristoph Hellwig 	ops = bdev->bd_disk->fops->pr_ops;
311271cdb697SChristoph Hellwig 	if (ops && ops->pr_release)
311371cdb697SChristoph Hellwig 		r = ops->pr_release(bdev, key, type);
311471cdb697SChristoph Hellwig 	else
311571cdb697SChristoph Hellwig 		r = -EOPNOTSUPP;
3116971888c4SMike Snitzer out:
3117971888c4SMike Snitzer 	dm_unprepare_ioctl(md, srcu_idx);
311871cdb697SChristoph Hellwig 	return r;
311971cdb697SChristoph Hellwig }
312071cdb697SChristoph Hellwig 
312171cdb697SChristoph Hellwig static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
312271cdb697SChristoph Hellwig 			 enum pr_type type, bool abort)
312371cdb697SChristoph Hellwig {
312471cdb697SChristoph Hellwig 	struct mapped_device *md = bdev->bd_disk->private_data;
312571cdb697SChristoph Hellwig 	const struct pr_ops *ops;
3126971888c4SMike Snitzer 	int r, srcu_idx;
312771cdb697SChristoph Hellwig 
31285bd5e8d8SMike Snitzer 	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
312971cdb697SChristoph Hellwig 	if (r < 0)
3130971888c4SMike Snitzer 		goto out;
313171cdb697SChristoph Hellwig 
313271cdb697SChristoph Hellwig 	ops = bdev->bd_disk->fops->pr_ops;
313371cdb697SChristoph Hellwig 	if (ops && ops->pr_preempt)
313471cdb697SChristoph Hellwig 		r = ops->pr_preempt(bdev, old_key, new_key, type, abort);
313571cdb697SChristoph Hellwig 	else
313671cdb697SChristoph Hellwig 		r = -EOPNOTSUPP;
3137971888c4SMike Snitzer out:
3138971888c4SMike Snitzer 	dm_unprepare_ioctl(md, srcu_idx);
313971cdb697SChristoph Hellwig 	return r;
314071cdb697SChristoph Hellwig }
314171cdb697SChristoph Hellwig 
314271cdb697SChristoph Hellwig static int dm_pr_clear(struct block_device *bdev, u64 key)
314371cdb697SChristoph Hellwig {
314471cdb697SChristoph Hellwig 	struct mapped_device *md = bdev->bd_disk->private_data;
314571cdb697SChristoph Hellwig 	const struct pr_ops *ops;
3146971888c4SMike Snitzer 	int r, srcu_idx;
314771cdb697SChristoph Hellwig 
31485bd5e8d8SMike Snitzer 	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
314971cdb697SChristoph Hellwig 	if (r < 0)
3150971888c4SMike Snitzer 		goto out;
315171cdb697SChristoph Hellwig 
315271cdb697SChristoph Hellwig 	ops = bdev->bd_disk->fops->pr_ops;
315371cdb697SChristoph Hellwig 	if (ops && ops->pr_clear)
315471cdb697SChristoph Hellwig 		r = ops->pr_clear(bdev, key);
315571cdb697SChristoph Hellwig 	else
315671cdb697SChristoph Hellwig 		r = -EOPNOTSUPP;
3157971888c4SMike Snitzer out:
3158971888c4SMike Snitzer 	dm_unprepare_ioctl(md, srcu_idx);
315971cdb697SChristoph Hellwig 	return r;
316071cdb697SChristoph Hellwig }
316171cdb697SChristoph Hellwig 
316271cdb697SChristoph Hellwig static const struct pr_ops dm_pr_ops = {
316371cdb697SChristoph Hellwig 	.pr_register	= dm_pr_register,
316471cdb697SChristoph Hellwig 	.pr_reserve	= dm_pr_reserve,
316571cdb697SChristoph Hellwig 	.pr_release	= dm_pr_release,
316671cdb697SChristoph Hellwig 	.pr_preempt	= dm_pr_preempt,
316771cdb697SChristoph Hellwig 	.pr_clear	= dm_pr_clear,
316871cdb697SChristoph Hellwig };
316971cdb697SChristoph Hellwig 
317083d5cde4SAlexey Dobriyan static const struct block_device_operations dm_blk_dops = {
3171c62b37d9SChristoph Hellwig 	.submit_bio = dm_submit_bio,
3172b99fdcdcSMing Lei 	.poll_bio = dm_poll_bio,
31731da177e4SLinus Torvalds 	.open = dm_blk_open,
31741da177e4SLinus Torvalds 	.release = dm_blk_close,
3175aa129a22SMilan Broz 	.ioctl = dm_blk_ioctl,
31763ac51e74SDarrick J. Wong 	.getgeo = dm_blk_getgeo,
3177e76239a3SChristoph Hellwig 	.report_zones = dm_blk_report_zones,
317871cdb697SChristoph Hellwig 	.pr_ops = &dm_pr_ops,
31791da177e4SLinus Torvalds 	.owner = THIS_MODULE
31801da177e4SLinus Torvalds };
31811da177e4SLinus Torvalds 
3182681cc5e8SMike Snitzer static const struct block_device_operations dm_rq_blk_dops = {
3183681cc5e8SMike Snitzer 	.open = dm_blk_open,
3184681cc5e8SMike Snitzer 	.release = dm_blk_close,
3185681cc5e8SMike Snitzer 	.ioctl = dm_blk_ioctl,
3186681cc5e8SMike Snitzer 	.getgeo = dm_blk_getgeo,
3187681cc5e8SMike Snitzer 	.pr_ops = &dm_pr_ops,
3188681cc5e8SMike Snitzer 	.owner = THIS_MODULE
3189681cc5e8SMike Snitzer };
3190681cc5e8SMike Snitzer 
3191f26c5719SDan Williams static const struct dax_operations dm_dax_ops = {
3192f26c5719SDan Williams 	.direct_access = dm_dax_direct_access,
3193cdf6cdcdSVivek Goyal 	.zero_page_range = dm_dax_zero_page_range,
3194047218ecSJane Chu 	.recovery_write = dm_dax_recovery_write,
3195f26c5719SDan Williams };
3196f26c5719SDan Williams 
31971da177e4SLinus Torvalds /*
31981da177e4SLinus Torvalds  * module hooks
31991da177e4SLinus Torvalds  */
32001da177e4SLinus Torvalds module_init(dm_init);
32011da177e4SLinus Torvalds module_exit(dm_exit);
32021da177e4SLinus Torvalds 
32031da177e4SLinus Torvalds module_param(major, uint, 0);
32041da177e4SLinus Torvalds MODULE_PARM_DESC(major, "The major number of the device mapper");
3205f4790826SMike Snitzer 
3206e8603136SMike Snitzer module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
3207e8603136SMike Snitzer MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
3208e8603136SMike Snitzer 
3209115485e8SMike Snitzer module_param(dm_numa_node, int, S_IRUGO | S_IWUSR);
3210115485e8SMike Snitzer MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");
3211115485e8SMike Snitzer 
3212a666e5c0SMikulas Patocka module_param(swap_bios, int, S_IRUGO | S_IWUSR);
3213a666e5c0SMikulas Patocka MODULE_PARM_DESC(swap_bios, "Maximum allowed inflight swap IOs");
3214a666e5c0SMikulas Patocka 
32151da177e4SLinus Torvalds MODULE_DESCRIPTION(DM_NAME " driver");
32161da177e4SLinus Torvalds MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
32171da177e4SLinus Torvalds MODULE_LICENSE("GPL");
3218