/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"
#include "dm-uevent.h"
#include "dm-ima.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/uio.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/pr.h>
#include <linux/refcount.h>
#include <linux/part_stat.h>
#include <linux/blk-crypto.h>
#include <linux/blk-crypto-profile.h>

#define DM_MSG_PREFIX "core"

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_IDR(_minor_idr);

static DEFINE_SPINLOCK(_minor_lock);

static void do_deferred_remove(struct work_struct *w);

static DECLARE_WORK(deferred_remove_work, do_deferred_remove);

static struct workqueue_struct *deferred_remove_workqueue;

atomic_t dm_global_event_nr = ATOMIC_INIT(0);
DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);

void dm_issue_global_event(void)
{
	atomic_inc(&dm_global_event_nr);
	wake_up(&dm_global_eventq);
}

/*
 * One of these is allocated (on-stack) per original bio.
 */
struct clone_info {
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	unsigned sector_count;
};

#define DM_TARGET_IO_BIO_OFFSET (offsetof(struct dm_target_io, clone))
#define DM_IO_BIO_OFFSET \
	(offsetof(struct dm_target_io, clone) + offsetof(struct dm_io, tio))

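/*
 * Memory layout sketch (inferred from the offsets above, for orientation
 * only): the clone bio is embedded in a struct dm_target_io, which may in
 * turn be embedded in a struct dm_io, and any target-requested per-bio data
 * is front-padded immediately before the outermost structure:
 *
 *   [ per-bio data | dm_io { ... tio { ... clone } } ]   (tio->inside_dm_io)
 *   [ per-bio data | dm_target_io { ... clone } ]        (separately allocated tio)
 *
 * dm_per_bio_data() below walks backwards from the clone bio using these
 * offsets to recover the per-bio data pointer.
 */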
void *dm_per_bio_data(struct bio *bio, size_t data_size)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	if (!tio->inside_dm_io)
		return (char *)bio - DM_TARGET_IO_BIO_OFFSET - data_size;
	return (char *)bio - DM_IO_BIO_OFFSET - data_size;
}
EXPORT_SYMBOL_GPL(dm_per_bio_data);
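
/*
 * Example (hypothetical target code, not part of this file): a target that
 * set ti->per_io_data_size = sizeof(struct my_per_bio) in its ctr could
 * recover its per-bio state in the map or end_io path with:
 *
 *	struct my_per_bio *pb = dm_per_bio_data(bio, sizeof(struct my_per_bio));
 *
 * dm_bio_from_per_bio_data() below is the inverse operation.
 */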

struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
{
	struct dm_io *io = (struct dm_io *)((char *)data + data_size);
	if (io->magic == DM_IO_MAGIC)
		return (struct bio *)((char *)io + DM_IO_BIO_OFFSET);
	BUG_ON(io->magic != DM_TIO_MAGIC);
	return (struct bio *)((char *)io + DM_TARGET_IO_BIO_OFFSET);
}
EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data);

unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
{
	return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
}
EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);

#define MINOR_ALLOCED ((void *)-1)

#define DM_NUMA_NODE NUMA_NO_NODE
static int dm_numa_node = DM_NUMA_NODE;

#define DEFAULT_SWAP_BIOS	(8 * 1048576 / PAGE_SIZE)
static int swap_bios = DEFAULT_SWAP_BIOS;
static int get_swap_bios(void)
{
	int latch = READ_ONCE(swap_bios);
	if (unlikely(latch <= 0))
		latch = DEFAULT_SWAP_BIOS;
	return latch;
}

/*
 * For mempool pre-allocation at table load time.
 */
struct dm_md_mempools {
	struct bio_set bs;
	struct bio_set io_bs;
};

struct table_device {
	struct list_head list;
	refcount_t count;
	struct dm_dev dm_dev;
};

/*
 * Bio-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_BIO_BASED_IOS		16
static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;

static int __dm_get_module_param_int(int *module_param, int min, int max)
{
	int param = READ_ONCE(*module_param);
	int modified_param = 0;
	bool modified = true;

	if (param < min)
		modified_param = min;
	else if (param > max)
		modified_param = max;
	else
		modified = false;

	if (modified) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}
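
/*
 * Note the lock-free clamp above: cmpxchg() only rewrites the module
 * parameter if it still holds the out-of-range value that was just read,
 * so a concurrent update of the parameter to a new value is never
 * overwritten.
 */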

unsigned __dm_get_module_param(unsigned *module_param,
			       unsigned def, unsigned max)
{
	unsigned param = READ_ONCE(*module_param);
	unsigned modified_param = 0;

	if (!param)
		modified_param = def;
	else if (param > max)
		modified_param = max;

	if (modified_param) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned dm_get_reserved_bio_based_ios(void)
{
	return __dm_get_module_param(&reserved_bio_based_ios,
				     RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);

static unsigned dm_get_numa_node(void)
{
	return __dm_get_module_param_int(&dm_numa_node,
					 DM_NUMA_NODE, num_online_nodes() - 1);
}

static int __init local_init(void)
{
	int r;

	r = dm_uevent_init();
	if (r)
		return r;

	deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
	if (!deferred_remove_workqueue) {
		r = -ENOMEM;
		goto out_uevent_exit;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_free_workqueue;

	if (!_major)
		_major = r;

	return 0;

out_free_workqueue:
	destroy_workqueue(deferred_remove_workqueue);
out_uevent_exit:
	dm_uevent_exit();

	return r;
}

static void local_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(deferred_remove_workqueue);

	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_io_init,
	dm_kcopyd_init,
	dm_interface_init,
	dm_statistics_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_io_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
	dm_statistics_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);
	int r, i;

#if (IS_ENABLED(CONFIG_IMA) && !IS_ENABLED(CONFIG_IMA_DISABLE_HTABLE))
	DMWARN("CONFIG_IMA_DISABLE_HTABLE is disabled."
	       " Duplicate IMA measurements will not be recorded in the IMA log.");
#endif

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;
bad:
	while (i--)
		_exits[i]();

	return r;
}
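
/*
 * Note the unwind pattern above: if _inits[i] fails, "while (i--)" invokes
 * the matching _exits[] entries in reverse order, so the two arrays must
 * stay index-aligned.
 */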

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();

	/*
	 * Should be empty by this point.
	 */
	idr_destroy(&_minor_idr);
}

/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{
	return test_bit(DMF_DELETING, &md->flags);
}

static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);
out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}

static void dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = disk->private_data;
	if (WARN_ON(!md))
		goto out;

	if (atomic_dec_and_test(&md->open_count) &&
	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
		queue_work(deferred_remove_workqueue, &deferred_remove_work);

	dm_put(md);
out:
	spin_unlock(&_minor_lock);
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md)) {
		r = -EBUSY;
		if (mark_deferred)
			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
		r = -EEXIST;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

int dm_cancel_deferred_remove(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (test_bit(DMF_DELETING, &md->flags))
		r = -EBUSY;
	else
		clear_bit(DMF_DEFERRED_REMOVE, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static void do_deferred_remove(struct work_struct *w)
{
	dm_deferred_remove();
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
			    struct block_device **bdev)
{
	struct dm_target *tgt;
	struct dm_table *map;
	int r;

retry:
	r = -ENOTTY;
	map = dm_get_live_table(md, srcu_idx);
	if (!map || !dm_table_get_size(map))
		return r;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		return r;

	tgt = dm_table_get_target(map, 0);
	if (!tgt->type->prepare_ioctl)
		return r;

	if (dm_suspended_md(md))
		return -EAGAIN;

	r = tgt->type->prepare_ioctl(tgt, bdev);
	if (r == -ENOTCONN && !fatal_signal_pending(current)) {
		dm_put_live_table(md, *srcu_idx);
		msleep(10);
		goto retry;
	}

	return r;
}
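
/*
 * Note on the retry loop above: -ENOTCONN from ->prepare_ioctl (e.g. no
 * usable path in a multipath target) drops the live table reference and
 * retries after 10ms, until the target succeeds, fails differently, or a
 * fatal signal is pending.
 */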

static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
{
	dm_put_live_table(md, srcu_idx);
}

static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	if (r > 0) {
		/*
		 * Target determined this ioctl is being issued against a
		 * subset of the parent bdev; require extra privileges.
		 */
		if (!capable(CAP_SYS_RAWIO)) {
			DMDEBUG_LIMIT(
	"%s: sending ioctl %x to DM device without required privilege.",
				current->comm, cmd);
			r = -ENOIOCTLCMD;
			goto out;
		}
	}

	if (!bdev->bd_disk->fops->ioctl)
		r = -ENOTTY;
	else
		r = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}

u64 dm_start_time_ns_from_clone(struct bio *bio)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	struct dm_io *io = tio->io;

	return jiffies_to_nsecs(io->start_time);
}
EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone);
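
/*
 * Example (hypothetical target or path-selector code): an end_io hook that
 * wants the submission time of the original bio, e.g. to estimate per-path
 * latency, could use:
 *
 *	u64 start_ns = dm_start_time_ns_from_clone(clone);
 *
 * The value is converted from jiffies, so only tick-granularity accuracy
 * should be expected.
 */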

static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->orig_bio;

	io->start_time = bio_start_io_acct(bio);
	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio_data_dir(bio),
				    bio->bi_iter.bi_sector, bio_sectors(bio),
				    false, 0, &io->stats_aux);
}

static void end_io_acct(struct mapped_device *md, struct bio *bio,
			unsigned long start_time, struct dm_stats_aux *stats_aux)
{
	unsigned long duration = jiffies - start_time;

	bio_end_io_acct(bio, start_time);

	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio_data_dir(bio),
				    bio->bi_iter.bi_sector, bio_sectors(bio),
				    true, duration, stats_aux);

	/* nudge anyone waiting on suspend queue */
	if (unlikely(wq_has_sleeper(&md->wait)))
		wake_up(&md->wait);
}

static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
{
	struct dm_io *io;
	struct dm_target_io *tio;
	struct bio *clone;

	clone = bio_alloc_bioset(GFP_NOIO, 0, &md->io_bs);
	if (!clone)
		return NULL;

	tio = container_of(clone, struct dm_target_io, clone);
	tio->inside_dm_io = true;
	tio->io = NULL;

	io = container_of(tio, struct dm_io, tio);
	io->magic = DM_IO_MAGIC;
	io->status = 0;
	atomic_set(&io->io_count, 1);
	io->orig_bio = bio;
	io->md = md;
	spin_lock_init(&io->endio_lock);

	start_io_acct(io);

	return io;
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	bio_put(&io->tio.clone);
}

static struct dm_target_io *alloc_tio(struct clone_info *ci, struct dm_target *ti,
				      unsigned target_bio_nr, gfp_t gfp_mask)
{
	struct dm_target_io *tio;

	if (!ci->io->tio.io) {
		/* the dm_target_io embedded in ci->io is available */
		tio = &ci->io->tio;
	} else {
		struct bio *clone = bio_alloc_bioset(gfp_mask, 0, &ci->io->md->bs);
		if (!clone)
			return NULL;

		tio = container_of(clone, struct dm_target_io, clone);
		tio->inside_dm_io = false;
	}

	tio->magic = DM_TIO_MAGIC;
	tio->io = ci->io;
	tio->ti = ti;
	tio->target_bio_nr = target_bio_nr;

	return tio;
}
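
/*
 * Fast path note: the first clone for an io reuses the dm_target_io that is
 * embedded in struct dm_io (set up in alloc_io() above); only additional
 * clones, e.g. for targets that request num_*bios > 1, take a fresh
 * front-padded bio from md->bs.
 */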

static void free_tio(struct dm_target_io *tio)
{
	if (tio->inside_dm_io)
		return;
	bio_put(&tio->clone);
}

/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&md->deferred_lock, flags);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irqrestore(&md->deferred_lock, flags);
	queue_work(md->wq, &md->work);
}

/*
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_put_live_table() when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
{
	*srcu_idx = srcu_read_lock(&md->io_barrier);

	return srcu_dereference(md->map, &md->io_barrier);
}

void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
{
	srcu_read_unlock(&md->io_barrier, srcu_idx);
}

void dm_sync_table(struct mapped_device *md)
{
	synchronize_srcu(&md->io_barrier);
	synchronize_rcu_expedited();
}

/*
 * A fast alternative to dm_get_live_table/dm_put_live_table.
 * The caller must not block between these two functions.
 */
static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
{
	rcu_read_lock();
	return rcu_dereference(md->map);
}

static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
{
	rcu_read_unlock();
}

static char *_dm_claim_ptr = "I belong to device-mapper";

/*
 * Open a table device so we can use it as a map destination.
 */
static int open_table_device(struct table_device *td, dev_t dev,
			     struct mapped_device *md)
{
	struct block_device *bdev;
	u64 part_off;
	int r;

	BUG_ON(td->dm_dev.bdev);

	bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _dm_claim_ptr);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	r = bd_link_disk_holder(bdev, dm_disk(md));
	if (r) {
		blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
		return r;
	}

	td->dm_dev.bdev = bdev;
	td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev, &part_off);
	return 0;
}

/*
 * Close a table device that we've been using.
 */
static void close_table_device(struct table_device *td, struct mapped_device *md)
{
	if (!td->dm_dev.bdev)
		return;

	bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
	blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
	put_dax(td->dm_dev.dax_dev);
	td->dm_dev.bdev = NULL;
	td->dm_dev.dax_dev = NULL;
}

static struct table_device *find_table_device(struct list_head *l, dev_t dev,
					      fmode_t mode)
{
	struct table_device *td;

	list_for_each_entry(td, l, list)
		if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
			return td;

	return NULL;
}

int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
			struct dm_dev **result)
{
	int r;
	struct table_device *td;

	mutex_lock(&md->table_devices_lock);
	td = find_table_device(&md->table_devices, dev, mode);
	if (!td) {
		td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
		if (!td) {
			mutex_unlock(&md->table_devices_lock);
			return -ENOMEM;
		}

		td->dm_dev.mode = mode;
		td->dm_dev.bdev = NULL;

		if ((r = open_table_device(td, dev, md))) {
			mutex_unlock(&md->table_devices_lock);
			kfree(td);
			return r;
		}

		format_dev_t(td->dm_dev.name, dev);

		refcount_set(&td->count, 1);
		list_add(&td->list, &md->table_devices);
	} else {
		refcount_inc(&td->count);
	}
	mutex_unlock(&md->table_devices_lock);

	*result = &td->dm_dev;
	return 0;
}
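
/*
 * Table devices are refcounted per (dev_t, mode) pair, so repeated
 * dm_get_table_device() calls for the same underlying device share a single
 * open bdev; it is only really closed once the final dm_put_table_device()
 * drops td->count to zero.
 */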

void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
{
	struct table_device *td = container_of(d, struct table_device, dm_dev);

	mutex_lock(&md->table_devices_lock);
	if (refcount_dec_and_test(&td->count)) {
		close_table_device(td, md);
		list_del(&td->list);
		kfree(td);
	}
	mutex_unlock(&md->table_devices_lock);
}

static void free_table_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct table_device *td = list_entry(tmp, struct table_device, list);

		DMWARN("dm_destroy: %s still exists with %d references",
		       td->dm_dev.name, refcount_read(&td->count));
		kfree(td);
	}
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}
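
/*
 * Worked example for the capacity check above: a classic ATA CHS geometry
 * of 16383/16/63 gives sz = 16383 * 16 * 63 = 16,514,064 sectors, so any
 * geo->start at or below that is accepted; the geometry is stored verbatim
 * and only reported back via dm_blk_getgeo().
 */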

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
void dm_io_dec_pending(struct dm_io *io, blk_status_t error)
{
	unsigned long flags;
	blk_status_t io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;
	unsigned long start_time = 0;
	struct dm_stats_aux stats_aux;

	/* Push-back supersedes any I/O errors */
	if (unlikely(error)) {
		spin_lock_irqsave(&io->endio_lock, flags);
		if (!(io->status == BLK_STS_DM_REQUEUE && __noflush_suspending(md)))
			io->status = error;
		spin_unlock_irqrestore(&io->endio_lock, flags);
	}

	if (atomic_dec_and_test(&io->io_count)) {
		bio = io->orig_bio;
		if (io->status == BLK_STS_DM_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md) &&
			    !WARN_ON_ONCE(dm_is_zone_write(md, bio))) {
				/* NOTE early return due to BLK_STS_DM_REQUEUE below */
				bio_list_add_head(&md->deferred, bio);
			} else {
				/*
				 * noflush suspend was interrupted or this is
				 * a write to a zoned target.
				 */
				io->status = BLK_STS_IOERR;
			}
			spin_unlock_irqrestore(&md->deferred_lock, flags);
		}

		io_error = io->status;
		start_time = io->start_time;
		stats_aux = io->stats_aux;
		free_io(md, io);
		end_io_acct(md, bio, start_time, &stats_aux);

		if (io_error == BLK_STS_DM_REQUEUE)
			return;

		if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
			/*
			 * Preflush done for flush with data, reissue
			 * without REQ_PREFLUSH.
			 */
			bio->bi_opf &= ~REQ_PREFLUSH;
			queue_io(md, bio);
		} else {
			/* done with normal IO or empty flush */
			if (io_error)
				bio->bi_status = io_error;
			bio_endio(bio);
		}
	}
}
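
/*
 * Lifecycle note: io->io_count starts at 1 in alloc_io(); __map_bio() takes
 * an extra reference per clone via dm_io_inc_pending(), and the original
 * bio is only completed (or requeued) once every clone has passed through
 * dm_io_dec_pending() above.
 */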

void disable_discard(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support DISCARD, disable it */
	limits->max_discard_sectors = 0;
	blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue);
}

void disable_write_same(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE SAME, disable it */
	limits->max_write_same_sectors = 0;
}

void disable_write_zeroes(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE ZEROES, disable it */
	limits->max_write_zeroes_sectors = 0;
}

static bool swap_bios_limit(struct dm_target *ti, struct bio *bio)
{
	return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios);
}

static void clone_endio(struct bio *bio)
{
	blk_status_t error = bio->bi_status;
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;
	struct request_queue *q = bio->bi_bdev->bd_disk->queue;

	if (unlikely(error == BLK_STS_TARGET)) {
		if (bio_op(bio) == REQ_OP_DISCARD &&
		    !q->limits.max_discard_sectors)
			disable_discard(md);
		else if (bio_op(bio) == REQ_OP_WRITE_SAME &&
			 !q->limits.max_write_same_sectors)
			disable_write_same(md);
		else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
			 !q->limits.max_write_zeroes_sectors)
			disable_write_zeroes(md);
	}

	if (blk_queue_is_zoned(q))
		dm_zone_endio(io, bio);

	if (endio) {
		int r = endio(tio->ti, bio, &error);
		switch (r) {
		case DM_ENDIO_REQUEUE:
			/*
			 * Requeuing writes to a sequential zone of a zoned
			 * target will break the sequential write pattern:
			 * fail such IO.
			 */
			if (WARN_ON_ONCE(dm_is_zone_write(md, bio)))
				error = BLK_STS_IOERR;
			else
				error = BLK_STS_DM_REQUEUE;
			fallthrough;
		case DM_ENDIO_DONE:
			break;
		case DM_ENDIO_INCOMPLETE:
			/* The target will handle the io */
			return;
		default:
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	if (unlikely(swap_bios_limit(tio->ti, bio))) {
		struct mapped_device *md = io->md;
		up(&md->swap_bios_semaphore);
	}

	free_tio(tio);
	dm_io_dec_pending(io, error);
}

/*
 * Return maximum size of I/O possible at the supplied sector up to the current
 * target boundary.
 */
static inline sector_t max_io_len_target_boundary(struct dm_target *ti,
						  sector_t target_offset)
{
	return ti->len - target_offset;
}

static sector_t max_io_len(struct dm_target *ti, sector_t sector)
{
	sector_t target_offset = dm_target_offset(ti, sector);
	sector_t len = max_io_len_target_boundary(ti, target_offset);
	sector_t max_len;

	/*
	 * Does the target need to split IO even further?
	 * - varied (per target) IO splitting is a tenet of DM; this
	 *   explains why stacked chunk_sectors based splitting via
	 *   blk_max_size_offset() isn't possible here. So pass in
	 *   ti->max_io_len to override stacked chunk_sectors.
	 */
	if (ti->max_io_len) {
		max_len = blk_max_size_offset(ti->table->md->queue,
					      target_offset, ti->max_io_len);
		if (len > max_len)
			len = max_len;
	}

	return len;
}
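
/*
 * Worked example (ignoring the additional queue max_sectors clamp applied
 * inside blk_max_size_offset()): with ti->max_io_len == 64 and
 * target_offset == 90, the next 64-sector boundary sits at 128, so
 * max_len = 64 - (90 & 63) = 38 sectors, even if the target boundary
 * itself would allow more.
 */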

int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
{
	if (len > UINT_MAX) {
		DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
		      (unsigned long long)len, UINT_MAX);
		ti->error = "Maximum size of target IO is too large";
		return -EINVAL;
	}

	ti->max_io_len = (uint32_t) len;

	return 0;
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
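
/*
 * Example (hypothetical target code): a striping-style target whose map
 * routine must not cross a chunk boundary would typically call this from
 * its ctr:
 *
 *	r = dm_set_target_max_io_len(ti, chunk_size);
 *	if (r)
 *		return r;
 */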

static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
						sector_t sector, int *srcu_idx)
	__acquires(md->io_barrier)
{
	struct dm_table *map;
	struct dm_target *ti;

	map = dm_get_live_table(md, srcu_idx);
	if (!map)
		return NULL;

	ti = dm_table_find_target(map, sector);
	if (!ti)
		return NULL;

	return ti;
}

static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
				 long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long len, ret = -EIO;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->direct_access)
		goto out;
	len = max_io_len(ti, sector) / PAGE_SECTORS;
	if (len < 1)
		goto out;
	nr_pages = min(len, nr_pages);
	ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);

 out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
				  size_t nr_pages)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	int ret = -EIO;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (WARN_ON(!ti->type->dax_zero_page_range)) {
		/*
		 * ->zero_page_range() is a mandatory dax operation. If we are
		 * here, something is wrong.
		 */
		goto out;
	}
	ret = ti->type->dax_zero_page_range(ti, pgoff, nr_pages);
 out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

/*
 * A target may call dm_accept_partial_bio only from the map routine.  It is
 * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_* zone management
 * operations and REQ_OP_ZONE_APPEND (zone append writes).
 *
 * dm_accept_partial_bio informs the dm that the target only wants to process
 * additional n_sectors sectors of the bio and the rest of the data should be
 * sent in a subsequent bio.
 *
 * A diagram that explains the arithmetic:
 * +--------------------+---------------+-------+
 * |         1          |       2       |   3   |
 * +--------------------+---------------+-------+
 *
 * <-------------- *tio->len_ptr --------------->
 *                      <------- bi_size ------->
 *                      <-- n_sectors -->
 *
 * Region 1 was already iterated over with bio_advance or similar function.
 *	(it may be empty if the target doesn't use bio_advance)
 * Region 2 is the remaining bio size that the target wants to process.
 *	(it may be empty if region 1 is non-empty, although there is no reason
 *	 to make it empty)
 * The target requires that region 3 be sent in the next bio.
 *
 * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
 * the partially processed part (the sum of regions 1+2) must be the same for all
 * copies of the bio.
 */
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;

	BUG_ON(bio->bi_opf & REQ_PREFLUSH);
	BUG_ON(op_is_zone_mgmt(bio_op(bio)));
	BUG_ON(bio_op(bio) == REQ_OP_ZONE_APPEND);
	BUG_ON(bi_size > *tio->len_ptr);
	BUG_ON(n_sectors > bi_size);

	*tio->len_ptr -= bi_size - n_sectors;
	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
}
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
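
/*
 * Example (hypothetical target code): a map routine that can only handle
 * up to 'max' sectors of the bio in one go might do:
 *
 *	if (bio_sectors(bio) > max)
 *		dm_accept_partial_bio(bio, max);
 *	...
 *	return DM_MAPIO_REMAPPED;
 *
 * DM core then issues the remainder (region 3 in the diagram above) to the
 * target as one or more follow-up bios.
 */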

static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch)
{
	mutex_lock(&md->swap_bios_lock);
	while (latch < md->swap_bios) {
		cond_resched();
		down(&md->swap_bios_semaphore);
		md->swap_bios--;
	}
	while (latch > md->swap_bios) {
		cond_resched();
		up(&md->swap_bios_semaphore);
		md->swap_bios++;
	}
	mutex_unlock(&md->swap_bios_lock);
}

static void __map_bio(struct dm_target_io *tio)
{
	int r;
	sector_t sector;
	struct bio *clone = &tio->clone;
	struct dm_io *io = tio->io;
	struct dm_target *ti = tio->ti;

	clone->bi_end_io = clone_endio;

	/*
	 * Map the clone.  If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	dm_io_inc_pending(io);
	sector = clone->bi_iter.bi_sector;

	if (unlikely(swap_bios_limit(ti, clone))) {
		struct mapped_device *md = io->md;
		int latch = get_swap_bios();
		if (unlikely(latch != md->swap_bios))
			__set_swap_bios_limit(md, latch);
		down(&md->swap_bios_semaphore);
	}

1144bb37d772SDamien Le Moal 	/*
1145bb37d772SDamien Le Moal 	 * Check if the IO needs a special mapping due to zone append emulation
1146bb37d772SDamien Le Moal 	 * on a zoned target. In this case, dm_zone_map_bio() calls the target
1147bb37d772SDamien Le Moal 	 * map operation.
1148bb37d772SDamien Le Moal 	 */
1149bb37d772SDamien Le Moal 	if (dm_emulate_zone_append(io->md))
1150bb37d772SDamien Le Moal 		r = dm_zone_map_bio(tio);
1151bb37d772SDamien Le Moal 	else
11527de3ee57SMikulas Patocka 		r = ti->type->map(ti, clone);
1153bb37d772SDamien Le Moal 
1154846785e6SChristoph Hellwig 	switch (r) {
1155846785e6SChristoph Hellwig 	case DM_MAPIO_SUBMITTED:
1156846785e6SChristoph Hellwig 		break;
1157846785e6SChristoph Hellwig 	case DM_MAPIO_REMAPPED:
11581da177e4SLinus Torvalds 		/* the bio has been remapped so dispatch it */
11591c02fca6SChristoph Hellwig 		trace_block_bio_remap(clone, bio_dev(io->orig_bio), sector);
11603e08773cSChristoph Hellwig 		submit_bio_noacct(clone);
1161846785e6SChristoph Hellwig 		break;
1162846785e6SChristoph Hellwig 	case DM_MAPIO_KILL:
1163a666e5c0SMikulas Patocka 		if (unlikely(swap_bios_limit(ti, clone))) {
1164a666e5c0SMikulas Patocka 			struct mapped_device *md = io->md;
1165a666e5c0SMikulas Patocka 			up(&md->swap_bios_semaphore);
1166a666e5c0SMikulas Patocka 		}
11674e4cbee9SChristoph Hellwig 		free_tio(tio);
1168e2118b3cSDamien Le Moal 		dm_io_dec_pending(io, BLK_STS_IOERR);
11694e4cbee9SChristoph Hellwig 		break;
1170846785e6SChristoph Hellwig 	case DM_MAPIO_REQUEUE:
1171a666e5c0SMikulas Patocka 		if (unlikely(swap_bios_limit(ti, clone))) {
1172a666e5c0SMikulas Patocka 			struct mapped_device *md = io->md;
1173a666e5c0SMikulas Patocka 			up(&md->swap_bios_semaphore);
1174a666e5c0SMikulas Patocka 		}
1175cfae7529SMike Snitzer 		free_tio(tio);
1176e2118b3cSDamien Le Moal 		dm_io_dec_pending(io, BLK_STS_DM_REQUEUE);
1177846785e6SChristoph Hellwig 		break;
1178846785e6SChristoph Hellwig 	default:
117945cbcd79SKiyoshi Ueda 		DMWARN("unimplemented target map return value: %d", r);
118045cbcd79SKiyoshi Ueda 		BUG();
11811da177e4SLinus Torvalds 	}
11821da177e4SLinus Torvalds }
11831da177e4SLinus Torvalds 
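/*
 * Summary of the ->map return values handled by __map_bio() above:
 *   DM_MAPIO_SUBMITTED - the target owns the clone and submits it itself
 *   DM_MAPIO_REMAPPED  - the clone was redirected; DM core submits it
 *   DM_MAPIO_KILL      - free the clone and fail the io (BLK_STS_IOERR)
 *   DM_MAPIO_REQUEUE   - free the clone and retry (BLK_STS_DM_REQUEUE)
 */
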
1184e0d6609aSMikulas Patocka static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
1185bd2a49b8SAlasdair G Kergon {
11864f024f37SKent Overstreet 	bio->bi_iter.bi_sector = sector;
11874f024f37SKent Overstreet 	bio->bi_iter.bi_size = to_bytes(len);
11881da177e4SLinus Torvalds }
11891da177e4SLinus Torvalds 
11901da177e4SLinus Torvalds /*
11911da177e4SLinus Torvalds  * Creates a bio that consists of a range of complete bvecs.
11921da177e4SLinus Torvalds  */
1193c80914e8SMike Snitzer static int clone_bio(struct dm_target_io *tio, struct bio *bio,
11941c3b13e6SKent Overstreet 		     sector_t sector, unsigned len)
11951da177e4SLinus Torvalds {
1196dba14160SMikulas Patocka 	struct bio *clone = &tio->clone;
119707560151SEric Biggers 	int r;
11981da177e4SLinus Torvalds 
11991c3b13e6SKent Overstreet 	__bio_clone_fast(clone, bio);
12009c47008dSMartin K. Petersen 
120107560151SEric Biggers 	r = bio_crypt_clone(clone, bio, GFP_NOIO);
120207560151SEric Biggers 	if (r < 0)
120307560151SEric Biggers 		return r;
1204a892c8d5SSatya Tangirala 
120557c36519SMike Snitzer 	if (bio_integrity(bio)) {
1206e2460f2aSMikulas Patocka 		if (unlikely(!dm_target_has_integrity(tio->ti->type) &&
1207e2460f2aSMikulas Patocka 			     !dm_target_passes_integrity(tio->ti->type))) {
1208e2460f2aSMikulas Patocka 			DMWARN("%s: the target %s doesn't support integrity data.",
1209e2460f2aSMikulas Patocka 				dm_device_name(tio->io->md),
1210e2460f2aSMikulas Patocka 				tio->ti->type->name);
1211e2460f2aSMikulas Patocka 			return -EIO;
1212e2460f2aSMikulas Patocka 		}
1213e2460f2aSMikulas Patocka 
1214e2460f2aSMikulas Patocka 		r = bio_integrity_clone(clone, bio, GFP_NOIO);
1215c80914e8SMike Snitzer 		if (r < 0)
1216c80914e8SMike Snitzer 			return r;
1217c80914e8SMike Snitzer 	}
12181c3b13e6SKent Overstreet 
1219fa8db494SMike Snitzer 	bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
1220fa8db494SMike Snitzer 	clone->bi_iter.bi_size = to_bytes(len);
1221fa8db494SMike Snitzer 
1222fa8db494SMike Snitzer 	if (bio_integrity(bio))
1223fa8db494SMike Snitzer 		bio_integrity_trim(clone);
1224c80914e8SMike Snitzer 
1225c80914e8SMike Snitzer 	return 0;
12261da177e4SLinus Torvalds }
12271da177e4SLinus Torvalds 
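/*
 * Allocate num_bios clones before any of them are mapped.  The first pass
 * uses GFP_NOWAIT so that nothing sleeps while the list is only partially
 * built; if any allocation fails, everything is freed and a second pass
 * retries with GFP_NOIO under table_devices_lock, presumably so that at
 * most one caller performs sleeping allocations at a time.
 */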
1228318716ddSMike Snitzer static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
1229318716ddSMike Snitzer 				struct dm_target *ti, unsigned num_bios)
1230f9ab94ceSMikulas Patocka {
1231dba14160SMikulas Patocka 	struct dm_target_io *tio;
1232318716ddSMike Snitzer 	int try;
1233dba14160SMikulas Patocka 
1234318716ddSMike Snitzer 	if (!num_bios)
1235318716ddSMike Snitzer 		return;
1236f9ab94ceSMikulas Patocka 
1237318716ddSMike Snitzer 	if (num_bios == 1) {
1238318716ddSMike Snitzer 		tio = alloc_tio(ci, ti, 0, GFP_NOIO);
1239318716ddSMike Snitzer 		bio_list_add(blist, &tio->clone);
1240318716ddSMike Snitzer 		return;
12419015df24SAlasdair G Kergon 	}
12429015df24SAlasdair G Kergon 
1243318716ddSMike Snitzer 	for (try = 0; try < 2; try++) {
1244318716ddSMike Snitzer 		int bio_nr;
1245318716ddSMike Snitzer 		struct bio *bio;
1246318716ddSMike Snitzer 
1247318716ddSMike Snitzer 		if (try)
1248bc02cdbeSMike Snitzer 			mutex_lock(&ci->io->md->table_devices_lock);
1249318716ddSMike Snitzer 		for (bio_nr = 0; bio_nr < num_bios; bio_nr++) {
1250318716ddSMike Snitzer 			tio = alloc_tio(ci, ti, bio_nr, try ? GFP_NOIO : GFP_NOWAIT);
1251318716ddSMike Snitzer 			if (!tio)
1252318716ddSMike Snitzer 				break;
1253318716ddSMike Snitzer 
1254318716ddSMike Snitzer 			bio_list_add(blist, &tio->clone);
1255318716ddSMike Snitzer 		}
1256318716ddSMike Snitzer 		if (try)
1257bc02cdbeSMike Snitzer 			mutex_unlock(&ci->io->md->table_devices_lock);
1258318716ddSMike Snitzer 		if (bio_nr == num_bios)
1259318716ddSMike Snitzer 			return;
1260318716ddSMike Snitzer 
1261318716ddSMike Snitzer 		while ((bio = bio_list_pop(blist))) {
1262318716ddSMike Snitzer 			tio = container_of(bio, struct dm_target_io, clone);
1263318716ddSMike Snitzer 			free_tio(tio);
1264318716ddSMike Snitzer 		}
1265318716ddSMike Snitzer 	}
1266318716ddSMike Snitzer }
1267318716ddSMike Snitzer 
12683e08773cSChristoph Hellwig static void __clone_and_map_simple_bio(struct clone_info *ci,
1269318716ddSMike Snitzer 					   struct dm_target_io *tio, unsigned *len)
12709015df24SAlasdair G Kergon {
1271dba14160SMikulas Patocka 	struct bio *clone = &tio->clone;
12729015df24SAlasdair G Kergon 
12731dd40c3eSMikulas Patocka 	tio->len_ptr = len;
12741dd40c3eSMikulas Patocka 
12751c3b13e6SKent Overstreet 	__bio_clone_fast(clone, ci->bio);
1276bd2a49b8SAlasdair G Kergon 	if (len)
12771dd40c3eSMikulas Patocka 		bio_setup_sector(clone, ci->sector, *len);
12783e08773cSChristoph Hellwig 	__map_bio(tio);
1279f9ab94ceSMikulas Patocka }
1280f9ab94ceSMikulas Patocka 
128114fe594dSAlasdair G Kergon static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
12821dd40c3eSMikulas Patocka 				  unsigned num_bios, unsigned *len)
128306a426ceSMike Snitzer {
1284318716ddSMike Snitzer 	struct bio_list blist = BIO_EMPTY_LIST;
1285318716ddSMike Snitzer 	struct bio *bio;
1286318716ddSMike Snitzer 	struct dm_target_io *tio;
128706a426ceSMike Snitzer 
1288318716ddSMike Snitzer 	alloc_multiple_bios(&blist, ci, ti, num_bios);
1289318716ddSMike Snitzer 
1290318716ddSMike Snitzer 	while ((bio = bio_list_pop(&blist))) {
1291318716ddSMike Snitzer 		tio = container_of(bio, struct dm_target_io, clone);
12923e08773cSChristoph Hellwig 		__clone_and_map_simple_bio(ci, tio, len);
1293318716ddSMike Snitzer 	}
129406a426ceSMike Snitzer }
129506a426ceSMike Snitzer 
129614fe594dSAlasdair G Kergon static int __send_empty_flush(struct clone_info *ci)
1297f9ab94ceSMikulas Patocka {
129806a426ceSMike Snitzer 	unsigned target_nr = 0;
1299f9ab94ceSMikulas Patocka 	struct dm_target *ti;
1300828678b8SMike Snitzer 	struct bio flush_bio;
1301828678b8SMike Snitzer 
1302828678b8SMike Snitzer 	/*
1303828678b8SMike Snitzer 	 * Use an on-stack bio for this; it's safe because we don't
1304828678b8SMike Snitzer 	 * need to reference it after submit. It's just used as
1305828678b8SMike Snitzer 	 * the basis for the clone(s).
1306828678b8SMike Snitzer 	 */
1307828678b8SMike Snitzer 	bio_init(&flush_bio, NULL, 0);
1308828678b8SMike Snitzer 	flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
1309309dca30SChristoph Hellwig 	bio_set_dev(&flush_bio, ci->io->md->disk->part0);
131047d95102SChristoph Hellwig 
1311828678b8SMike Snitzer 	ci->bio = &flush_bio;
1312828678b8SMike Snitzer 	ci->sector_count = 0;
1313f9ab94ceSMikulas Patocka 
1314b372d360SMike Snitzer 	BUG_ON(bio_has_data(ci->bio));
1315f9ab94ceSMikulas Patocka 	while ((ti = dm_table_get_target(ci->map, target_nr++)))
13161dd40c3eSMikulas Patocka 		__send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
1317828678b8SMike Snitzer 
1318828678b8SMike Snitzer 	bio_uninit(ci->bio);
1319f9ab94ceSMikulas Patocka 	return 0;
1320f9ab94ceSMikulas Patocka }
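
/*
 * Note: the on-stack flush_bio above is only a template.  Each target
 * receives ti->num_flush_bios clones of it, and bio_uninit() suffices
 * because the template itself is never submitted.
 */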
1321f9ab94ceSMikulas Patocka 
1322c80914e8SMike Snitzer static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
13231dd40c3eSMikulas Patocka 				    sector_t sector, unsigned *len)
13245ae89a87SMike Snitzer {
1325dba14160SMikulas Patocka 	struct bio *bio = ci->bio;
13265ae89a87SMike Snitzer 	struct dm_target_io *tio;
1327f31c21e4SNeilBrown 	int r;
13285ae89a87SMike Snitzer 
1329318716ddSMike Snitzer 	tio = alloc_tio(ci, ti, 0, GFP_NOIO);
13301dd40c3eSMikulas Patocka 	tio->len_ptr = len;
1331c80914e8SMike Snitzer 	r = clone_bio(tio, bio, sector, *len);
1332072623deSMikulas Patocka 	if (r < 0) {
1333cfae7529SMike Snitzer 		free_tio(tio);
1334c80914e8SMike Snitzer 		return r;
1335b0d8ed4dSAlasdair G Kergon 	}
13363e08773cSChristoph Hellwig 	__map_bio(tio);
133755a62eefSAlasdair G Kergon 
1338f31c21e4SNeilBrown 	return 0;
133923508a96SMike Snitzer }
134055a62eefSAlasdair G Kergon 
13413d7f4562SMike Snitzer static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
134261697a6aSMike Snitzer 				       unsigned num_bios)
13435ae89a87SMike Snitzer {
134451b86f9aSMichael Lass 	unsigned len;
13455ae89a87SMike Snitzer 
13465ae89a87SMike Snitzer 	/*
134723508a96SMike Snitzer 	 * Even though the device advertised support for this type of
134823508a96SMike Snitzer 	 * request, that does not mean every target supports it, and
1349936688d7SMike Snitzer 	 * reconfiguration might also have changed that since the
13505ae89a87SMike Snitzer 	 * check was performed.
13515ae89a87SMike Snitzer 	 */
135255a62eefSAlasdair G Kergon 	if (!num_bios)
13535ae89a87SMike Snitzer 		return -EOPNOTSUPP;
13545ae89a87SMike Snitzer 
13553720281dSMike Snitzer 	len = min_t(sector_t, ci->sector_count,
13563720281dSMike Snitzer 		    max_io_len_target_boundary(ti, dm_target_offset(ti, ci->sector)));
135751b86f9aSMichael Lass 
13581dd40c3eSMikulas Patocka 	__send_duplicate_bios(ci, ti, num_bios, &len);
13595ae89a87SMike Snitzer 
1360a79245b3SMike Snitzer 	ci->sector += len;
13613d7f4562SMike Snitzer 	ci->sector_count -= len;
13625ae89a87SMike Snitzer 
13635ae89a87SMike Snitzer 	return 0;
13645ae89a87SMike Snitzer }
13655ae89a87SMike Snitzer 
1366568c73a3SMike Snitzer static bool is_abnormal_io(struct bio *bio)
1367568c73a3SMike Snitzer {
1368568c73a3SMike Snitzer 	bool r = false;
1369568c73a3SMike Snitzer 
1370568c73a3SMike Snitzer 	switch (bio_op(bio)) {
1371568c73a3SMike Snitzer 	case REQ_OP_DISCARD:
1372568c73a3SMike Snitzer 	case REQ_OP_SECURE_ERASE:
1373568c73a3SMike Snitzer 	case REQ_OP_WRITE_SAME:
1374568c73a3SMike Snitzer 	case REQ_OP_WRITE_ZEROES:
1375568c73a3SMike Snitzer 		r = true;
1376568c73a3SMike Snitzer 		break;
1377568c73a3SMike Snitzer 	}
1378568c73a3SMike Snitzer 
1379568c73a3SMike Snitzer 	return r;
1380568c73a3SMike Snitzer }
1381568c73a3SMike Snitzer 
13820519c71eSMike Snitzer static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
13830519c71eSMike Snitzer 				  int *result)
13840519c71eSMike Snitzer {
13850519c71eSMike Snitzer 	struct bio *bio = ci->bio;
13869679b5a7SMike Snitzer 	unsigned num_bios = 0;
13870519c71eSMike Snitzer 
13889679b5a7SMike Snitzer 	switch (bio_op(bio)) {
13899679b5a7SMike Snitzer 	case REQ_OP_DISCARD:
13909679b5a7SMike Snitzer 		num_bios = ti->num_discard_bios;
13919679b5a7SMike Snitzer 		break;
13929679b5a7SMike Snitzer 	case REQ_OP_SECURE_ERASE:
13939679b5a7SMike Snitzer 		num_bios = ti->num_secure_erase_bios;
13949679b5a7SMike Snitzer 		break;
13959679b5a7SMike Snitzer 	case REQ_OP_WRITE_SAME:
13969679b5a7SMike Snitzer 		num_bios = ti->num_write_same_bios;
13979679b5a7SMike Snitzer 		break;
13989679b5a7SMike Snitzer 	case REQ_OP_WRITE_ZEROES:
13999679b5a7SMike Snitzer 		num_bios = ti->num_write_zeroes_bios;
14009679b5a7SMike Snitzer 		break;
14019679b5a7SMike Snitzer 	default:
14020519c71eSMike Snitzer 		return false;
14039679b5a7SMike Snitzer 	}
14040519c71eSMike Snitzer 
14059679b5a7SMike Snitzer 	*result = __send_changing_extent_only(ci, ti, num_bios);
14060519c71eSMike Snitzer 	return true;
14070519c71eSMike Snitzer }
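
/*
 * Note: a num_*_bios value of 0 makes __send_changing_extent_only()
 * return -EOPNOTSUPP above, which is how a target opts out of an
 * operation even when the device-level limits advertise support for it.
 */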
14080519c71eSMike Snitzer 
1409e4c93811SAlasdair G Kergon /*
1410e4c93811SAlasdair G Kergon  * Select the correct strategy for processing a non-flush bio.
1411e4c93811SAlasdair G Kergon  */
1412e4c93811SAlasdair G Kergon static int __split_and_process_non_flush(struct clone_info *ci)
1413e4c93811SAlasdair G Kergon {
1414e4c93811SAlasdair G Kergon 	struct dm_target *ti;
14151c3b13e6SKent Overstreet 	unsigned len;
1416c80914e8SMike Snitzer 	int r;
1417e4c93811SAlasdair G Kergon 
1418e4c93811SAlasdair G Kergon 	ti = dm_table_find_target(ci->map, ci->sector);
1419123d87d5SMikulas Patocka 	if (!ti)
1420e4c93811SAlasdair G Kergon 		return -EIO;
1421e4c93811SAlasdair G Kergon 
1422568c73a3SMike Snitzer 	if (__process_abnormal_io(ci, ti, &r))
14230519c71eSMike Snitzer 		return r;
14243d7f4562SMike Snitzer 
14253720281dSMike Snitzer 	len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count);
1426e4c93811SAlasdair G Kergon 
1427c80914e8SMike Snitzer 	r = __clone_and_map_data_bio(ci, ti, ci->sector, &len);
1428c80914e8SMike Snitzer 	if (r < 0)
1429c80914e8SMike Snitzer 		return r;
1430e4c93811SAlasdair G Kergon 
1431e4c93811SAlasdair G Kergon 	ci->sector += len;
1432e4c93811SAlasdair G Kergon 	ci->sector_count -= len;
1433e4c93811SAlasdair G Kergon 
1434e4c93811SAlasdair G Kergon 	return 0;
1435e4c93811SAlasdair G Kergon }
1436e4c93811SAlasdair G Kergon 
1437978e51baSMike Snitzer static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
1438978e51baSMike Snitzer 			    struct dm_table *map, struct bio *bio)
1439978e51baSMike Snitzer {
1440978e51baSMike Snitzer 	ci->map = map;
1441978e51baSMike Snitzer 	ci->io = alloc_io(md, bio);
1442978e51baSMike Snitzer 	ci->sector = bio->bi_iter.bi_sector;
1443978e51baSMike Snitzer }
1444978e51baSMike Snitzer 
1445a1e1cb72SMike Snitzer #define __dm_part_stat_sub(part, field, subnd)	\
1446a1e1cb72SMike Snitzer 	(part_stat_get(part, field) -= (subnd))
1447a1e1cb72SMike Snitzer 
1448e4c93811SAlasdair G Kergon /*
144914fe594dSAlasdair G Kergon  * Entry point to split a bio into clones and submit them to the targets.
14501da177e4SLinus Torvalds  */
14513e08773cSChristoph Hellwig static void __split_and_process_bio(struct mapped_device *md,
145283d5e5b0SMikulas Patocka 					struct dm_table *map, struct bio *bio)
14531da177e4SLinus Torvalds {
14541da177e4SLinus Torvalds 	struct clone_info ci;
1455512875bdSJun'ichi Nomura 	int error = 0;
14561da177e4SLinus Torvalds 
1457978e51baSMike Snitzer 	init_clone_info(&ci, md, map, bio);
1458bd2a49b8SAlasdair G Kergon 
14591eff9d32SJens Axboe 	if (bio->bi_opf & REQ_PREFLUSH) {
146014fe594dSAlasdair G Kergon 		error = __send_empty_flush(&ci);
1461e2118b3cSDamien Le Moal 		/* dm_io_dec_pending submits any data associated with flush */
14622e2d6f7eSAjay Joshi 	} else if (op_is_zone_mgmt(bio_op(bio))) {
1463a4aa5e56SDamien Le Moal 		ci.bio = bio;
1464a4aa5e56SDamien Le Moal 		ci.sector_count = 0;
1465a4aa5e56SDamien Le Moal 		error = __split_and_process_non_flush(&ci);
1466b372d360SMike Snitzer 	} else {
14676a8736d1STejun Heo 		ci.bio = bio;
14681da177e4SLinus Torvalds 		ci.sector_count = bio_sectors(bio);
146914fe594dSAlasdair G Kergon 		error = __split_and_process_non_flush(&ci);
1470985eabdcSJeffle Xu 		if (ci.sector_count && !error) {
147118a25da8SNeilBrown 			/*
1472ed00aabdSChristoph Hellwig 			 * Remainder must be passed to submit_bio_noacct()
147318a25da8SNeilBrown 			 * so that it gets handled *after* bios already submitted
147418a25da8SNeilBrown 			 * have been completely processed.
147518a25da8SNeilBrown 			 * We take a clone of the original to store in
1476745dc570SMike Snitzer 			 * ci.io->orig_bio, to be used by end_io_acct() and
147718a25da8SNeilBrown 			 * by dm_io_dec_pending() for completion handling.
147818a25da8SNeilBrown 			 */
1479f21c601aSMike Snitzer 			struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count,
1480f21c601aSMike Snitzer 						  GFP_NOIO, &md->queue->bio_split);
1481745dc570SMike Snitzer 			ci.io->orig_bio = b;
1482a1e1cb72SMike Snitzer 
1483a1e1cb72SMike Snitzer 			/*
1484a1e1cb72SMike Snitzer 			 * Adjust IO stats for each split; otherwise upon queue
1485a1e1cb72SMike Snitzer 			 * reentry there will be redundant IO accounting.
1486a1e1cb72SMike Snitzer 			 * NOTE: this is a stop-gap fix, a proper fix involves
1487a1e1cb72SMike Snitzer 			 * significant refactoring of DM core's bio splitting
1488a1e1cb72SMike Snitzer 			 * (by eliminating DM's splitting and just using bio_split)
1489a1e1cb72SMike Snitzer 			 */
1490a1e1cb72SMike Snitzer 			part_stat_lock();
14918446fe92SChristoph Hellwig 			__dm_part_stat_sub(dm_disk(md)->part0,
1492a1e1cb72SMike Snitzer 					   sectors[op_stat_group(bio_op(bio))], ci.sector_count);
1493a1e1cb72SMike Snitzer 			part_stat_unlock();
1494a1e1cb72SMike Snitzer 
149518a25da8SNeilBrown 			bio_chain(b, bio);
1496eb6f7f7cSChristoph Hellwig 			trace_block_split(b, bio->bi_iter.bi_sector);
14973e08773cSChristoph Hellwig 			submit_bio_noacct(bio);
149818a25da8SNeilBrown 		}
1499d87f4c14STejun Heo 	}
15001da177e4SLinus Torvalds 
15011da177e4SLinus Torvalds 	/* drop the extra reference count */
1502e2118b3cSDamien Le Moal 	dm_io_dec_pending(ci.io, errno_to_blk_status(error));
15031da177e4SLinus Torvalds }
15041da177e4SLinus Torvalds 
15053e08773cSChristoph Hellwig static void dm_submit_bio(struct bio *bio)
15061da177e4SLinus Torvalds {
1507309dca30SChristoph Hellwig 	struct mapped_device *md = bio->bi_bdev->bd_disk->private_data;
150883d5e5b0SMikulas Patocka 	int srcu_idx;
150983d5e5b0SMikulas Patocka 	struct dm_table *map;
15101da177e4SLinus Torvalds 
151183d5e5b0SMikulas Patocka 	map = dm_get_live_table(md, &srcu_idx);
1512b2abdb1bSMike Snitzer 	if (unlikely(!map)) {
1513b2abdb1bSMike Snitzer 		DMERR_LIMIT("%s: mapping table unavailable, erroring io",
1514b2abdb1bSMike Snitzer 			    dm_device_name(md));
15156a8736d1STejun Heo 		bio_io_error(bio);
1516b2abdb1bSMike Snitzer 		goto out;
15171da177e4SLinus Torvalds 	}
151892c63902SMikulas Patocka 
1519b2abdb1bSMike Snitzer 	/* If suspended, queue this IO for later */
15201da177e4SLinus Torvalds 	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
15216abc4946SKonstantin Khlebnikov 		if (bio->bi_opf & REQ_NOWAIT)
15226abc4946SKonstantin Khlebnikov 			bio_wouldblock_error(bio);
1523b2abdb1bSMike Snitzer 		else if (bio->bi_opf & REQ_RAHEAD)
15241da177e4SLinus Torvalds 			bio_io_error(bio);
1525b2abdb1bSMike Snitzer 		else
1526b2abdb1bSMike Snitzer 			queue_io(md, bio);
1527b2abdb1bSMike Snitzer 		goto out;
15281da177e4SLinus Torvalds 	}
15291da177e4SLinus Torvalds 
1530b2abdb1bSMike Snitzer 	/*
1531b2abdb1bSMike Snitzer 	 * Use blk_queue_split() for abnormal IO (e.g. discard, writesame, etc),
1532b2abdb1bSMike Snitzer 	 * otherwise the associated queue_limits won't be imposed.
1533b2abdb1bSMike Snitzer 	 */
1534b2abdb1bSMike Snitzer 	if (is_abnormal_io(bio))
1535b2abdb1bSMike Snitzer 		blk_queue_split(&bio);
1536978e51baSMike Snitzer 
15373e08773cSChristoph Hellwig 	__split_and_process_bio(md, map, bio);
1538b2abdb1bSMike Snitzer out:
153983d5e5b0SMikulas Patocka 	dm_put_live_table(md, srcu_idx);
1540978e51baSMike Snitzer }
1541978e51baSMike Snitzer 
15421da177e4SLinus Torvalds /*-----------------------------------------------------------------
15431da177e4SLinus Torvalds  * An IDR is used to keep track of allocated minor numbers.
15441da177e4SLinus Torvalds  *---------------------------------------------------------------*/
15452b06cfffSAlasdair G Kergon static void free_minor(int minor)
15461da177e4SLinus Torvalds {
1547f32c10b0SJeff Mahoney 	spin_lock(&_minor_lock);
15481da177e4SLinus Torvalds 	idr_remove(&_minor_idr, minor);
1549f32c10b0SJeff Mahoney 	spin_unlock(&_minor_lock);
15501da177e4SLinus Torvalds }
15511da177e4SLinus Torvalds 
15521da177e4SLinus Torvalds /*
15531da177e4SLinus Torvalds  * See if the device with a specific minor # is free.
15541da177e4SLinus Torvalds  */
1555cf13ab8eSFrederik Deweerdt static int specific_minor(int minor)
15561da177e4SLinus Torvalds {
1557c9d76be6STejun Heo 	int r;
15581da177e4SLinus Torvalds 
15591da177e4SLinus Torvalds 	if (minor >= (1 << MINORBITS))
15601da177e4SLinus Torvalds 		return -EINVAL;
15611da177e4SLinus Torvalds 
1562c9d76be6STejun Heo 	idr_preload(GFP_KERNEL);
1563f32c10b0SJeff Mahoney 	spin_lock(&_minor_lock);
15641da177e4SLinus Torvalds 
1565c9d76be6STejun Heo 	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);
15661da177e4SLinus Torvalds 
1567f32c10b0SJeff Mahoney 	spin_unlock(&_minor_lock);
1568c9d76be6STejun Heo 	idr_preload_end();
1569c9d76be6STejun Heo 	if (r < 0)
1570c9d76be6STejun Heo 		return r == -ENOSPC ? -EBUSY : r;
1571c9d76be6STejun Heo 	return 0;
15721da177e4SLinus Torvalds }
15731da177e4SLinus Torvalds 
1574cf13ab8eSFrederik Deweerdt static int next_free_minor(int *minor)
15751da177e4SLinus Torvalds {
1576c9d76be6STejun Heo 	int r;
15771da177e4SLinus Torvalds 
1578c9d76be6STejun Heo 	idr_preload(GFP_KERNEL);
1579f32c10b0SJeff Mahoney 	spin_lock(&_minor_lock);
15801da177e4SLinus Torvalds 
1581c9d76be6STejun Heo 	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);
15821da177e4SLinus Torvalds 
1583f32c10b0SJeff Mahoney 	spin_unlock(&_minor_lock);
1584c9d76be6STejun Heo 	idr_preload_end();
1585c9d76be6STejun Heo 	if (r < 0)
15861da177e4SLinus Torvalds 		return r;
1587c9d76be6STejun Heo 	*minor = r;
1588c9d76be6STejun Heo 	return 0;
15891da177e4SLinus Torvalds }
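
/*
 * Both helpers above insert the sentinel MINOR_ALLOCED rather than a real
 * mapped_device: the md is not fully constructed at that point, so
 * alloc_dev() publishes it later with idr_replace() once initialisation
 * has succeeded (see the "Populate the mapping" step there).
 */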
15901da177e4SLinus Torvalds 
159183d5cde4SAlexey Dobriyan static const struct block_device_operations dm_blk_dops;
1592681cc5e8SMike Snitzer static const struct block_device_operations dm_rq_blk_dops;
1593f26c5719SDan Williams static const struct dax_operations dm_dax_ops;
15941da177e4SLinus Torvalds 
159553d5914fSMikulas Patocka static void dm_wq_work(struct work_struct *work);
159653d5914fSMikulas Patocka 
1597aa6ce87aSSatya Tangirala #ifdef CONFIG_BLK_INLINE_ENCRYPTION
1598cb77cb5aSEric Biggers static void dm_queue_destroy_crypto_profile(struct request_queue *q)
1599aa6ce87aSSatya Tangirala {
1600cb77cb5aSEric Biggers 	dm_destroy_crypto_profile(q->crypto_profile);
1601aa6ce87aSSatya Tangirala }
1602aa6ce87aSSatya Tangirala 
1603aa6ce87aSSatya Tangirala #else /* CONFIG_BLK_INLINE_ENCRYPTION */
1604aa6ce87aSSatya Tangirala 
1605cb77cb5aSEric Biggers static inline void dm_queue_destroy_crypto_profile(struct request_queue *q)
1606aa6ce87aSSatya Tangirala {
1607aa6ce87aSSatya Tangirala }
1608aa6ce87aSSatya Tangirala #endif /* !CONFIG_BLK_INLINE_ENCRYPTION */
1609aa6ce87aSSatya Tangirala 
16100f20972fSMike Snitzer static void cleanup_mapped_device(struct mapped_device *md)
16110f20972fSMike Snitzer {
16120f20972fSMike Snitzer 	if (md->wq)
16130f20972fSMike Snitzer 		destroy_workqueue(md->wq);
16146f1c819cSKent Overstreet 	bioset_exit(&md->bs);
16156f1c819cSKent Overstreet 	bioset_exit(&md->io_bs);
16160f20972fSMike Snitzer 
1617f26c5719SDan Williams 	if (md->dax_dev) {
1618fb08a190SChristoph Hellwig 		dax_remove_host(md->disk);
1619f26c5719SDan Williams 		kill_dax(md->dax_dev);
1620f26c5719SDan Williams 		put_dax(md->dax_dev);
1621f26c5719SDan Williams 		md->dax_dev = NULL;
1622f26c5719SDan Williams 	}
1623f26c5719SDan Williams 
16240f20972fSMike Snitzer 	if (md->disk) {
16250f20972fSMike Snitzer 		spin_lock(&_minor_lock);
16260f20972fSMike Snitzer 		md->disk->private_data = NULL;
16270f20972fSMike Snitzer 		spin_unlock(&_minor_lock);
162889f871afSChristoph Hellwig 		if (dm_get_md_type(md) != DM_TYPE_NONE) {
162989f871afSChristoph Hellwig 			dm_sysfs_exit(md);
16300f20972fSMike Snitzer 			del_gendisk(md->disk);
163189f871afSChristoph Hellwig 		}
1632cb77cb5aSEric Biggers 		dm_queue_destroy_crypto_profile(md->queue);
163374fe6ba9SChristoph Hellwig 		blk_cleanup_disk(md->disk);
163474a2b6ecSChristoph Hellwig 	}
16350f20972fSMike Snitzer 
1636d09960b0STahsin Erdogan 	cleanup_srcu_struct(&md->io_barrier);
1637d09960b0STahsin Erdogan 
1638d5ffebddSMike Snitzer 	mutex_destroy(&md->suspend_lock);
1639d5ffebddSMike Snitzer 	mutex_destroy(&md->type_lock);
1640d5ffebddSMike Snitzer 	mutex_destroy(&md->table_devices_lock);
1641a666e5c0SMikulas Patocka 	mutex_destroy(&md->swap_bios_lock);
1642d5ffebddSMike Snitzer 
16434cc96131SMike Snitzer 	dm_mq_cleanup_mapped_device(md);
1644bb37d772SDamien Le Moal 	dm_cleanup_zoned_dev(md);
16450f20972fSMike Snitzer }
16460f20972fSMike Snitzer 
16471da177e4SLinus Torvalds /*
16481da177e4SLinus Torvalds  * Allocate and initialise a blank device with a given minor.
16491da177e4SLinus Torvalds  */
16502b06cfffSAlasdair G Kergon static struct mapped_device *alloc_dev(int minor)
16511da177e4SLinus Torvalds {
1652115485e8SMike Snitzer 	int r, numa_node_id = dm_get_numa_node();
1653115485e8SMike Snitzer 	struct mapped_device *md;
1654ba61fdd1SJeff Mahoney 	void *old_md;
16551da177e4SLinus Torvalds 
1656856eb091SMikulas Patocka 	md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
16571da177e4SLinus Torvalds 	if (!md) {
16581da177e4SLinus Torvalds 		DMWARN("unable to allocate device, out of memory.");
16591da177e4SLinus Torvalds 		return NULL;
16601da177e4SLinus Torvalds 	}
16611da177e4SLinus Torvalds 
166210da4f79SJeff Mahoney 	if (!try_module_get(THIS_MODULE))
16636ed7ade8SMilan Broz 		goto bad_module_get;
166410da4f79SJeff Mahoney 
16651da177e4SLinus Torvalds 	/* get a minor number for the dev */
16662b06cfffSAlasdair G Kergon 	if (minor == DM_ANY_MINOR)
1667cf13ab8eSFrederik Deweerdt 		r = next_free_minor(&minor);
16682b06cfffSAlasdair G Kergon 	else
1669cf13ab8eSFrederik Deweerdt 		r = specific_minor(minor);
16701da177e4SLinus Torvalds 	if (r < 0)
16716ed7ade8SMilan Broz 		goto bad_minor;
16721da177e4SLinus Torvalds 
167383d5e5b0SMikulas Patocka 	r = init_srcu_struct(&md->io_barrier);
167483d5e5b0SMikulas Patocka 	if (r < 0)
167583d5e5b0SMikulas Patocka 		goto bad_io_barrier;
167683d5e5b0SMikulas Patocka 
1677115485e8SMike Snitzer 	md->numa_node_id = numa_node_id;
1678591ddcfcSMike Snitzer 	md->init_tio_pdu = false;
1679a5664dadSMike Snitzer 	md->type = DM_TYPE_NONE;
1680e61290a4SDaniel Walker 	mutex_init(&md->suspend_lock);
1681a5664dadSMike Snitzer 	mutex_init(&md->type_lock);
168286f1152bSBenjamin Marzinski 	mutex_init(&md->table_devices_lock);
1683022c2611SMikulas Patocka 	spin_lock_init(&md->deferred_lock);
16841da177e4SLinus Torvalds 	atomic_set(&md->holders, 1);
16855c6bd75dSAlasdair G Kergon 	atomic_set(&md->open_count, 0);
16861da177e4SLinus Torvalds 	atomic_set(&md->event_nr, 0);
16877a8c3d3bSMike Anderson 	atomic_set(&md->uevent_seq, 0);
16887a8c3d3bSMike Anderson 	INIT_LIST_HEAD(&md->uevent_list);
168986f1152bSBenjamin Marzinski 	INIT_LIST_HEAD(&md->table_devices);
16907a8c3d3bSMike Anderson 	spin_lock_init(&md->uevent_lock);
16911da177e4SLinus Torvalds 
169247ace7e0SMike Snitzer 	/*
1693c62b37d9SChristoph Hellwig 	 * Default to bio-based until a DM table is loaded and md->type is
1694c62b37d9SChristoph Hellwig 	 * established. If a request-based table is loaded, blk-mq will
1695c62b37d9SChristoph Hellwig 	 * override accordingly.
169647ace7e0SMike Snitzer 	 */
169774fe6ba9SChristoph Hellwig 	md->disk = blk_alloc_disk(md->numa_node_id);
16981da177e4SLinus Torvalds 	if (!md->disk)
16990f20972fSMike Snitzer 		goto bad;
170074fe6ba9SChristoph Hellwig 	md->queue = md->disk->queue;
17011da177e4SLinus Torvalds 
1702f0b04115SJeff Mahoney 	init_waitqueue_head(&md->wait);
170353d5914fSMikulas Patocka 	INIT_WORK(&md->work, dm_wq_work);
1704f0b04115SJeff Mahoney 	init_waitqueue_head(&md->eventq);
17052995fa78SMikulas Patocka 	init_completion(&md->kobj_holder.completion);
1706f0b04115SJeff Mahoney 
1707a666e5c0SMikulas Patocka 	md->swap_bios = get_swap_bios();
1708a666e5c0SMikulas Patocka 	sema_init(&md->swap_bios_semaphore, md->swap_bios);
1709a666e5c0SMikulas Patocka 	mutex_init(&md->swap_bios_lock);
1710a666e5c0SMikulas Patocka 
17111da177e4SLinus Torvalds 	md->disk->major = _major;
17121da177e4SLinus Torvalds 	md->disk->first_minor = minor;
171374fe6ba9SChristoph Hellwig 	md->disk->minors = 1;
17141da177e4SLinus Torvalds 	md->disk->fops = &dm_blk_dops;
17151da177e4SLinus Torvalds 	md->disk->queue = md->queue;
17161da177e4SLinus Torvalds 	md->disk->private_data = md;
17171da177e4SLinus Torvalds 	sprintf(md->disk->disk_name, "dm-%d", minor);
1718f26c5719SDan Williams 
17195d2a228bSChristoph Hellwig 	if (IS_ENABLED(CONFIG_FS_DAX)) {
172030c6828aSChristoph Hellwig 		md->dax_dev = alloc_dax(md, &dm_dax_ops);
1721d7519392SChristoph Hellwig 		if (IS_ERR(md->dax_dev)) {
1722d7519392SChristoph Hellwig 			md->dax_dev = NULL;
1723f26c5719SDan Williams 			goto bad;
1724976431b0SDan Williams 		}
1725*7ac5360cSChristoph Hellwig 		set_dax_nocache(md->dax_dev);
1726*7ac5360cSChristoph Hellwig 		set_dax_nomc(md->dax_dev);
1727fb08a190SChristoph Hellwig 		if (dax_add_host(md->dax_dev, md->disk))
1728fb08a190SChristoph Hellwig 			goto bad;
1729d7519392SChristoph Hellwig 	}
1730f26c5719SDan Williams 
17317e51f257SMike Anderson 	format_dev_t(md->name, MKDEV(_major, minor));
17321da177e4SLinus Torvalds 
1733c7c879eeSMichał Mirosław 	md->wq = alloc_workqueue("kdmflush/%s", WQ_MEM_RECLAIM, 0, md->name);
1734304f3f6aSMilan Broz 	if (!md->wq)
17350f20972fSMike Snitzer 		goto bad;
1736304f3f6aSMilan Broz 
1737fd2ed4d2SMikulas Patocka 	dm_stats_init(&md->stats);
1738fd2ed4d2SMikulas Patocka 
1739ba61fdd1SJeff Mahoney 	/* Populate the mapping, nobody knows we exist yet */
1740f32c10b0SJeff Mahoney 	spin_lock(&_minor_lock);
1741ba61fdd1SJeff Mahoney 	old_md = idr_replace(&_minor_idr, md, minor);
1742f32c10b0SJeff Mahoney 	spin_unlock(&_minor_lock);
1743ba61fdd1SJeff Mahoney 
1744ba61fdd1SJeff Mahoney 	BUG_ON(old_md != MINOR_ALLOCED);
1745ba61fdd1SJeff Mahoney 
17461da177e4SLinus Torvalds 	return md;
17471da177e4SLinus Torvalds 
17480f20972fSMike Snitzer bad:
17490f20972fSMike Snitzer 	cleanup_mapped_device(md);
175083d5e5b0SMikulas Patocka bad_io_barrier:
17511da177e4SLinus Torvalds 	free_minor(minor);
17526ed7ade8SMilan Broz bad_minor:
175310da4f79SJeff Mahoney 	module_put(THIS_MODULE);
17546ed7ade8SMilan Broz bad_module_get:
1755856eb091SMikulas Patocka 	kvfree(md);
17561da177e4SLinus Torvalds 	return NULL;
17571da177e4SLinus Torvalds }
17581da177e4SLinus Torvalds 
1759ae9da83fSJun'ichi Nomura static void unlock_fs(struct mapped_device *md);
1760ae9da83fSJun'ichi Nomura 
17611da177e4SLinus Torvalds static void free_dev(struct mapped_device *md)
17621da177e4SLinus Torvalds {
1763f331c029STejun Heo 	int minor = MINOR(disk_devt(md->disk));
176463d94e48SJun'ichi Nomura 
1765ae9da83fSJun'ichi Nomura 	unlock_fs(md);
17662eb6e1e3SKeith Busch 
17670f20972fSMike Snitzer 	cleanup_mapped_device(md);
17680f20972fSMike Snitzer 
17690f20972fSMike Snitzer 	free_table_devices(&md->table_devices);
17700f20972fSMike Snitzer 	dm_stats_cleanup(&md->stats);
177163a4f065SMike Snitzer 	free_minor(minor);
177263a4f065SMike Snitzer 
177310da4f79SJeff Mahoney 	module_put(THIS_MODULE);
1774856eb091SMikulas Patocka 	kvfree(md);
17751da177e4SLinus Torvalds }
17761da177e4SLinus Torvalds 
17772a2a4c51SJens Axboe static int __bind_mempools(struct mapped_device *md, struct dm_table *t)
1778e6ee8c0bSKiyoshi Ueda {
1779c0820cf5SMikulas Patocka 	struct dm_md_mempools *p = dm_table_get_md_mempools(t);
17802a2a4c51SJens Axboe 	int ret = 0;
1781e6ee8c0bSKiyoshi Ueda 
1782545ed20eSToshi Kani 	if (dm_table_bio_based(t)) {
1783c0820cf5SMikulas Patocka 		/*
178464f52b0eSMike Snitzer 		 * The md may already have mempools that need changing.
178564f52b0eSMike Snitzer 		 * If so, reload the bioset because front_pad may have changed
178616245bdcSJun'ichi Nomura 		 * when a different table was loaded.
1787c0820cf5SMikulas Patocka 		 */
17886f1c819cSKent Overstreet 		bioset_exit(&md->bs);
17896f1c819cSKent Overstreet 		bioset_exit(&md->io_bs);
17900776aa0eSMike Snitzer 
17916f1c819cSKent Overstreet 	} else if (bioset_initialized(&md->bs)) {
1792cbc4e3c1SMike Snitzer 		/*
17934e6e36c3SMike Snitzer 		 * There's no need to reload with request-based dm
17944e6e36c3SMike Snitzer 		 * because the size of front_pad doesn't change.
17954e6e36c3SMike Snitzer 		 * Note for the future: if you ever reload the bioset,
17964e6e36c3SMike Snitzer 		 * prepped requests in the queue may still refer to
17974e6e36c3SMike Snitzer 		 * bios from the old bioset, so you must walk through
17984e6e36c3SMike Snitzer 		 * the queue to unprep them.
1799cbc4e3c1SMike Snitzer 		 */
1800cbc4e3c1SMike Snitzer 		goto out;
1801cbc4e3c1SMike Snitzer 	}
1802cbc4e3c1SMike Snitzer 
18036f1c819cSKent Overstreet 	BUG_ON(!p ||
18046f1c819cSKent Overstreet 	       bioset_initialized(&md->bs) ||
18056f1c819cSKent Overstreet 	       bioset_initialized(&md->io_bs));
1806e6ee8c0bSKiyoshi Ueda 
18072a2a4c51SJens Axboe 	ret = bioset_init_from_src(&md->bs, &p->bs);
18082a2a4c51SJens Axboe 	if (ret)
18092a2a4c51SJens Axboe 		goto out;
18102a2a4c51SJens Axboe 	ret = bioset_init_from_src(&md->io_bs, &p->io_bs);
18112a2a4c51SJens Axboe 	if (ret)
18122a2a4c51SJens Axboe 		bioset_exit(&md->bs);
1813e6ee8c0bSKiyoshi Ueda out:
181402233342SMike Snitzer 	/* mempool bind completed, no longer need any mempools in the table */
1815e6ee8c0bSKiyoshi Ueda 	dm_table_free_md_mempools(t);
18162a2a4c51SJens Axboe 	return ret;
1817e6ee8c0bSKiyoshi Ueda }
1818e6ee8c0bSKiyoshi Ueda 
18191da177e4SLinus Torvalds /*
18201da177e4SLinus Torvalds  * Bind a table to the device.
18211da177e4SLinus Torvalds  */
18221da177e4SLinus Torvalds static void event_callback(void *context)
18231da177e4SLinus Torvalds {
18247a8c3d3bSMike Anderson 	unsigned long flags;
18257a8c3d3bSMike Anderson 	LIST_HEAD(uevents);
18261da177e4SLinus Torvalds 	struct mapped_device *md = (struct mapped_device *) context;
18271da177e4SLinus Torvalds 
18287a8c3d3bSMike Anderson 	spin_lock_irqsave(&md->uevent_lock, flags);
18297a8c3d3bSMike Anderson 	list_splice_init(&md->uevent_list, &uevents);
18307a8c3d3bSMike Anderson 	spin_unlock_irqrestore(&md->uevent_lock, flags);
18317a8c3d3bSMike Anderson 
1832ed9e1982STejun Heo 	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
18337a8c3d3bSMike Anderson 
18341da177e4SLinus Torvalds 	atomic_inc(&md->event_nr);
18351da177e4SLinus Torvalds 	wake_up(&md->eventq);
183662e08243SMikulas Patocka 	dm_issue_global_event();
18371da177e4SLinus Torvalds }
18381da177e4SLinus Torvalds 
1839c217649bSMike Snitzer /*
1840042d2a9bSAlasdair G Kergon  * Returns old map, which caller must destroy.
1841042d2a9bSAlasdair G Kergon  */
1842042d2a9bSAlasdair G Kergon static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
1843754c5fc7SMike Snitzer 			       struct queue_limits *limits)
18441da177e4SLinus Torvalds {
1845042d2a9bSAlasdair G Kergon 	struct dm_table *old_map;
1846165125e1SJens Axboe 	struct request_queue *q = md->queue;
1847978e51baSMike Snitzer 	bool request_based = dm_table_request_based(t);
18481da177e4SLinus Torvalds 	sector_t size;
18492a2a4c51SJens Axboe 	int ret;
18501da177e4SLinus Torvalds 
18515a8f1f80SBart Van Assche 	lockdep_assert_held(&md->suspend_lock);
18525a8f1f80SBart Van Assche 
18531da177e4SLinus Torvalds 	size = dm_table_get_size(t);
18543ac51e74SDarrick J. Wong 
18553ac51e74SDarrick J. Wong 	/*
18563ac51e74SDarrick J. Wong 	 * Wipe any geometry if the size of the table changed.
18573ac51e74SDarrick J. Wong 	 */
1858fd2ed4d2SMikulas Patocka 	if (size != dm_get_size(md))
18593ac51e74SDarrick J. Wong 		memset(&md->geometry, 0, sizeof(md->geometry));
18603ac51e74SDarrick J. Wong 
18615424a0b8SMikulas Patocka 	if (!get_capacity(md->disk))
18625424a0b8SMikulas Patocka 		set_capacity(md->disk, size);
18635424a0b8SMikulas Patocka 	else
1864f64d9b2eSChristoph Hellwig 		set_capacity_and_notify(md->disk, size);
18651da177e4SLinus Torvalds 
1866cf222b37SAlasdair G Kergon 	dm_table_event_callback(t, event_callback, md);
18672ca3310eSAlasdair G Kergon 
18689c37de29SMike Snitzer 	if (request_based) {
186916f12266SMike Snitzer 		/*
18709c37de29SMike Snitzer 		 * Leverage the fact that request-based DM targets are
18719c37de29SMike Snitzer 		 * immutable singletons - used to optimize dm_mq_queue_rq.
187216f12266SMike Snitzer 		 */
187316f12266SMike Snitzer 		md->immutable_target = dm_table_get_immutable_target(t);
187416f12266SMike Snitzer 	}
1875e6ee8c0bSKiyoshi Ueda 
18762a2a4c51SJens Axboe 	ret = __bind_mempools(md, t);
18772a2a4c51SJens Axboe 	if (ret) {
18782a2a4c51SJens Axboe 		old_map = ERR_PTR(ret);
18792a2a4c51SJens Axboe 		goto out;
18802a2a4c51SJens Axboe 	}
1881e6ee8c0bSKiyoshi Ueda 
1882bb37d772SDamien Le Moal 	ret = dm_table_set_restrictions(t, q, limits);
1883bb37d772SDamien Le Moal 	if (ret) {
1884bb37d772SDamien Le Moal 		old_map = ERR_PTR(ret);
1885bb37d772SDamien Le Moal 		goto out;
1886bb37d772SDamien Le Moal 	}
1887bb37d772SDamien Le Moal 
1888a12f5d48SEric Dumazet 	old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
18891d3aa6f6SMike Snitzer 	rcu_assign_pointer(md->map, (void *)t);
189036a0456fSAlasdair G Kergon 	md->immutable_target_type = dm_table_get_immutable_target_type(t);
189136a0456fSAlasdair G Kergon 
189241abc4e1SHannes Reinecke 	if (old_map)
189383d5e5b0SMikulas Patocka 		dm_sync_table(md);
18942ca3310eSAlasdair G Kergon 
18952a2a4c51SJens Axboe out:
1896042d2a9bSAlasdair G Kergon 	return old_map;
18971da177e4SLinus Torvalds }
18981da177e4SLinus Torvalds 
1899a7940155SAlasdair G Kergon /*
1900a7940155SAlasdair G Kergon  * Returns unbound table for the caller to free.
1901a7940155SAlasdair G Kergon  */
1902a7940155SAlasdair G Kergon static struct dm_table *__unbind(struct mapped_device *md)
19031da177e4SLinus Torvalds {
1904a12f5d48SEric Dumazet 	struct dm_table *map = rcu_dereference_protected(md->map, 1);
19051da177e4SLinus Torvalds 
19061da177e4SLinus Torvalds 	if (!map)
1907a7940155SAlasdair G Kergon 		return NULL;
19081da177e4SLinus Torvalds 
19091da177e4SLinus Torvalds 	dm_table_event_callback(map, NULL, NULL);
19109cdb8520SMonam Agarwal 	RCU_INIT_POINTER(md->map, NULL);
191183d5e5b0SMikulas Patocka 	dm_sync_table(md);
1912a7940155SAlasdair G Kergon 
1913a7940155SAlasdair G Kergon 	return map;
19141da177e4SLinus Torvalds }
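
/*
 * In both __bind() and __unbind() the live table pointer is updated with
 * RCU primitives; readers access it via dm_get_live_table() under SRCU,
 * and dm_sync_table() lets existing readers drain before the caller
 * destroys the old table.
 */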
19151da177e4SLinus Torvalds 
19161da177e4SLinus Torvalds /*
19171da177e4SLinus Torvalds  * Constructor for a new device.
19181da177e4SLinus Torvalds  */
19192b06cfffSAlasdair G Kergon int dm_create(int minor, struct mapped_device **result)
19201da177e4SLinus Torvalds {
19211da177e4SLinus Torvalds 	struct mapped_device *md;
19221da177e4SLinus Torvalds 
19232b06cfffSAlasdair G Kergon 	md = alloc_dev(minor);
19241da177e4SLinus Torvalds 	if (!md)
19251da177e4SLinus Torvalds 		return -ENXIO;
19261da177e4SLinus Torvalds 
192791ccbbacSTushar Sugandhi 	dm_ima_reset_data(md);
192891ccbbacSTushar Sugandhi 
19291da177e4SLinus Torvalds 	*result = md;
19301da177e4SLinus Torvalds 	return 0;
19311da177e4SLinus Torvalds }
19321da177e4SLinus Torvalds 
1933a5664dadSMike Snitzer /*
1934a5664dadSMike Snitzer  * Functions to manage md->type.
1935a5664dadSMike Snitzer  * All are required to hold md->type_lock.
1936a5664dadSMike Snitzer  */
1937a5664dadSMike Snitzer void dm_lock_md_type(struct mapped_device *md)
1938a5664dadSMike Snitzer {
1939a5664dadSMike Snitzer 	mutex_lock(&md->type_lock);
1940a5664dadSMike Snitzer }
1941a5664dadSMike Snitzer 
1942a5664dadSMike Snitzer void dm_unlock_md_type(struct mapped_device *md)
1943a5664dadSMike Snitzer {
1944a5664dadSMike Snitzer 	mutex_unlock(&md->type_lock);
1945a5664dadSMike Snitzer }
1946a5664dadSMike Snitzer 
19477e0d574fSBart Van Assche void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type)
1948a5664dadSMike Snitzer {
194900c4fc3bSMike Snitzer 	BUG_ON(!mutex_is_locked(&md->type_lock));
1950a5664dadSMike Snitzer 	md->type = type;
1951a5664dadSMike Snitzer }
1952a5664dadSMike Snitzer 
19537e0d574fSBart Van Assche enum dm_queue_mode dm_get_md_type(struct mapped_device *md)
1954a5664dadSMike Snitzer {
1955a5664dadSMike Snitzer 	return md->type;
1956a5664dadSMike Snitzer }
1957a5664dadSMike Snitzer 
195836a0456fSAlasdair G Kergon struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
195936a0456fSAlasdair G Kergon {
196036a0456fSAlasdair G Kergon 	return md->immutable_target_type;
196136a0456fSAlasdair G Kergon }
196236a0456fSAlasdair G Kergon 
19634a0b4ddfSMike Snitzer /*
1964f84cb8a4SMike Snitzer  * The queue_limits are only valid as long as you have a reference
1965f84cb8a4SMike Snitzer  * count on 'md'.
1966f84cb8a4SMike Snitzer  */
1967f84cb8a4SMike Snitzer struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
1968f84cb8a4SMike Snitzer {
1969f84cb8a4SMike Snitzer 	BUG_ON(!atomic_read(&md->holders));
1970f84cb8a4SMike Snitzer 	return &md->queue->limits;
1971f84cb8a4SMike Snitzer }
1972f84cb8a4SMike Snitzer EXPORT_SYMBOL_GPL(dm_get_queue_limits);
1973f84cb8a4SMike Snitzer 
19744a0b4ddfSMike Snitzer /*
19754a0b4ddfSMike Snitzer  * Setup the DM device's queue based on md's type
19764a0b4ddfSMike Snitzer  */
1977591ddcfcSMike Snitzer int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
19784a0b4ddfSMike Snitzer {
1979ba305859SChristoph Hellwig 	enum dm_queue_mode type = dm_table_get_type(t);
1980c100ec49SMike Snitzer 	struct queue_limits limits;
1981ba305859SChristoph Hellwig 	int r;
1982bfebd1cdSMike Snitzer 
1983545ed20eSToshi Kani 	switch (type) {
1984bfebd1cdSMike Snitzer 	case DM_TYPE_REQUEST_BASED:
1985681cc5e8SMike Snitzer 		md->disk->fops = &dm_rq_blk_dops;
1986e83068a5SMike Snitzer 		r = dm_mq_init_request_queue(md, t);
1987bfebd1cdSMike Snitzer 		if (r) {
1988681cc5e8SMike Snitzer 			DMERR("Cannot initialize queue for request-based dm mapped device");
1989bfebd1cdSMike Snitzer 			return r;
1990bfebd1cdSMike Snitzer 		}
1991bfebd1cdSMike Snitzer 		break;
1992bfebd1cdSMike Snitzer 	case DM_TYPE_BIO_BASED:
1993545ed20eSToshi Kani 	case DM_TYPE_DAX_BIO_BASED:
1994bfebd1cdSMike Snitzer 		break;
19957e0d574fSBart Van Assche 	case DM_TYPE_NONE:
19967e0d574fSBart Van Assche 		WARN_ON_ONCE(true);
19977e0d574fSBart Van Assche 		break;
1998ff36ab34SMike Snitzer 	}
19994a0b4ddfSMike Snitzer 
2000c100ec49SMike Snitzer 	r = dm_calculate_queue_limits(t, &limits);
2001c100ec49SMike Snitzer 	if (r) {
2002c100ec49SMike Snitzer 		DMERR("Cannot calculate initial queue limits");
2003c100ec49SMike Snitzer 		return r;
2004c100ec49SMike Snitzer 	}
2005bb37d772SDamien Le Moal 	r = dm_table_set_restrictions(t, md->queue, &limits);
2006bb37d772SDamien Le Moal 	if (r)
2007bb37d772SDamien Le Moal 		return r;
200889f871afSChristoph Hellwig 
2009e7089f65SLuis Chamberlain 	r = add_disk(md->disk);
2010e7089f65SLuis Chamberlain 	if (r)
2011e7089f65SLuis Chamberlain 		return r;
201289f871afSChristoph Hellwig 
201389f871afSChristoph Hellwig 	r = dm_sysfs_init(md);
201489f871afSChristoph Hellwig 	if (r) {
201589f871afSChristoph Hellwig 		del_gendisk(md->disk);
201689f871afSChristoph Hellwig 		return r;
201789f871afSChristoph Hellwig 	}
2018ba305859SChristoph Hellwig 	md->type = type;
20194a0b4ddfSMike Snitzer 	return 0;
20204a0b4ddfSMike Snitzer }
20214a0b4ddfSMike Snitzer 
20222bec1f4aSMikulas Patocka struct mapped_device *dm_get_md(dev_t dev)
20231da177e4SLinus Torvalds {
20241da177e4SLinus Torvalds 	struct mapped_device *md;
20251da177e4SLinus Torvalds 	unsigned minor = MINOR(dev);
20261da177e4SLinus Torvalds 
20271da177e4SLinus Torvalds 	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
20281da177e4SLinus Torvalds 		return NULL;
20291da177e4SLinus Torvalds 
2030f32c10b0SJeff Mahoney 	spin_lock(&_minor_lock);
20311da177e4SLinus Torvalds 
20321da177e4SLinus Torvalds 	md = idr_find(&_minor_idr, minor);
203349de5769SMike Snitzer 	if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) ||
203449de5769SMike Snitzer 	    test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
2035637842cfSDavid Teigland 		md = NULL;
2036fba9f90eSJeff Mahoney 		goto out;
2037fba9f90eSJeff Mahoney 	}
20382bec1f4aSMikulas Patocka 	dm_get(md);
2039fba9f90eSJeff Mahoney out:
2040f32c10b0SJeff Mahoney 	spin_unlock(&_minor_lock);
20411da177e4SLinus Torvalds 
2042637842cfSDavid Teigland 	return md;
2043637842cfSDavid Teigland }
20443cf2e4baSAlasdair G Kergon EXPORT_SYMBOL_GPL(dm_get_md);
2045d229a958SDavid Teigland 
20469ade92a9SAlasdair G Kergon void *dm_get_mdptr(struct mapped_device *md)
2047637842cfSDavid Teigland {
20489ade92a9SAlasdair G Kergon 	return md->interface_ptr;
20491da177e4SLinus Torvalds }
20501da177e4SLinus Torvalds 
20511da177e4SLinus Torvalds void dm_set_mdptr(struct mapped_device *md, void *ptr)
20521da177e4SLinus Torvalds {
20531da177e4SLinus Torvalds 	md->interface_ptr = ptr;
20541da177e4SLinus Torvalds }
20551da177e4SLinus Torvalds 
20561da177e4SLinus Torvalds void dm_get(struct mapped_device *md)
20571da177e4SLinus Torvalds {
20581da177e4SLinus Torvalds 	atomic_inc(&md->holders);
20593f77316dSKiyoshi Ueda 	BUG_ON(test_bit(DMF_FREEING, &md->flags));
20601da177e4SLinus Torvalds }
20611da177e4SLinus Torvalds 
206209ee96b2SMikulas Patocka int dm_hold(struct mapped_device *md)
206309ee96b2SMikulas Patocka {
206409ee96b2SMikulas Patocka 	spin_lock(&_minor_lock);
206509ee96b2SMikulas Patocka 	if (test_bit(DMF_FREEING, &md->flags)) {
206609ee96b2SMikulas Patocka 		spin_unlock(&_minor_lock);
206709ee96b2SMikulas Patocka 		return -EBUSY;
206809ee96b2SMikulas Patocka 	}
206909ee96b2SMikulas Patocka 	dm_get(md);
207009ee96b2SMikulas Patocka 	spin_unlock(&_minor_lock);
207109ee96b2SMikulas Patocka 	return 0;
207209ee96b2SMikulas Patocka }
207309ee96b2SMikulas Patocka EXPORT_SYMBOL_GPL(dm_hold);
207409ee96b2SMikulas Patocka 
207572d94861SAlasdair G Kergon const char *dm_device_name(struct mapped_device *md)
207672d94861SAlasdair G Kergon {
207772d94861SAlasdair G Kergon 	return md->name;
207872d94861SAlasdair G Kergon }
207972d94861SAlasdair G Kergon EXPORT_SYMBOL_GPL(dm_device_name);
208072d94861SAlasdair G Kergon 
20813f77316dSKiyoshi Ueda static void __dm_destroy(struct mapped_device *md, bool wait)
20821da177e4SLinus Torvalds {
20831134e5aeSMike Anderson 	struct dm_table *map;
208483d5e5b0SMikulas Patocka 	int srcu_idx;
20851da177e4SLinus Torvalds 
20863f77316dSKiyoshi Ueda 	might_sleep();
2087fba9f90eSJeff Mahoney 
208863a4f065SMike Snitzer 	spin_lock(&_minor_lock);
20893f77316dSKiyoshi Ueda 	idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
2090fba9f90eSJeff Mahoney 	set_bit(DMF_FREEING, &md->flags);
2091f32c10b0SJeff Mahoney 	spin_unlock(&_minor_lock);
20923f77316dSKiyoshi Ueda 
2093c12c9a3cSMike Snitzer 	blk_set_queue_dying(md->queue);
20943b785fbcSBart Van Assche 
2095ab7c7bb6SMikulas Patocka 	/*
2096ab7c7bb6SMikulas Patocka 	 * Take suspend_lock so that presuspend and postsuspend methods
2097ab7c7bb6SMikulas Patocka 	 * do not race with internal suspend.
2098ab7c7bb6SMikulas Patocka 	 */
2099ab7c7bb6SMikulas Patocka 	mutex_lock(&md->suspend_lock);
21002a708cffSJunichi Nomura 	map = dm_get_live_table(md, &srcu_idx);
21014f186f8bSKiyoshi Ueda 	if (!dm_suspended_md(md)) {
21021da177e4SLinus Torvalds 		dm_table_presuspend_targets(map);
2103adc0daadSMikulas Patocka 		set_bit(DMF_SUSPENDED, &md->flags);
21045df96f2bSMikulas Patocka 		set_bit(DMF_POST_SUSPENDING, &md->flags);
21051da177e4SLinus Torvalds 		dm_table_postsuspend_targets(map);
21061da177e4SLinus Torvalds 	}
210783d5e5b0SMikulas Patocka 	/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
210883d5e5b0SMikulas Patocka 	dm_put_live_table(md, srcu_idx);
21092a708cffSJunichi Nomura 	mutex_unlock(&md->suspend_lock);
211083d5e5b0SMikulas Patocka 
21113f77316dSKiyoshi Ueda 	/*
21123f77316dSKiyoshi Ueda 	 * Rare, but there may still be I/O requests completing, for
21133f77316dSKiyoshi Ueda 	 * example.  Wait for all references to disappear.
21143f77316dSKiyoshi Ueda 	 * No one should increment the reference count of the mapped_device
21153f77316dSKiyoshi Ueda 	 * after its state becomes DMF_FREEING.
21163f77316dSKiyoshi Ueda 	 */
21173f77316dSKiyoshi Ueda 	if (wait)
21183f77316dSKiyoshi Ueda 		while (atomic_read(&md->holders))
21193f77316dSKiyoshi Ueda 			msleep(1);
21203f77316dSKiyoshi Ueda 	else if (atomic_read(&md->holders))
21213f77316dSKiyoshi Ueda 		DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
21223f77316dSKiyoshi Ueda 		       dm_device_name(md), atomic_read(&md->holders));
21233f77316dSKiyoshi Ueda 
2124a7940155SAlasdair G Kergon 	dm_table_destroy(__unbind(md));
21251da177e4SLinus Torvalds 	free_dev(md);
21261da177e4SLinus Torvalds }
21273f77316dSKiyoshi Ueda 
21283f77316dSKiyoshi Ueda void dm_destroy(struct mapped_device *md)
21293f77316dSKiyoshi Ueda {
21303f77316dSKiyoshi Ueda 	__dm_destroy(md, true);
21313f77316dSKiyoshi Ueda }
21323f77316dSKiyoshi Ueda 
21333f77316dSKiyoshi Ueda void dm_destroy_immediate(struct mapped_device *md)
21343f77316dSKiyoshi Ueda {
21353f77316dSKiyoshi Ueda 	__dm_destroy(md, false);
21363f77316dSKiyoshi Ueda }
21373f77316dSKiyoshi Ueda 
21383f77316dSKiyoshi Ueda void dm_put(struct mapped_device *md)
21393f77316dSKiyoshi Ueda {
21403f77316dSKiyoshi Ueda 	atomic_dec(&md->holders);
21411da177e4SLinus Torvalds }
214279eb885cSEdward Goggin EXPORT_SYMBOL_GPL(dm_put);
21431da177e4SLinus Torvalds 
214485067747SMing Lei static bool md_in_flight_bios(struct mapped_device *md)
214585067747SMing Lei {
214685067747SMing Lei 	int cpu;
21478446fe92SChristoph Hellwig 	struct block_device *part = dm_disk(md)->part0;
214885067747SMing Lei 	long sum = 0;
214985067747SMing Lei 
215085067747SMing Lei 	for_each_possible_cpu(cpu) {
215185067747SMing Lei 		sum += part_stat_local_read_cpu(part, in_flight[0], cpu);
215285067747SMing Lei 		sum += part_stat_local_read_cpu(part, in_flight[1], cpu);
215385067747SMing Lei 	}
215485067747SMing Lei 
215585067747SMing Lei 	return sum != 0;
215685067747SMing Lei }
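
/*
 * The per-CPU in_flight counters are summed as signed values: a given
 * CPU's count may be negative when a bio is started on one CPU and
 * completed on another, so only the total across all CPUs is meaningful.
 */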
215785067747SMing Lei 
21582f064a59SPeter Zijlstra static int dm_wait_for_bios_completion(struct mapped_device *md, unsigned int task_state)
215946125c1cSMilan Broz {
216046125c1cSMilan Broz 	int r = 0;
21619f4c3f87SBart Van Assche 	DEFINE_WAIT(wait);
216246125c1cSMilan Broz 
216385067747SMing Lei 	while (true) {
21649f4c3f87SBart Van Assche 		prepare_to_wait(&md->wait, &wait, task_state);
216546125c1cSMilan Broz 
216685067747SMing Lei 		if (!md_in_flight_bios(md))
216746125c1cSMilan Broz 			break;
216846125c1cSMilan Broz 
2169e3fabdfdSBart Van Assche 		if (signal_pending_state(task_state, current)) {
217046125c1cSMilan Broz 			r = -EINTR;
217146125c1cSMilan Broz 			break;
217246125c1cSMilan Broz 		}
217346125c1cSMilan Broz 
217446125c1cSMilan Broz 		io_schedule();
217546125c1cSMilan Broz 	}
21769f4c3f87SBart Van Assche 	finish_wait(&md->wait, &wait);
2177b44ebeb0SMikulas Patocka 
217846125c1cSMilan Broz 	return r;
217946125c1cSMilan Broz }
218046125c1cSMilan Broz 
21812f064a59SPeter Zijlstra static int dm_wait_for_completion(struct mapped_device *md, unsigned int task_state)
218285067747SMing Lei {
218385067747SMing Lei 	int r = 0;
218485067747SMing Lei 
218585067747SMing Lei 	if (!queue_is_mq(md->queue))
218685067747SMing Lei 		return dm_wait_for_bios_completion(md, task_state);
218785067747SMing Lei 
218885067747SMing Lei 	while (true) {
218985067747SMing Lei 		if (!blk_mq_queue_inflight(md->queue))
219085067747SMing Lei 			break;
219185067747SMing Lei 
219285067747SMing Lei 		if (signal_pending_state(task_state, current)) {
219385067747SMing Lei 			r = -EINTR;
219485067747SMing Lei 			break;
219585067747SMing Lei 		}
219685067747SMing Lei 
219785067747SMing Lei 		msleep(5);
219885067747SMing Lei 	}
219985067747SMing Lei 
220085067747SMing Lei 	return r;
220185067747SMing Lei }
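
/*
 * For request-based (blk-mq) devices there is no md->wait waitqueue to
 * sleep on, so dm_wait_for_completion() polls blk_mq_queue_inflight()
 * with a short msleep() instead of taking the bio-based path above.
 */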
220285067747SMing Lei 
22031da177e4SLinus Torvalds /*
22041da177e4SLinus Torvalds  * Process the deferred bios
22051da177e4SLinus Torvalds  */
2206ef208587SMikulas Patocka static void dm_wq_work(struct work_struct *work)
22071da177e4SLinus Torvalds {
22080c2915b8SMike Snitzer 	struct mapped_device *md = container_of(work, struct mapped_device, work);
22090c2915b8SMike Snitzer 	struct bio *bio;
2210ef208587SMikulas Patocka 
22113b00b203SMikulas Patocka 	while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
2212022c2611SMikulas Patocka 		spin_lock_irq(&md->deferred_lock);
22130c2915b8SMike Snitzer 		bio = bio_list_pop(&md->deferred);
2214022c2611SMikulas Patocka 		spin_unlock_irq(&md->deferred_lock);
2215022c2611SMikulas Patocka 
22160c2915b8SMike Snitzer 		if (!bio)
2217df12ee99SAlasdair G Kergon 			break;
221873d410c0SMilan Broz 
22190c2915b8SMike Snitzer 		submit_bio_noacct(bio);
2220e6ee8c0bSKiyoshi Ueda 	}
22211da177e4SLinus Torvalds }
22221da177e4SLinus Torvalds 
22239a1fb464SMikulas Patocka static void dm_queue_flush(struct mapped_device *md)
2224304f3f6aSMilan Broz {
22253b00b203SMikulas Patocka 	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
22264e857c58SPeter Zijlstra 	smp_mb__after_atomic();
222753d5914fSMikulas Patocka 	queue_work(md->wq, &md->work);
2228304f3f6aSMilan Broz }
2229304f3f6aSMilan Broz 
22301da177e4SLinus Torvalds /*
2231042d2a9bSAlasdair G Kergon  * Swap in a new table, returning the old one for the caller to destroy.
22321da177e4SLinus Torvalds  */
2233042d2a9bSAlasdair G Kergon struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
22341da177e4SLinus Torvalds {
223587eb5b21SMike Christie 	struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
2236754c5fc7SMike Snitzer 	struct queue_limits limits;
2237042d2a9bSAlasdair G Kergon 	int r;
22381da177e4SLinus Torvalds 
2239e61290a4SDaniel Walker 	mutex_lock(&md->suspend_lock);
22401da177e4SLinus Torvalds 
22411da177e4SLinus Torvalds 	/* device must be suspended */
22424f186f8bSKiyoshi Ueda 	if (!dm_suspended_md(md))
224393c534aeSAlasdair G Kergon 		goto out;
22441da177e4SLinus Torvalds 
22453ae70656SMike Snitzer 	/*
22463ae70656SMike Snitzer 	 * If the new table has no data devices, retain the existing limits.
22473ae70656SMike Snitzer 	 * This helps multipath with queue_if_no_path when all paths disappear:
22483ae70656SMike Snitzer 	 * new I/O is then queued based on these limits until some paths
22493ae70656SMike Snitzer 	 * reappear.
22503ae70656SMike Snitzer 	 */
22513ae70656SMike Snitzer 	if (dm_table_has_no_data_devices(table)) {
225283d5e5b0SMikulas Patocka 		live_map = dm_get_live_table_fast(md);
22533ae70656SMike Snitzer 		if (live_map)
22543ae70656SMike Snitzer 			limits = md->queue->limits;
225583d5e5b0SMikulas Patocka 		dm_put_live_table_fast(md);
22563ae70656SMike Snitzer 	}
22573ae70656SMike Snitzer 
225887eb5b21SMike Christie 	if (!live_map) {
2259754c5fc7SMike Snitzer 		r = dm_calculate_queue_limits(table, &limits);
2260042d2a9bSAlasdair G Kergon 		if (r) {
2261042d2a9bSAlasdair G Kergon 			map = ERR_PTR(r);
2262754c5fc7SMike Snitzer 			goto out;
2263042d2a9bSAlasdair G Kergon 		}
226487eb5b21SMike Christie 	}
2265754c5fc7SMike Snitzer 
2266042d2a9bSAlasdair G Kergon 	map = __bind(md, table, &limits);
226762e08243SMikulas Patocka 	dm_issue_global_event();
22681da177e4SLinus Torvalds 
226993c534aeSAlasdair G Kergon out:
2270e61290a4SDaniel Walker 	mutex_unlock(&md->suspend_lock);
2271042d2a9bSAlasdair G Kergon 	return map;
22721da177e4SLinus Torvalds }
22731da177e4SLinus Torvalds 
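/*
 * A minimal, hedged sketch of dm_swap_table()'s caller contract: the
 * device must already be suspended, the returned old table (if any)
 * belongs to the caller, and an ERR_PTR means no swap happened.
 * example_replace_table is hypothetical; the real caller is the
 * table-load/resume ioctl path in dm-ioctl.c.
 */
static int example_replace_table(struct mapped_device *md,
				 struct dm_table *new_table)
{
	struct dm_table *old_table;
	int r;

	r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
	if (r)
		return r;

	old_table = dm_swap_table(md, new_table);
	if (IS_ERR(old_table)) {
		dm_resume(md);
		return PTR_ERR(old_table);
	}

	if (old_table)
		dm_table_destroy(old_table);

	return dm_resume(md);
}
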
22741da177e4SLinus Torvalds /*
22751da177e4SLinus Torvalds  * Functions to lock and unlock any filesystem running on the
22761da177e4SLinus Torvalds  * device.
22771da177e4SLinus Torvalds  */
22782ca3310eSAlasdair G Kergon static int lock_fs(struct mapped_device *md)
22791da177e4SLinus Torvalds {
2280e39e2e95SAlasdair G Kergon 	int r;
22811da177e4SLinus Torvalds 
2282040f04bdSChristoph Hellwig 	WARN_ON(test_bit(DMF_FROZEN, &md->flags));
2283dfbe03f6SAlasdair G Kergon 
2284977115c0SChristoph Hellwig 	r = freeze_bdev(md->disk->part0);
2285040f04bdSChristoph Hellwig 	if (!r)
2286aa8d7c2fSAlasdair G Kergon 		set_bit(DMF_FROZEN, &md->flags);
2287040f04bdSChristoph Hellwig 	return r;
22881da177e4SLinus Torvalds }
22891da177e4SLinus Torvalds 
22902ca3310eSAlasdair G Kergon static void unlock_fs(struct mapped_device *md)
22911da177e4SLinus Torvalds {
2292aa8d7c2fSAlasdair G Kergon 	if (!test_bit(DMF_FROZEN, &md->flags))
2293aa8d7c2fSAlasdair G Kergon 		return;
2294977115c0SChristoph Hellwig 	thaw_bdev(md->disk->part0);
2295aa8d7c2fSAlasdair G Kergon 	clear_bit(DMF_FROZEN, &md->flags);
22961da177e4SLinus Torvalds }
22971da177e4SLinus Torvalds 
22981da177e4SLinus Torvalds /*
2299b48633f8SBart Van Assche  * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG
2300b48633f8SBart Van Assche  * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE
2301b48633f8SBart Van Assche  * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY
2302b48633f8SBart Van Assche  *
2303ffcc3936SMike Snitzer  * If __dm_suspend returns 0, the device is completely quiescent
2304ffcc3936SMike Snitzer  * now. There is no request-processing activity. All new requests
2305ffcc3936SMike Snitzer  * are added to the md->deferred list.
2306cec47e3dSKiyoshi Ueda  */
2307ffcc3936SMike Snitzer static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
23082f064a59SPeter Zijlstra 			unsigned suspend_flags, unsigned int task_state,
2309eaf9a736SMike Snitzer 			int dmf_suspended_flag)
23101da177e4SLinus Torvalds {
2311ffcc3936SMike Snitzer 	bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
2312ffcc3936SMike Snitzer 	bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
2313ffcc3936SMike Snitzer 	int r;
2314cf222b37SAlasdair G Kergon 
23155a8f1f80SBart Van Assche 	lockdep_assert_held(&md->suspend_lock);
23165a8f1f80SBart Van Assche 
23172e93ccc1SKiyoshi Ueda 	/*
23182e93ccc1SKiyoshi Ueda 	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
23192e93ccc1SKiyoshi Ueda 	 * This flag is cleared before dm_suspend returns.
23202e93ccc1SKiyoshi Ueda 	 */
23212e93ccc1SKiyoshi Ueda 	if (noflush)
23222e93ccc1SKiyoshi Ueda 		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
232386331f39SBart Van Assche 	else
2324ac75b09fSMike Snitzer 		DMDEBUG("%s: suspending with flush", dm_device_name(md));
23252e93ccc1SKiyoshi Ueda 
2326d67ee213SMike Snitzer 	/*
2327d67ee213SMike Snitzer 	 * This gets reverted if there's an error later and the targets
2328d67ee213SMike Snitzer 	 * provide the .presuspend_undo hook.
2329d67ee213SMike Snitzer 	 */
23301da177e4SLinus Torvalds 	dm_table_presuspend_targets(map);
23311da177e4SLinus Torvalds 
23322e93ccc1SKiyoshi Ueda 	/*
23339f518b27SKiyoshi Ueda 	 * Flush I/O to the device.
23349f518b27SKiyoshi Ueda 	 * Any I/O submitted after lock_fs() may not be flushed.
23359f518b27SKiyoshi Ueda 	 * noflush takes precedence over do_lockfs.
23369f518b27SKiyoshi Ueda 	 * (lock_fs() flushes I/Os and waits for them to complete.)
23372e93ccc1SKiyoshi Ueda 	 */
233832a926daSMikulas Patocka 	if (!noflush && do_lockfs) {
23392ca3310eSAlasdair G Kergon 		r = lock_fs(md);
2340d67ee213SMike Snitzer 		if (r) {
2341d67ee213SMike Snitzer 			dm_table_presuspend_undo_targets(map);
2342ffcc3936SMike Snitzer 			return r;
2343aa8d7c2fSAlasdair G Kergon 		}
2344d67ee213SMike Snitzer 	}
23451da177e4SLinus Torvalds 
23461da177e4SLinus Torvalds 	/*
23473b00b203SMikulas Patocka 	 * Here we must make sure that no processes are submitting requests
23483b00b203SMikulas Patocka 	 * to target drivers, i.e. no one may be executing
23490cede372SMike Snitzer 	 * __split_and_process_bio from dm_submit_bio.
23503b00b203SMikulas Patocka 	 *
23510cede372SMike Snitzer 	 * To get all processes out of __split_and_process_bio in dm_submit_bio,
23523b00b203SMikulas Patocka 	 * we set DMF_BLOCK_IO_FOR_SUSPEND so that new bios are deferred, then
23530cede372SMike Snitzer 	 * wait out anyone still inside __split_and_process_bio with
23540cede372SMike Snitzer 	 * synchronize_srcu(&md->io_barrier), and quiesce the worker thread
23556a8736d1STejun Heo 	 * (dm_wq_work) with flush_workqueue(md->wq).
23561da177e4SLinus Torvalds 	 */
23571eb787ecSAlasdair G Kergon 	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
235841abc4e1SHannes Reinecke 	if (map)
235983d5e5b0SMikulas Patocka 		synchronize_srcu(&md->io_barrier);
23601da177e4SLinus Torvalds 
2361d0bcb878SKiyoshi Ueda 	/*
236229e4013dSTejun Heo 	 * Stop md->queue before flushing md->wq in case request-based
236329e4013dSTejun Heo 	 * dm defers requests to md->wq from md->queue.
2364d0bcb878SKiyoshi Ueda 	 */
23656a23e05cSJens Axboe 	if (dm_request_based(md))
2366eca7ee6dSMike Snitzer 		dm_stop_queue(md->queue);
2367cec47e3dSKiyoshi Ueda 
2368d0bcb878SKiyoshi Ueda 	flush_workqueue(md->wq);
2369d0bcb878SKiyoshi Ueda 
23701da177e4SLinus Torvalds 	/*
23713b00b203SMikulas Patocka 	 * At this point no more requests are entering target request routines.
23723b00b203SMikulas Patocka 	 * We call dm_wait_for_completion to wait for all existing requests
23733b00b203SMikulas Patocka 	 * to finish.
23741da177e4SLinus Torvalds 	 */
2375b48633f8SBart Van Assche 	r = dm_wait_for_completion(md, task_state);
2376eaf9a736SMike Snitzer 	if (!r)
2377eaf9a736SMike Snitzer 		set_bit(dmf_suspended_flag, &md->flags);
23781da177e4SLinus Torvalds 
23796d6f10dfSMilan Broz 	if (noflush)
2380022c2611SMikulas Patocka 		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
238141abc4e1SHannes Reinecke 	if (map)
238283d5e5b0SMikulas Patocka 		synchronize_srcu(&md->io_barrier);
23832e93ccc1SKiyoshi Ueda 
23841da177e4SLinus Torvalds 	/* were we interrupted? */
238546125c1cSMilan Broz 	if (r < 0) {
23869a1fb464SMikulas Patocka 		dm_queue_flush(md);
238773d410c0SMilan Broz 
2388cec47e3dSKiyoshi Ueda 		if (dm_request_based(md))
2389eca7ee6dSMike Snitzer 			dm_start_queue(md->queue);
2390cec47e3dSKiyoshi Ueda 
23912ca3310eSAlasdair G Kergon 		unlock_fs(md);
2392d67ee213SMike Snitzer 		dm_table_presuspend_undo_targets(map);
2393ffcc3936SMike Snitzer 		/* pushback list is already flushed, so skip flush */
2394ffcc3936SMike Snitzer 	}
2395ffcc3936SMike Snitzer 
2396ffcc3936SMike Snitzer 	return r;
23972ca3310eSAlasdair G Kergon }
23982ca3310eSAlasdair G Kergon 
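/*
 * The quiescing pattern used by __dm_suspend() above, reduced to a
 * hedged sketch: publish a "stop" flag, wait for every SRCU read-side
 * critical section that might have missed it to finish, then drain the
 * worker.  example_quiesce_io is hypothetical and only restates the
 * set_bit/synchronize_srcu/flush_workqueue sequence.
 */
static void example_quiesce_io(struct mapped_device *md)
{
	/* New submitters now defer their bios instead of mapping them. */
	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);

	/* Wait out submitters already inside an SRCU read section. */
	synchronize_srcu(&md->io_barrier);

	/* Drain the worker that replays deferred bios. */
	flush_workqueue(md->wq);
}
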
23993b00b203SMikulas Patocka /*
2400ffcc3936SMike Snitzer  * We need to be able to change a mapping table under a mounted
2401ffcc3936SMike Snitzer  * filesystem.  For example we might want to move some data in
2402ffcc3936SMike Snitzer  * the background.  Before the table can be swapped with
2403ffcc3936SMike Snitzer  * dm_bind_table, dm_suspend must be called to flush any in
2404ffcc3936SMike Snitzer  * dm_swap_table, dm_suspend must be called to flush any
2405ffcc3936SMike Snitzer  * in-flight bios and ensure that any further I/O gets deferred.
2406ffcc3936SMike Snitzer /*
2407ffcc3936SMike Snitzer  * Suspend mechanism in request-based dm.
2408ffcc3936SMike Snitzer  *
2409ffcc3936SMike Snitzer  * 1. Flush all I/Os by lock_fs() if needed.
2410ffcc3936SMike Snitzer  * 2. Stop dispatching any I/O by stopping the request_queue.
2411ffcc3936SMike Snitzer  * 3. Wait for all in-flight I/Os to be completed or requeued.
2412ffcc3936SMike Snitzer  *
2413ffcc3936SMike Snitzer  * To abort suspend, start the request_queue.
2414ffcc3936SMike Snitzer  */
2415ffcc3936SMike Snitzer int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
2416ffcc3936SMike Snitzer {
2417ffcc3936SMike Snitzer 	struct dm_table *map = NULL;
2418ffcc3936SMike Snitzer 	int r = 0;
2419ffcc3936SMike Snitzer 
2420ffcc3936SMike Snitzer retry:
2421ffcc3936SMike Snitzer 	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
2422ffcc3936SMike Snitzer 
2423ffcc3936SMike Snitzer 	if (dm_suspended_md(md)) {
2424ffcc3936SMike Snitzer 		r = -EINVAL;
2425ffcc3936SMike Snitzer 		goto out_unlock;
2426ffcc3936SMike Snitzer 	}
2427ffcc3936SMike Snitzer 
2428ffcc3936SMike Snitzer 	if (dm_suspended_internally_md(md)) {
2429ffcc3936SMike Snitzer 		/* already internally suspended, wait for internal resume */
2430ffcc3936SMike Snitzer 		mutex_unlock(&md->suspend_lock);
2431ffcc3936SMike Snitzer 		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
2432ffcc3936SMike Snitzer 		if (r)
2433ffcc3936SMike Snitzer 			return r;
2434ffcc3936SMike Snitzer 		goto retry;
2435ffcc3936SMike Snitzer 	}
2436ffcc3936SMike Snitzer 
2437a12f5d48SEric Dumazet 	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2438ffcc3936SMike Snitzer 
2439eaf9a736SMike Snitzer 	r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);
2440ffcc3936SMike Snitzer 	if (r)
2441ffcc3936SMike Snitzer 		goto out_unlock;
24423b00b203SMikulas Patocka 
24435df96f2bSMikulas Patocka 	set_bit(DMF_POST_SUSPENDING, &md->flags);
24444d4471cbSKiyoshi Ueda 	dm_table_postsuspend_targets(map);
24455df96f2bSMikulas Patocka 	clear_bit(DMF_POST_SUSPENDING, &md->flags);
24464d4471cbSKiyoshi Ueda 
2447d287483dSAlasdair G Kergon out_unlock:
2448e61290a4SDaniel Walker 	mutex_unlock(&md->suspend_lock);
2449cf222b37SAlasdair G Kergon 	return r;
24501da177e4SLinus Torvalds }
24511da177e4SLinus Torvalds 
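/*
 * A hedged sketch of how a caller picks dm_suspend() flags: lockfs
 * freezes any mounted filesystem so dirty data reaches the device
 * first, while noflush leaves outstanding I/O queued until resume
 * (useful for multipath-style queue_if_no_path behaviour).
 * example_suspend_modes is hypothetical.
 */
static int example_suspend_modes(struct mapped_device *md, bool keep_io_queued)
{
	unsigned suspend_flags = keep_io_queued ?
		DM_SUSPEND_NOFLUSH_FLAG : DM_SUSPEND_LOCKFS_FLAG;

	return dm_suspend(md, suspend_flags);
}
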
2452ffcc3936SMike Snitzer static int __dm_resume(struct mapped_device *md, struct dm_table *map)
24531da177e4SLinus Torvalds {
2454ffcc3936SMike Snitzer 	if (map) {
2455ffcc3936SMike Snitzer 		int r = dm_table_resume_targets(map);
24568757b776SMilan Broz 		if (r)
2457ffcc3936SMike Snitzer 			return r;
2458ffcc3936SMike Snitzer 	}
24592ca3310eSAlasdair G Kergon 
24609a1fb464SMikulas Patocka 	dm_queue_flush(md);
24612ca3310eSAlasdair G Kergon 
2462cec47e3dSKiyoshi Ueda 	/*
2463cec47e3dSKiyoshi Ueda 	 * Flushing deferred I/Os must be done after targets are resumed
2464cec47e3dSKiyoshi Ueda 	 * so that the targets can map them correctly.
2465cec47e3dSKiyoshi Ueda 	 * Request-based dm is queueing the deferred I/Os in its request_queue.
2466cec47e3dSKiyoshi Ueda 	 */
2467cec47e3dSKiyoshi Ueda 	if (dm_request_based(md))
2468eca7ee6dSMike Snitzer 		dm_start_queue(md->queue);
2469cec47e3dSKiyoshi Ueda 
24702ca3310eSAlasdair G Kergon 	unlock_fs(md);
24712ca3310eSAlasdair G Kergon 
2472ffcc3936SMike Snitzer 	return 0;
2473ffcc3936SMike Snitzer }
2474ffcc3936SMike Snitzer 
2475ffcc3936SMike Snitzer int dm_resume(struct mapped_device *md)
2476ffcc3936SMike Snitzer {
24778dc23658SMinfei Huang 	int r;
2478ffcc3936SMike Snitzer 	struct dm_table *map = NULL;
2479ffcc3936SMike Snitzer 
2480ffcc3936SMike Snitzer retry:
24818dc23658SMinfei Huang 	r = -EINVAL;
2482ffcc3936SMike Snitzer 	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
2483ffcc3936SMike Snitzer 
2484ffcc3936SMike Snitzer 	if (!dm_suspended_md(md))
2485ffcc3936SMike Snitzer 		goto out;
2486ffcc3936SMike Snitzer 
2487ffcc3936SMike Snitzer 	if (dm_suspended_internally_md(md)) {
2488ffcc3936SMike Snitzer 		/* already internally suspended, wait for internal resume */
2489ffcc3936SMike Snitzer 		mutex_unlock(&md->suspend_lock);
2490ffcc3936SMike Snitzer 		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
2491ffcc3936SMike Snitzer 		if (r)
2492ffcc3936SMike Snitzer 			return r;
2493ffcc3936SMike Snitzer 		goto retry;
2494ffcc3936SMike Snitzer 	}
2495ffcc3936SMike Snitzer 
2496a12f5d48SEric Dumazet 	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2497ffcc3936SMike Snitzer 	if (!map || !dm_table_get_size(map))
2498ffcc3936SMike Snitzer 		goto out;
2499ffcc3936SMike Snitzer 
2500ffcc3936SMike Snitzer 	r = __dm_resume(md, map);
2501ffcc3936SMike Snitzer 	if (r)
2502ffcc3936SMike Snitzer 		goto out;
2503ffcc3936SMike Snitzer 
25042ca3310eSAlasdair G Kergon 	clear_bit(DMF_SUSPENDED, &md->flags);
2505cf222b37SAlasdair G Kergon out:
2506e61290a4SDaniel Walker 	mutex_unlock(&md->suspend_lock);
25072ca3310eSAlasdair G Kergon 
2508cf222b37SAlasdair G Kergon 	return r;
25091da177e4SLinus Torvalds }
25101da177e4SLinus Torvalds 
2511fd2ed4d2SMikulas Patocka /*
2512fd2ed4d2SMikulas Patocka  * Internal suspend/resume works like userspace-driven suspend. It waits
2513fd2ed4d2SMikulas Patocka  * until all bios finish and prevents issuing new bios to the target drivers.
2514fd2ed4d2SMikulas Patocka  * It may be used only from the kernel.
2515fd2ed4d2SMikulas Patocka  */
2516fd2ed4d2SMikulas Patocka 
2517ffcc3936SMike Snitzer static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags)
2518ffcc3936SMike Snitzer {
2519ffcc3936SMike Snitzer 	struct dm_table *map = NULL;
2520ffcc3936SMike Snitzer 
25211ea0654eSBart Van Assche 	lockdep_assert_held(&md->suspend_lock);
25221ea0654eSBart Van Assche 
252396b26c8cSMikulas Patocka 	if (md->internal_suspend_count++)
2524ffcc3936SMike Snitzer 		return; /* nested internal suspend */
2525ffcc3936SMike Snitzer 
2526ffcc3936SMike Snitzer 	if (dm_suspended_md(md)) {
2527ffcc3936SMike Snitzer 		set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2528ffcc3936SMike Snitzer 		return; /* nested suspend */
2529ffcc3936SMike Snitzer 	}
2530ffcc3936SMike Snitzer 
2531a12f5d48SEric Dumazet 	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2532ffcc3936SMike Snitzer 
2533ffcc3936SMike Snitzer 	/*
2534ffcc3936SMike Snitzer 	 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is
2535ffcc3936SMike Snitzer 	 * supported.  Properly supporting a TASK_INTERRUPTIBLE internal suspend
2536ffcc3936SMike Snitzer 	 * would require changing .presuspend to return an error -- avoid this
2537ffcc3936SMike Snitzer 	 * until there is a need for more elaborate variants of internal suspend.
2538ffcc3936SMike Snitzer 	 */
2539eaf9a736SMike Snitzer 	(void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
2540eaf9a736SMike Snitzer 			    DMF_SUSPENDED_INTERNALLY);
2541ffcc3936SMike Snitzer 
25425df96f2bSMikulas Patocka 	set_bit(DMF_POST_SUSPENDING, &md->flags);
2543ffcc3936SMike Snitzer 	dm_table_postsuspend_targets(map);
25445df96f2bSMikulas Patocka 	clear_bit(DMF_POST_SUSPENDING, &md->flags);
2545ffcc3936SMike Snitzer }
2546ffcc3936SMike Snitzer 
2547ffcc3936SMike Snitzer static void __dm_internal_resume(struct mapped_device *md)
2548ffcc3936SMike Snitzer {
254996b26c8cSMikulas Patocka 	BUG_ON(!md->internal_suspend_count);
255096b26c8cSMikulas Patocka 
255196b26c8cSMikulas Patocka 	if (--md->internal_suspend_count)
2552ffcc3936SMike Snitzer 		return; /* resume from nested internal suspend */
2553ffcc3936SMike Snitzer 
2554ffcc3936SMike Snitzer 	if (dm_suspended_md(md))
2555ffcc3936SMike Snitzer 		goto done; /* resume from nested suspend */
2556ffcc3936SMike Snitzer 
2557ffcc3936SMike Snitzer 	/*
2558ffcc3936SMike Snitzer 	 * NOTE: existing callers don't need to call dm_table_resume_targets
2559ffcc3936SMike Snitzer 	 * (which may fail -- so best to avoid it for now by passing NULL map)
2560ffcc3936SMike Snitzer 	 */
2561ffcc3936SMike Snitzer 	(void) __dm_resume(md, NULL);
2562ffcc3936SMike Snitzer 
2563ffcc3936SMike Snitzer done:
2564ffcc3936SMike Snitzer 	clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2565ffcc3936SMike Snitzer 	smp_mb__after_atomic();
2566ffcc3936SMike Snitzer 	wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY);
2567ffcc3936SMike Snitzer }
2568ffcc3936SMike Snitzer 
2569ffcc3936SMike Snitzer void dm_internal_suspend_noflush(struct mapped_device *md)
2570fd2ed4d2SMikulas Patocka {
2571fd2ed4d2SMikulas Patocka 	mutex_lock(&md->suspend_lock);
2572ffcc3936SMike Snitzer 	__dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
2573ffcc3936SMike Snitzer 	mutex_unlock(&md->suspend_lock);
2574ffcc3936SMike Snitzer }
2575ffcc3936SMike Snitzer EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush);
2576ffcc3936SMike Snitzer 
2577ffcc3936SMike Snitzer void dm_internal_resume(struct mapped_device *md)
2578ffcc3936SMike Snitzer {
2579ffcc3936SMike Snitzer 	mutex_lock(&md->suspend_lock);
2580ffcc3936SMike Snitzer 	__dm_internal_resume(md);
2581ffcc3936SMike Snitzer 	mutex_unlock(&md->suspend_lock);
2582ffcc3936SMike Snitzer }
2583ffcc3936SMike Snitzer EXPORT_SYMBOL_GPL(dm_internal_resume);
2584ffcc3936SMike Snitzer 
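/*
 * A hedged sketch of kernel-internal suspend/resume usage, modelled on
 * how targets such as dm-thin quiesce a device around metadata
 * operations.  example_with_device_quiesced and its do_work callback
 * are hypothetical.
 */
static int example_with_device_quiesced(struct mapped_device *md,
					int (*do_work)(struct mapped_device *))
{
	int r;

	dm_internal_suspend_noflush(md);
	r = do_work(md);	/* no bios reach the targets here */
	dm_internal_resume(md);

	return r;
}
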
2585ffcc3936SMike Snitzer /*
2586ffcc3936SMike Snitzer  * Fast variants of internal suspend/resume hold md->suspend_lock,
2587ffcc3936SMike Snitzer  * which prevents interaction with userspace-driven suspend.
2588ffcc3936SMike Snitzer  */
2589ffcc3936SMike Snitzer 
2590ffcc3936SMike Snitzer void dm_internal_suspend_fast(struct mapped_device *md)
2591ffcc3936SMike Snitzer {
2592ffcc3936SMike Snitzer 	mutex_lock(&md->suspend_lock);
2593ffcc3936SMike Snitzer 	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
2594fd2ed4d2SMikulas Patocka 		return;
2595fd2ed4d2SMikulas Patocka 
2596fd2ed4d2SMikulas Patocka 	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2597fd2ed4d2SMikulas Patocka 	synchronize_srcu(&md->io_barrier);
2598fd2ed4d2SMikulas Patocka 	flush_workqueue(md->wq);
2599fd2ed4d2SMikulas Patocka 	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
2600fd2ed4d2SMikulas Patocka }
2601b735fedeSMikulas Patocka EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);
2602fd2ed4d2SMikulas Patocka 
2603ffcc3936SMike Snitzer void dm_internal_resume_fast(struct mapped_device *md)
2604fd2ed4d2SMikulas Patocka {
2605ffcc3936SMike Snitzer 	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
2606fd2ed4d2SMikulas Patocka 		goto done;
2607fd2ed4d2SMikulas Patocka 
2608fd2ed4d2SMikulas Patocka 	dm_queue_flush(md);
2609fd2ed4d2SMikulas Patocka 
2610fd2ed4d2SMikulas Patocka done:
2611fd2ed4d2SMikulas Patocka 	mutex_unlock(&md->suspend_lock);
2612fd2ed4d2SMikulas Patocka }
2613b735fedeSMikulas Patocka EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
2614fd2ed4d2SMikulas Patocka 
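/*
 * A hedged sketch of the fast-variant pairing: dm_internal_suspend_fast()
 * returns with md->suspend_lock held and dm_internal_resume_fast()
 * releases it, so the two must bracket the critical section on the same
 * task.  example_fast_quiesce is hypothetical.
 */
static void example_fast_quiesce(struct mapped_device *md)
{
	dm_internal_suspend_fast(md);	/* takes md->suspend_lock */

	/* ... operate on the quiesced device; userspace suspend is locked out ... */

	dm_internal_resume_fast(md);	/* drops md->suspend_lock */
}
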
26151da177e4SLinus Torvalds /*-----------------------------------------------------------------
26161da177e4SLinus Torvalds  * Event notification.
26171da177e4SLinus Torvalds  *---------------------------------------------------------------*/
26183abf85b5SPeter Rajnoha int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
261960935eb2SMilan Broz 		       unsigned cookie)
262069267a30SAlasdair G Kergon {
26216958c1c6SMikulas Patocka 	int r;
26226958c1c6SMikulas Patocka 	unsigned noio_flag;
262360935eb2SMilan Broz 	char udev_cookie[DM_COOKIE_LENGTH];
262460935eb2SMilan Broz 	char *envp[] = { udev_cookie, NULL };
262560935eb2SMilan Broz 
26266958c1c6SMikulas Patocka 	noio_flag = memalloc_noio_save();
26276958c1c6SMikulas Patocka 
262860935eb2SMilan Broz 	if (!cookie)
26296958c1c6SMikulas Patocka 		r = kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
263060935eb2SMilan Broz 	else {
263160935eb2SMilan Broz 		snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
263260935eb2SMilan Broz 			 DM_COOKIE_ENV_VAR_NAME, cookie);
26336958c1c6SMikulas Patocka 		r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
26343abf85b5SPeter Rajnoha 				       action, envp);
263560935eb2SMilan Broz 	}
26366958c1c6SMikulas Patocka 
26376958c1c6SMikulas Patocka 	memalloc_noio_restore(noio_flag);
26386958c1c6SMikulas Patocka 
26396958c1c6SMikulas Patocka 	return r;
264069267a30SAlasdair G Kergon }
264169267a30SAlasdair G Kergon 
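/*
 * A hedged sketch of cookie-carrying event delivery: the caller threads
 * a userspace-supplied cookie through so udev can match the uevent to
 * the ioctl that caused it.  example_notify_change is hypothetical; the
 * real callers live in dm-ioctl.c.
 */
static int example_notify_change(struct mapped_device *md, unsigned cookie)
{
	return dm_kobject_uevent(md, KOBJ_CHANGE, cookie);
}
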
26427a8c3d3bSMike Anderson uint32_t dm_next_uevent_seq(struct mapped_device *md)
26437a8c3d3bSMike Anderson {
26447a8c3d3bSMike Anderson 	return atomic_add_return(1, &md->uevent_seq);
26457a8c3d3bSMike Anderson }
26467a8c3d3bSMike Anderson 
26471da177e4SLinus Torvalds uint32_t dm_get_event_nr(struct mapped_device *md)
26481da177e4SLinus Torvalds {
26491da177e4SLinus Torvalds 	return atomic_read(&md->event_nr);
26501da177e4SLinus Torvalds }
26511da177e4SLinus Torvalds 
26521da177e4SLinus Torvalds int dm_wait_event(struct mapped_device *md, int event_nr)
26531da177e4SLinus Torvalds {
26541da177e4SLinus Torvalds 	return wait_event_interruptible(md->eventq,
26551da177e4SLinus Torvalds 			(event_nr != atomic_read(&md->event_nr)));
26561da177e4SLinus Torvalds }
26571da177e4SLinus Torvalds 
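/*
 * A hedged sketch of the event polling contract: snapshot the event
 * counter, then sleep until it moves.  example_wait_for_next_event is
 * hypothetical; the usual consumer is the DM_DEV_WAIT ioctl path.
 */
static int example_wait_for_next_event(struct mapped_device *md)
{
	int event_nr = dm_get_event_nr(md);

	/* Returns -ERESTARTSYS if interrupted before a new event arrives. */
	return dm_wait_event(md, event_nr);
}
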
26587a8c3d3bSMike Anderson void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
26597a8c3d3bSMike Anderson {
26607a8c3d3bSMike Anderson 	unsigned long flags;
26617a8c3d3bSMike Anderson 
26627a8c3d3bSMike Anderson 	spin_lock_irqsave(&md->uevent_lock, flags);
26637a8c3d3bSMike Anderson 	list_add(elist, &md->uevent_list);
26647a8c3d3bSMike Anderson 	spin_unlock_irqrestore(&md->uevent_lock, flags);
26657a8c3d3bSMike Anderson }
26667a8c3d3bSMike Anderson 
26671da177e4SLinus Torvalds /*
26681da177e4SLinus Torvalds  * The gendisk is only valid as long as you have a reference
26691da177e4SLinus Torvalds  * count on 'md'.
26701da177e4SLinus Torvalds  */
26711da177e4SLinus Torvalds struct gendisk *dm_disk(struct mapped_device *md)
26721da177e4SLinus Torvalds {
26731da177e4SLinus Torvalds 	return md->disk;
26741da177e4SLinus Torvalds }
267565ff5b7dSSami Tolvanen EXPORT_SYMBOL_GPL(dm_disk);
26761da177e4SLinus Torvalds 
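/*
 * A hedged sketch of the lifetime rule stated above: take a reference
 * on the mapped_device before touching its gendisk and drop it
 * afterwards.  example_with_disk is hypothetical.
 */
static void example_with_disk(struct mapped_device *md)
{
	struct gendisk *disk;

	dm_get(md);
	disk = dm_disk(md);
	/* ... use disk; it stays valid while the md reference is held ... */
	dm_put(md);
}
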
2677784aae73SMilan Broz struct kobject *dm_kobject(struct mapped_device *md)
2678784aae73SMilan Broz {
26792995fa78SMikulas Patocka 	return &md->kobj_holder.kobj;
2680784aae73SMilan Broz }
2681784aae73SMilan Broz 
2682784aae73SMilan Broz struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
2683784aae73SMilan Broz {
2684784aae73SMilan Broz 	struct mapped_device *md;
2685784aae73SMilan Broz 
26862995fa78SMikulas Patocka 	md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
2687784aae73SMilan Broz 
2688b9a41d21SHou Tao 	spin_lock(&_minor_lock);
2689b9a41d21SHou Tao 	if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
2690b9a41d21SHou Tao 		md = NULL;
2691b9a41d21SHou Tao 		goto out;
2692b9a41d21SHou Tao 	}
2693784aae73SMilan Broz 	dm_get(md);
2694b9a41d21SHou Tao out:
2695b9a41d21SHou Tao 	spin_unlock(&_minor_lock);
2696b9a41d21SHou Tao 
2697784aae73SMilan Broz 	return md;
2698784aae73SMilan Broz }
2699784aae73SMilan Broz 
27004f186f8bSKiyoshi Ueda int dm_suspended_md(struct mapped_device *md)
27011da177e4SLinus Torvalds {
27021da177e4SLinus Torvalds 	return test_bit(DMF_SUSPENDED, &md->flags);
27031da177e4SLinus Torvalds }
27041da177e4SLinus Torvalds 
27055df96f2bSMikulas Patocka static int dm_post_suspending_md(struct mapped_device *md)
27065df96f2bSMikulas Patocka {
27075df96f2bSMikulas Patocka 	return test_bit(DMF_POST_SUSPENDING, &md->flags);
27085df96f2bSMikulas Patocka }
27095df96f2bSMikulas Patocka 
2710ffcc3936SMike Snitzer int dm_suspended_internally_md(struct mapped_device *md)
2711ffcc3936SMike Snitzer {
2712ffcc3936SMike Snitzer 	return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2713ffcc3936SMike Snitzer }
2714ffcc3936SMike Snitzer 
27152c140a24SMikulas Patocka int dm_test_deferred_remove_flag(struct mapped_device *md)
27162c140a24SMikulas Patocka {
27172c140a24SMikulas Patocka 	return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
27182c140a24SMikulas Patocka }
27192c140a24SMikulas Patocka 
272064dbce58SKiyoshi Ueda int dm_suspended(struct dm_target *ti)
272164dbce58SKiyoshi Ueda {
272233bd6f06SMike Snitzer 	return dm_suspended_md(ti->table->md);
272364dbce58SKiyoshi Ueda }
272464dbce58SKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_suspended);
272564dbce58SKiyoshi Ueda 
27265df96f2bSMikulas Patocka int dm_post_suspending(struct dm_target *ti)
27275df96f2bSMikulas Patocka {
272833bd6f06SMike Snitzer 	return dm_post_suspending_md(ti->table->md);
27295df96f2bSMikulas Patocka }
27305df96f2bSMikulas Patocka EXPORT_SYMBOL_GPL(dm_post_suspending);
27315df96f2bSMikulas Patocka 
27322e93ccc1SKiyoshi Ueda int dm_noflush_suspending(struct dm_target *ti)
27332e93ccc1SKiyoshi Ueda {
273433bd6f06SMike Snitzer 	return __noflush_suspending(ti->table->md);
27352e93ccc1SKiyoshi Ueda }
27362e93ccc1SKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_noflush_suspending);
27372e93ccc1SKiyoshi Ueda 
27387e0d574fSBart Van Assche struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
27390776aa0eSMike Snitzer 					    unsigned integrity, unsigned per_io_data_size,
27400776aa0eSMike Snitzer 					    unsigned min_pool_size)
2741e6ee8c0bSKiyoshi Ueda {
2742115485e8SMike Snitzer 	struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
274378d8e58aSMike Snitzer 	unsigned int pool_size = 0;
274464f52b0eSMike Snitzer 	unsigned int front_pad, io_front_pad;
27456f1c819cSKent Overstreet 	int ret;
2746e6ee8c0bSKiyoshi Ueda 
2747e6ee8c0bSKiyoshi Ueda 	if (!pools)
27484e6e36c3SMike Snitzer 		return NULL;
2749e6ee8c0bSKiyoshi Ueda 
275078d8e58aSMike Snitzer 	switch (type) {
275178d8e58aSMike Snitzer 	case DM_TYPE_BIO_BASED:
2752545ed20eSToshi Kani 	case DM_TYPE_DAX_BIO_BASED:
27530776aa0eSMike Snitzer 		pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
275462f26317SJeffle Xu 		front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + DM_TARGET_IO_BIO_OFFSET;
275562f26317SJeffle Xu 		io_front_pad = roundup(per_io_data_size, __alignof__(struct dm_io)) + DM_IO_BIO_OFFSET;
27566f1c819cSKent Overstreet 		ret = bioset_init(&pools->io_bs, pool_size, io_front_pad, 0);
27576f1c819cSKent Overstreet 		if (ret)
275864f52b0eSMike Snitzer 			goto out;
27596f1c819cSKent Overstreet 		if (integrity && bioset_integrity_create(&pools->io_bs, pool_size))
2760eb8db831SChristoph Hellwig 			goto out;
276178d8e58aSMike Snitzer 		break;
276278d8e58aSMike Snitzer 	case DM_TYPE_REQUEST_BASED:
27630776aa0eSMike Snitzer 		pool_size = max(dm_get_reserved_rq_based_ios(), min_pool_size);
276478d8e58aSMike Snitzer 		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
2765591ddcfcSMike Snitzer 		/* per_io_data_size is used for blk-mq pdu at queue allocation */
276678d8e58aSMike Snitzer 		break;
276778d8e58aSMike Snitzer 	default:
276878d8e58aSMike Snitzer 		BUG();
276978d8e58aSMike Snitzer 	}
277078d8e58aSMike Snitzer 
27716f1c819cSKent Overstreet 	ret = bioset_init(&pools->bs, pool_size, front_pad, 0);
27726f1c819cSKent Overstreet 	if (ret)
27735f015204SJun'ichi Nomura 		goto out;
2774e6ee8c0bSKiyoshi Ueda 
27756f1c819cSKent Overstreet 	if (integrity && bioset_integrity_create(&pools->bs, pool_size))
27765f015204SJun'ichi Nomura 		goto out;
2777a91a2785SMartin K. Petersen 
2778e6ee8c0bSKiyoshi Ueda 	return pools;
277978d8e58aSMike Snitzer 
27805f015204SJun'ichi Nomura out:
27815f015204SJun'ichi Nomura 	dm_free_md_mempools(pools);
2782e6ee8c0bSKiyoshi Ueda 
27834e6e36c3SMike Snitzer 	return NULL;
2784e6ee8c0bSKiyoshi Ueda }
2785e6ee8c0bSKiyoshi Ueda 
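/*
 * A hedged sketch of how a bio-based table might size its pools: no
 * integrity support, per_io_data_size bytes of per-bio target context,
 * and the default minimum pool size.  example_alloc_bio_pools is
 * hypothetical; the real caller sits in dm-table.c.
 */
static struct dm_md_mempools *example_alloc_bio_pools(struct mapped_device *md,
						      unsigned per_io_data_size)
{
	return dm_alloc_md_mempools(md, DM_TYPE_BIO_BASED, 0,
				    per_io_data_size, 0);
}
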
2786e6ee8c0bSKiyoshi Ueda void dm_free_md_mempools(struct dm_md_mempools *pools)
2787e6ee8c0bSKiyoshi Ueda {
2788e6ee8c0bSKiyoshi Ueda 	if (!pools)
2789e6ee8c0bSKiyoshi Ueda 		return;
2790e6ee8c0bSKiyoshi Ueda 
27916f1c819cSKent Overstreet 	bioset_exit(&pools->bs);
27926f1c819cSKent Overstreet 	bioset_exit(&pools->io_bs);
2793e6ee8c0bSKiyoshi Ueda 
2794e6ee8c0bSKiyoshi Ueda 	kfree(pools);
2795e6ee8c0bSKiyoshi Ueda }
2796e6ee8c0bSKiyoshi Ueda 
27979c72bad1SChristoph Hellwig struct dm_pr {
27989c72bad1SChristoph Hellwig 	u64	old_key;
27999c72bad1SChristoph Hellwig 	u64	new_key;
28009c72bad1SChristoph Hellwig 	u32	flags;
28019c72bad1SChristoph Hellwig 	bool	fail_early;
28029c72bad1SChristoph Hellwig };
28039c72bad1SChristoph Hellwig 
28049c72bad1SChristoph Hellwig static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn,
28059c72bad1SChristoph Hellwig 		      void *data)
28069c72bad1SChristoph Hellwig {
28079c72bad1SChristoph Hellwig 	struct mapped_device *md = bdev->bd_disk->private_data;
28089c72bad1SChristoph Hellwig 	struct dm_table *table;
28099c72bad1SChristoph Hellwig 	struct dm_target *ti;
28109c72bad1SChristoph Hellwig 	int ret = -ENOTTY, srcu_idx;
28119c72bad1SChristoph Hellwig 
28129c72bad1SChristoph Hellwig 	table = dm_get_live_table(md, &srcu_idx);
28139c72bad1SChristoph Hellwig 	if (!table || !dm_table_get_size(table))
28149c72bad1SChristoph Hellwig 		goto out;
28159c72bad1SChristoph Hellwig 
28169c72bad1SChristoph Hellwig 	/* We only support devices that have a single target */
28179c72bad1SChristoph Hellwig 	if (dm_table_get_num_targets(table) != 1)
28189c72bad1SChristoph Hellwig 		goto out;
28199c72bad1SChristoph Hellwig 	ti = dm_table_get_target(table, 0);
28209c72bad1SChristoph Hellwig 
28219c72bad1SChristoph Hellwig 	ret = -EINVAL;
28229c72bad1SChristoph Hellwig 	if (!ti->type->iterate_devices)
28239c72bad1SChristoph Hellwig 		goto out;
28249c72bad1SChristoph Hellwig 
28259c72bad1SChristoph Hellwig 	ret = ti->type->iterate_devices(ti, fn, data);
28269c72bad1SChristoph Hellwig out:
28279c72bad1SChristoph Hellwig 	dm_put_live_table(md, srcu_idx);
28289c72bad1SChristoph Hellwig 	return ret;
28299c72bad1SChristoph Hellwig }
28309c72bad1SChristoph Hellwig 
28319c72bad1SChristoph Hellwig /*
28329c72bad1SChristoph Hellwig  * For register / unregister we need to manually call out to every path.
28339c72bad1SChristoph Hellwig  */
28349c72bad1SChristoph Hellwig static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev,
28359c72bad1SChristoph Hellwig 			    sector_t start, sector_t len, void *data)
28369c72bad1SChristoph Hellwig {
28379c72bad1SChristoph Hellwig 	struct dm_pr *pr = data;
28389c72bad1SChristoph Hellwig 	const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
28399c72bad1SChristoph Hellwig 
28409c72bad1SChristoph Hellwig 	if (!ops || !ops->pr_register)
28419c72bad1SChristoph Hellwig 		return -EOPNOTSUPP;
28429c72bad1SChristoph Hellwig 	return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags);
28439c72bad1SChristoph Hellwig }
28449c72bad1SChristoph Hellwig 
284571cdb697SChristoph Hellwig static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
284671cdb697SChristoph Hellwig 			  u32 flags)
284771cdb697SChristoph Hellwig {
28489c72bad1SChristoph Hellwig 	struct dm_pr pr = {
28499c72bad1SChristoph Hellwig 		.old_key	= old_key,
28509c72bad1SChristoph Hellwig 		.new_key	= new_key,
28519c72bad1SChristoph Hellwig 		.flags		= flags,
28529c72bad1SChristoph Hellwig 		.fail_early	= true,
28539c72bad1SChristoph Hellwig 	};
28549c72bad1SChristoph Hellwig 	int ret;
285571cdb697SChristoph Hellwig 
28569c72bad1SChristoph Hellwig 	ret = dm_call_pr(bdev, __dm_pr_register, &pr);
28579c72bad1SChristoph Hellwig 	if (ret && new_key) {
28589c72bad1SChristoph Hellwig 		/* unregister all paths if we failed to register any path */
28599c72bad1SChristoph Hellwig 		pr.old_key = new_key;
28609c72bad1SChristoph Hellwig 		pr.new_key = 0;
28619c72bad1SChristoph Hellwig 		pr.flags = 0;
28629c72bad1SChristoph Hellwig 		pr.fail_early = false;
28639c72bad1SChristoph Hellwig 		dm_call_pr(bdev, __dm_pr_register, &pr);
28649c72bad1SChristoph Hellwig 	}
286571cdb697SChristoph Hellwig 
28669c72bad1SChristoph Hellwig 	return ret;
286771cdb697SChristoph Hellwig }
286871cdb697SChristoph Hellwig 
286971cdb697SChristoph Hellwig static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
287071cdb697SChristoph Hellwig 			 u32 flags)
287171cdb697SChristoph Hellwig {
287271cdb697SChristoph Hellwig 	struct mapped_device *md = bdev->bd_disk->private_data;
287371cdb697SChristoph Hellwig 	const struct pr_ops *ops;
2874971888c4SMike Snitzer 	int r, srcu_idx;
287571cdb697SChristoph Hellwig 
28765bd5e8d8SMike Snitzer 	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
287771cdb697SChristoph Hellwig 	if (r < 0)
2878971888c4SMike Snitzer 		goto out;
287971cdb697SChristoph Hellwig 
288071cdb697SChristoph Hellwig 	ops = bdev->bd_disk->fops->pr_ops;
288171cdb697SChristoph Hellwig 	if (ops && ops->pr_reserve)
288271cdb697SChristoph Hellwig 		r = ops->pr_reserve(bdev, key, type, flags);
288371cdb697SChristoph Hellwig 	else
288471cdb697SChristoph Hellwig 		r = -EOPNOTSUPP;
2885971888c4SMike Snitzer out:
2886971888c4SMike Snitzer 	dm_unprepare_ioctl(md, srcu_idx);
288771cdb697SChristoph Hellwig 	return r;
288871cdb697SChristoph Hellwig }
288971cdb697SChristoph Hellwig 
289071cdb697SChristoph Hellwig static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
289171cdb697SChristoph Hellwig {
289271cdb697SChristoph Hellwig 	struct mapped_device *md = bdev->bd_disk->private_data;
289371cdb697SChristoph Hellwig 	const struct pr_ops *ops;
2894971888c4SMike Snitzer 	int r, srcu_idx;
289571cdb697SChristoph Hellwig 
28965bd5e8d8SMike Snitzer 	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
289771cdb697SChristoph Hellwig 	if (r < 0)
2898971888c4SMike Snitzer 		goto out;
289971cdb697SChristoph Hellwig 
290071cdb697SChristoph Hellwig 	ops = bdev->bd_disk->fops->pr_ops;
290171cdb697SChristoph Hellwig 	if (ops && ops->pr_release)
290271cdb697SChristoph Hellwig 		r = ops->pr_release(bdev, key, type);
290371cdb697SChristoph Hellwig 	else
290471cdb697SChristoph Hellwig 		r = -EOPNOTSUPP;
2905971888c4SMike Snitzer out:
2906971888c4SMike Snitzer 	dm_unprepare_ioctl(md, srcu_idx);
290771cdb697SChristoph Hellwig 	return r;
290871cdb697SChristoph Hellwig }
290971cdb697SChristoph Hellwig 
291071cdb697SChristoph Hellwig static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
291171cdb697SChristoph Hellwig 			 enum pr_type type, bool abort)
291271cdb697SChristoph Hellwig {
291371cdb697SChristoph Hellwig 	struct mapped_device *md = bdev->bd_disk->private_data;
291471cdb697SChristoph Hellwig 	const struct pr_ops *ops;
2915971888c4SMike Snitzer 	int r, srcu_idx;
291671cdb697SChristoph Hellwig 
29175bd5e8d8SMike Snitzer 	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
291871cdb697SChristoph Hellwig 	if (r < 0)
2919971888c4SMike Snitzer 		goto out;
292071cdb697SChristoph Hellwig 
292171cdb697SChristoph Hellwig 	ops = bdev->bd_disk->fops->pr_ops;
292271cdb697SChristoph Hellwig 	if (ops && ops->pr_preempt)
292371cdb697SChristoph Hellwig 		r = ops->pr_preempt(bdev, old_key, new_key, type, abort);
292471cdb697SChristoph Hellwig 	else
292571cdb697SChristoph Hellwig 		r = -EOPNOTSUPP;
2926971888c4SMike Snitzer out:
2927971888c4SMike Snitzer 	dm_unprepare_ioctl(md, srcu_idx);
292871cdb697SChristoph Hellwig 	return r;
292971cdb697SChristoph Hellwig }
293071cdb697SChristoph Hellwig 
293171cdb697SChristoph Hellwig static int dm_pr_clear(struct block_device *bdev, u64 key)
293271cdb697SChristoph Hellwig {
293371cdb697SChristoph Hellwig 	struct mapped_device *md = bdev->bd_disk->private_data;
293471cdb697SChristoph Hellwig 	const struct pr_ops *ops;
2935971888c4SMike Snitzer 	int r, srcu_idx;
293671cdb697SChristoph Hellwig 
29375bd5e8d8SMike Snitzer 	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
293871cdb697SChristoph Hellwig 	if (r < 0)
2939971888c4SMike Snitzer 		goto out;
294071cdb697SChristoph Hellwig 
294171cdb697SChristoph Hellwig 	ops = bdev->bd_disk->fops->pr_ops;
294271cdb697SChristoph Hellwig 	if (ops && ops->pr_clear)
294371cdb697SChristoph Hellwig 		r = ops->pr_clear(bdev, key);
294471cdb697SChristoph Hellwig 	else
294571cdb697SChristoph Hellwig 		r = -EOPNOTSUPP;
2946971888c4SMike Snitzer out:
2947971888c4SMike Snitzer 	dm_unprepare_ioctl(md, srcu_idx);
294871cdb697SChristoph Hellwig 	return r;
294971cdb697SChristoph Hellwig }
295071cdb697SChristoph Hellwig 
295171cdb697SChristoph Hellwig static const struct pr_ops dm_pr_ops = {
295271cdb697SChristoph Hellwig 	.pr_register	= dm_pr_register,
295371cdb697SChristoph Hellwig 	.pr_reserve	= dm_pr_reserve,
295471cdb697SChristoph Hellwig 	.pr_release	= dm_pr_release,
295571cdb697SChristoph Hellwig 	.pr_preempt	= dm_pr_preempt,
295671cdb697SChristoph Hellwig 	.pr_clear	= dm_pr_clear,
295771cdb697SChristoph Hellwig };
295871cdb697SChristoph Hellwig 
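/*
 * A hedged userspace sketch (kept inside a comment because it is not
 * kernel code) of exercising these pr_ops through the generic
 * block-layer persistent-reservation ioctls from <linux/pr.h>; the
 * device path is illustrative.
 *
 *	#include <linux/pr.h>
 *	#include <sys/ioctl.h>
 *	#include <fcntl.h>
 *
 *	int example_pr_register(void)
 *	{
 *		struct pr_registration reg = { .new_key = 0xabc1, .flags = 0 };
 *		int fd = open("/dev/dm-0", O_RDWR);
 *
 *		if (fd < 0)
 *			return -1;
 *		return ioctl(fd, IOC_PR_REGISTER, &reg);
 *	}
 */
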
295983d5cde4SAlexey Dobriyan static const struct block_device_operations dm_blk_dops = {
2960c62b37d9SChristoph Hellwig 	.submit_bio = dm_submit_bio,
29611da177e4SLinus Torvalds 	.open = dm_blk_open,
29621da177e4SLinus Torvalds 	.release = dm_blk_close,
2963aa129a22SMilan Broz 	.ioctl = dm_blk_ioctl,
29643ac51e74SDarrick J. Wong 	.getgeo = dm_blk_getgeo,
2965e76239a3SChristoph Hellwig 	.report_zones = dm_blk_report_zones,
296671cdb697SChristoph Hellwig 	.pr_ops = &dm_pr_ops,
29671da177e4SLinus Torvalds 	.owner = THIS_MODULE
29681da177e4SLinus Torvalds };
29691da177e4SLinus Torvalds 
2970681cc5e8SMike Snitzer static const struct block_device_operations dm_rq_blk_dops = {
2971681cc5e8SMike Snitzer 	.open = dm_blk_open,
2972681cc5e8SMike Snitzer 	.release = dm_blk_close,
2973681cc5e8SMike Snitzer 	.ioctl = dm_blk_ioctl,
2974681cc5e8SMike Snitzer 	.getgeo = dm_blk_getgeo,
2975681cc5e8SMike Snitzer 	.pr_ops = &dm_pr_ops,
2976681cc5e8SMike Snitzer 	.owner = THIS_MODULE
2977681cc5e8SMike Snitzer };
2978681cc5e8SMike Snitzer 
2979f26c5719SDan Williams static const struct dax_operations dm_dax_ops = {
2980f26c5719SDan Williams 	.direct_access = dm_dax_direct_access,
2981cdf6cdcdSVivek Goyal 	.zero_page_range = dm_dax_zero_page_range,
2982f26c5719SDan Williams };
2983f26c5719SDan Williams 
29841da177e4SLinus Torvalds /*
29851da177e4SLinus Torvalds  * module hooks
29861da177e4SLinus Torvalds  */
29871da177e4SLinus Torvalds module_init(dm_init);
29881da177e4SLinus Torvalds module_exit(dm_exit);
29891da177e4SLinus Torvalds 
29901da177e4SLinus Torvalds module_param(major, uint, 0);
29911da177e4SLinus Torvalds MODULE_PARM_DESC(major, "The major number of the device mapper");
2992f4790826SMike Snitzer 
2993e8603136SMike Snitzer module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
2994e8603136SMike Snitzer MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
2995e8603136SMike Snitzer 
2996115485e8SMike Snitzer module_param(dm_numa_node, int, S_IRUGO | S_IWUSR);
2997115485e8SMike Snitzer MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");
2998115485e8SMike Snitzer 
2999a666e5c0SMikulas Patocka module_param(swap_bios, int, S_IRUGO | S_IWUSR);
3000a666e5c0SMikulas Patocka MODULE_PARM_DESC(swap_bios, "Maximum allowed inflight swap IOs");
3001a666e5c0SMikulas Patocka 
30021da177e4SLinus Torvalds MODULE_DESCRIPTION(DM_NAME " driver");
30031da177e4SLinus Torvalds MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
30041da177e4SLinus Torvalds MODULE_LICENSE("GPL");
3005