/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/kthread.h>
#include <linux/ktime.h>
#include <linux/elevator.h> /* for rq_end_sector() */
#include <linux/blk-mq.h>
#include <linux/pr.h>

#include <trace/events/block.h>

#define DM_MSG_PREFIX "core"

#ifdef CONFIG_PRINTK
/*
 * ratelimit state to be used in DMXXX_LIMIT().
 */
DEFINE_RATELIMIT_STATE(dm_ratelimit_state,
		       DEFAULT_RATELIMIT_INTERVAL,
		       DEFAULT_RATELIMIT_BURST);
EXPORT_SYMBOL(dm_ratelimit_state);
#endif

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_IDR(_minor_idr);

static DEFINE_SPINLOCK(_minor_lock);

static void do_deferred_remove(struct work_struct *w);

static DECLARE_WORK(deferred_remove_work, do_deferred_remove);

static struct workqueue_struct *deferred_remove_workqueue;

/*
 * For bio-based dm.
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	atomic_t io_count;
	struct bio *bio;
	unsigned long start_time;
	spinlock_t endio_lock;
	struct dm_stats_aux stats_aux;
};

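/*
 * A sketch of the dm_io lifecycle (simplified): io_count is a
 * reference count over the clone bios created for the original bio.
 * Roughly:
 *
 *	io = alloc_io(md);
 *	atomic_set(&io->io_count, 1);		<-- base reference
 *	... one atomic_inc(&io->io_count) per clone bio mapped ...
 *	... each completing clone calls dec_pending(io, error) ...
 *	dec_pending(io, 0);			<-- drop the base reference
 *
 * The original bio is completed once io_count reaches zero.
 */
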
/*
 * For request-based dm.
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
	struct mapped_device *md;
	struct dm_target *ti;
	struct request *orig, *clone;
	struct kthread_work work;
	int error;
	union map_info info;
	struct dm_stats_aux stats_aux;
	unsigned long duration_jiffies;
	unsigned n_sectors;
};

/*
 * For request-based dm - the bio clones we allocate are embedded in these
 * structs.
 *
 * We allocate these with bio_alloc_bioset, using the front_pad parameter when
 * the bioset is created - this means the bio has to come at the end of the
 * struct.
 */
struct dm_rq_clone_bio_info {
	struct bio *orig;
	struct dm_rq_target_io *tio;
	struct bio clone;
};

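/*
 * A minimal sketch of how the embedded clone above is recovered
 * (assuming the bioset was created roughly like this):
 *
 *	bs = bioset_create(pool_size,
 *			   offsetof(struct dm_rq_clone_bio_info, clone));
 *	bio = bio_alloc_bioset(GFP_NOIO, nr_iovecs, bs);
 *	info = container_of(bio, struct dm_rq_clone_bio_info, clone);
 *
 * front_pad reserves space in front of every bio allocated from the
 * bioset, which is why the clone must be the last member.
 */
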
#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7

/*
 * Work processed by per-device workqueue.
 */
struct mapped_device {
	struct srcu_struct io_barrier;
	struct mutex suspend_lock;

	/*
	 * The current mapping (struct dm_table *).
	 * Use dm_get_live_table{_fast} or take suspend_lock for
	 * dereference.
	 */
	void __rcu *map;

	struct list_head table_devices;
	struct mutex table_devices_lock;

	unsigned long flags;

	struct request_queue *queue;
	int numa_node_id;

	unsigned type;
	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;

	atomic_t holders;
	atomic_t open_count;

	struct dm_target *immutable_target;
	struct target_type *immutable_target_type;

	struct gendisk *disk;
	char name[16];

	void *interface_ptr;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	atomic_t pending[2];
	wait_queue_head_t wait;
	struct work_struct work;
	spinlock_t deferred_lock;
	struct bio_list deferred;

	/*
	 * Event handling.
	 */
	wait_queue_head_t eventq;
	atomic_t event_nr;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/* the number of internal suspends */
	unsigned internal_suspend_count;

	/*
	 * Processing queue (flush)
	 */
	struct workqueue_struct *wq;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;
	mempool_t *rq_pool;

	struct bio_set *bs;

	/*
	 * freeze/thaw support requires holding onto a super block
	 */
	struct super_block *frozen_sb;

	/* forced geometry settings */
	struct hd_geometry geometry;

	struct block_device *bdev;

	/* kobject and completion */
	struct dm_kobject_holder kobj_holder;

	/* zero-length flush that will be cloned and submitted to targets */
	struct bio flush_bio;

	struct dm_stats stats;

	struct kthread_worker kworker;
	struct task_struct *kworker_task;

	/* for request-based merge heuristic in dm_request_fn() */
	unsigned seq_rq_merge_deadline_usecs;
	int last_rq_rw;
	sector_t last_rq_pos;
	ktime_t last_rq_start_time;

	/* for blk-mq request-based DM support */
	struct blk_mq_tag_set *tag_set;
	bool use_blk_mq:1;
	bool init_tio_pdu:1;
};

#ifdef CONFIG_DM_MQ_DEFAULT
static bool use_blk_mq = true;
#else
static bool use_blk_mq = false;
#endif

#define DM_MQ_NR_HW_QUEUES 1
#define DM_MQ_QUEUE_DEPTH 2048
#define DM_NUMA_NODE NUMA_NO_NODE

static unsigned dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES;
static unsigned dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;
static int dm_numa_node = DM_NUMA_NODE;

bool dm_use_blk_mq(struct mapped_device *md)
{
	return md->use_blk_mq;
}
EXPORT_SYMBOL_GPL(dm_use_blk_mq);

/*
 * For mempool preallocation at table load time.
 */
struct dm_md_mempools {
	mempool_t *io_pool;
	mempool_t *rq_pool;
	struct bio_set *bs;
};

struct table_device {
	struct list_head list;
	atomic_t count;
	struct dm_dev dm_dev;
};

#define RESERVED_BIO_BASED_IOS		16
#define RESERVED_REQUEST_BASED_IOS	256
#define RESERVED_MAX_IOS		1024
static struct kmem_cache *_io_cache;
static struct kmem_cache *_rq_tio_cache;
static struct kmem_cache *_rq_cache;

/*
 * Bio-based DM's mempools' reserved IOs set by the user.
 */
static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;

/*
 * Request-based DM's mempools' reserved IOs set by the user.
 */
static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;

static int __dm_get_module_param_int(int *module_param, int min, int max)
{
	int param = ACCESS_ONCE(*module_param);
	int modified_param = 0;
	bool modified = true;

	if (param < min)
		modified_param = min;
	else if (param > max)
		modified_param = max;
	else
		modified = false;

	if (modified) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

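/*
 * Example of the clamping above: with min == -1 and max == 1 (a
 * two-node machine), a module parameter set to 99 is rewritten to 1.
 * The lockless cmpxchg() only stores the clamped value if nobody else
 * changed the parameter in the meantime; either way the caller gets a
 * value within [min, max].
 */
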
static unsigned __dm_get_module_param(unsigned *module_param,
				      unsigned def, unsigned max)
{
	unsigned param = ACCESS_ONCE(*module_param);
	unsigned modified_param = 0;

	if (!param)
		modified_param = def;
	else if (param > max)
		modified_param = max;

	if (modified_param) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned dm_get_reserved_bio_based_ios(void)
{
	return __dm_get_module_param(&reserved_bio_based_ios,
				     RESERVED_BIO_BASED_IOS, RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);

unsigned dm_get_reserved_rq_based_ios(void)
{
	return __dm_get_module_param(&reserved_rq_based_ios,
				     RESERVED_REQUEST_BASED_IOS, RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);

static unsigned dm_get_blk_mq_nr_hw_queues(void)
{
	return __dm_get_module_param(&dm_mq_nr_hw_queues, 1, 32);
}

static unsigned dm_get_blk_mq_queue_depth(void)
{
	return __dm_get_module_param(&dm_mq_queue_depth,
				     DM_MQ_QUEUE_DEPTH, BLK_MQ_MAX_DEPTH);
}

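/*
 * Note: NUMA_NO_NODE (-1) is a valid result below; it means "no
 * preferred node" and is passed through to kmalloc_node() and friends.
 */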
static unsigned dm_get_numa_node(void)
{
	return __dm_get_module_param_int(&dm_numa_node,
					 DM_NUMA_NODE, num_online_nodes() - 1);
}

static int __init local_init(void)
{
	int r = -ENOMEM;

	/* allocate a slab for the dm_ios */
	_io_cache = KMEM_CACHE(dm_io, 0);
	if (!_io_cache)
		return r;

	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
	if (!_rq_tio_cache)
		goto out_free_io_cache;

	_rq_cache = kmem_cache_create("dm_old_clone_request", sizeof(struct request),
				      __alignof__(struct request), 0, NULL);
	if (!_rq_cache)
		goto out_free_rq_tio_cache;

	r = dm_uevent_init();
	if (r)
		goto out_free_rq_cache;

	deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
	if (!deferred_remove_workqueue) {
		r = -ENOMEM;
		goto out_uevent_exit;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_free_workqueue;

	if (!_major)
		_major = r;

	return 0;

out_free_workqueue:
	destroy_workqueue(deferred_remove_workqueue);
out_uevent_exit:
	dm_uevent_exit();
out_free_rq_cache:
	kmem_cache_destroy(_rq_cache);
out_free_rq_tio_cache:
	kmem_cache_destroy(_rq_tio_cache);
out_free_io_cache:
	kmem_cache_destroy(_io_cache);

	return r;
}

static void local_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(deferred_remove_workqueue);

	kmem_cache_destroy(_rq_cache);
	kmem_cache_destroy(_rq_tio_cache);
	kmem_cache_destroy(_io_cache);
	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_io_init,
	dm_kcopyd_init,
	dm_interface_init,
	dm_statistics_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_io_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
	dm_statistics_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();

	/*
	 * Should be empty by this point.
	 */
	idr_destroy(&_minor_idr);
}

/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{
	return test_bit(DMF_DELETING, &md->flags);
}

static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);
out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}

static void dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = disk->private_data;
	if (WARN_ON(!md))
		goto out;

	if (atomic_dec_and_test(&md->open_count) &&
	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
		queue_work(deferred_remove_workqueue, &deferred_remove_work);

	dm_put(md);
out:
	spin_unlock(&_minor_lock);
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md)) {
		r = -EBUSY;
		if (mark_deferred)
			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
		r = -EEXIST;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

int dm_cancel_deferred_remove(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (test_bit(DMF_DELETING, &md->flags))
		r = -EBUSY;
	else
		clear_bit(DMF_DEFERRED_REMOVE, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static void do_deferred_remove(struct work_struct *w)
{
	dm_deferred_remove();
}

sector_t dm_get_size(struct mapped_device *md)
{
	return get_capacity(md->disk);
}

struct request_queue *dm_get_md_queue(struct mapped_device *md)
{
	return md->queue;
}

struct dm_stats *dm_get_stats(struct mapped_device *md)
{
	return &md->stats;
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

static int dm_grab_bdev_for_ioctl(struct mapped_device *md,
				  struct block_device **bdev,
				  fmode_t *mode)
{
	struct dm_target *tgt;
	struct dm_table *map;
	int srcu_idx, r;

retry:
	r = -ENOTTY;
	map = dm_get_live_table(md, &srcu_idx);
	if (!map || !dm_table_get_size(map))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		goto out;

	tgt = dm_table_get_target(map, 0);
	if (!tgt->type->prepare_ioctl)
		goto out;

	if (dm_suspended_md(md)) {
		r = -EAGAIN;
		goto out;
	}

	r = tgt->type->prepare_ioctl(tgt, bdev, mode);
	if (r < 0)
		goto out;

	bdgrab(*bdev);
	dm_put_live_table(md, srcu_idx);
	return r;

out:
	dm_put_live_table(md, srcu_idx);
	if (r == -ENOTCONN && !fatal_signal_pending(current)) {
		msleep(10);
		goto retry;
	}
	return r;
}

static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	int r;

	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
	if (r < 0)
		return r;

	if (r > 0) {
		/*
		 * Target determined this ioctl is being issued against
		 * a logical partition of the parent bdev; so extra
		 * validation is needed.
		 */
		r = scsi_verify_blk_ioctl(NULL, cmd);
		if (r)
			goto out;
	}

	r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
out:
	bdput(bdev);
	return r;
}

static struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static void free_tio(struct dm_target_io *tio)
{
	bio_put(&tio->clone);
}

static struct dm_rq_target_io *alloc_old_rq_tio(struct mapped_device *md,
						gfp_t gfp_mask)
{
	return mempool_alloc(md->io_pool, gfp_mask);
}

static void free_old_rq_tio(struct dm_rq_target_io *tio)
{
	mempool_free(tio, tio->md->io_pool);
}

static struct request *alloc_old_clone_request(struct mapped_device *md,
					       gfp_t gfp_mask)
{
	return mempool_alloc(md->rq_pool, gfp_mask);
}

static void free_old_clone_request(struct mapped_device *md, struct request *rq)
{
	mempool_free(rq, md->rq_pool);
}

static int md_in_flight(struct mapped_device *md)
{
	return atomic_read(&md->pending[READ]) +
	       atomic_read(&md->pending[WRITE]);
}

static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	int cpu;
	int rw = bio_data_dir(bio);

	io->start_time = jiffies;

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_unlock();
	atomic_set(&dm_disk(md)->part0.in_flight[rw],
		atomic_inc_return(&md->pending[rw]));

	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
				    bio_sectors(bio), false, 0, &io->stats_aux);
}

static void end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	unsigned long duration = jiffies - io->start_time;
	int pending;
	int rw = bio_data_dir(bio);

	generic_end_io_acct(rw, &dm_disk(md)->part0, io->start_time);

	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
				    bio_sectors(bio), true, duration, &io->stats_aux);

	/*
	 * After this is decremented the bio must not be touched if it is
	 * a flush.
	 */
	pending = atomic_dec_return(&md->pending[rw]);
	atomic_set(&dm_disk(md)->part0.in_flight[rw], pending);
	pending += atomic_read(&md->pending[rw^0x1]);

	/* nudge anyone waiting on suspend queue */
	if (!pending)
		wake_up(&md->wait);
}

/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&md->deferred_lock, flags);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irqrestore(&md->deferred_lock, flags);
	queue_work(md->wq, &md->work);
}

/*
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_put_live_table() when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
{
	*srcu_idx = srcu_read_lock(&md->io_barrier);

	return srcu_dereference(md->map, &md->io_barrier);
}

void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
{
	srcu_read_unlock(&md->io_barrier, srcu_idx);
}

void dm_sync_table(struct mapped_device *md)
{
	synchronize_srcu(&md->io_barrier);
	synchronize_rcu_expedited();
}

/*
 * A fast alternative to dm_get_live_table/dm_put_live_table.
 * The caller must not block between these two functions.
 */
static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
{
	rcu_read_lock();
	return rcu_dereference(md->map);
}

static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
{
	rcu_read_unlock();
}

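/*
 * Typical usage of the accessors above (a minimal sketch):
 *
 *	int srcu_idx;
 *	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 *
 *	if (map) {
 *		... use map, may sleep ...
 *	}
 *	dm_put_live_table(md, srcu_idx);
 *
 * The _fast variants take rcu_read_lock() instead, so the caller must
 * not sleep between the get and the put.
 */
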
/*
 * Open a table device so we can use it as a map destination.
 */
static int open_table_device(struct table_device *td, dev_t dev,
			     struct mapped_device *md)
{
	static char *_claim_ptr = "I belong to device-mapper";
	struct block_device *bdev;

	int r;

	BUG_ON(td->dm_dev.bdev);

	bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _claim_ptr);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	r = bd_link_disk_holder(bdev, dm_disk(md));
	if (r) {
		blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
		return r;
	}

	td->dm_dev.bdev = bdev;
	return 0;
}

/*
 * Close a table device that we've been using.
 */
static void close_table_device(struct table_device *td, struct mapped_device *md)
{
	if (!td->dm_dev.bdev)
		return;

	bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
	blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
	td->dm_dev.bdev = NULL;
}

static struct table_device *find_table_device(struct list_head *l, dev_t dev,
					      fmode_t mode)
{
	struct table_device *td;

	list_for_each_entry(td, l, list)
		if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
			return td;

	return NULL;
}

int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
			struct dm_dev **result)
{
	int r;
	struct table_device *td;

	mutex_lock(&md->table_devices_lock);
	td = find_table_device(&md->table_devices, dev, mode);
	if (!td) {
		td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
		if (!td) {
			mutex_unlock(&md->table_devices_lock);
			return -ENOMEM;
		}

		td->dm_dev.mode = mode;
		td->dm_dev.bdev = NULL;

		if ((r = open_table_device(td, dev, md))) {
			mutex_unlock(&md->table_devices_lock);
			kfree(td);
			return r;
		}

		format_dev_t(td->dm_dev.name, dev);

		atomic_set(&td->count, 0);
		list_add(&td->list, &md->table_devices);
	}
	atomic_inc(&td->count);
	mutex_unlock(&md->table_devices_lock);

	*result = &td->dm_dev;
	return 0;
}
EXPORT_SYMBOL_GPL(dm_get_table_device);

void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
{
	struct table_device *td = container_of(d, struct table_device, dm_dev);

	mutex_lock(&md->table_devices_lock);
	if (atomic_dec_and_test(&td->count)) {
		close_table_device(td, md);
		list_del(&td->list);
		kfree(td);
	}
	mutex_unlock(&md->table_devices_lock);
}
EXPORT_SYMBOL(dm_put_table_device);

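/*
 * Typical usage of the pair above (a minimal sketch; error handling
 * elided, and the dev_t would normally come from a table argument):
 *
 *	struct dm_dev *dev;
 *
 *	r = dm_get_table_device(md, dev_t_value, FMODE_READ, &dev);
 *	... use dev->bdev ...
 *	dm_put_table_device(md, dev);
 *
 * Opens are refcounted per (dev_t, mode) pair, so repeated gets of
 * the same device share a single open of the underlying bdev.
 */
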
static void free_table_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct table_device *td = list_entry(tmp, struct table_device, list);

		DMWARN("dm_destroy: %s still exists with %d references",
		       td->dm_dev.name, atomic_read(&td->count));
		kfree(td);
	}
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

/*-----------------------------------------------------------------
 * CRUD START:
 *   A more elegant solution is in the works that uses the queue
 *   merge fn; unfortunately there are a couple of changes to
 *   the block layer that I want to make for this.  So in the
 *   interests of getting something for people to use I give
 *   you this clearly demarcated crap.
 *---------------------------------------------------------------*/

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, int error)
{
	unsigned long flags;
	int io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;

	/* Push-back supersedes any I/O errors */
	if (unlikely(error)) {
		spin_lock_irqsave(&io->endio_lock, flags);
		if (!(io->error > 0 && __noflush_suspending(md)))
			io->error = error;
		spin_unlock_irqrestore(&io->endio_lock, flags);
	}

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->error == DM_ENDIO_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md))
				bio_list_add_head(&md->deferred, io->bio);
			else
				/* noflush suspend was interrupted. */
				io->error = -EIO;
			spin_unlock_irqrestore(&md->deferred_lock, flags);
		}

		io_error = io->error;
		bio = io->bio;
		end_io_acct(io);
		free_io(md, io);

		if (io_error == DM_ENDIO_REQUEUE)
			return;

		if ((bio->bi_rw & REQ_FLUSH) && bio->bi_iter.bi_size) {
			/*
			 * Preflush done for flush with data, reissue
			 * without REQ_FLUSH.
			 */
			bio->bi_rw &= ~REQ_FLUSH;
			queue_io(md, bio);
		} else {
			/* done with normal IO or empty flush */
			trace_block_bio_complete(md->queue, bio, io_error);
			bio->bi_error = io_error;
			bio_endio(bio);
		}
	}
}

static void disable_write_same(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE SAME, disable it */
	limits->max_write_same_sectors = 0;
}

static void clone_endio(struct bio *bio)
{
	int error = bio->bi_error;
	int r = error;
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (endio) {
		r = endio(tio->ti, bio, error);
		if (r < 0 || r == DM_ENDIO_REQUEUE)
			/*
			 * Errors and requeues are handled
			 * in dec_pending().
			 */
			error = r;
		else if (r == DM_ENDIO_INCOMPLETE)
			/* The target will handle the io */
			return;
		else if (r) {
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	if (unlikely(r == -EREMOTEIO && (bio->bi_rw & REQ_WRITE_SAME) &&
		     !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors))
		disable_write_same(md);

	free_tio(tio);
	dec_pending(io, error);
}

/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone)
{
	struct dm_rq_clone_bio_info *info =
		container_of(clone, struct dm_rq_clone_bio_info, clone);
	struct dm_rq_target_io *tio = info->tio;
	struct bio *bio = info->orig;
	unsigned int nr_bytes = info->orig->bi_iter.bi_size;
	int error = clone->bi_error;

	bio_put(clone);

	if (tio->error)
		/*
		 * An error has already been detected on the request.
		 * Once an error has occurred, just let clone->end_io()
		 * handle the remainder.
		 */
		return;
	else if (error) {
		/*
		 * Don't report the error to the upper layer yet.
		 * The error handling decision is made by the target driver
		 * when the request is completed.
		 */
		tio->error = error;
		return;
	}

	/*
	 * I/O for the bio successfully completed.
	 * Report the data completion to the upper layer.
	 */

	/*
	 * bios are processed from the head of the list.
	 * So the completing bio should always be rq->bio.
	 * If it's not, something is wrong.
	 */
	if (tio->orig->bio != bio)
		DMERR("bio completion is going in the middle of the request");

	/*
	 * Update the original request.
	 * Do not use blk_end_request() here, because it may complete
	 * the original request before the clone, and break the ordering.
	 */
	blk_update_request(tio->orig, 0, nr_bytes);
}

static struct dm_rq_target_io *tio_from_request(struct request *rq)
{
	return (rq->q->mq_ops ? blk_mq_rq_to_pdu(rq) : rq->special);
}

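/*
 * For blk-mq the target io lives in the per-request payload allocated
 * alongside the request (blk_mq_rq_to_pdu()); on the old request_fn
 * path it is stashed in rq->special during prep.
 */
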
1119e262f347SMikulas Patocka static void rq_end_stats(struct mapped_device *md, struct request *orig)
1120e262f347SMikulas Patocka {
1121e262f347SMikulas Patocka 	if (unlikely(dm_stats_used(&md->stats))) {
1122e262f347SMikulas Patocka 		struct dm_rq_target_io *tio = tio_from_request(orig);
1123e262f347SMikulas Patocka 		tio->duration_jiffies = jiffies - tio->duration_jiffies;
1124e262f347SMikulas Patocka 		dm_stats_account_io(&md->stats, orig->cmd_flags, blk_rq_pos(orig),
1125e262f347SMikulas Patocka 				    tio->n_sectors, true, tio->duration_jiffies,
1126e262f347SMikulas Patocka 				    &tio->stats_aux);
1127e262f347SMikulas Patocka 	}
1128e262f347SMikulas Patocka }
1129e262f347SMikulas Patocka 
1130cec47e3dSKiyoshi Ueda /*
1131cec47e3dSKiyoshi Ueda  * Don't touch any member of the md after calling this function because
1132cec47e3dSKiyoshi Ueda  * the md may be freed in dm_put() at the end of this function.
1133cec47e3dSKiyoshi Ueda  * Or do dm_get() before calling this function and dm_put() later.
1134cec47e3dSKiyoshi Ueda  */
1135466d89a6SKeith Busch static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
1136cec47e3dSKiyoshi Ueda {
1137b4324feeSKiyoshi Ueda 	atomic_dec(&md->pending[rw]);
1138cec47e3dSKiyoshi Ueda 
1139cec47e3dSKiyoshi Ueda 	/* nudge anyone waiting on suspend queue */
1140621739b0SMike Snitzer 	if (!md_in_flight(md))
1141cec47e3dSKiyoshi Ueda 		wake_up(&md->wait);
1142cec47e3dSKiyoshi Ueda 
1143a8c32a5cSJens Axboe 	/*
1144a8c32a5cSJens Axboe 	 * Run this off this callpath, as drivers could invoke end_io while
1145a8c32a5cSJens Axboe 	 * inside their request_fn (and holding the queue lock). Calling
1146a8c32a5cSJens Axboe 	 * back into ->request_fn() could deadlock attempting to grab the
1147a8c32a5cSJens Axboe 	 * queue lock again.
1148a8c32a5cSJens Axboe 	 */
11496acfe68bSMike Snitzer 	if (!md->queue->mq_ops && run_queue)
1150a8c32a5cSJens Axboe 		blk_run_queue_async(md->queue);
1151cec47e3dSKiyoshi Ueda 
1152cec47e3dSKiyoshi Ueda 	/*
1153cec47e3dSKiyoshi Ueda 	 * dm_put() must be at the end of this function. See the comment above
1154cec47e3dSKiyoshi Ueda 	 */
1155cec47e3dSKiyoshi Ueda 	dm_put(md);
1156cec47e3dSKiyoshi Ueda }
1157cec47e3dSKiyoshi Ueda 
1158e5d8de32SMike Snitzer static void free_rq_clone(struct request *clone)
1159a77e28c7SKiyoshi Ueda {
1160a77e28c7SKiyoshi Ueda 	struct dm_rq_target_io *tio = clone->end_io_data;
1161bfebd1cdSMike Snitzer 	struct mapped_device *md = tio->md;
1162a77e28c7SKiyoshi Ueda 
116378d8e58aSMike Snitzer 	blk_rq_unprep_clone(clone);
116478d8e58aSMike Snitzer 
1165aa6df8ddSMike Snitzer 	if (md->type == DM_TYPE_MQ_REQUEST_BASED)
1166aa6df8ddSMike Snitzer 		/* stacked on blk-mq queue(s) */
1167e5863d9aSMike Snitzer 		tio->ti->type->release_clone_rq(clone);
116802233342SMike Snitzer 	else if (!md->queue->mq_ops)
116902233342SMike Snitzer 		/* request_fn queue stacked on request_fn queue(s) */
1170eca7ee6dSMike Snitzer 		free_old_clone_request(md, clone);
1171bfebd1cdSMike Snitzer 
1172bfebd1cdSMike Snitzer 	if (!md->queue->mq_ops)
1173eca7ee6dSMike Snitzer 		free_old_rq_tio(tio);
1174a77e28c7SKiyoshi Ueda }
1175a77e28c7SKiyoshi Ueda 
1176980691e5SKiyoshi Ueda /*
1177980691e5SKiyoshi Ueda  * Complete the clone and the original request.
1178466d89a6SKeith Busch  * Must be called without clone's queue lock held,
1179466d89a6SKeith Busch  * see end_clone_request() for more details.
1180980691e5SKiyoshi Ueda  */
1181980691e5SKiyoshi Ueda static void dm_end_request(struct request *clone, int error)
1182980691e5SKiyoshi Ueda {
1183980691e5SKiyoshi Ueda 	int rw = rq_data_dir(clone);
1184980691e5SKiyoshi Ueda 	struct dm_rq_target_io *tio = clone->end_io_data;
1185980691e5SKiyoshi Ueda 	struct mapped_device *md = tio->md;
1186980691e5SKiyoshi Ueda 	struct request *rq = tio->orig;
1187980691e5SKiyoshi Ueda 
118829e4013dSTejun Heo 	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
1189980691e5SKiyoshi Ueda 		rq->errors = clone->errors;
1190980691e5SKiyoshi Ueda 		rq->resid_len = clone->resid_len;
1191980691e5SKiyoshi Ueda 
1192980691e5SKiyoshi Ueda 		if (rq->sense)
1193980691e5SKiyoshi Ueda 			/*
1194980691e5SKiyoshi Ueda 			 * The clone shares the sense buffer of the original
1195980691e5SKiyoshi Ueda 			 * request, so setting the length of the sense data
1196980691e5SKiyoshi Ueda 			 * is enough.
1197980691e5SKiyoshi Ueda 			 */
1198980691e5SKiyoshi Ueda 			rq->sense_len = clone->sense_len;
1199980691e5SKiyoshi Ueda 	}
1200980691e5SKiyoshi Ueda 
1201e5d8de32SMike Snitzer 	free_rq_clone(clone);
1202e262f347SMikulas Patocka 	rq_end_stats(md, rq);
1203bfebd1cdSMike Snitzer 	if (!rq->q->mq_ops)
1204980691e5SKiyoshi Ueda 		blk_end_request_all(rq, error);
1205bfebd1cdSMike Snitzer 	else
1206bfebd1cdSMike Snitzer 		blk_mq_end_request(rq, error);
120729e4013dSTejun Heo 	rq_completed(md, rw, true);
1208980691e5SKiyoshi Ueda }
1209980691e5SKiyoshi Ueda 
1210cec47e3dSKiyoshi Ueda static void dm_unprep_request(struct request *rq)
1211cec47e3dSKiyoshi Ueda {
1212bfebd1cdSMike Snitzer 	struct dm_rq_target_io *tio = tio_from_request(rq);
1213466d89a6SKeith Busch 	struct request *clone = tio->clone;
1214cec47e3dSKiyoshi Ueda 
1215bfebd1cdSMike Snitzer 	if (!rq->q->mq_ops) {
1216cec47e3dSKiyoshi Ueda 		rq->special = NULL;
1217cec47e3dSKiyoshi Ueda 		rq->cmd_flags &= ~REQ_DONTPREP;
1218bfebd1cdSMike Snitzer 	}
1219cec47e3dSKiyoshi Ueda 
1220e5863d9aSMike Snitzer 	if (clone)
1221e5d8de32SMike Snitzer 		free_rq_clone(clone);
12224328daa2SMike Snitzer 	else if (!tio->md->queue->mq_ops)
1223eca7ee6dSMike Snitzer 		free_old_rq_tio(tio);
1224cec47e3dSKiyoshi Ueda }
1225cec47e3dSKiyoshi Ueda 
1226cec47e3dSKiyoshi Ueda /*
1227cec47e3dSKiyoshi Ueda  * Requeue the original request of a clone.
1228cec47e3dSKiyoshi Ueda  */
1229eca7ee6dSMike Snitzer static void dm_old_requeue_request(struct request *rq)
1230cec47e3dSKiyoshi Ueda {
1231cec47e3dSKiyoshi Ueda 	struct request_queue *q = rq->q;
1232cec47e3dSKiyoshi Ueda 	unsigned long flags;
1233cec47e3dSKiyoshi Ueda 
1234cec47e3dSKiyoshi Ueda 	spin_lock_irqsave(q->queue_lock, flags);
1235cec47e3dSKiyoshi Ueda 	blk_requeue_request(q, rq);
12364ae9944dSJunichi Nomura 	blk_run_queue_async(q);
1237cec47e3dSKiyoshi Ueda 	spin_unlock_irqrestore(q->queue_lock, flags);
1238bfebd1cdSMike Snitzer }
1239bfebd1cdSMike Snitzer 
1240818c5f3bSMike Snitzer static void dm_mq_requeue_request(struct request *rq)
1241818c5f3bSMike Snitzer {
1242818c5f3bSMike Snitzer 	struct request_queue *q = rq->q;
1243818c5f3bSMike Snitzer 	unsigned long flags;
1244818c5f3bSMike Snitzer 
1245818c5f3bSMike Snitzer 	blk_mq_requeue_request(rq);
1246818c5f3bSMike Snitzer 	spin_lock_irqsave(q->queue_lock, flags);
1247818c5f3bSMike Snitzer 	if (!blk_queue_stopped(q))
1248818c5f3bSMike Snitzer 		blk_mq_kick_requeue_list(q);
1249818c5f3bSMike Snitzer 	spin_unlock_irqrestore(q->queue_lock, flags);
1250818c5f3bSMike Snitzer }
1251818c5f3bSMike Snitzer 
12522d76fff1SMike Snitzer static void dm_requeue_original_request(struct mapped_device *md,
1253bfebd1cdSMike Snitzer 					struct request *rq)
1254bfebd1cdSMike Snitzer {
1255bfebd1cdSMike Snitzer 	int rw = rq_data_dir(rq);
1256bfebd1cdSMike Snitzer 
125798dbc9c6SBryn M. Reeves 	rq_end_stats(md, rq);
1258bfebd1cdSMike Snitzer 	dm_unprep_request(rq);
1259bfebd1cdSMike Snitzer 
1260bfebd1cdSMike Snitzer 	if (!rq->q->mq_ops)
1261eca7ee6dSMike Snitzer 		dm_old_requeue_request(rq);
1262818c5f3bSMike Snitzer 	else
1263818c5f3bSMike Snitzer 		dm_mq_requeue_request(rq);
1264cec47e3dSKiyoshi Ueda 
1265466d89a6SKeith Busch 	rq_completed(md, rw, false);
1266cec47e3dSKiyoshi Ueda }
1267466d89a6SKeith Busch 
1268eca7ee6dSMike Snitzer static void dm_old_stop_queue(struct request_queue *q)
1269cec47e3dSKiyoshi Ueda {
1270bfebd1cdSMike Snitzer 	unsigned long flags;
1271bfebd1cdSMike Snitzer 
1272bfebd1cdSMike Snitzer 	spin_lock_irqsave(q->queue_lock, flags);
1273818c5f3bSMike Snitzer 	if (blk_queue_stopped(q)) {
1274818c5f3bSMike Snitzer 		spin_unlock_irqrestore(q->queue_lock, flags);
1275818c5f3bSMike Snitzer 		return;
1276818c5f3bSMike Snitzer 	}
1277818c5f3bSMike Snitzer 
1278cec47e3dSKiyoshi Ueda 	blk_stop_queue(q);
1279bfebd1cdSMike Snitzer 	spin_unlock_irqrestore(q->queue_lock, flags);
1280cec47e3dSKiyoshi Ueda }
1281cec47e3dSKiyoshi Ueda 
1282eca7ee6dSMike Snitzer static void dm_stop_queue(struct request_queue *q)
1283cec47e3dSKiyoshi Ueda {
1284bfebd1cdSMike Snitzer 	if (!q->mq_ops)
1285eca7ee6dSMike Snitzer 		dm_old_stop_queue(q);
1286bfebd1cdSMike Snitzer 	else
1287bfebd1cdSMike Snitzer 		blk_mq_stop_hw_queues(q);
1288bfebd1cdSMike Snitzer }
1289bfebd1cdSMike Snitzer 
1290eca7ee6dSMike Snitzer static void dm_old_start_queue(struct request_queue *q)
1291bfebd1cdSMike Snitzer {
1292cec47e3dSKiyoshi Ueda 	unsigned long flags;
1293cec47e3dSKiyoshi Ueda 
1294cec47e3dSKiyoshi Ueda 	spin_lock_irqsave(q->queue_lock, flags);
1295cec47e3dSKiyoshi Ueda 	if (blk_queue_stopped(q))
1296cec47e3dSKiyoshi Ueda 		blk_start_queue(q);
1297bfebd1cdSMike Snitzer 	spin_unlock_irqrestore(q->queue_lock, flags);
1298cec47e3dSKiyoshi Ueda }
1299cec47e3dSKiyoshi Ueda 
1300eca7ee6dSMike Snitzer static void dm_start_queue(struct request_queue *q)
1301cec47e3dSKiyoshi Ueda {
1302bfebd1cdSMike Snitzer 	if (!q->mq_ops)
1303eca7ee6dSMike Snitzer 		dm_old_start_queue(q);
1304818c5f3bSMike Snitzer 	else {
1305bfebd1cdSMike Snitzer 		blk_mq_start_stopped_hw_queues(q, true);
1306818c5f3bSMike Snitzer 		blk_mq_kick_requeue_list(q);
1307818c5f3bSMike Snitzer 	}
1308cec47e3dSKiyoshi Ueda }
1309cec47e3dSKiyoshi Ueda 
131011a68244SKiyoshi Ueda static void dm_done(struct request *clone, int error, bool mapped)
131111a68244SKiyoshi Ueda {
131211a68244SKiyoshi Ueda 	int r = error;
131311a68244SKiyoshi Ueda 	struct dm_rq_target_io *tio = clone->end_io_data;
1314ba1cbad9SMike Snitzer 	dm_request_endio_fn rq_end_io = NULL;
1315ba1cbad9SMike Snitzer 
1316ba1cbad9SMike Snitzer 	if (tio->ti) {
1317ba1cbad9SMike Snitzer 		rq_end_io = tio->ti->type->rq_end_io;
131811a68244SKiyoshi Ueda 
131911a68244SKiyoshi Ueda 		if (mapped && rq_end_io)
132011a68244SKiyoshi Ueda 			r = rq_end_io(tio->ti, clone, error, &tio->info);
1321ba1cbad9SMike Snitzer 	}
132211a68244SKiyoshi Ueda 
13237eee4ae2SMike Snitzer 	if (unlikely(r == -EREMOTEIO && (clone->cmd_flags & REQ_WRITE_SAME) &&
13247eee4ae2SMike Snitzer 		     !clone->q->limits.max_write_same_sectors))
13257eee4ae2SMike Snitzer 		disable_write_same(tio->md);
13267eee4ae2SMike Snitzer 
132711a68244SKiyoshi Ueda 	if (r <= 0)
132811a68244SKiyoshi Ueda 		/* The target wants to complete the I/O */
132911a68244SKiyoshi Ueda 		dm_end_request(clone, r);
133011a68244SKiyoshi Ueda 	else if (r == DM_ENDIO_INCOMPLETE)
133111a68244SKiyoshi Ueda 		/* The target will handle the I/O */
133211a68244SKiyoshi Ueda 		return;
133311a68244SKiyoshi Ueda 	else if (r == DM_ENDIO_REQUEUE)
133411a68244SKiyoshi Ueda 		/* The target wants to requeue the I/O */
13352d76fff1SMike Snitzer 		dm_requeue_original_request(tio->md, tio->orig);
133611a68244SKiyoshi Ueda 	else {
133711a68244SKiyoshi Ueda 		DMWARN("unimplemented target endio return value: %d", r);
133811a68244SKiyoshi Ueda 		BUG();
133911a68244SKiyoshi Ueda 	}
134011a68244SKiyoshi Ueda }
134111a68244SKiyoshi Ueda 
1342cec47e3dSKiyoshi Ueda /*
1343cec47e3dSKiyoshi Ueda  * Request completion handler for request-based dm
1344cec47e3dSKiyoshi Ueda  */
1345cec47e3dSKiyoshi Ueda static void dm_softirq_done(struct request *rq)
1346cec47e3dSKiyoshi Ueda {
134711a68244SKiyoshi Ueda 	bool mapped = true;
1348bfebd1cdSMike Snitzer 	struct dm_rq_target_io *tio = tio_from_request(rq);
1349466d89a6SKeith Busch 	struct request *clone = tio->clone;
1350bfebd1cdSMike Snitzer 	int rw;
1351cec47e3dSKiyoshi Ueda 
1352e5863d9aSMike Snitzer 	if (!clone) {
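		/*
		 * No clone was ever dispatched (e.g. the target's
		 * clone_and_map_rq() failed and the request was killed),
		 * so end the original request with tio->error directly.
		 */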
1353e262f347SMikulas Patocka 		rq_end_stats(tio->md, rq);
1354bfebd1cdSMike Snitzer 		rw = rq_data_dir(rq);
1355bfebd1cdSMike Snitzer 		if (!rq->q->mq_ops) {
1356e5863d9aSMike Snitzer 			blk_end_request_all(rq, tio->error);
1357bfebd1cdSMike Snitzer 			rq_completed(tio->md, rw, false);
1358eca7ee6dSMike Snitzer 			free_old_rq_tio(tio);
1359bfebd1cdSMike Snitzer 		} else {
1360bfebd1cdSMike Snitzer 			blk_mq_end_request(rq, tio->error);
1361bfebd1cdSMike Snitzer 			rq_completed(tio->md, rw, false);
1362bfebd1cdSMike Snitzer 		}
1363e5863d9aSMike Snitzer 		return;
1364e5863d9aSMike Snitzer 	}
1365cec47e3dSKiyoshi Ueda 
136611a68244SKiyoshi Ueda 	if (rq->cmd_flags & REQ_FAILED)
136711a68244SKiyoshi Ueda 		mapped = false;
1368cec47e3dSKiyoshi Ueda 
136911a68244SKiyoshi Ueda 	dm_done(clone, tio->error, mapped);
1370cec47e3dSKiyoshi Ueda }
1371cec47e3dSKiyoshi Ueda 
1372cec47e3dSKiyoshi Ueda /*
1373cec47e3dSKiyoshi Ueda  * Complete the clone and the original request with the error status
1374cec47e3dSKiyoshi Ueda  * through softirq context.
1375cec47e3dSKiyoshi Ueda  */
1376466d89a6SKeith Busch static void dm_complete_request(struct request *rq, int error)
1377cec47e3dSKiyoshi Ueda {
1378bfebd1cdSMike Snitzer 	struct dm_rq_target_io *tio = tio_from_request(rq);
1379cec47e3dSKiyoshi Ueda 
1380cec47e3dSKiyoshi Ueda 	tio->error = error;
13816acfe68bSMike Snitzer 	if (!rq->q->mq_ops)
1382cec47e3dSKiyoshi Ueda 		blk_complete_request(rq);
13836acfe68bSMike Snitzer 	else
13846acfe68bSMike Snitzer 		blk_mq_complete_request(rq, error);
1385cec47e3dSKiyoshi Ueda }
1386cec47e3dSKiyoshi Ueda 
1387cec47e3dSKiyoshi Ueda /*
1388cec47e3dSKiyoshi Ueda  * Complete the not-mapped clone and the original request with the error status
1389cec47e3dSKiyoshi Ueda  * through softirq context.
1390cec47e3dSKiyoshi Ueda  * The target's rq_end_io() function isn't called.
1391e5863d9aSMike Snitzer  * This may be used when the target's map_rq() or clone_and_map_rq() functions fail.
1392cec47e3dSKiyoshi Ueda  */
1393466d89a6SKeith Busch static void dm_kill_unmapped_request(struct request *rq, int error)
1394cec47e3dSKiyoshi Ueda {
1395cec47e3dSKiyoshi Ueda 	rq->cmd_flags |= REQ_FAILED;
1396466d89a6SKeith Busch 	dm_complete_request(rq, error);
1397cec47e3dSKiyoshi Ueda }
1398cec47e3dSKiyoshi Ueda 
1399cec47e3dSKiyoshi Ueda /*
1400eca7ee6dSMike Snitzer  * Called with the clone's queue lock held (in the case of .request_fn)
1401cec47e3dSKiyoshi Ueda  */
1402cec47e3dSKiyoshi Ueda static void end_clone_request(struct request *clone, int error)
1403cec47e3dSKiyoshi Ueda {
1404466d89a6SKeith Busch 	struct dm_rq_target_io *tio = clone->end_io_data;
1405466d89a6SKeith Busch 
1406e5863d9aSMike Snitzer 	if (!clone->q->mq_ops) {
1407cec47e3dSKiyoshi Ueda 		/*
1408cec47e3dSKiyoshi Ueda 		 * This is just to clean up the bookkeeping of the queue in
1409cec47e3dSKiyoshi Ueda 		 * which the clone was dispatched.
1410e5863d9aSMike Snitzer 		 * The clone is *NOT* actually freed here because it was
1411e5863d9aSMike Snitzer 		 * allocated from dm's own mempool (REQ_ALLOCED isn't set).
1412cec47e3dSKiyoshi Ueda 		 */
1413cec47e3dSKiyoshi Ueda 		__blk_put_request(clone->q, clone);
1414e5863d9aSMike Snitzer 	}
1415cec47e3dSKiyoshi Ueda 
1416cec47e3dSKiyoshi Ueda 	/*
1417cec47e3dSKiyoshi Ueda 	 * Actual request completion is done in a softirq context which doesn't
1418466d89a6SKeith Busch 	 * hold the clone's queue lock.  Otherwise, deadlock could occur because:
1419cec47e3dSKiyoshi Ueda 	 *     - the upper-level driver of the stacking may submit
1420cec47e3dSKiyoshi Ueda 	 *       another request during the completion
1421cec47e3dSKiyoshi Ueda 	 *     - that submission, which requires the queue lock, may be
1422466d89a6SKeith Busch 	 *       made against this clone's queue
1423cec47e3dSKiyoshi Ueda 	 */
1424466d89a6SKeith Busch 	dm_complete_request(tio->orig, error);
1425cec47e3dSKiyoshi Ueda }
1426cec47e3dSKiyoshi Ueda 
142756a67df7SMike Snitzer /*
142856a67df7SMike Snitzer  * Return maximum size of I/O possible at the supplied sector up to the current
142956a67df7SMike Snitzer  * target boundary.
143056a67df7SMike Snitzer  */
143156a67df7SMike Snitzer static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
14321da177e4SLinus Torvalds {
143356a67df7SMike Snitzer 	sector_t target_offset = dm_target_offset(ti, sector);
143456a67df7SMike Snitzer 
143556a67df7SMike Snitzer 	return ti->len - target_offset;
143656a67df7SMike Snitzer }
143756a67df7SMike Snitzer 
143856a67df7SMike Snitzer static sector_t max_io_len(sector_t sector, struct dm_target *ti)
143956a67df7SMike Snitzer {
144056a67df7SMike Snitzer 	sector_t len = max_io_len_target_boundary(sector, ti);
1441542f9038SMike Snitzer 	sector_t offset, max_len;
14421da177e4SLinus Torvalds 
14431da177e4SLinus Torvalds 	/*
14441da177e4SLinus Torvalds 	 * Does the target need to split even further?
14451da177e4SLinus Torvalds 	 */
1446542f9038SMike Snitzer 	if (ti->max_io_len) {
1447542f9038SMike Snitzer 		offset = dm_target_offset(ti, sector);
1448542f9038SMike Snitzer 		if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
1449542f9038SMike Snitzer 			max_len = sector_div(offset, ti->max_io_len);
1450542f9038SMike Snitzer 		else
1451542f9038SMike Snitzer 			max_len = offset & (ti->max_io_len - 1);
1452542f9038SMike Snitzer 		max_len = ti->max_io_len - max_len;
1453542f9038SMike Snitzer 
1454542f9038SMike Snitzer 		if (len > max_len)
1455542f9038SMike Snitzer 			len = max_len;
14561da177e4SLinus Torvalds 	}
14571da177e4SLinus Torvalds 
14581da177e4SLinus Torvalds 	return len;
14591da177e4SLinus Torvalds }
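/*
 * Worked example for the power-of-two path above (illustrative values):
 * with ti->max_io_len = 256 sectors and a target-relative offset of 300
 * sectors, 300 & 255 = 44, so 256 - 44 = 212 sectors remain before the
 * next boundary; a larger I/O at that offset is capped to 212 sectors.
 */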
14601da177e4SLinus Torvalds 
1461542f9038SMike Snitzer int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
1462542f9038SMike Snitzer {
1463542f9038SMike Snitzer 	if (len > UINT_MAX) {
1464542f9038SMike Snitzer 		DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
1465542f9038SMike Snitzer 		      (unsigned long long)len, UINT_MAX);
1466542f9038SMike Snitzer 		ti->error = "Maximum size of target IO is too large";
1467542f9038SMike Snitzer 		return -EINVAL;
1468542f9038SMike Snitzer 	}
1469542f9038SMike Snitzer 
1470542f9038SMike Snitzer 	ti->max_io_len = (uint32_t) len;
1471542f9038SMike Snitzer 
1472542f9038SMike Snitzer 	return 0;
1473542f9038SMike Snitzer }
1474542f9038SMike Snitzer EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
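/*
 * Sketch of a caller (hypothetical target, not part of this file): a
 * target constructor typically passes its chunk size so that dm splits
 * I/O on chunk boundaries.  example_target_ctr and chunk_sectors are
 * assumed names used only for illustration; the block is compile-guarded.
 */
#if 0
static int example_target_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	sector_t chunk_sectors = 256;	/* e.g. 128KiB chunks with 512B sectors */
	int r;

	/* Fails with -EINVAL (and sets ti->error) if len exceeds UINT_MAX. */
	r = dm_set_target_max_io_len(ti, chunk_sectors);
	if (r)
		return r;

	return 0;
}
#endif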
1475542f9038SMike Snitzer 
14761dd40c3eSMikulas Patocka /*
14771dd40c3eSMikulas Patocka  * A target may call dm_accept_partial_bio only from the map routine.  It is
14781dd40c3eSMikulas Patocka  * allowed for all bio types except REQ_FLUSH.
14791dd40c3eSMikulas Patocka  *
14801dd40c3eSMikulas Patocka  * dm_accept_partial_bio informs dm that the target only wants to process
14811dd40c3eSMikulas Patocka  * additional n_sectors sectors of the bio, and that the rest of the data
14821dd40c3eSMikulas Patocka  * should be sent in the next bio.
14831dd40c3eSMikulas Patocka  *
14841dd40c3eSMikulas Patocka  * A diagram that explains the arithmetic:
14851dd40c3eSMikulas Patocka  * +--------------------+---------------+-------+
14861dd40c3eSMikulas Patocka  * |         1          |       2       |   3   |
14871dd40c3eSMikulas Patocka  * +--------------------+---------------+-------+
14881dd40c3eSMikulas Patocka  *
14891dd40c3eSMikulas Patocka  * <-------------- *tio->len_ptr --------------->
14901dd40c3eSMikulas Patocka  *                      <------- bi_size ------->
14911dd40c3eSMikulas Patocka  *                      <-- n_sectors -->
14921dd40c3eSMikulas Patocka  *
14931dd40c3eSMikulas Patocka  * Region 1 was already iterated over with bio_advance or a similar function.
14941dd40c3eSMikulas Patocka  *	(it may be empty if the target doesn't use bio_advance)
14951dd40c3eSMikulas Patocka  * Region 2 is the remaining bio size that the target wants to process.
14961dd40c3eSMikulas Patocka  *	(it may be empty if region 1 is non-empty, although there is no reason
14971dd40c3eSMikulas Patocka  *	 to make it empty)
14981dd40c3eSMikulas Patocka  * The target requires that region 3 be sent in the next bio.
14991dd40c3eSMikulas Patocka  *
15001dd40c3eSMikulas Patocka  * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
15011dd40c3eSMikulas Patocka  * the partially processed part (the sum of regions 1+2) must be the same for all
15021dd40c3eSMikulas Patocka  * copies of the bio.
15031dd40c3eSMikulas Patocka  */
15041dd40c3eSMikulas Patocka void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
15051dd40c3eSMikulas Patocka {
15061dd40c3eSMikulas Patocka 	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
15071dd40c3eSMikulas Patocka 	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
15081dd40c3eSMikulas Patocka 	BUG_ON(bio->bi_rw & REQ_FLUSH);
15091dd40c3eSMikulas Patocka 	BUG_ON(bi_size > *tio->len_ptr);
15101dd40c3eSMikulas Patocka 	BUG_ON(n_sectors > bi_size);
15111dd40c3eSMikulas Patocka 	*tio->len_ptr -= bi_size - n_sectors;
15121dd40c3eSMikulas Patocka 	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
15131dd40c3eSMikulas Patocka }
15141dd40c3eSMikulas Patocka EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
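/*
 * Worked example of the arithmetic above (illustrative values): with
 * *tio->len_ptr = 16 sectors, bi_size = 12 sectors (so region 1 is 4
 * sectors) and n_sectors = 8, *tio->len_ptr becomes 16 - (12 - 8) = 12
 * (regions 1+2) and the bio is shrunk to 8 sectors; the remaining 4
 * sectors (region 3) are resubmitted in the next bio.
 *
 * Sketch of a caller (hypothetical target, not part of this file);
 * boundary_sectors() is an assumed helper, not a real dm API:
 */
#if 0
static int example_target_map(struct dm_target *ti, struct bio *bio)
{
	unsigned remaining = boundary_sectors(ti, bio->bi_iter.bi_sector);

	/* Only accept the sectors up to the boundary; dm sends the rest later. */
	if (bio_sectors(bio) > remaining)
		dm_accept_partial_bio(bio, remaining);

	/* ... remap bio->bi_bdev to the underlying device here ... */
	return DM_MAPIO_REMAPPED;
}
#endif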
15151dd40c3eSMikulas Patocka 
1516bd2a49b8SAlasdair G Kergon static void __map_bio(struct dm_target_io *tio)
15171da177e4SLinus Torvalds {
15181da177e4SLinus Torvalds 	int r;
15192056a782SJens Axboe 	sector_t sector;
1520dba14160SMikulas Patocka 	struct bio *clone = &tio->clone;
1521bd2a49b8SAlasdair G Kergon 	struct dm_target *ti = tio->ti;
15221da177e4SLinus Torvalds 
15231da177e4SLinus Torvalds 	clone->bi_end_io = clone_endio;
15241da177e4SLinus Torvalds 
15251da177e4SLinus Torvalds 	/*
15261da177e4SLinus Torvalds 	 * Map the clone.  If r == 0 we don't need to do
15271da177e4SLinus Torvalds 	 * anything, the target has assumed ownership of
15281da177e4SLinus Torvalds 	 * this io.
15291da177e4SLinus Torvalds 	 */
15301da177e4SLinus Torvalds 	atomic_inc(&tio->io->io_count);
15314f024f37SKent Overstreet 	sector = clone->bi_iter.bi_sector;
15327de3ee57SMikulas Patocka 	r = ti->type->map(ti, clone);
153345cbcd79SKiyoshi Ueda 	if (r == DM_MAPIO_REMAPPED) {
15341da177e4SLinus Torvalds 		/* the bio has been remapped so dispatch it */
15352056a782SJens Axboe 
1536d07335e5SMike Snitzer 		trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone,
153722a7c31aSAlan D. Brunelle 				      tio->io->bio->bi_bdev->bd_dev, sector);
15382056a782SJens Axboe 
15391da177e4SLinus Torvalds 		generic_make_request(clone);
15402e93ccc1SKiyoshi Ueda 	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
15412e93ccc1SKiyoshi Ueda 		/* error the io and bail out, or requeue it if needed */
15429faf400fSStefan Bader 		dec_pending(tio->io, r);
1543*cfae7529SMike Snitzer 		free_tio(tio);
1544ab37844dSMikulas Patocka 	} else if (r != DM_MAPIO_SUBMITTED) {
154545cbcd79SKiyoshi Ueda 		DMWARN("unimplemented target map return value: %d", r);
154645cbcd79SKiyoshi Ueda 		BUG();
15471da177e4SLinus Torvalds 	}
15481da177e4SLinus Torvalds }
15491da177e4SLinus Torvalds 
15501da177e4SLinus Torvalds struct clone_info {
15511da177e4SLinus Torvalds 	struct mapped_device *md;
15521da177e4SLinus Torvalds 	struct dm_table *map;
15531da177e4SLinus Torvalds 	struct bio *bio;
15541da177e4SLinus Torvalds 	struct dm_io *io;
15551da177e4SLinus Torvalds 	sector_t sector;
1556e0d6609aSMikulas Patocka 	unsigned sector_count;
15571da177e4SLinus Torvalds };
15581da177e4SLinus Torvalds 
1559e0d6609aSMikulas Patocka static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
1560bd2a49b8SAlasdair G Kergon {
15614f024f37SKent Overstreet 	bio->bi_iter.bi_sector = sector;
15624f024f37SKent Overstreet 	bio->bi_iter.bi_size = to_bytes(len);
15631da177e4SLinus Torvalds }
15641da177e4SLinus Torvalds 
15651da177e4SLinus Torvalds /*
15661da177e4SLinus Torvalds  * Creates a bio that consists of a range of complete bvecs.
15671da177e4SLinus Torvalds  */
1568c80914e8SMike Snitzer static int clone_bio(struct dm_target_io *tio, struct bio *bio,
15691c3b13e6SKent Overstreet 		     sector_t sector, unsigned len)
15701da177e4SLinus Torvalds {
1571dba14160SMikulas Patocka 	struct bio *clone = &tio->clone;
15721da177e4SLinus Torvalds 
15731c3b13e6SKent Overstreet 	__bio_clone_fast(clone, bio);
15749c47008dSMartin K. Petersen 
1575c80914e8SMike Snitzer 	if (bio_integrity(bio)) {
1576c80914e8SMike Snitzer 		int r = bio_integrity_clone(clone, bio, GFP_NOIO);
1577c80914e8SMike Snitzer 		if (r < 0)
1578c80914e8SMike Snitzer 			return r;
1579c80914e8SMike Snitzer 	}
15801c3b13e6SKent Overstreet 
15811c3b13e6SKent Overstreet 	bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
15821c3b13e6SKent Overstreet 	clone->bi_iter.bi_size = to_bytes(len);
15831c3b13e6SKent Overstreet 
15841c3b13e6SKent Overstreet 	if (bio_integrity(bio))
15851c3b13e6SKent Overstreet 		bio_integrity_trim(clone, 0, len);
1586c80914e8SMike Snitzer 
1587c80914e8SMike Snitzer 	return 0;
15881da177e4SLinus Torvalds }
15891da177e4SLinus Torvalds 
15909015df24SAlasdair G Kergon static struct dm_target_io *alloc_tio(struct clone_info *ci,
159199778273SJunichi Nomura 				      struct dm_target *ti,
159255a62eefSAlasdair G Kergon 				      unsigned target_bio_nr)
1593f9ab94ceSMikulas Patocka {
1594dba14160SMikulas Patocka 	struct dm_target_io *tio;
1595dba14160SMikulas Patocka 	struct bio *clone;
1596dba14160SMikulas Patocka 
159799778273SJunichi Nomura 	clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs);
1598dba14160SMikulas Patocka 	tio = container_of(clone, struct dm_target_io, clone);
1599f9ab94ceSMikulas Patocka 
1600f9ab94ceSMikulas Patocka 	tio->io = ci->io;
1601f9ab94ceSMikulas Patocka 	tio->ti = ti;
160255a62eefSAlasdair G Kergon 	tio->target_bio_nr = target_bio_nr;
16039015df24SAlasdair G Kergon 
16049015df24SAlasdair G Kergon 	return tio;
16059015df24SAlasdair G Kergon }
16069015df24SAlasdair G Kergon 
160714fe594dSAlasdair G Kergon static void __clone_and_map_simple_bio(struct clone_info *ci,
160814fe594dSAlasdair G Kergon 				       struct dm_target *ti,
16091dd40c3eSMikulas Patocka 				       unsigned target_bio_nr, unsigned *len)
16109015df24SAlasdair G Kergon {
161199778273SJunichi Nomura 	struct dm_target_io *tio = alloc_tio(ci, ti, target_bio_nr);
1612dba14160SMikulas Patocka 	struct bio *clone = &tio->clone;
16139015df24SAlasdair G Kergon 
16141dd40c3eSMikulas Patocka 	tio->len_ptr = len;
16151dd40c3eSMikulas Patocka 
16161c3b13e6SKent Overstreet 	__bio_clone_fast(clone, ci->bio);
1617bd2a49b8SAlasdair G Kergon 	if (len)
16181dd40c3eSMikulas Patocka 		bio_setup_sector(clone, ci->sector, *len);
1619f9ab94ceSMikulas Patocka 
1620bd2a49b8SAlasdair G Kergon 	__map_bio(tio);
1621f9ab94ceSMikulas Patocka }
1622f9ab94ceSMikulas Patocka 
162314fe594dSAlasdair G Kergon static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
16241dd40c3eSMikulas Patocka 				  unsigned num_bios, unsigned *len)
162506a426ceSMike Snitzer {
162655a62eefSAlasdair G Kergon 	unsigned target_bio_nr;
162706a426ceSMike Snitzer 
162855a62eefSAlasdair G Kergon 	for (target_bio_nr = 0; target_bio_nr < num_bios; target_bio_nr++)
162914fe594dSAlasdair G Kergon 		__clone_and_map_simple_bio(ci, ti, target_bio_nr, len);
163006a426ceSMike Snitzer }
163106a426ceSMike Snitzer 
163214fe594dSAlasdair G Kergon static int __send_empty_flush(struct clone_info *ci)
1633f9ab94ceSMikulas Patocka {
163406a426ceSMike Snitzer 	unsigned target_nr = 0;
1635f9ab94ceSMikulas Patocka 	struct dm_target *ti;
1636f9ab94ceSMikulas Patocka 
1637b372d360SMike Snitzer 	BUG_ON(bio_has_data(ci->bio));
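	/*
	 * Send an empty flush clone to every target in the table,
	 * ti->num_flush_bios times each; len is NULL so the clones
	 * carry no data payload.
	 */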
1638f9ab94ceSMikulas Patocka 	while ((ti = dm_table_get_target(ci->map, target_nr++)))
16391dd40c3eSMikulas Patocka 		__send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
1640f9ab94ceSMikulas Patocka 
1641f9ab94ceSMikulas Patocka 	return 0;
1642f9ab94ceSMikulas Patocka }
1643f9ab94ceSMikulas Patocka 
1644c80914e8SMike Snitzer static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
16451dd40c3eSMikulas Patocka 				     sector_t sector, unsigned *len)
16465ae89a87SMike Snitzer {
1647dba14160SMikulas Patocka 	struct bio *bio = ci->bio;
16485ae89a87SMike Snitzer 	struct dm_target_io *tio;
1649b0d8ed4dSAlasdair G Kergon 	unsigned target_bio_nr;
1650b0d8ed4dSAlasdair G Kergon 	unsigned num_target_bios = 1;
1651c80914e8SMike Snitzer 	int r = 0;
16525ae89a87SMike Snitzer 
1653b0d8ed4dSAlasdair G Kergon 	/*
1654b0d8ed4dSAlasdair G Kergon 	 * Does the target want to receive duplicate copies of the bio?
1655b0d8ed4dSAlasdair G Kergon 	 */
1656b0d8ed4dSAlasdair G Kergon 	if (bio_data_dir(bio) == WRITE && ti->num_write_bios)
1657b0d8ed4dSAlasdair G Kergon 		num_target_bios = ti->num_write_bios(ti, bio);
1658e4c93811SAlasdair G Kergon 
1659b0d8ed4dSAlasdair G Kergon 	for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) {
166099778273SJunichi Nomura 		tio = alloc_tio(ci, ti, target_bio_nr);
16611dd40c3eSMikulas Patocka 		tio->len_ptr = len;
1662c80914e8SMike Snitzer 		r = clone_bio(tio, bio, sector, *len);
1663072623deSMikulas Patocka 		if (r < 0) {
1664*cfae7529SMike Snitzer 			free_tio(tio);
1665c80914e8SMike Snitzer 			break;
1666072623deSMikulas Patocka 		}
1667bd2a49b8SAlasdair G Kergon 		__map_bio(tio);
16685ae89a87SMike Snitzer 	}
1669c80914e8SMike Snitzer 
1670c80914e8SMike Snitzer 	return r;
1671b0d8ed4dSAlasdair G Kergon }
16725ae89a87SMike Snitzer 
167355a62eefSAlasdair G Kergon typedef unsigned (*get_num_bios_fn)(struct dm_target *ti);
167423508a96SMike Snitzer 
167555a62eefSAlasdair G Kergon static unsigned get_num_discard_bios(struct dm_target *ti)
167623508a96SMike Snitzer {
167755a62eefSAlasdair G Kergon 	return ti->num_discard_bios;
167823508a96SMike Snitzer }
167923508a96SMike Snitzer 
168055a62eefSAlasdair G Kergon static unsigned get_num_write_same_bios(struct dm_target *ti)
168123508a96SMike Snitzer {
168255a62eefSAlasdair G Kergon 	return ti->num_write_same_bios;
168323508a96SMike Snitzer }
168423508a96SMike Snitzer 
168523508a96SMike Snitzer typedef bool (*is_split_required_fn)(struct dm_target *ti);
168623508a96SMike Snitzer 
168723508a96SMike Snitzer static bool is_split_required_for_discard(struct dm_target *ti)
168823508a96SMike Snitzer {
168955a62eefSAlasdair G Kergon 	return ti->split_discard_bios;
169023508a96SMike Snitzer }
169123508a96SMike Snitzer 
169214fe594dSAlasdair G Kergon static int __send_changing_extent_only(struct clone_info *ci,
169355a62eefSAlasdair G Kergon 				       get_num_bios_fn get_num_bios,
169423508a96SMike Snitzer 				       is_split_required_fn is_split_required)
16955ae89a87SMike Snitzer {
16965ae89a87SMike Snitzer 	struct dm_target *ti;
1697e0d6609aSMikulas Patocka 	unsigned len;
169855a62eefSAlasdair G Kergon 	unsigned num_bios;
16995ae89a87SMike Snitzer 
1700a79245b3SMike Snitzer 	do {
17015ae89a87SMike Snitzer 		ti = dm_table_find_target(ci->map, ci->sector);
17025ae89a87SMike Snitzer 		if (!dm_target_is_valid(ti))
17035ae89a87SMike Snitzer 			return -EIO;
17045ae89a87SMike Snitzer 
17055ae89a87SMike Snitzer 		/*
170623508a96SMike Snitzer 		 * Even though the device advertised support for this type of
170723508a96SMike Snitzer 		 * request, that does not mean every target supports it, and
1708936688d7SMike Snitzer 		 * reconfiguration might also have changed that since the
17095ae89a87SMike Snitzer 		 * check was performed.
17105ae89a87SMike Snitzer 		 */
171155a62eefSAlasdair G Kergon 		num_bios = get_num_bios ? get_num_bios(ti) : 0;
171255a62eefSAlasdair G Kergon 		if (!num_bios)
17135ae89a87SMike Snitzer 			return -EOPNOTSUPP;
17145ae89a87SMike Snitzer 
171523508a96SMike Snitzer 		if (is_split_required && !is_split_required(ti))
1716e0d6609aSMikulas Patocka 			len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
17177acf0277SMikulas Patocka 		else
1718e0d6609aSMikulas Patocka 			len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti));
17195ae89a87SMike Snitzer 
17201dd40c3eSMikulas Patocka 		__send_duplicate_bios(ci, ti, num_bios, &len);
17215ae89a87SMike Snitzer 
1722a79245b3SMike Snitzer 		ci->sector += len;
1723a79245b3SMike Snitzer 	} while (ci->sector_count -= len);
17245ae89a87SMike Snitzer 
17255ae89a87SMike Snitzer 	return 0;
17265ae89a87SMike Snitzer }
17275ae89a87SMike Snitzer 
172814fe594dSAlasdair G Kergon static int __send_discard(struct clone_info *ci)
172923508a96SMike Snitzer {
173014fe594dSAlasdair G Kergon 	return __send_changing_extent_only(ci, get_num_discard_bios,
173123508a96SMike Snitzer 					   is_split_required_for_discard);
173223508a96SMike Snitzer }
173323508a96SMike Snitzer 
173414fe594dSAlasdair G Kergon static int __send_write_same(struct clone_info *ci)
173523508a96SMike Snitzer {
173614fe594dSAlasdair G Kergon 	return __send_changing_extent_only(ci, get_num_write_same_bios, NULL);
173723508a96SMike Snitzer }
173823508a96SMike Snitzer 
1739e4c93811SAlasdair G Kergon /*
1740e4c93811SAlasdair G Kergon  * Select the correct strategy for processing a non-flush bio.
1741e4c93811SAlasdair G Kergon  */
1742e4c93811SAlasdair G Kergon static int __split_and_process_non_flush(struct clone_info *ci)
1743e4c93811SAlasdair G Kergon {
1744e4c93811SAlasdair G Kergon 	struct bio *bio = ci->bio;
1745e4c93811SAlasdair G Kergon 	struct dm_target *ti;
17461c3b13e6SKent Overstreet 	unsigned len;
1747c80914e8SMike Snitzer 	int r;
1748e4c93811SAlasdair G Kergon 
1749e4c93811SAlasdair G Kergon 	if (unlikely(bio->bi_rw & REQ_DISCARD))
1750e4c93811SAlasdair G Kergon 		return __send_discard(ci);
1751e4c93811SAlasdair G Kergon 	else if (unlikely(bio->bi_rw & REQ_WRITE_SAME))
1752e4c93811SAlasdair G Kergon 		return __send_write_same(ci);
1753e4c93811SAlasdair G Kergon 
1754e4c93811SAlasdair G Kergon 	ti = dm_table_find_target(ci->map, ci->sector);
1755e4c93811SAlasdair G Kergon 	if (!dm_target_is_valid(ti))
1756e4c93811SAlasdair G Kergon 		return -EIO;
1757e4c93811SAlasdair G Kergon 
17581c3b13e6SKent Overstreet 	len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);
1759e4c93811SAlasdair G Kergon 
1760c80914e8SMike Snitzer 	r = __clone_and_map_data_bio(ci, ti, ci->sector, &len);
1761c80914e8SMike Snitzer 	if (r < 0)
1762c80914e8SMike Snitzer 		return r;
1763e4c93811SAlasdair G Kergon 
1764e4c93811SAlasdair G Kergon 	ci->sector += len;
1765e4c93811SAlasdair G Kergon 	ci->sector_count -= len;
1766e4c93811SAlasdair G Kergon 
1767e4c93811SAlasdair G Kergon 	return 0;
1768e4c93811SAlasdair G Kergon }
1769e4c93811SAlasdair G Kergon 
1770e4c93811SAlasdair G Kergon /*
177114fe594dSAlasdair G Kergon  * Entry point to split a bio into clones and submit them to the targets.
17721da177e4SLinus Torvalds  */
177383d5e5b0SMikulas Patocka static void __split_and_process_bio(struct mapped_device *md,
177483d5e5b0SMikulas Patocka 				    struct dm_table *map, struct bio *bio)
17751da177e4SLinus Torvalds {
17761da177e4SLinus Torvalds 	struct clone_info ci;
1777512875bdSJun'ichi Nomura 	int error = 0;
17781da177e4SLinus Torvalds 
177983d5e5b0SMikulas Patocka 	if (unlikely(!map)) {
1780f0b9a450SMikulas Patocka 		bio_io_error(bio);
1781f0b9a450SMikulas Patocka 		return;
1782f0b9a450SMikulas Patocka 	}
1783692d0eb9SMikulas Patocka 
178483d5e5b0SMikulas Patocka 	ci.map = map;
17851da177e4SLinus Torvalds 	ci.md = md;
17861da177e4SLinus Torvalds 	ci.io = alloc_io(md);
17871da177e4SLinus Torvalds 	ci.io->error = 0;
17881da177e4SLinus Torvalds 	atomic_set(&ci.io->io_count, 1);
17891da177e4SLinus Torvalds 	ci.io->bio = bio;
17901da177e4SLinus Torvalds 	ci.io->md = md;
1791f88fb981SKiyoshi Ueda 	spin_lock_init(&ci.io->endio_lock);
17924f024f37SKent Overstreet 	ci.sector = bio->bi_iter.bi_sector;
17931da177e4SLinus Torvalds 
17943eaf840eSJun'ichi "Nick" Nomura 	start_io_acct(ci.io);
1795bd2a49b8SAlasdair G Kergon 
1796b372d360SMike Snitzer 	if (bio->bi_rw & REQ_FLUSH) {
1797b372d360SMike Snitzer 		ci.bio = &ci.md->flush_bio;
1798b372d360SMike Snitzer 		ci.sector_count = 0;
179914fe594dSAlasdair G Kergon 		error = __send_empty_flush(&ci);
1800b372d360SMike Snitzer 		/* dec_pending submits any data associated with flush */
1801b372d360SMike Snitzer 	} else {
18026a8736d1STejun Heo 		ci.bio = bio;
1803f6fccb12SMilan Broz 		ci.sector_count = bio_sectors(bio);
1804512875bdSJun'ichi Nomura 		while (ci.sector_count && !error)
180514fe594dSAlasdair G Kergon 			error = __split_and_process_non_flush(&ci);
1806d87f4c14STejun Heo 	}
18071da177e4SLinus Torvalds 
18081da177e4SLinus Torvalds 	/* drop the extra reference count */
1809512875bdSJun'ichi Nomura 	dec_pending(ci.io, error);
18109e4e5f87SMilan Broz }
18119e4e5f87SMilan Broz /*-----------------------------------------------------------------
18121da177e4SLinus Torvalds  * CRUD END
18131da177e4SLinus Torvalds  *---------------------------------------------------------------*/
18141da177e4SLinus Torvalds 
18151da177e4SLinus Torvalds /*
18161da177e4SLinus Torvalds  * The request function that just remaps the bio built up by
18171da177e4SLinus Torvalds  * dm_merge_bvec.
18181da177e4SLinus Torvalds  */
1819dece1635SJens Axboe static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
18201da177e4SLinus Torvalds {
182112f03a49SKevin Corry 	int rw = bio_data_dir(bio);
18221da177e4SLinus Torvalds 	struct mapped_device *md = q->queuedata;
182383d5e5b0SMikulas Patocka 	int srcu_idx;
182483d5e5b0SMikulas Patocka 	struct dm_table *map;
18251da177e4SLinus Torvalds 
182683d5e5b0SMikulas Patocka 	map = dm_get_live_table(md, &srcu_idx);
18271da177e4SLinus Torvalds 
182818c0b223SGu Zheng 	generic_start_io_acct(rw, bio_sectors(bio), &dm_disk(md)->part0);
182912f03a49SKevin Corry 
18306a8736d1STejun Heo 	/* if we're suspended, we have to queue this io for later */
18316a8736d1STejun Heo 	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
183283d5e5b0SMikulas Patocka 		dm_put_live_table(md, srcu_idx);
18331da177e4SLinus Torvalds 
18346a8736d1STejun Heo 		if (bio_rw(bio) != READA)
183592c63902SMikulas Patocka 			queue_io(md, bio);
18366a8736d1STejun Heo 		else
18376a8736d1STejun Heo 			bio_io_error(bio);
1838dece1635SJens Axboe 		return BLK_QC_T_NONE;
18391da177e4SLinus Torvalds 	}
18401da177e4SLinus Torvalds 
184183d5e5b0SMikulas Patocka 	__split_and_process_bio(md, map, bio);
184283d5e5b0SMikulas Patocka 	dm_put_live_table(md, srcu_idx);
1843dece1635SJens Axboe 	return BLK_QC_T_NONE;
1844cec47e3dSKiyoshi Ueda }
1845cec47e3dSKiyoshi Ueda 
1846fd2ed4d2SMikulas Patocka int dm_request_based(struct mapped_device *md)
1847cec47e3dSKiyoshi Ueda {
1848cec47e3dSKiyoshi Ueda 	return blk_queue_stackable(md->queue);
1849cec47e3dSKiyoshi Ueda }
1850cec47e3dSKiyoshi Ueda 
1851466d89a6SKeith Busch static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
1852cec47e3dSKiyoshi Ueda {
1853cec47e3dSKiyoshi Ueda 	int r;
1854cec47e3dSKiyoshi Ueda 
1855466d89a6SKeith Busch 	if (blk_queue_io_stat(clone->q))
1856466d89a6SKeith Busch 		clone->cmd_flags |= REQ_IO_STAT;
1857cec47e3dSKiyoshi Ueda 
1858466d89a6SKeith Busch 	clone->start_time = jiffies;
1859466d89a6SKeith Busch 	r = blk_insert_cloned_request(clone->q, clone);
1860cec47e3dSKiyoshi Ueda 	if (r)
1861466d89a6SKeith Busch 		/* must complete clone in terms of original request */
1862cec47e3dSKiyoshi Ueda 		dm_complete_request(rq, r);
1863cec47e3dSKiyoshi Ueda }
1864cec47e3dSKiyoshi Ueda 
186578d8e58aSMike Snitzer static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
186678d8e58aSMike Snitzer 				 void *data)
1867cec47e3dSKiyoshi Ueda {
186878d8e58aSMike Snitzer 	struct dm_rq_target_io *tio = data;
186978d8e58aSMike Snitzer 	struct dm_rq_clone_bio_info *info =
187078d8e58aSMike Snitzer 		container_of(bio, struct dm_rq_clone_bio_info, clone);
187178d8e58aSMike Snitzer 
187278d8e58aSMike Snitzer 	info->orig = bio_orig;
187378d8e58aSMike Snitzer 	info->tio = tio;
187478d8e58aSMike Snitzer 	bio->bi_end_io = end_clone_bio;
187578d8e58aSMike Snitzer 
187678d8e58aSMike Snitzer 	return 0;
187778d8e58aSMike Snitzer }
187878d8e58aSMike Snitzer 
187978d8e58aSMike Snitzer static int setup_clone(struct request *clone, struct request *rq,
188078d8e58aSMike Snitzer 		       struct dm_rq_target_io *tio, gfp_t gfp_mask)
188178d8e58aSMike Snitzer {
188278d8e58aSMike Snitzer 	int r;
188378d8e58aSMike Snitzer 
188478d8e58aSMike Snitzer 	r = blk_rq_prep_clone(clone, rq, tio->md->bs, gfp_mask,
188578d8e58aSMike Snitzer 			      dm_rq_bio_constructor, tio);
188678d8e58aSMike Snitzer 	if (r)
188778d8e58aSMike Snitzer 		return r;
188878d8e58aSMike Snitzer 
188978d8e58aSMike Snitzer 	clone->cmd = rq->cmd;
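	/*
	 * The clone shares the original request's command and sense
	 * buffers; dm_end_request() only copies the result lengths back.
	 */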
189078d8e58aSMike Snitzer 	clone->cmd_len = rq->cmd_len;
189178d8e58aSMike Snitzer 	clone->sense = rq->sense;
1892cec47e3dSKiyoshi Ueda 	clone->end_io = end_clone_request;
1893cec47e3dSKiyoshi Ueda 	clone->end_io_data = tio;
189478d8e58aSMike Snitzer 
18951ae49ea2SMike Snitzer 	tio->clone = clone;
189678d8e58aSMike Snitzer 
189778d8e58aSMike Snitzer 	return 0;
1898cec47e3dSKiyoshi Ueda }
1899cec47e3dSKiyoshi Ueda 
1900eca7ee6dSMike Snitzer static struct request *clone_old_rq(struct request *rq, struct mapped_device *md,
19011ae49ea2SMike Snitzer 				    struct dm_rq_target_io *tio, gfp_t gfp_mask)
19026facdaffSKiyoshi Ueda {
190302233342SMike Snitzer 	/*
1904c5248f79SMike Snitzer 	 * Create a clone for use with the .request_fn request_queue.
190502233342SMike Snitzer 	 */
190602233342SMike Snitzer 	struct request *clone;
19071ae49ea2SMike Snitzer 
1908eca7ee6dSMike Snitzer 	clone = alloc_old_clone_request(md, gfp_mask);
19091ae49ea2SMike Snitzer 	if (!clone)
19101ae49ea2SMike Snitzer 		return NULL;
19111ae49ea2SMike Snitzer 
19121ae49ea2SMike Snitzer 	blk_rq_init(NULL, clone);
191378d8e58aSMike Snitzer 	if (setup_clone(clone, rq, tio, gfp_mask)) {
191478d8e58aSMike Snitzer 		/* -ENOMEM */
1915eca7ee6dSMike Snitzer 		free_old_clone_request(md, clone);
191678d8e58aSMike Snitzer 		return NULL;
191778d8e58aSMike Snitzer 	}
19181ae49ea2SMike Snitzer 
19191ae49ea2SMike Snitzer 	return clone;
19201ae49ea2SMike Snitzer }
19211ae49ea2SMike Snitzer 
19222eb6e1e3SKeith Busch static void map_tio_request(struct kthread_work *work);
19232eb6e1e3SKeith Busch 
1924bfebd1cdSMike Snitzer static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
1925bfebd1cdSMike Snitzer 		     struct mapped_device *md)
1926bfebd1cdSMike Snitzer {
1927bfebd1cdSMike Snitzer 	tio->md = md;
1928bfebd1cdSMike Snitzer 	tio->ti = NULL;
1929bfebd1cdSMike Snitzer 	tio->clone = NULL;
1930bfebd1cdSMike Snitzer 	tio->orig = rq;
1931bfebd1cdSMike Snitzer 	tio->error = 0;
1932591ddcfcSMike Snitzer 	/*
1933591ddcfcSMike Snitzer 	 * Avoid initializing info for blk-mq; it passes
1934591ddcfcSMike Snitzer 	 * target-specific data through info.ptr
1935591ddcfcSMike Snitzer 	 * (see: dm_mq_init_request)
1936591ddcfcSMike Snitzer 	 */
1937591ddcfcSMike Snitzer 	if (!md->init_tio_pdu)
1938bfebd1cdSMike Snitzer 		memset(&tio->info, 0, sizeof(tio->info));
193902233342SMike Snitzer 	if (md->kworker_task)
1940bfebd1cdSMike Snitzer 		init_kthread_work(&tio->work, map_tio_request);
1941bfebd1cdSMike Snitzer }
1942bfebd1cdSMike Snitzer 
1943eca7ee6dSMike Snitzer static struct dm_rq_target_io *dm_old_prep_tio(struct request *rq,
1944eca7ee6dSMike Snitzer 					       struct mapped_device *md,
1945eca7ee6dSMike Snitzer 					       gfp_t gfp_mask)
19466facdaffSKiyoshi Ueda {
19476facdaffSKiyoshi Ueda 	struct dm_rq_target_io *tio;
1948e5863d9aSMike Snitzer 	int srcu_idx;
1949e5863d9aSMike Snitzer 	struct dm_table *table;
19506facdaffSKiyoshi Ueda 
1951eca7ee6dSMike Snitzer 	tio = alloc_old_rq_tio(md, gfp_mask);
19526facdaffSKiyoshi Ueda 	if (!tio)
19536facdaffSKiyoshi Ueda 		return NULL;
19546facdaffSKiyoshi Ueda 
1955bfebd1cdSMike Snitzer 	init_tio(tio, rq, md);
19566facdaffSKiyoshi Ueda 
1957e5863d9aSMike Snitzer 	table = dm_get_live_table(md, &srcu_idx);
1958eca7ee6dSMike Snitzer 	/*
1959eca7ee6dSMike Snitzer 	 * Must clone a request if this .request_fn DM device
1960eca7ee6dSMike Snitzer 	 * is stacked on .request_fn device(s).
1961eca7ee6dSMike Snitzer 	 */
1962e5863d9aSMike Snitzer 	if (!dm_table_mq_request_based(table)) {
1963eca7ee6dSMike Snitzer 		if (!clone_old_rq(rq, md, tio, gfp_mask)) {
1964e5863d9aSMike Snitzer 			dm_put_live_table(md, srcu_idx);
1965eca7ee6dSMike Snitzer 			free_old_rq_tio(tio);
19666facdaffSKiyoshi Ueda 			return NULL;
19676facdaffSKiyoshi Ueda 		}
1968e5863d9aSMike Snitzer 	}
1969e5863d9aSMike Snitzer 	dm_put_live_table(md, srcu_idx);
19706facdaffSKiyoshi Ueda 
1971466d89a6SKeith Busch 	return tio;
19726facdaffSKiyoshi Ueda }
19736facdaffSKiyoshi Ueda 
1974cec47e3dSKiyoshi Ueda /*
1975cec47e3dSKiyoshi Ueda  * Called with the queue lock held.
1976cec47e3dSKiyoshi Ueda  */
1977eca7ee6dSMike Snitzer static int dm_old_prep_fn(struct request_queue *q, struct request *rq)
1978cec47e3dSKiyoshi Ueda {
1979cec47e3dSKiyoshi Ueda 	struct mapped_device *md = q->queuedata;
1980466d89a6SKeith Busch 	struct dm_rq_target_io *tio;
1981cec47e3dSKiyoshi Ueda 
1982cec47e3dSKiyoshi Ueda 	if (unlikely(rq->special)) {
1983cec47e3dSKiyoshi Ueda 		DMWARN("Already has something in rq->special.");
1984cec47e3dSKiyoshi Ueda 		return BLKPREP_KILL;
1985cec47e3dSKiyoshi Ueda 	}
1986cec47e3dSKiyoshi Ueda 
1987eca7ee6dSMike Snitzer 	tio = dm_old_prep_tio(rq, md, GFP_ATOMIC);
1988466d89a6SKeith Busch 	if (!tio)
1989cec47e3dSKiyoshi Ueda 		return BLKPREP_DEFER;
1990cec47e3dSKiyoshi Ueda 
1991466d89a6SKeith Busch 	rq->special = tio;
1992cec47e3dSKiyoshi Ueda 	rq->cmd_flags |= REQ_DONTPREP;
1993cec47e3dSKiyoshi Ueda 
1994cec47e3dSKiyoshi Ueda 	return BLKPREP_OK;
1995cec47e3dSKiyoshi Ueda }
1996cec47e3dSKiyoshi Ueda 
19979eef87daSKiyoshi Ueda /*
19989eef87daSKiyoshi Ueda  * Returns:
1999e5863d9aSMike Snitzer  * 0                : the request has been processed
2000e5863d9aSMike Snitzer  * DM_MAPIO_REQUEUE : the original request needs to be requeued
2001e5863d9aSMike Snitzer  * < 0              : the request was completed due to failure
20029eef87daSKiyoshi Ueda  */
2003bfebd1cdSMike Snitzer static int map_request(struct dm_rq_target_io *tio, struct request *rq,
2004cec47e3dSKiyoshi Ueda 		       struct mapped_device *md)
2005cec47e3dSKiyoshi Ueda {
2006e5863d9aSMike Snitzer 	int r;
2007bfebd1cdSMike Snitzer 	struct dm_target *ti = tio->ti;
2008e5863d9aSMike Snitzer 	struct request *clone = NULL;
2009cec47e3dSKiyoshi Ueda 
2010e5863d9aSMike Snitzer 	if (tio->clone) {
2011e5863d9aSMike Snitzer 		clone = tio->clone;
2012cec47e3dSKiyoshi Ueda 		r = ti->type->map_rq(ti, clone, &tio->info);
2013e5863d9aSMike Snitzer 	} else {
2014e5863d9aSMike Snitzer 		r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
2015e5863d9aSMike Snitzer 		if (r < 0) {
2016e5863d9aSMike Snitzer 			/* The target wants to complete the I/O */
2017e5863d9aSMike Snitzer 			dm_kill_unmapped_request(rq, r);
2018e5863d9aSMike Snitzer 			return r;
2019e5863d9aSMike Snitzer 		}
20203a140755SJunichi Nomura 		if (r != DM_MAPIO_REMAPPED)
20213a140755SJunichi Nomura 			return r;
202278d8e58aSMike Snitzer 		if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
202378d8e58aSMike Snitzer 			/* -ENOMEM */
202478d8e58aSMike Snitzer 			ti->type->release_clone_rq(clone);
202578d8e58aSMike Snitzer 			return DM_MAPIO_REQUEUE;
202678d8e58aSMike Snitzer 		}
2027e5863d9aSMike Snitzer 	}
2028e5863d9aSMike Snitzer 
2029cec47e3dSKiyoshi Ueda 	switch (r) {
2030cec47e3dSKiyoshi Ueda 	case DM_MAPIO_SUBMITTED:
2031cec47e3dSKiyoshi Ueda 		/* The target has taken the I/O to submit by itself later */
2032cec47e3dSKiyoshi Ueda 		break;
2033cec47e3dSKiyoshi Ueda 	case DM_MAPIO_REMAPPED:
2034cec47e3dSKiyoshi Ueda 		/* The target has remapped the I/O so dispatch it */
20356db4ccd6SJun'ichi Nomura 		trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
2036466d89a6SKeith Busch 				     blk_rq_pos(rq));
2037466d89a6SKeith Busch 		dm_dispatch_clone_request(clone, rq);
2038cec47e3dSKiyoshi Ueda 		break;
2039cec47e3dSKiyoshi Ueda 	case DM_MAPIO_REQUEUE:
2040cec47e3dSKiyoshi Ueda 		/* The target wants to requeue the I/O */
20412d76fff1SMike Snitzer 		dm_requeue_original_request(md, tio->orig);
2042cec47e3dSKiyoshi Ueda 		break;
2043cec47e3dSKiyoshi Ueda 	default:
2044cec47e3dSKiyoshi Ueda 		if (r > 0) {
2045cec47e3dSKiyoshi Ueda 			DMWARN("unimplemented target map return value: %d", r);
2046cec47e3dSKiyoshi Ueda 			BUG();
2047cec47e3dSKiyoshi Ueda 		}
2048cec47e3dSKiyoshi Ueda 
2049cec47e3dSKiyoshi Ueda 		/* The target wants to complete the I/O */
2050466d89a6SKeith Busch 		dm_kill_unmapped_request(rq, r);
2051e5863d9aSMike Snitzer 		return r;
2052cec47e3dSKiyoshi Ueda 	}
20539eef87daSKiyoshi Ueda 
2054e5863d9aSMike Snitzer 	return 0;
2055cec47e3dSKiyoshi Ueda }
2056cec47e3dSKiyoshi Ueda 
20572eb6e1e3SKeith Busch static void map_tio_request(struct kthread_work *work)
2058ba1cbad9SMike Snitzer {
20592eb6e1e3SKeith Busch 	struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);
2060e5863d9aSMike Snitzer 	struct request *rq = tio->orig;
2061e5863d9aSMike Snitzer 	struct mapped_device *md = tio->md;
2062ba1cbad9SMike Snitzer 
2063bfebd1cdSMike Snitzer 	if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE)
20642d76fff1SMike Snitzer 		dm_requeue_original_request(md, rq);
20652eb6e1e3SKeith Busch }
20662eb6e1e3SKeith Busch 
2067466d89a6SKeith Busch static void dm_start_request(struct mapped_device *md, struct request *orig)
2068ba1cbad9SMike Snitzer {
2069bfebd1cdSMike Snitzer 	if (!orig->q->mq_ops)
2070ba1cbad9SMike Snitzer 		blk_start_request(orig);
2071bfebd1cdSMike Snitzer 	else
2072bfebd1cdSMike Snitzer 		blk_mq_start_request(orig);
2073466d89a6SKeith Busch 	atomic_inc(&md->pending[rq_data_dir(orig)]);
2074ba1cbad9SMike Snitzer 
20750ce65797SMike Snitzer 	if (md->seq_rq_merge_deadline_usecs) {
2076de3ec86dSMike Snitzer 		md->last_rq_pos = rq_end_sector(orig);
2077de3ec86dSMike Snitzer 		md->last_rq_rw = rq_data_dir(orig);
20780ce65797SMike Snitzer 		md->last_rq_start_time = ktime_get();
20790ce65797SMike Snitzer 	}
2080de3ec86dSMike Snitzer 
2081e262f347SMikulas Patocka 	if (unlikely(dm_stats_used(&md->stats))) {
2082e262f347SMikulas Patocka 		struct dm_rq_target_io *tio = tio_from_request(orig);
2083e262f347SMikulas Patocka 		tio->duration_jiffies = jiffies;
2084e262f347SMikulas Patocka 		tio->n_sectors = blk_rq_sectors(orig);
2085e262f347SMikulas Patocka 		dm_stats_account_io(&md->stats, orig->cmd_flags, blk_rq_pos(orig),
2086e262f347SMikulas Patocka 				    tio->n_sectors, false, 0, &tio->stats_aux);
2087e262f347SMikulas Patocka 	}
2088e262f347SMikulas Patocka 
2089ba1cbad9SMike Snitzer 	/*
2090ba1cbad9SMike Snitzer 	 * Hold the md reference here for the in-flight I/O.
2091ba1cbad9SMike Snitzer 	 * We can't rely on the reference count taken by the device opener,
2092ba1cbad9SMike Snitzer 	 * because the device may be closed during request completion,
2093ba1cbad9SMike Snitzer 	 * once all bios are completed.
2094ba1cbad9SMike Snitzer 	 * See the comment in rq_completed() too.
2095ba1cbad9SMike Snitzer 	 */
2096ba1cbad9SMike Snitzer 	dm_get(md);
2097ba1cbad9SMike Snitzer }
2098ba1cbad9SMike Snitzer 
20990ce65797SMike Snitzer #define MAX_SEQ_RQ_MERGE_DEADLINE_USECS 100000
21000ce65797SMike Snitzer 
21010ce65797SMike Snitzer ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
21020ce65797SMike Snitzer {
21030ce65797SMike Snitzer 	return sprintf(buf, "%u\n", md->seq_rq_merge_deadline_usecs);
21040ce65797SMike Snitzer }
21050ce65797SMike Snitzer 
21060ce65797SMike Snitzer ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
21070ce65797SMike Snitzer 						     const char *buf, size_t count)
21080ce65797SMike Snitzer {
21090ce65797SMike Snitzer 	unsigned deadline;
21100ce65797SMike Snitzer 
211117e149b8SMike Snitzer 	if (!dm_request_based(md) || md->use_blk_mq)
21120ce65797SMike Snitzer 		return count;
21130ce65797SMike Snitzer 
21140ce65797SMike Snitzer 	if (kstrtouint(buf, 10, &deadline))
21150ce65797SMike Snitzer 		return -EINVAL;
21160ce65797SMike Snitzer 
21170ce65797SMike Snitzer 	if (deadline > MAX_SEQ_RQ_MERGE_DEADLINE_USECS)
21180ce65797SMike Snitzer 		deadline = MAX_SEQ_RQ_MERGE_DEADLINE_USECS;
21190ce65797SMike Snitzer 
21200ce65797SMike Snitzer 	md->seq_rq_merge_deadline_usecs = deadline;
21210ce65797SMike Snitzer 
21220ce65797SMike Snitzer 	return count;
21230ce65797SMike Snitzer }
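/*
 * Example usage (assumed sysfs path; dm device attributes live under
 * /sys/block/dm-N/dm/):
 *
 *   echo 100 > /sys/block/dm-0/dm/rq_based_seq_io_merge_deadline
 *
 * allows up to 100 microseconds for sequential I/O to be merged before
 * a peeked request is dispatched; writing 0 disables the deadline.
 */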
21240ce65797SMike Snitzer 
21250ce65797SMike Snitzer static bool dm_request_peeked_before_merge_deadline(struct mapped_device *md)
21260ce65797SMike Snitzer {
21270ce65797SMike Snitzer 	ktime_t kt_deadline;
21280ce65797SMike Snitzer 
21290ce65797SMike Snitzer 	if (!md->seq_rq_merge_deadline_usecs)
21300ce65797SMike Snitzer 		return false;
21310ce65797SMike Snitzer 
21320ce65797SMike Snitzer 	kt_deadline = ns_to_ktime((u64)md->seq_rq_merge_deadline_usecs * NSEC_PER_USEC);
21330ce65797SMike Snitzer 	kt_deadline = ktime_add_safe(md->last_rq_start_time, kt_deadline);
21340ce65797SMike Snitzer 
21350ce65797SMike Snitzer 	return !ktime_after(ktime_get(), kt_deadline);
21360ce65797SMike Snitzer }
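/*
 * Worked example (illustrative): with seq_rq_merge_deadline_usecs = 100,
 * a request peeked 40us after the previous dm_start_request() is still
 * within the deadline, so dm_request_fn() below (together with its other
 * checks) delays dispatch via blk_delay_queue() to let the elevator merge
 * a sequential successor; at 150us the deadline has passed and the
 * request is dispatched immediately.
 */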
21370ce65797SMike Snitzer 
2138cec47e3dSKiyoshi Ueda /*
2139cec47e3dSKiyoshi Ueda  * q->request_fn for request-based dm.
2140cec47e3dSKiyoshi Ueda  * Called with the queue lock held.
2141cec47e3dSKiyoshi Ueda  */
2142cec47e3dSKiyoshi Ueda static void dm_request_fn(struct request_queue *q)
2143cec47e3dSKiyoshi Ueda {
2144cec47e3dSKiyoshi Ueda 	struct mapped_device *md = q->queuedata;
2145c91852ffSMike Snitzer 	struct dm_target *ti = md->immutable_target;
2146466d89a6SKeith Busch 	struct request *rq;
21472eb6e1e3SKeith Busch 	struct dm_rq_target_io *tio;
2148c91852ffSMike Snitzer 	sector_t pos = 0;
2149c91852ffSMike Snitzer 
2150c91852ffSMike Snitzer 	if (unlikely(!ti)) {
2151c91852ffSMike Snitzer 		int srcu_idx;
2152c91852ffSMike Snitzer 		struct dm_table *map = dm_get_live_table(md, &srcu_idx);
2153c91852ffSMike Snitzer 
2154c91852ffSMike Snitzer 		ti = dm_table_find_target(map, pos);
2155c91852ffSMike Snitzer 		dm_put_live_table(md, srcu_idx);
2156c91852ffSMike Snitzer 	}
2157cec47e3dSKiyoshi Ueda 
2158cec47e3dSKiyoshi Ueda 	/*
2159b4324feeSKiyoshi Ueda 	 * For suspend, check blk_queue_stopped() and increment
2160b4324feeSKiyoshi Ueda 	 * ->pending within a single queue_lock, so that the number of
2161b4324feeSKiyoshi Ueda 	 * in-flight I/Os is not incremented after the queue is stopped
2162b4324feeSKiyoshi Ueda 	 * in dm_suspend().
2163cec47e3dSKiyoshi Ueda 	 */
21647eaceaccSJens Axboe 	while (!blk_queue_stopped(q)) {
2165cec47e3dSKiyoshi Ueda 		rq = blk_peek_request(q);
2166cec47e3dSKiyoshi Ueda 		if (!rq)
2167c91852ffSMike Snitzer 			return;
2168cec47e3dSKiyoshi Ueda 
216929e4013dSTejun Heo 		/* always use block 0 to find the target for flushes for now */
217029e4013dSTejun Heo 		pos = 0;
217129e4013dSTejun Heo 		if (!(rq->cmd_flags & REQ_FLUSH))
217229e4013dSTejun Heo 			pos = blk_rq_pos(rq);
2173d0bcb878SKiyoshi Ueda 
2174c91852ffSMike Snitzer 		if ((dm_request_peeked_before_merge_deadline(md) &&
21750ce65797SMike Snitzer 		     md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 &&
2176c91852ffSMike Snitzer 		     md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq)) ||
2177c91852ffSMike Snitzer 		    (ti->type->busy && ti->type->busy(ti))) {
2178c91852ffSMike Snitzer 			blk_delay_queue(q, HZ / 100);
2179c91852ffSMike Snitzer 			return;
2180c91852ffSMike Snitzer 		}
2181cec47e3dSKiyoshi Ueda 
2182466d89a6SKeith Busch 		dm_start_request(md, rq);
2183b4324feeSKiyoshi Ueda 
2184bfebd1cdSMike Snitzer 		tio = tio_from_request(rq);
21852eb6e1e3SKeith Busch 		/* Establish tio->ti before queuing work (map_tio_request) */
21862eb6e1e3SKeith Busch 		tio->ti = ti;
21872eb6e1e3SKeith Busch 		queue_kthread_work(&md->kworker, &tio->work);
2188052189a2SKiyoshi Ueda 		BUG_ON(!irqs_disabled());
2189cec47e3dSKiyoshi Ueda 	}
2190cec47e3dSKiyoshi Ueda }
2191cec47e3dSKiyoshi Ueda 
21921da177e4SLinus Torvalds static int dm_any_congested(void *congested_data, int bdi_bits)
21931da177e4SLinus Torvalds {
21948a57dfc6SChandra Seetharaman 	int r = bdi_bits;
21958a57dfc6SChandra Seetharaman 	struct mapped_device *md = congested_data;
21968a57dfc6SChandra Seetharaman 	struct dm_table *map;
21971da177e4SLinus Torvalds 
21981eb787ecSAlasdair G Kergon 	if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
2199e522c039SMike Snitzer 		if (dm_request_based(md)) {
2200cec47e3dSKiyoshi Ueda 			/*
2201e522c039SMike Snitzer 			 * With request-based DM we only need to check the
2202e522c039SMike Snitzer 			 * top-level queue for congestion.
2203cec47e3dSKiyoshi Ueda 			 */
2204e522c039SMike Snitzer 			r = md->queue->backing_dev_info.wb.state & bdi_bits;
2205e522c039SMike Snitzer 		} else {
2206e522c039SMike Snitzer 			map = dm_get_live_table_fast(md);
2207e522c039SMike Snitzer 			if (map)
22081da177e4SLinus Torvalds 				r = dm_table_any_congested(map, bdi_bits);
220983d5e5b0SMikulas Patocka 			dm_put_live_table_fast(md);
22108a57dfc6SChandra Seetharaman 		}
2211e522c039SMike Snitzer 	}
22128a57dfc6SChandra Seetharaman 
22131da177e4SLinus Torvalds 	return r;
22141da177e4SLinus Torvalds }
22151da177e4SLinus Torvalds 
22161da177e4SLinus Torvalds /*-----------------------------------------------------------------
22171da177e4SLinus Torvalds  * An IDR is used to keep track of allocated minor numbers.
22181da177e4SLinus Torvalds  *---------------------------------------------------------------*/
22192b06cfffSAlasdair G Kergon static void free_minor(int minor)
22201da177e4SLinus Torvalds {
2221f32c10b0SJeff Mahoney 	spin_lock(&_minor_lock);
22221da177e4SLinus Torvalds 	idr_remove(&_minor_idr, minor);
2223f32c10b0SJeff Mahoney 	spin_unlock(&_minor_lock);
22241da177e4SLinus Torvalds }
22251da177e4SLinus Torvalds 
22261da177e4SLinus Torvalds /*
22271da177e4SLinus Torvalds  * See if the device with a specific minor # is free.
22281da177e4SLinus Torvalds  */
2229cf13ab8eSFrederik Deweerdt static int specific_minor(int minor)
22301da177e4SLinus Torvalds {
2231c9d76be6STejun Heo 	int r;
22321da177e4SLinus Torvalds 
22331da177e4SLinus Torvalds 	if (minor >= (1 << MINORBITS))
22341da177e4SLinus Torvalds 		return -EINVAL;
22351da177e4SLinus Torvalds 
2236c9d76be6STejun Heo 	idr_preload(GFP_KERNEL);
2237f32c10b0SJeff Mahoney 	spin_lock(&_minor_lock);
22381da177e4SLinus Torvalds 
2239c9d76be6STejun Heo 	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);
22401da177e4SLinus Torvalds 
2241f32c10b0SJeff Mahoney 	spin_unlock(&_minor_lock);
2242c9d76be6STejun Heo 	idr_preload_end();
2243c9d76be6STejun Heo 	if (r < 0)
2244c9d76be6STejun Heo 		return r == -ENOSPC ? -EBUSY : r;
2245c9d76be6STejun Heo 	return 0;
22461da177e4SLinus Torvalds }
22471da177e4SLinus Torvalds 
2248cf13ab8eSFrederik Deweerdt static int next_free_minor(int *minor)
22491da177e4SLinus Torvalds {
2250c9d76be6STejun Heo 	int r;
22511da177e4SLinus Torvalds 
2252c9d76be6STejun Heo 	idr_preload(GFP_KERNEL);
2253f32c10b0SJeff Mahoney 	spin_lock(&_minor_lock);
22541da177e4SLinus Torvalds 
2255c9d76be6STejun Heo 	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);
22561da177e4SLinus Torvalds 
2257f32c10b0SJeff Mahoney 	spin_unlock(&_minor_lock);
2258c9d76be6STejun Heo 	idr_preload_end();
2259c9d76be6STejun Heo 	if (r < 0)
22601da177e4SLinus Torvalds 		return r;
2261c9d76be6STejun Heo 	*minor = r;
2262c9d76be6STejun Heo 	return 0;
22631da177e4SLinus Torvalds }
22641da177e4SLinus Torvalds 
226583d5cde4SAlexey Dobriyan static const struct block_device_operations dm_blk_dops;
22661da177e4SLinus Torvalds 
226753d5914fSMikulas Patocka static void dm_wq_work(struct work_struct *work);
226853d5914fSMikulas Patocka 
22694a0b4ddfSMike Snitzer static void dm_init_md_queue(struct mapped_device *md)
22704a0b4ddfSMike Snitzer {
22714a0b4ddfSMike Snitzer 	/*
22724a0b4ddfSMike Snitzer 	 * Request-based dm devices cannot be stacked on top of bio-based dm
2273bfebd1cdSMike Snitzer 	 * devices.  The type of this dm device may not have been decided yet;
22744a0b4ddfSMike Snitzer 	 * it is decided when the first table is loaded.
22754a0b4ddfSMike Snitzer 	 * To prevent problematic device stacking, clear the queue flag
22764a0b4ddfSMike Snitzer 	 * for request stacking support until then.
22774a0b4ddfSMike Snitzer 	 *
22784a0b4ddfSMike Snitzer 	 * This queue is new, so no concurrency on the queue_flags.
22794a0b4ddfSMike Snitzer 	 */
22804a0b4ddfSMike Snitzer 	queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
2281ad5f498fSMikulas Patocka 
2282ad5f498fSMikulas Patocka 	/*
2283ad5f498fSMikulas Patocka 	 * Initialize data that will only be used by a non-blk-mq DM queue
2284ad5f498fSMikulas Patocka 	 * - must do so here (in alloc_dev callchain) before queue is used
2285ad5f498fSMikulas Patocka 	 */
2286ad5f498fSMikulas Patocka 	md->queue->queuedata = md;
2287ad5f498fSMikulas Patocka 	md->queue->backing_dev_info.congested_data = md;
2288bfebd1cdSMike Snitzer }
22894a0b4ddfSMike Snitzer 
2290eca7ee6dSMike Snitzer static void dm_init_normal_md_queue(struct mapped_device *md)
2291bfebd1cdSMike Snitzer {
229217e149b8SMike Snitzer 	md->use_blk_mq = false;
2293bfebd1cdSMike Snitzer 	dm_init_md_queue(md);
2294bfebd1cdSMike Snitzer 
2295bfebd1cdSMike Snitzer 	/*
2296bfebd1cdSMike Snitzer 	 * Initialize aspects of queue that aren't relevant for blk-mq
2297bfebd1cdSMike Snitzer 	 */
22984a0b4ddfSMike Snitzer 	md->queue->backing_dev_info.congested_fn = dm_any_congested;
22994a0b4ddfSMike Snitzer 	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
23004a0b4ddfSMike Snitzer }
23014a0b4ddfSMike Snitzer 
23020f20972fSMike Snitzer static void cleanup_mapped_device(struct mapped_device *md)
23030f20972fSMike Snitzer {
23040f20972fSMike Snitzer 	if (md->wq)
23050f20972fSMike Snitzer 		destroy_workqueue(md->wq);
23060f20972fSMike Snitzer 	if (md->kworker_task)
23070f20972fSMike Snitzer 		kthread_stop(md->kworker_task);
23080f20972fSMike Snitzer 	mempool_destroy(md->io_pool);
23090f20972fSMike Snitzer 	mempool_destroy(md->rq_pool);
23100f20972fSMike Snitzer 	if (md->bs)
23110f20972fSMike Snitzer 		bioset_free(md->bs);
23120f20972fSMike Snitzer 
2313b06075a9SMikulas Patocka 	cleanup_srcu_struct(&md->io_barrier);
2314b06075a9SMikulas Patocka 
23150f20972fSMike Snitzer 	if (md->disk) {
23160f20972fSMike Snitzer 		spin_lock(&_minor_lock);
23170f20972fSMike Snitzer 		md->disk->private_data = NULL;
23180f20972fSMike Snitzer 		spin_unlock(&_minor_lock);
23190f20972fSMike Snitzer 		del_gendisk(md->disk);
23200f20972fSMike Snitzer 		put_disk(md->disk);
23210f20972fSMike Snitzer 	}
23220f20972fSMike Snitzer 
23230f20972fSMike Snitzer 	if (md->queue)
23240f20972fSMike Snitzer 		blk_cleanup_queue(md->queue);
23250f20972fSMike Snitzer 
23260f20972fSMike Snitzer 	if (md->bdev) {
23270f20972fSMike Snitzer 		bdput(md->bdev);
23280f20972fSMike Snitzer 		md->bdev = NULL;
23290f20972fSMike Snitzer 	}
23300f20972fSMike Snitzer }
23310f20972fSMike Snitzer 
23321da177e4SLinus Torvalds /*
23331da177e4SLinus Torvalds  * Allocate and initialise a blank device with a given minor.
23341da177e4SLinus Torvalds  */
23352b06cfffSAlasdair G Kergon static struct mapped_device *alloc_dev(int minor)
23361da177e4SLinus Torvalds {
2337115485e8SMike Snitzer 	int r, numa_node_id = dm_get_numa_node();
2338115485e8SMike Snitzer 	struct mapped_device *md;
2339ba61fdd1SJeff Mahoney 	void *old_md;
23401da177e4SLinus Torvalds 
2341115485e8SMike Snitzer 	md = kzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
23421da177e4SLinus Torvalds 	if (!md) {
23431da177e4SLinus Torvalds 		DMWARN("unable to allocate device, out of memory.");
23441da177e4SLinus Torvalds 		return NULL;
23451da177e4SLinus Torvalds 	}
23461da177e4SLinus Torvalds 
234710da4f79SJeff Mahoney 	if (!try_module_get(THIS_MODULE))
23486ed7ade8SMilan Broz 		goto bad_module_get;
234910da4f79SJeff Mahoney 
23501da177e4SLinus Torvalds 	/* get a minor number for the dev */
23512b06cfffSAlasdair G Kergon 	if (minor == DM_ANY_MINOR)
2352cf13ab8eSFrederik Deweerdt 		r = next_free_minor(&minor);
23532b06cfffSAlasdair G Kergon 	else
2354cf13ab8eSFrederik Deweerdt 		r = specific_minor(minor);
23551da177e4SLinus Torvalds 	if (r < 0)
23566ed7ade8SMilan Broz 		goto bad_minor;
23571da177e4SLinus Torvalds 
235883d5e5b0SMikulas Patocka 	r = init_srcu_struct(&md->io_barrier);
235983d5e5b0SMikulas Patocka 	if (r < 0)
236083d5e5b0SMikulas Patocka 		goto bad_io_barrier;
236183d5e5b0SMikulas Patocka 
2362115485e8SMike Snitzer 	md->numa_node_id = numa_node_id;
236317e149b8SMike Snitzer 	md->use_blk_mq = use_blk_mq;
2364591ddcfcSMike Snitzer 	md->init_tio_pdu = false;
2365a5664dadSMike Snitzer 	md->type = DM_TYPE_NONE;
2366e61290a4SDaniel Walker 	mutex_init(&md->suspend_lock);
2367a5664dadSMike Snitzer 	mutex_init(&md->type_lock);
236886f1152bSBenjamin Marzinski 	mutex_init(&md->table_devices_lock);
2369022c2611SMikulas Patocka 	spin_lock_init(&md->deferred_lock);
23701da177e4SLinus Torvalds 	atomic_set(&md->holders, 1);
23715c6bd75dSAlasdair G Kergon 	atomic_set(&md->open_count, 0);
23721da177e4SLinus Torvalds 	atomic_set(&md->event_nr, 0);
23737a8c3d3bSMike Anderson 	atomic_set(&md->uevent_seq, 0);
23747a8c3d3bSMike Anderson 	INIT_LIST_HEAD(&md->uevent_list);
237586f1152bSBenjamin Marzinski 	INIT_LIST_HEAD(&md->table_devices);
23767a8c3d3bSMike Anderson 	spin_lock_init(&md->uevent_lock);
23771da177e4SLinus Torvalds 
2378115485e8SMike Snitzer 	md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id);
23791da177e4SLinus Torvalds 	if (!md->queue)
23800f20972fSMike Snitzer 		goto bad;
23811da177e4SLinus Torvalds 
23824a0b4ddfSMike Snitzer 	dm_init_md_queue(md);
23839faf400fSStefan Bader 
2384115485e8SMike Snitzer 	md->disk = alloc_disk_node(1, numa_node_id);
23851da177e4SLinus Torvalds 	if (!md->disk)
23860f20972fSMike Snitzer 		goto bad;
23871da177e4SLinus Torvalds 
2388316d315bSNikanth Karthikesan 	atomic_set(&md->pending[0], 0);
2389316d315bSNikanth Karthikesan 	atomic_set(&md->pending[1], 0);
2390f0b04115SJeff Mahoney 	init_waitqueue_head(&md->wait);
239153d5914fSMikulas Patocka 	INIT_WORK(&md->work, dm_wq_work);
2392f0b04115SJeff Mahoney 	init_waitqueue_head(&md->eventq);
23932995fa78SMikulas Patocka 	init_completion(&md->kobj_holder.completion);
23942eb6e1e3SKeith Busch 	md->kworker_task = NULL;
2395f0b04115SJeff Mahoney 
23961da177e4SLinus Torvalds 	md->disk->major = _major;
23971da177e4SLinus Torvalds 	md->disk->first_minor = minor;
23981da177e4SLinus Torvalds 	md->disk->fops = &dm_blk_dops;
23991da177e4SLinus Torvalds 	md->disk->queue = md->queue;
24001da177e4SLinus Torvalds 	md->disk->private_data = md;
24011da177e4SLinus Torvalds 	sprintf(md->disk->disk_name, "dm-%d", minor);
24021da177e4SLinus Torvalds 	add_disk(md->disk);
24037e51f257SMike Anderson 	format_dev_t(md->name, MKDEV(_major, minor));
24041da177e4SLinus Torvalds 
2405670368a8STejun Heo 	md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
2406304f3f6aSMilan Broz 	if (!md->wq)
24070f20972fSMike Snitzer 		goto bad;
2408304f3f6aSMilan Broz 
240932a926daSMikulas Patocka 	md->bdev = bdget_disk(md->disk, 0);
241032a926daSMikulas Patocka 	if (!md->bdev)
24110f20972fSMike Snitzer 		goto bad;
241232a926daSMikulas Patocka 
24136a8736d1STejun Heo 	bio_init(&md->flush_bio);
24146a8736d1STejun Heo 	md->flush_bio.bi_bdev = md->bdev;
24156a8736d1STejun Heo 	md->flush_bio.bi_rw = WRITE_FLUSH;
24166a8736d1STejun Heo 
2417fd2ed4d2SMikulas Patocka 	dm_stats_init(&md->stats);
2418fd2ed4d2SMikulas Patocka 
2419ba61fdd1SJeff Mahoney 	/* Populate the mapping, nobody knows we exist yet */
2420f32c10b0SJeff Mahoney 	spin_lock(&_minor_lock);
2421ba61fdd1SJeff Mahoney 	old_md = idr_replace(&_minor_idr, md, minor);
2422f32c10b0SJeff Mahoney 	spin_unlock(&_minor_lock);
2423ba61fdd1SJeff Mahoney 
2424ba61fdd1SJeff Mahoney 	BUG_ON(old_md != MINOR_ALLOCED);
2425ba61fdd1SJeff Mahoney 
24261da177e4SLinus Torvalds 	return md;
24271da177e4SLinus Torvalds 
24280f20972fSMike Snitzer bad:
24290f20972fSMike Snitzer 	cleanup_mapped_device(md);
243083d5e5b0SMikulas Patocka bad_io_barrier:
24311da177e4SLinus Torvalds 	free_minor(minor);
24326ed7ade8SMilan Broz bad_minor:
243310da4f79SJeff Mahoney 	module_put(THIS_MODULE);
24346ed7ade8SMilan Broz bad_module_get:
24351da177e4SLinus Torvalds 	kfree(md);
24361da177e4SLinus Torvalds 	return NULL;
24371da177e4SLinus Torvalds }
24381da177e4SLinus Torvalds 
2439ae9da83fSJun'ichi Nomura static void unlock_fs(struct mapped_device *md);
2440ae9da83fSJun'ichi Nomura 
24411da177e4SLinus Torvalds static void free_dev(struct mapped_device *md)
24421da177e4SLinus Torvalds {
2443f331c029STejun Heo 	int minor = MINOR(disk_devt(md->disk));
244463d94e48SJun'ichi Nomura 
2445ae9da83fSJun'ichi Nomura 	unlock_fs(md);
24462eb6e1e3SKeith Busch 
24470f20972fSMike Snitzer 	cleanup_mapped_device(md);
24481c357a1eSMike Snitzer 	if (md->tag_set) {
24491c357a1eSMike Snitzer 		blk_mq_free_tag_set(md->tag_set);
24501c357a1eSMike Snitzer 		kfree(md->tag_set);
24511c357a1eSMike Snitzer 	}
24520f20972fSMike Snitzer 
24530f20972fSMike Snitzer 	free_table_devices(&md->table_devices);
24540f20972fSMike Snitzer 	dm_stats_cleanup(&md->stats);
245563a4f065SMike Snitzer 	free_minor(minor);
245663a4f065SMike Snitzer 
245710da4f79SJeff Mahoney 	module_put(THIS_MODULE);
24581da177e4SLinus Torvalds 	kfree(md);
24591da177e4SLinus Torvalds }
24601da177e4SLinus Torvalds 
2461e6ee8c0bSKiyoshi Ueda static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
2462e6ee8c0bSKiyoshi Ueda {
2463c0820cf5SMikulas Patocka 	struct dm_md_mempools *p = dm_table_get_md_mempools(t);
2464e6ee8c0bSKiyoshi Ueda 
24654e6e36c3SMike Snitzer 	if (md->bs) {
24664e6e36c3SMike Snitzer 		/* The md already has necessary mempools. */
24674e6e36c3SMike Snitzer 		if (dm_table_get_type(t) == DM_TYPE_BIO_BASED) {
2468c0820cf5SMikulas Patocka 			/*
246916245bdcSJun'ichi Nomura 			 * Reload bioset because front_pad may have changed
247016245bdcSJun'ichi Nomura 			 * because a different table was loaded.
2471c0820cf5SMikulas Patocka 			 */
2472c0820cf5SMikulas Patocka 			bioset_free(md->bs);
2473c0820cf5SMikulas Patocka 			md->bs = p->bs;
2474c0820cf5SMikulas Patocka 			p->bs = NULL;
2475c0820cf5SMikulas Patocka 		}
2476cbc4e3c1SMike Snitzer 		/*
24774e6e36c3SMike Snitzer 		 * There's no need to reload with request-based dm
24784e6e36c3SMike Snitzer 		 * because the size of front_pad doesn't change.
24794e6e36c3SMike Snitzer 		 * Note for the future: if the bioset were ever reloaded,
24804e6e36c3SMike Snitzer 		 * prepped requests in the queue might still refer to bios
24814e6e36c3SMike Snitzer 		 * from the old bioset, so the queue would have to be
24824e6e36c3SMike Snitzer 		 * walked to unprep them first.
2483cbc4e3c1SMike Snitzer 		 */
2484cbc4e3c1SMike Snitzer 		goto out;
2485cbc4e3c1SMike Snitzer 	}
2486cbc4e3c1SMike Snitzer 
2487cbc4e3c1SMike Snitzer 	BUG_ON(!p || md->io_pool || md->rq_pool || md->bs);
2488e6ee8c0bSKiyoshi Ueda 
2489e6ee8c0bSKiyoshi Ueda 	md->io_pool = p->io_pool;
2490e6ee8c0bSKiyoshi Ueda 	p->io_pool = NULL;
24911ae49ea2SMike Snitzer 	md->rq_pool = p->rq_pool;
24921ae49ea2SMike Snitzer 	p->rq_pool = NULL;
2493e6ee8c0bSKiyoshi Ueda 	md->bs = p->bs;
2494e6ee8c0bSKiyoshi Ueda 	p->bs = NULL;
24954e6e36c3SMike Snitzer 
2496e6ee8c0bSKiyoshi Ueda out:
249702233342SMike Snitzer 	/* mempool bind completed, no longer need any mempools in the table */
2498e6ee8c0bSKiyoshi Ueda 	dm_table_free_md_mempools(t);
2499e6ee8c0bSKiyoshi Ueda }
2500e6ee8c0bSKiyoshi Ueda 
25011da177e4SLinus Torvalds /*
25021da177e4SLinus Torvalds  * Bind a table to the device.
25031da177e4SLinus Torvalds  */
25041da177e4SLinus Torvalds static void event_callback(void *context)
25051da177e4SLinus Torvalds {
25067a8c3d3bSMike Anderson 	unsigned long flags;
25077a8c3d3bSMike Anderson 	LIST_HEAD(uevents);
25081da177e4SLinus Torvalds 	struct mapped_device *md = (struct mapped_device *) context;
25091da177e4SLinus Torvalds 
25107a8c3d3bSMike Anderson 	spin_lock_irqsave(&md->uevent_lock, flags);
25117a8c3d3bSMike Anderson 	list_splice_init(&md->uevent_list, &uevents);
25127a8c3d3bSMike Anderson 	spin_unlock_irqrestore(&md->uevent_lock, flags);
25137a8c3d3bSMike Anderson 
2514ed9e1982STejun Heo 	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
25157a8c3d3bSMike Anderson 
25161da177e4SLinus Torvalds 	atomic_inc(&md->event_nr);
25171da177e4SLinus Torvalds 	wake_up(&md->eventq);
25181da177e4SLinus Torvalds }
25191da177e4SLinus Torvalds 
2520c217649bSMike Snitzer /*
2521c217649bSMike Snitzer  * Protected by md->suspend_lock obtained by dm_swap_table().
2522c217649bSMike Snitzer  */
25234e90188bSAlasdair G Kergon static void __set_size(struct mapped_device *md, sector_t size)
25241da177e4SLinus Torvalds {
25254e90188bSAlasdair G Kergon 	set_capacity(md->disk, size);
25261da177e4SLinus Torvalds 
2527db8fef4fSMikulas Patocka 	i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
25281da177e4SLinus Torvalds }
25291da177e4SLinus Torvalds 
2530042d2a9bSAlasdair G Kergon /*
2531042d2a9bSAlasdair G Kergon  * Returns old map, which caller must destroy.
2532042d2a9bSAlasdair G Kergon  */
2533042d2a9bSAlasdair G Kergon static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
2534754c5fc7SMike Snitzer 			       struct queue_limits *limits)
25351da177e4SLinus Torvalds {
2536042d2a9bSAlasdair G Kergon 	struct dm_table *old_map;
2537165125e1SJens Axboe 	struct request_queue *q = md->queue;
25381da177e4SLinus Torvalds 	sector_t size;
25391da177e4SLinus Torvalds 
25401da177e4SLinus Torvalds 	size = dm_table_get_size(t);
25413ac51e74SDarrick J. Wong 
25423ac51e74SDarrick J. Wong 	/*
25433ac51e74SDarrick J. Wong 	 * Wipe any geometry if the size of the table changed.
25443ac51e74SDarrick J. Wong 	 */
2545fd2ed4d2SMikulas Patocka 	if (size != dm_get_size(md))
25463ac51e74SDarrick J. Wong 		memset(&md->geometry, 0, sizeof(md->geometry));
25473ac51e74SDarrick J. Wong 
25484e90188bSAlasdair G Kergon 	__set_size(md, size);
25491da177e4SLinus Torvalds 
2550cf222b37SAlasdair G Kergon 	dm_table_event_callback(t, event_callback, md);
25512ca3310eSAlasdair G Kergon 
2552e6ee8c0bSKiyoshi Ueda 	/*
2553e6ee8c0bSKiyoshi Ueda 	 * If the old table type wasn't request-based, the queue has not
2554e6ee8c0bSKiyoshi Ueda 	 * been stopped during suspension.  Stop it now to prevent I/O
2555e6ee8c0bSKiyoshi Ueda 	 * from being mapped before resume.
2556e6ee8c0bSKiyoshi Ueda 	 * This must be done before setting the queue restrictions,
2557e6ee8c0bSKiyoshi Ueda 	 * because request-based dm may start running right after they are set.
2558e6ee8c0bSKiyoshi Ueda 	 */
255916f12266SMike Snitzer 	if (dm_table_request_based(t)) {
2560eca7ee6dSMike Snitzer 		dm_stop_queue(q);
256116f12266SMike Snitzer 		/*
256216f12266SMike Snitzer 		 * Leverage the fact that request-based DM targets are
256316f12266SMike Snitzer 		 * immutable singletons and establish md->immutable_target
256416f12266SMike Snitzer 		 * - used to optimize both dm_request_fn and dm_mq_queue_rq
256516f12266SMike Snitzer 		 */
256616f12266SMike Snitzer 		md->immutable_target = dm_table_get_immutable_target(t);
256716f12266SMike Snitzer 	}
2568e6ee8c0bSKiyoshi Ueda 
2569e6ee8c0bSKiyoshi Ueda 	__bind_mempools(md, t);
2570e6ee8c0bSKiyoshi Ueda 
2571a12f5d48SEric Dumazet 	old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
25721d3aa6f6SMike Snitzer 	rcu_assign_pointer(md->map, (void *)t);
257336a0456fSAlasdair G Kergon 	md->immutable_target_type = dm_table_get_immutable_target_type(t);
257436a0456fSAlasdair G Kergon 
2575754c5fc7SMike Snitzer 	dm_table_set_restrictions(t, q, limits);
257641abc4e1SHannes Reinecke 	if (old_map)
257783d5e5b0SMikulas Patocka 		dm_sync_table(md);
25782ca3310eSAlasdair G Kergon 
2579042d2a9bSAlasdair G Kergon 	return old_map;
25801da177e4SLinus Torvalds }
25811da177e4SLinus Torvalds 
2582a7940155SAlasdair G Kergon /*
2583a7940155SAlasdair G Kergon  * Returns unbound table for the caller to free.
2584a7940155SAlasdair G Kergon  */
2585a7940155SAlasdair G Kergon static struct dm_table *__unbind(struct mapped_device *md)
25861da177e4SLinus Torvalds {
2587a12f5d48SEric Dumazet 	struct dm_table *map = rcu_dereference_protected(md->map, 1);
25881da177e4SLinus Torvalds 
25891da177e4SLinus Torvalds 	if (!map)
2590a7940155SAlasdair G Kergon 		return NULL;
25911da177e4SLinus Torvalds 
25921da177e4SLinus Torvalds 	dm_table_event_callback(map, NULL, NULL);
25939cdb8520SMonam Agarwal 	RCU_INIT_POINTER(md->map, NULL);
259483d5e5b0SMikulas Patocka 	dm_sync_table(md);
2595a7940155SAlasdair G Kergon 
2596a7940155SAlasdair G Kergon 	return map;
25971da177e4SLinus Torvalds }
25981da177e4SLinus Torvalds 
25991da177e4SLinus Torvalds /*
26001da177e4SLinus Torvalds  * Constructor for a new device.
26011da177e4SLinus Torvalds  */
26022b06cfffSAlasdair G Kergon int dm_create(int minor, struct mapped_device **result)
26031da177e4SLinus Torvalds {
26041da177e4SLinus Torvalds 	struct mapped_device *md;
26051da177e4SLinus Torvalds 
26062b06cfffSAlasdair G Kergon 	md = alloc_dev(minor);
26071da177e4SLinus Torvalds 	if (!md)
26081da177e4SLinus Torvalds 		return -ENXIO;
26091da177e4SLinus Torvalds 
2610784aae73SMilan Broz 	dm_sysfs_init(md);
2611784aae73SMilan Broz 
26121da177e4SLinus Torvalds 	*result = md;
26131da177e4SLinus Torvalds 	return 0;
26141da177e4SLinus Torvalds }
26151da177e4SLinus Torvalds 
2616a5664dadSMike Snitzer /*
2617a5664dadSMike Snitzer  * Functions to manage md->type.
2618a5664dadSMike Snitzer  * All are required to hold md->type_lock.
2619a5664dadSMike Snitzer  */
2620a5664dadSMike Snitzer void dm_lock_md_type(struct mapped_device *md)
2621a5664dadSMike Snitzer {
2622a5664dadSMike Snitzer 	mutex_lock(&md->type_lock);
2623a5664dadSMike Snitzer }
2624a5664dadSMike Snitzer 
2625a5664dadSMike Snitzer void dm_unlock_md_type(struct mapped_device *md)
2626a5664dadSMike Snitzer {
2627a5664dadSMike Snitzer 	mutex_unlock(&md->type_lock);
2628a5664dadSMike Snitzer }
2629a5664dadSMike Snitzer 
2630a5664dadSMike Snitzer void dm_set_md_type(struct mapped_device *md, unsigned type)
2631a5664dadSMike Snitzer {
263200c4fc3bSMike Snitzer 	BUG_ON(!mutex_is_locked(&md->type_lock));
2633a5664dadSMike Snitzer 	md->type = type;
2634a5664dadSMike Snitzer }
2635a5664dadSMike Snitzer 
2636a5664dadSMike Snitzer unsigned dm_get_md_type(struct mapped_device *md)
2637a5664dadSMike Snitzer {
2638a5664dadSMike Snitzer 	return md->type;
2639a5664dadSMike Snitzer }
2640a5664dadSMike Snitzer 
264136a0456fSAlasdair G Kergon struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
264236a0456fSAlasdair G Kergon {
264336a0456fSAlasdair G Kergon 	return md->immutable_target_type;
264436a0456fSAlasdair G Kergon }
264536a0456fSAlasdair G Kergon 
26464a0b4ddfSMike Snitzer /*
2647f84cb8a4SMike Snitzer  * The queue_limits are only valid as long as you have a reference
2648f84cb8a4SMike Snitzer  * count on 'md'.
2649f84cb8a4SMike Snitzer  */
2650f84cb8a4SMike Snitzer struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
2651f84cb8a4SMike Snitzer {
2652f84cb8a4SMike Snitzer 	BUG_ON(!atomic_read(&md->holders));
2653f84cb8a4SMike Snitzer 	return &md->queue->limits;
2654f84cb8a4SMike Snitzer }
2655f84cb8a4SMike Snitzer EXPORT_SYMBOL_GPL(dm_get_queue_limits);
2656f84cb8a4SMike Snitzer 
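/*
 * Illustrative sketch, not part of dm.c: a caller honouring the
 * reference-count requirement documented above.  The function name and
 * its dev_t parameter are hypothetical; only the dm_* calls are real.
 */
static int example_report_queue_limits(dev_t dev)
{
	struct mapped_device *md = dm_get_md(dev);	/* takes a holder reference */
	struct queue_limits *limits;

	if (!md)
		return -ENXIO;

	limits = dm_get_queue_limits(md);
	DMINFO("%s: logical_block_size=%u", dm_device_name(md),
	       limits->logical_block_size);

	dm_put(md);	/* limits must not be dereferenced past this point */
	return 0;
}
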
2657eca7ee6dSMike Snitzer static void dm_old_init_rq_based_worker_thread(struct mapped_device *md)
2658bfebd1cdSMike Snitzer {
2659bfebd1cdSMike Snitzer 	/* Initialize the request-based DM worker thread */
2660bfebd1cdSMike Snitzer 	init_kthread_worker(&md->kworker);
2661bfebd1cdSMike Snitzer 	md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
2662bfebd1cdSMike Snitzer 				       "kdmwork-%s", dm_device_name(md));
2663bfebd1cdSMike Snitzer }
2664bfebd1cdSMike Snitzer 
2665f84cb8a4SMike Snitzer /*
2666eca7ee6dSMike Snitzer  * Fully initialize a .request_fn request-based queue.
26674a0b4ddfSMike Snitzer  */
2668eca7ee6dSMike Snitzer static int dm_old_init_request_queue(struct mapped_device *md)
26694a0b4ddfSMike Snitzer {
26704a0b4ddfSMike Snitzer 	/* Fully initialize the queue */
2671e233d800SBob Liu 	if (!blk_init_allocated_queue(md->queue, dm_request_fn, NULL))
2672bfebd1cdSMike Snitzer 		return -EINVAL;
26734a0b4ddfSMike Snitzer 
26740ce65797SMike Snitzer 	/* disable dm_request_fn's merge heuristic by default */
26750ce65797SMike Snitzer 	md->seq_rq_merge_deadline_usecs = 0;
26760ce65797SMike Snitzer 
2677eca7ee6dSMike Snitzer 	dm_init_normal_md_queue(md);
26784a0b4ddfSMike Snitzer 	blk_queue_softirq_done(md->queue, dm_softirq_done);
2679eca7ee6dSMike Snitzer 	blk_queue_prep_rq(md->queue, dm_old_prep_fn);
26804a0b4ddfSMike Snitzer 
2681eca7ee6dSMike Snitzer 	dm_old_init_rq_based_worker_thread(md);
26822eb6e1e3SKeith Busch 
26834a0b4ddfSMike Snitzer 	elv_register_queue(md->queue);
26844a0b4ddfSMike Snitzer 
2685bfebd1cdSMike Snitzer 	return 0;
2686bfebd1cdSMike Snitzer }
2687bfebd1cdSMike Snitzer 
2688bfebd1cdSMike Snitzer static int dm_mq_init_request(void *data, struct request *rq,
2689bfebd1cdSMike Snitzer 			      unsigned int hctx_idx, unsigned int request_idx,
2690bfebd1cdSMike Snitzer 			      unsigned int numa_node)
2691bfebd1cdSMike Snitzer {
2692bfebd1cdSMike Snitzer 	struct mapped_device *md = data;
2693bfebd1cdSMike Snitzer 	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
2694bfebd1cdSMike Snitzer 
2695bfebd1cdSMike Snitzer 	/*
2696bfebd1cdSMike Snitzer 	 * Must initialize md member of tio, otherwise it won't
2697bfebd1cdSMike Snitzer 	 * be available in dm_mq_queue_rq.
2698bfebd1cdSMike Snitzer 	 */
2699bfebd1cdSMike Snitzer 	tio->md = md;
2700bfebd1cdSMike Snitzer 
2701591ddcfcSMike Snitzer 	if (md->init_tio_pdu) {
2702591ddcfcSMike Snitzer 		/* target-specific per-io data is immediately after the tio */
2703591ddcfcSMike Snitzer 		tio->info.ptr = tio + 1;
2704591ddcfcSMike Snitzer 	}
2705591ddcfcSMike Snitzer 
2706bfebd1cdSMike Snitzer 	return 0;
2707bfebd1cdSMike Snitzer }
2708bfebd1cdSMike Snitzer 
2709bfebd1cdSMike Snitzer static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
2710bfebd1cdSMike Snitzer 			  const struct blk_mq_queue_data *bd)
2711bfebd1cdSMike Snitzer {
2712bfebd1cdSMike Snitzer 	struct request *rq = bd->rq;
2713bfebd1cdSMike Snitzer 	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
2714bfebd1cdSMike Snitzer 	struct mapped_device *md = tio->md;
271516f12266SMike Snitzer 	struct dm_target *ti = md->immutable_target;
271616f12266SMike Snitzer 
271716f12266SMike Snitzer 	if (unlikely(!ti)) {
2718bfebd1cdSMike Snitzer 		int srcu_idx;
2719bfebd1cdSMike Snitzer 		struct dm_table *map = dm_get_live_table(md, &srcu_idx);
2720bfebd1cdSMike Snitzer 
272116f12266SMike Snitzer 		ti = dm_table_find_target(map, 0);
2722bfebd1cdSMike Snitzer 		dm_put_live_table(md, srcu_idx);
2723bfebd1cdSMike Snitzer 	}
2724bfebd1cdSMike Snitzer 
2725bfebd1cdSMike Snitzer 	if (ti->type->busy && ti->type->busy(ti))
2726bfebd1cdSMike Snitzer 		return BLK_MQ_RQ_QUEUE_BUSY;
2727bfebd1cdSMike Snitzer 
2728bfebd1cdSMike Snitzer 	dm_start_request(md, rq);
2729bfebd1cdSMike Snitzer 
2730bfebd1cdSMike Snitzer 	/* Init tio using md established in .init_request */
2731bfebd1cdSMike Snitzer 	init_tio(tio, rq, md);
2732bfebd1cdSMike Snitzer 
273302233342SMike Snitzer 	/*
273402233342SMike Snitzer 	 * Establish tio->ti before queuing work (map_tio_request)
273502233342SMike Snitzer 	 * or calling map_request() directly.
273602233342SMike Snitzer 	 */
2737bfebd1cdSMike Snitzer 	tio->ti = ti;
273802233342SMike Snitzer 
273902233342SMike Snitzer 	/* Direct call is fine since .queue_rq allows allocations */
274045714fbeSMike Snitzer 	if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) {
274145714fbeSMike Snitzer 		/* Undo dm_start_request() before requeuing */
2742e262f347SMikulas Patocka 		rq_end_stats(md, rq);
274345714fbeSMike Snitzer 		rq_completed(md, rq_data_dir(rq), false);
274445714fbeSMike Snitzer 		return BLK_MQ_RQ_QUEUE_BUSY;
274545714fbeSMike Snitzer 	}
2746bfebd1cdSMike Snitzer 
2747bfebd1cdSMike Snitzer 	return BLK_MQ_RQ_QUEUE_OK;
2748bfebd1cdSMike Snitzer }
2749bfebd1cdSMike Snitzer 
2750bfebd1cdSMike Snitzer static struct blk_mq_ops dm_mq_ops = {
2751bfebd1cdSMike Snitzer 	.queue_rq = dm_mq_queue_rq,
2752bfebd1cdSMike Snitzer 	.map_queue = blk_mq_map_queue,
2753bfebd1cdSMike Snitzer 	.complete = dm_softirq_done,
2754bfebd1cdSMike Snitzer 	.init_request = dm_mq_init_request,
2755bfebd1cdSMike Snitzer };
2756bfebd1cdSMike Snitzer 
2757591ddcfcSMike Snitzer static int dm_mq_init_request_queue(struct mapped_device *md,
2758591ddcfcSMike Snitzer 				    struct dm_target *immutable_tgt)
2759bfebd1cdSMike Snitzer {
2760bfebd1cdSMike Snitzer 	struct request_queue *q;
2761bfebd1cdSMike Snitzer 	int err;
2762bfebd1cdSMike Snitzer 
2763c5248f79SMike Snitzer 	if (dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) {
2764c5248f79SMike Snitzer 		DMERR("request-based dm-mq may only be stacked on blk-mq device(s)");
2765c5248f79SMike Snitzer 		return -EINVAL;
2766c5248f79SMike Snitzer 	}
2767c5248f79SMike Snitzer 
2768115485e8SMike Snitzer 	md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id);
27691c357a1eSMike Snitzer 	if (!md->tag_set)
27701c357a1eSMike Snitzer 		return -ENOMEM;
27711c357a1eSMike Snitzer 
27721c357a1eSMike Snitzer 	md->tag_set->ops = &dm_mq_ops;
27731c357a1eSMike Snitzer 	md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
2774115485e8SMike Snitzer 	md->tag_set->numa_node = md->numa_node_id;
27751c357a1eSMike Snitzer 	md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
27761c357a1eSMike Snitzer 	md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
27771c357a1eSMike Snitzer 	md->tag_set->driver_data = md;
27781c357a1eSMike Snitzer 
27791c357a1eSMike Snitzer 	md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
2780591ddcfcSMike Snitzer 	if (immutable_tgt && immutable_tgt->per_io_data_size) {
2781591ddcfcSMike Snitzer 		/* any target-specific per-io data is immediately after the tio */
2782591ddcfcSMike Snitzer 		md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
2783591ddcfcSMike Snitzer 		md->init_tio_pdu = true;
2784591ddcfcSMike Snitzer 	}
2785bfebd1cdSMike Snitzer 
27861c357a1eSMike Snitzer 	err = blk_mq_alloc_tag_set(md->tag_set);
2787bfebd1cdSMike Snitzer 	if (err)
27881c357a1eSMike Snitzer 		goto out_kfree_tag_set;
2789bfebd1cdSMike Snitzer 
27901c357a1eSMike Snitzer 	q = blk_mq_init_allocated_queue(md->tag_set, md->queue);
2791bfebd1cdSMike Snitzer 	if (IS_ERR(q)) {
2792bfebd1cdSMike Snitzer 		err = PTR_ERR(q);
2793bfebd1cdSMike Snitzer 		goto out_tag_set;
2794bfebd1cdSMike Snitzer 	}
2795bfebd1cdSMike Snitzer 	dm_init_md_queue(md);
2796bfebd1cdSMike Snitzer 
2797bfebd1cdSMike Snitzer 	/* backfill 'mq' sysfs registration normally done in blk_register_queue */
2798bfebd1cdSMike Snitzer 	blk_mq_register_disk(md->disk);
2799bfebd1cdSMike Snitzer 
2800bfebd1cdSMike Snitzer 	return 0;
2801bfebd1cdSMike Snitzer 
2802bfebd1cdSMike Snitzer out_tag_set:
28031c357a1eSMike Snitzer 	blk_mq_free_tag_set(md->tag_set);
28041c357a1eSMike Snitzer out_kfree_tag_set:
28051c357a1eSMike Snitzer 	kfree(md->tag_set);
28061c357a1eSMike Snitzer 
2807bfebd1cdSMike Snitzer 	return err;
28084a0b4ddfSMike Snitzer }
28094a0b4ddfSMike Snitzer 
28104e6e36c3SMike Snitzer static unsigned filter_md_type(unsigned type, struct mapped_device *md)
28114e6e36c3SMike Snitzer {
28124e6e36c3SMike Snitzer 	if (type == DM_TYPE_BIO_BASED)
28134e6e36c3SMike Snitzer 		return type;
28144e6e36c3SMike Snitzer 
28154e6e36c3SMike Snitzer 	return !md->use_blk_mq ? DM_TYPE_REQUEST_BASED : DM_TYPE_MQ_REQUEST_BASED;
28164e6e36c3SMike Snitzer }
28174e6e36c3SMike Snitzer 
28184a0b4ddfSMike Snitzer /*
28194a0b4ddfSMike Snitzer  * Setup the DM device's queue based on md's type
28204a0b4ddfSMike Snitzer  */
2821591ddcfcSMike Snitzer int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
28224a0b4ddfSMike Snitzer {
2823bfebd1cdSMike Snitzer 	int r;
282417e149b8SMike Snitzer 	unsigned md_type = filter_md_type(dm_get_md_type(md), md);
2825bfebd1cdSMike Snitzer 
2826bfebd1cdSMike Snitzer 	switch (md_type) {
2827bfebd1cdSMike Snitzer 	case DM_TYPE_REQUEST_BASED:
2828eca7ee6dSMike Snitzer 		r = dm_old_init_request_queue(md);
2829bfebd1cdSMike Snitzer 		if (r) {
2830eca7ee6dSMike Snitzer 			DMERR("Cannot initialize queue for request-based mapped device");
2831bfebd1cdSMike Snitzer 			return r;
28324a0b4ddfSMike Snitzer 		}
2833bfebd1cdSMike Snitzer 		break;
2834bfebd1cdSMike Snitzer 	case DM_TYPE_MQ_REQUEST_BASED:
2835591ddcfcSMike Snitzer 		r = dm_mq_init_request_queue(md, dm_table_get_immutable_target(t));
2836bfebd1cdSMike Snitzer 		if (r) {
2837eca7ee6dSMike Snitzer 			DMERR("Cannot initialize queue for request-based dm-mq mapped device");
2838bfebd1cdSMike Snitzer 			return r;
2839bfebd1cdSMike Snitzer 		}
2840bfebd1cdSMike Snitzer 		break;
2841bfebd1cdSMike Snitzer 	case DM_TYPE_BIO_BASED:
2842eca7ee6dSMike Snitzer 		dm_init_normal_md_queue(md);
2843ff36ab34SMike Snitzer 		blk_queue_make_request(md->queue, dm_make_request);
2844dbba42d8SMikulas Patocka 		/*
2845dbba42d8SMikulas Patocka 		 * DM handles splitting bios as needed.  Free the bio_split bioset
2846dbba42d8SMikulas Patocka 		 * since it won't be used (saves 1 process per bio-based DM device).
2847dbba42d8SMikulas Patocka 		 */
2848dbba42d8SMikulas Patocka 		bioset_free(md->queue->bio_split);
2849dbba42d8SMikulas Patocka 		md->queue->bio_split = NULL;
2850bfebd1cdSMike Snitzer 		break;
2851ff36ab34SMike Snitzer 	}
28524a0b4ddfSMike Snitzer 
28534a0b4ddfSMike Snitzer 	return 0;
28544a0b4ddfSMike Snitzer }
28554a0b4ddfSMike Snitzer 
28562bec1f4aSMikulas Patocka struct mapped_device *dm_get_md(dev_t dev)
28571da177e4SLinus Torvalds {
28581da177e4SLinus Torvalds 	struct mapped_device *md;
28591da177e4SLinus Torvalds 	unsigned minor = MINOR(dev);
28601da177e4SLinus Torvalds 
28611da177e4SLinus Torvalds 	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
28621da177e4SLinus Torvalds 		return NULL;
28631da177e4SLinus Torvalds 
2864f32c10b0SJeff Mahoney 	spin_lock(&_minor_lock);
28651da177e4SLinus Torvalds 
28661da177e4SLinus Torvalds 	md = idr_find(&_minor_idr, minor);
28672bec1f4aSMikulas Patocka 	if (md) {
28682bec1f4aSMikulas Patocka 		if ((md == MINOR_ALLOCED ||
2869f331c029STejun Heo 		     (MINOR(disk_devt(dm_disk(md))) != minor) ||
2870abdc568bSKiyoshi Ueda 		     dm_deleting_md(md) ||
2871fba9f90eSJeff Mahoney 		     test_bit(DMF_FREEING, &md->flags))) {
2872637842cfSDavid Teigland 			md = NULL;
2873fba9f90eSJeff Mahoney 			goto out;
2874fba9f90eSJeff Mahoney 		}
28752bec1f4aSMikulas Patocka 		dm_get(md);
28762bec1f4aSMikulas Patocka 	}
28771da177e4SLinus Torvalds 
2878fba9f90eSJeff Mahoney out:
2879f32c10b0SJeff Mahoney 	spin_unlock(&_minor_lock);
28801da177e4SLinus Torvalds 
2881637842cfSDavid Teigland 	return md;
2882637842cfSDavid Teigland }
28833cf2e4baSAlasdair G Kergon EXPORT_SYMBOL_GPL(dm_get_md);
2884d229a958SDavid Teigland 
28859ade92a9SAlasdair G Kergon void *dm_get_mdptr(struct mapped_device *md)
2886637842cfSDavid Teigland {
28879ade92a9SAlasdair G Kergon 	return md->interface_ptr;
28881da177e4SLinus Torvalds }
28891da177e4SLinus Torvalds 
28901da177e4SLinus Torvalds void dm_set_mdptr(struct mapped_device *md, void *ptr)
28911da177e4SLinus Torvalds {
28921da177e4SLinus Torvalds 	md->interface_ptr = ptr;
28931da177e4SLinus Torvalds }
28941da177e4SLinus Torvalds 
28951da177e4SLinus Torvalds void dm_get(struct mapped_device *md)
28961da177e4SLinus Torvalds {
28971da177e4SLinus Torvalds 	atomic_inc(&md->holders);
28983f77316dSKiyoshi Ueda 	BUG_ON(test_bit(DMF_FREEING, &md->flags));
28991da177e4SLinus Torvalds }
29001da177e4SLinus Torvalds 
290109ee96b2SMikulas Patocka int dm_hold(struct mapped_device *md)
290209ee96b2SMikulas Patocka {
290309ee96b2SMikulas Patocka 	spin_lock(&_minor_lock);
290409ee96b2SMikulas Patocka 	if (test_bit(DMF_FREEING, &md->flags)) {
290509ee96b2SMikulas Patocka 		spin_unlock(&_minor_lock);
290609ee96b2SMikulas Patocka 		return -EBUSY;
290709ee96b2SMikulas Patocka 	}
290809ee96b2SMikulas Patocka 	dm_get(md);
290909ee96b2SMikulas Patocka 	spin_unlock(&_minor_lock);
291009ee96b2SMikulas Patocka 	return 0;
291109ee96b2SMikulas Patocka }
291209ee96b2SMikulas Patocka EXPORT_SYMBOL_GPL(dm_hold);
291309ee96b2SMikulas Patocka 
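/*
 * Illustrative sketch, not part of dm.c: dm_hold() is the safe way to
 * take a reference when the device may already be on its way out; it
 * fails with -EBUSY instead of tripping dm_get()'s DMF_FREEING BUG_ON.
 * The function name is hypothetical.
 */
static int example_try_take_reference(struct mapped_device *md)
{
	if (dm_hold(md))
		return -EBUSY;	/* device is being freed */

	/* ... safely use md here ... */

	dm_put(md);
	return 0;
}
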
291472d94861SAlasdair G Kergon const char *dm_device_name(struct mapped_device *md)
291572d94861SAlasdair G Kergon {
291672d94861SAlasdair G Kergon 	return md->name;
291772d94861SAlasdair G Kergon }
291872d94861SAlasdair G Kergon EXPORT_SYMBOL_GPL(dm_device_name);
291972d94861SAlasdair G Kergon 
29203f77316dSKiyoshi Ueda static void __dm_destroy(struct mapped_device *md, bool wait)
29211da177e4SLinus Torvalds {
29221134e5aeSMike Anderson 	struct dm_table *map;
292383d5e5b0SMikulas Patocka 	int srcu_idx;
29241da177e4SLinus Torvalds 
29253f77316dSKiyoshi Ueda 	might_sleep();
2926fba9f90eSJeff Mahoney 
292763a4f065SMike Snitzer 	spin_lock(&_minor_lock);
29283f77316dSKiyoshi Ueda 	idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
2929fba9f90eSJeff Mahoney 	set_bit(DMF_FREEING, &md->flags);
2930f32c10b0SJeff Mahoney 	spin_unlock(&_minor_lock);
29313f77316dSKiyoshi Ueda 
293202233342SMike Snitzer 	if (dm_request_based(md) && md->kworker_task)
29332eb6e1e3SKeith Busch 		flush_kthread_worker(&md->kworker);
29342eb6e1e3SKeith Busch 
2935ab7c7bb6SMikulas Patocka 	/*
2936ab7c7bb6SMikulas Patocka 	 * Take suspend_lock so that presuspend and postsuspend methods
2937ab7c7bb6SMikulas Patocka 	 * do not race with internal suspend.
2938ab7c7bb6SMikulas Patocka 	 */
2939ab7c7bb6SMikulas Patocka 	mutex_lock(&md->suspend_lock);
29402a708cffSJunichi Nomura 	map = dm_get_live_table(md, &srcu_idx);
29414f186f8bSKiyoshi Ueda 	if (!dm_suspended_md(md)) {
29421da177e4SLinus Torvalds 		dm_table_presuspend_targets(map);
29431da177e4SLinus Torvalds 		dm_table_postsuspend_targets(map);
29441da177e4SLinus Torvalds 	}
294583d5e5b0SMikulas Patocka 	/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
294683d5e5b0SMikulas Patocka 	dm_put_live_table(md, srcu_idx);
29472a708cffSJunichi Nomura 	mutex_unlock(&md->suspend_lock);
294883d5e5b0SMikulas Patocka 
29493f77316dSKiyoshi Ueda 	/*
29503f77316dSKiyoshi Ueda 	 * Rare, but there may still be I/O requests on their way to
29513f77316dSKiyoshi Ueda 	 * completion.  Wait for all references to disappear.
29523f77316dSKiyoshi Ueda 	 * No one should increment the reference count of the mapped_device,
29533f77316dSKiyoshi Ueda 	 * after the mapped_device state becomes DMF_FREEING.
29543f77316dSKiyoshi Ueda 	 */
29553f77316dSKiyoshi Ueda 	if (wait)
29563f77316dSKiyoshi Ueda 		while (atomic_read(&md->holders))
29573f77316dSKiyoshi Ueda 			msleep(1);
29583f77316dSKiyoshi Ueda 	else if (atomic_read(&md->holders))
29593f77316dSKiyoshi Ueda 		DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
29603f77316dSKiyoshi Ueda 		       dm_device_name(md), atomic_read(&md->holders));
29613f77316dSKiyoshi Ueda 
2962784aae73SMilan Broz 	dm_sysfs_exit(md);
2963a7940155SAlasdair G Kergon 	dm_table_destroy(__unbind(md));
29641da177e4SLinus Torvalds 	free_dev(md);
29651da177e4SLinus Torvalds }
29663f77316dSKiyoshi Ueda 
29673f77316dSKiyoshi Ueda void dm_destroy(struct mapped_device *md)
29683f77316dSKiyoshi Ueda {
29693f77316dSKiyoshi Ueda 	__dm_destroy(md, true);
29703f77316dSKiyoshi Ueda }
29713f77316dSKiyoshi Ueda 
29723f77316dSKiyoshi Ueda void dm_destroy_immediate(struct mapped_device *md)
29733f77316dSKiyoshi Ueda {
29743f77316dSKiyoshi Ueda 	__dm_destroy(md, false);
29753f77316dSKiyoshi Ueda }
29763f77316dSKiyoshi Ueda 
29773f77316dSKiyoshi Ueda void dm_put(struct mapped_device *md)
29783f77316dSKiyoshi Ueda {
29793f77316dSKiyoshi Ueda 	atomic_dec(&md->holders);
29801da177e4SLinus Torvalds }
298179eb885cSEdward Goggin EXPORT_SYMBOL_GPL(dm_put);
29821da177e4SLinus Torvalds 
2983401600dfSMikulas Patocka static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
298446125c1cSMilan Broz {
298546125c1cSMilan Broz 	int r = 0;
2986b44ebeb0SMikulas Patocka 	DECLARE_WAITQUEUE(wait, current);
2987b44ebeb0SMikulas Patocka 
2988b44ebeb0SMikulas Patocka 	add_wait_queue(&md->wait, &wait);
298946125c1cSMilan Broz 
299046125c1cSMilan Broz 	while (1) {
2991401600dfSMikulas Patocka 		set_current_state(interruptible);
299246125c1cSMilan Broz 
2993b4324feeSKiyoshi Ueda 		if (!md_in_flight(md))
299446125c1cSMilan Broz 			break;
299546125c1cSMilan Broz 
2996401600dfSMikulas Patocka 		if (interruptible == TASK_INTERRUPTIBLE &&
2997401600dfSMikulas Patocka 		    signal_pending(current)) {
299846125c1cSMilan Broz 			r = -EINTR;
299946125c1cSMilan Broz 			break;
300046125c1cSMilan Broz 		}
300146125c1cSMilan Broz 
300246125c1cSMilan Broz 		io_schedule();
300346125c1cSMilan Broz 	}
300446125c1cSMilan Broz 	set_current_state(TASK_RUNNING);
300546125c1cSMilan Broz 
3006b44ebeb0SMikulas Patocka 	remove_wait_queue(&md->wait, &wait);
3007b44ebeb0SMikulas Patocka 
300846125c1cSMilan Broz 	return r;
300946125c1cSMilan Broz }
301046125c1cSMilan Broz 
30111da177e4SLinus Torvalds /*
30121da177e4SLinus Torvalds  * Process the deferred bios
30131da177e4SLinus Torvalds  */
3014ef208587SMikulas Patocka static void dm_wq_work(struct work_struct *work)
30151da177e4SLinus Torvalds {
3016ef208587SMikulas Patocka 	struct mapped_device *md = container_of(work, struct mapped_device,
3017ef208587SMikulas Patocka 						work);
30186d6f10dfSMilan Broz 	struct bio *c;
301983d5e5b0SMikulas Patocka 	int srcu_idx;
302083d5e5b0SMikulas Patocka 	struct dm_table *map;
30211da177e4SLinus Torvalds 
302283d5e5b0SMikulas Patocka 	map = dm_get_live_table(md, &srcu_idx);
3023ef208587SMikulas Patocka 
30243b00b203SMikulas Patocka 	while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
3025022c2611SMikulas Patocka 		spin_lock_irq(&md->deferred_lock);
3026022c2611SMikulas Patocka 		c = bio_list_pop(&md->deferred);
3027022c2611SMikulas Patocka 		spin_unlock_irq(&md->deferred_lock);
3028022c2611SMikulas Patocka 
30296a8736d1STejun Heo 		if (!c)
3030df12ee99SAlasdair G Kergon 			break;
303173d410c0SMilan Broz 
3032e6ee8c0bSKiyoshi Ueda 		if (dm_request_based(md))
3033e6ee8c0bSKiyoshi Ueda 			generic_make_request(c);
3034af7e466aSMikulas Patocka 		else
303583d5e5b0SMikulas Patocka 			__split_and_process_bio(md, map, c);
3036e6ee8c0bSKiyoshi Ueda 	}
30373b00b203SMikulas Patocka 
303883d5e5b0SMikulas Patocka 	dm_put_live_table(md, srcu_idx);
30391da177e4SLinus Torvalds }
30401da177e4SLinus Torvalds 
30419a1fb464SMikulas Patocka static void dm_queue_flush(struct mapped_device *md)
3042304f3f6aSMilan Broz {
30433b00b203SMikulas Patocka 	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
30444e857c58SPeter Zijlstra 	smp_mb__after_atomic();
304553d5914fSMikulas Patocka 	queue_work(md->wq, &md->work);
3046304f3f6aSMilan Broz }
3047304f3f6aSMilan Broz 
30481da177e4SLinus Torvalds /*
3049042d2a9bSAlasdair G Kergon  * Swap in a new table, returning the old one for the caller to destroy.
30501da177e4SLinus Torvalds  */
3051042d2a9bSAlasdair G Kergon struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
30521da177e4SLinus Torvalds {
305387eb5b21SMike Christie 	struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
3054754c5fc7SMike Snitzer 	struct queue_limits limits;
3055042d2a9bSAlasdair G Kergon 	int r;
30561da177e4SLinus Torvalds 
3057e61290a4SDaniel Walker 	mutex_lock(&md->suspend_lock);
30581da177e4SLinus Torvalds 
30591da177e4SLinus Torvalds 	/* device must be suspended */
30604f186f8bSKiyoshi Ueda 	if (!dm_suspended_md(md))
306193c534aeSAlasdair G Kergon 		goto out;
30621da177e4SLinus Torvalds 
30633ae70656SMike Snitzer 	/*
30643ae70656SMike Snitzer 	 * If the new table has no data devices, retain the existing limits.
30653ae70656SMike Snitzer 	 * This helps multipath with queue_if_no_path: if all paths disappear,
30663ae70656SMike Snitzer 	 * new I/O is queued based on these limits until some paths
30673ae70656SMike Snitzer 	 * reappear.
30683ae70656SMike Snitzer 	 */
30693ae70656SMike Snitzer 	if (dm_table_has_no_data_devices(table)) {
307083d5e5b0SMikulas Patocka 		live_map = dm_get_live_table_fast(md);
30713ae70656SMike Snitzer 		if (live_map)
30723ae70656SMike Snitzer 			limits = md->queue->limits;
307383d5e5b0SMikulas Patocka 		dm_put_live_table_fast(md);
30743ae70656SMike Snitzer 	}
30753ae70656SMike Snitzer 
307687eb5b21SMike Christie 	if (!live_map) {
3077754c5fc7SMike Snitzer 		r = dm_calculate_queue_limits(table, &limits);
3078042d2a9bSAlasdair G Kergon 		if (r) {
3079042d2a9bSAlasdair G Kergon 			map = ERR_PTR(r);
3080754c5fc7SMike Snitzer 			goto out;
3081042d2a9bSAlasdair G Kergon 		}
308287eb5b21SMike Christie 	}
3083754c5fc7SMike Snitzer 
3084042d2a9bSAlasdair G Kergon 	map = __bind(md, table, &limits);
30851da177e4SLinus Torvalds 
308693c534aeSAlasdair G Kergon out:
3087e61290a4SDaniel Walker 	mutex_unlock(&md->suspend_lock);
3088042d2a9bSAlasdair G Kergon 	return map;
30891da177e4SLinus Torvalds }
30901da177e4SLinus Torvalds 
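/*
 * Illustrative sketch, not part of dm.c: the suspend/swap/resume
 * sequence a caller (e.g. the ioctl layer) performs around
 * dm_swap_table().  "new_table" is assumed to have been created and
 * populated elsewhere; the function name is hypothetical.
 */
static int example_replace_table(struct mapped_device *md,
				 struct dm_table *new_table)
{
	struct dm_table *old_map;
	int r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);

	if (r)
		return r;

	old_map = dm_swap_table(md, new_table);
	if (IS_ERR(old_map)) {
		(void) dm_resume(md);
		return PTR_ERR(old_map);
	}

	if (old_map)
		dm_table_destroy(old_map);	/* caller owns the old table */

	return dm_resume(md);
}
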
30911da177e4SLinus Torvalds /*
30921da177e4SLinus Torvalds  * Functions to lock and unlock any filesystem running on the
30931da177e4SLinus Torvalds  * device.
30941da177e4SLinus Torvalds  */
30952ca3310eSAlasdair G Kergon static int lock_fs(struct mapped_device *md)
30961da177e4SLinus Torvalds {
3097e39e2e95SAlasdair G Kergon 	int r;
30981da177e4SLinus Torvalds 
30991da177e4SLinus Torvalds 	WARN_ON(md->frozen_sb);
3100dfbe03f6SAlasdair G Kergon 
3101db8fef4fSMikulas Patocka 	md->frozen_sb = freeze_bdev(md->bdev);
3102dfbe03f6SAlasdair G Kergon 	if (IS_ERR(md->frozen_sb)) {
3103cf222b37SAlasdair G Kergon 		r = PTR_ERR(md->frozen_sb);
3104e39e2e95SAlasdair G Kergon 		md->frozen_sb = NULL;
3105e39e2e95SAlasdair G Kergon 		return r;
3106dfbe03f6SAlasdair G Kergon 	}
3107dfbe03f6SAlasdair G Kergon 
3108aa8d7c2fSAlasdair G Kergon 	set_bit(DMF_FROZEN, &md->flags);
3109aa8d7c2fSAlasdair G Kergon 
31101da177e4SLinus Torvalds 	return 0;
31111da177e4SLinus Torvalds }
31121da177e4SLinus Torvalds 
31132ca3310eSAlasdair G Kergon static void unlock_fs(struct mapped_device *md)
31141da177e4SLinus Torvalds {
3115aa8d7c2fSAlasdair G Kergon 	if (!test_bit(DMF_FROZEN, &md->flags))
3116aa8d7c2fSAlasdair G Kergon 		return;
3117aa8d7c2fSAlasdair G Kergon 
3118db8fef4fSMikulas Patocka 	thaw_bdev(md->bdev, md->frozen_sb);
31191da177e4SLinus Torvalds 	md->frozen_sb = NULL;
3120aa8d7c2fSAlasdair G Kergon 	clear_bit(DMF_FROZEN, &md->flags);
31211da177e4SLinus Torvalds }
31221da177e4SLinus Torvalds 
31231da177e4SLinus Torvalds /*
3124ffcc3936SMike Snitzer  * If __dm_suspend returns 0, the device is completely quiescent
3125ffcc3936SMike Snitzer  * now. There is no request-processing activity. All new requests
3126ffcc3936SMike Snitzer  * are being added to the md->deferred list.
3127cec47e3dSKiyoshi Ueda  *
3128ffcc3936SMike Snitzer  * Caller must hold md->suspend_lock
3129cec47e3dSKiyoshi Ueda  */
3130ffcc3936SMike Snitzer static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
3131ffcc3936SMike Snitzer 			unsigned suspend_flags, int interruptible)
31321da177e4SLinus Torvalds {
3133ffcc3936SMike Snitzer 	bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
3134ffcc3936SMike Snitzer 	bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
3135ffcc3936SMike Snitzer 	int r;
3136cf222b37SAlasdair G Kergon 
31372e93ccc1SKiyoshi Ueda 	/*
31382e93ccc1SKiyoshi Ueda 	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
31392e93ccc1SKiyoshi Ueda 	 * This flag is cleared before dm_suspend returns.
31402e93ccc1SKiyoshi Ueda 	 */
31412e93ccc1SKiyoshi Ueda 	if (noflush)
31422e93ccc1SKiyoshi Ueda 		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
31432e93ccc1SKiyoshi Ueda 
3144d67ee213SMike Snitzer 	/*
3145d67ee213SMike Snitzer 	 * This gets reverted if there's an error later and the targets
3146d67ee213SMike Snitzer 	 * provide the .presuspend_undo hook.
3147d67ee213SMike Snitzer 	 */
31481da177e4SLinus Torvalds 	dm_table_presuspend_targets(map);
31491da177e4SLinus Torvalds 
31502e93ccc1SKiyoshi Ueda 	/*
31519f518b27SKiyoshi Ueda 	 * Flush I/O to the device.
31529f518b27SKiyoshi Ueda 	 * Any I/O submitted after lock_fs() may not be flushed.
31539f518b27SKiyoshi Ueda 	 * noflush takes precedence over do_lockfs.
31549f518b27SKiyoshi Ueda 	 * (lock_fs() flushes I/Os and waits for them to complete.)
31552e93ccc1SKiyoshi Ueda 	 */
315632a926daSMikulas Patocka 	if (!noflush && do_lockfs) {
31572ca3310eSAlasdair G Kergon 		r = lock_fs(md);
3158d67ee213SMike Snitzer 		if (r) {
3159d67ee213SMike Snitzer 			dm_table_presuspend_undo_targets(map);
3160ffcc3936SMike Snitzer 			return r;
3161aa8d7c2fSAlasdair G Kergon 		}
3162d67ee213SMike Snitzer 	}
31631da177e4SLinus Torvalds 
31641da177e4SLinus Torvalds 	/*
31663b00b203SMikulas Patocka 	 * to target drivers, i.e. no one may be executing
31663b00b203SMikulas Patocka 	 * to target drivers i.e. no one may be executing
31673b00b203SMikulas Patocka 	 * __split_and_process_bio. This is called from dm_request and
31683b00b203SMikulas Patocka 	 * dm_wq_work.
31693b00b203SMikulas Patocka 	 *
31703b00b203SMikulas Patocka 	 * To get all processes out of __split_and_process_bio in dm_request,
31713b00b203SMikulas Patocka 	 * we take the write lock. To prevent any process from reentering
31726a8736d1STejun Heo 	 * __split_and_process_bio from dm_request and quiesce the thread
31736a8736d1STejun Heo 	 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
31746a8736d1STejun Heo 	 * flush_workqueue(md->wq).
31751da177e4SLinus Torvalds 	 */
31761eb787ecSAlasdair G Kergon 	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
317741abc4e1SHannes Reinecke 	if (map)
317883d5e5b0SMikulas Patocka 		synchronize_srcu(&md->io_barrier);
31791da177e4SLinus Torvalds 
3180d0bcb878SKiyoshi Ueda 	/*
318129e4013dSTejun Heo 	 * Stop md->queue before flushing md->wq in case request-based
318229e4013dSTejun Heo 	 * dm defers requests to md->wq from md->queue.
3183d0bcb878SKiyoshi Ueda 	 */
31842eb6e1e3SKeith Busch 	if (dm_request_based(md)) {
3185eca7ee6dSMike Snitzer 		dm_stop_queue(md->queue);
318602233342SMike Snitzer 		if (md->kworker_task)
31872eb6e1e3SKeith Busch 			flush_kthread_worker(&md->kworker);
31882eb6e1e3SKeith Busch 	}
3189cec47e3dSKiyoshi Ueda 
3190d0bcb878SKiyoshi Ueda 	flush_workqueue(md->wq);
3191d0bcb878SKiyoshi Ueda 
31921da177e4SLinus Torvalds 	/*
31933b00b203SMikulas Patocka 	 * At this point no more requests are entering target request routines.
31943b00b203SMikulas Patocka 	 * We call dm_wait_for_completion to wait for all existing requests
31953b00b203SMikulas Patocka 	 * to finish.
31961da177e4SLinus Torvalds 	 */
3197ffcc3936SMike Snitzer 	r = dm_wait_for_completion(md, interruptible);
31981da177e4SLinus Torvalds 
31996d6f10dfSMilan Broz 	if (noflush)
3200022c2611SMikulas Patocka 		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
320141abc4e1SHannes Reinecke 	if (map)
320283d5e5b0SMikulas Patocka 		synchronize_srcu(&md->io_barrier);
32032e93ccc1SKiyoshi Ueda 
32041da177e4SLinus Torvalds 	/* were we interrupted ? */
320546125c1cSMilan Broz 	if (r < 0) {
32069a1fb464SMikulas Patocka 		dm_queue_flush(md);
320773d410c0SMilan Broz 
3208cec47e3dSKiyoshi Ueda 		if (dm_request_based(md))
3209eca7ee6dSMike Snitzer 			dm_start_queue(md->queue);
3210cec47e3dSKiyoshi Ueda 
32112ca3310eSAlasdair G Kergon 		unlock_fs(md);
3212d67ee213SMike Snitzer 		dm_table_presuspend_undo_targets(map);
3213ffcc3936SMike Snitzer 		/* pushback list is already flushed, so skip flush */
3214ffcc3936SMike Snitzer 	}
3215ffcc3936SMike Snitzer 
3216ffcc3936SMike Snitzer 	return r;
32172ca3310eSAlasdair G Kergon }
32182ca3310eSAlasdair G Kergon 
32193b00b203SMikulas Patocka /*
3220ffcc3936SMike Snitzer  * We need to be able to change a mapping table under a mounted
3221ffcc3936SMike Snitzer  * filesystem.  For example we might want to move some data in
3222ffcc3936SMike Snitzer  * the background.  Before the table can be swapped with
3223ffcc3936SMike Snitzer  * dm_bind_table, dm_suspend must be called to flush any in-flight
3224ffcc3936SMike Snitzer  * bios and ensure that any further I/O gets deferred.
32253b00b203SMikulas Patocka  */
3226ffcc3936SMike Snitzer /*
3227ffcc3936SMike Snitzer  * Suspend mechanism in request-based dm.
3228ffcc3936SMike Snitzer  *
3229ffcc3936SMike Snitzer  * 1. Flush all I/Os by lock_fs() if needed.
3230ffcc3936SMike Snitzer  * 2. Stop dispatching any I/O by stopping the request_queue.
3231ffcc3936SMike Snitzer  * 3. Wait for all in-flight I/Os to be completed or requeued.
3232ffcc3936SMike Snitzer  *
3233ffcc3936SMike Snitzer  * To abort suspend, start the request_queue.
3234ffcc3936SMike Snitzer  */
3235ffcc3936SMike Snitzer int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
3236ffcc3936SMike Snitzer {
3237ffcc3936SMike Snitzer 	struct dm_table *map = NULL;
3238ffcc3936SMike Snitzer 	int r = 0;
3239ffcc3936SMike Snitzer 
3240ffcc3936SMike Snitzer retry:
3241ffcc3936SMike Snitzer 	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
3242ffcc3936SMike Snitzer 
3243ffcc3936SMike Snitzer 	if (dm_suspended_md(md)) {
3244ffcc3936SMike Snitzer 		r = -EINVAL;
3245ffcc3936SMike Snitzer 		goto out_unlock;
3246ffcc3936SMike Snitzer 	}
3247ffcc3936SMike Snitzer 
3248ffcc3936SMike Snitzer 	if (dm_suspended_internally_md(md)) {
3249ffcc3936SMike Snitzer 		/* already internally suspended, wait for internal resume */
3250ffcc3936SMike Snitzer 		mutex_unlock(&md->suspend_lock);
3251ffcc3936SMike Snitzer 		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
3252ffcc3936SMike Snitzer 		if (r)
3253ffcc3936SMike Snitzer 			return r;
3254ffcc3936SMike Snitzer 		goto retry;
3255ffcc3936SMike Snitzer 	}
3256ffcc3936SMike Snitzer 
3257a12f5d48SEric Dumazet 	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
3258ffcc3936SMike Snitzer 
3259ffcc3936SMike Snitzer 	r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE);
3260ffcc3936SMike Snitzer 	if (r)
3261ffcc3936SMike Snitzer 		goto out_unlock;
32623b00b203SMikulas Patocka 
32631da177e4SLinus Torvalds 	set_bit(DMF_SUSPENDED, &md->flags);
32641da177e4SLinus Torvalds 
32654d4471cbSKiyoshi Ueda 	dm_table_postsuspend_targets(map);
32664d4471cbSKiyoshi Ueda 
3267d287483dSAlasdair G Kergon out_unlock:
3268e61290a4SDaniel Walker 	mutex_unlock(&md->suspend_lock);
3269cf222b37SAlasdair G Kergon 	return r;
32701da177e4SLinus Torvalds }
32711da177e4SLinus Torvalds 
3272ffcc3936SMike Snitzer static int __dm_resume(struct mapped_device *md, struct dm_table *map)
32731da177e4SLinus Torvalds {
3274ffcc3936SMike Snitzer 	if (map) {
3275ffcc3936SMike Snitzer 		int r = dm_table_resume_targets(map);
32768757b776SMilan Broz 		if (r)
3277ffcc3936SMike Snitzer 			return r;
3278ffcc3936SMike Snitzer 	}
32792ca3310eSAlasdair G Kergon 
32809a1fb464SMikulas Patocka 	dm_queue_flush(md);
32812ca3310eSAlasdair G Kergon 
3282cec47e3dSKiyoshi Ueda 	/*
3283cec47e3dSKiyoshi Ueda 	 * Flushing deferred I/Os must be done after targets are resumed
3284cec47e3dSKiyoshi Ueda 	 * so that mapping of targets can work correctly.
3285cec47e3dSKiyoshi Ueda 	 * Request-based dm is queueing the deferred I/Os in its request_queue.
3286cec47e3dSKiyoshi Ueda 	 */
3287cec47e3dSKiyoshi Ueda 	if (dm_request_based(md))
3288eca7ee6dSMike Snitzer 		dm_start_queue(md->queue);
3289cec47e3dSKiyoshi Ueda 
32902ca3310eSAlasdair G Kergon 	unlock_fs(md);
32912ca3310eSAlasdair G Kergon 
3292ffcc3936SMike Snitzer 	return 0;
3293ffcc3936SMike Snitzer }
3294ffcc3936SMike Snitzer 
3295ffcc3936SMike Snitzer int dm_resume(struct mapped_device *md)
3296ffcc3936SMike Snitzer {
3297ffcc3936SMike Snitzer 	int r = -EINVAL;
3298ffcc3936SMike Snitzer 	struct dm_table *map = NULL;
3299ffcc3936SMike Snitzer 
3300ffcc3936SMike Snitzer retry:
3301ffcc3936SMike Snitzer 	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
3302ffcc3936SMike Snitzer 
3303ffcc3936SMike Snitzer 	if (!dm_suspended_md(md))
3304ffcc3936SMike Snitzer 		goto out;
3305ffcc3936SMike Snitzer 
3306ffcc3936SMike Snitzer 	if (dm_suspended_internally_md(md)) {
3307ffcc3936SMike Snitzer 		/* already internally suspended, wait for internal resume */
3308ffcc3936SMike Snitzer 		mutex_unlock(&md->suspend_lock);
3309ffcc3936SMike Snitzer 		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
3310ffcc3936SMike Snitzer 		if (r)
3311ffcc3936SMike Snitzer 			return r;
3312ffcc3936SMike Snitzer 		goto retry;
3313ffcc3936SMike Snitzer 	}
3314ffcc3936SMike Snitzer 
3315a12f5d48SEric Dumazet 	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
3316ffcc3936SMike Snitzer 	if (!map || !dm_table_get_size(map))
3317ffcc3936SMike Snitzer 		goto out;
3318ffcc3936SMike Snitzer 
3319ffcc3936SMike Snitzer 	r = __dm_resume(md, map);
3320ffcc3936SMike Snitzer 	if (r)
3321ffcc3936SMike Snitzer 		goto out;
3322ffcc3936SMike Snitzer 
33232ca3310eSAlasdair G Kergon 	clear_bit(DMF_SUSPENDED, &md->flags);
33242ca3310eSAlasdair G Kergon 
3325cf222b37SAlasdair G Kergon 	r = 0;
3326cf222b37SAlasdair G Kergon out:
3327e61290a4SDaniel Walker 	mutex_unlock(&md->suspend_lock);
33282ca3310eSAlasdair G Kergon 
3329cf222b37SAlasdair G Kergon 	return r;
33301da177e4SLinus Torvalds }
33311da177e4SLinus Torvalds 
3332fd2ed4d2SMikulas Patocka /*
3333fd2ed4d2SMikulas Patocka  * Internal suspend/resume works like userspace-driven suspend. It waits
3334fd2ed4d2SMikulas Patocka  * until all bios finish and prevents issuing new bios to the target drivers.
3335fd2ed4d2SMikulas Patocka  * It may be used only from the kernel.
3336fd2ed4d2SMikulas Patocka  */
3337fd2ed4d2SMikulas Patocka 
3338ffcc3936SMike Snitzer static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags)
3339ffcc3936SMike Snitzer {
3340ffcc3936SMike Snitzer 	struct dm_table *map = NULL;
3341ffcc3936SMike Snitzer 
334296b26c8cSMikulas Patocka 	if (md->internal_suspend_count++)
3343ffcc3936SMike Snitzer 		return; /* nested internal suspend */
3344ffcc3936SMike Snitzer 
3345ffcc3936SMike Snitzer 	if (dm_suspended_md(md)) {
3346ffcc3936SMike Snitzer 		set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
3347ffcc3936SMike Snitzer 		return; /* nest suspend */
3348ffcc3936SMike Snitzer 	}
3349ffcc3936SMike Snitzer 
3350a12f5d48SEric Dumazet 	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
3351ffcc3936SMike Snitzer 
3352ffcc3936SMike Snitzer 	/*
3353ffcc3936SMike Snitzer 	 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is
3354ffcc3936SMike Snitzer 	 * supported.  Properly supporting a TASK_INTERRUPTIBLE internal suspend
3355ffcc3936SMike Snitzer 	 * would require changing .presuspend to return an error -- avoid this
3356ffcc3936SMike Snitzer 	 * until there is a need for more elaborate variants of internal suspend.
3357ffcc3936SMike Snitzer 	 */
3358ffcc3936SMike Snitzer 	(void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE);
3359ffcc3936SMike Snitzer 
3360ffcc3936SMike Snitzer 	set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
3361ffcc3936SMike Snitzer 
3362ffcc3936SMike Snitzer 	dm_table_postsuspend_targets(map);
3363ffcc3936SMike Snitzer }
3364ffcc3936SMike Snitzer 
3365ffcc3936SMike Snitzer static void __dm_internal_resume(struct mapped_device *md)
3366ffcc3936SMike Snitzer {
336796b26c8cSMikulas Patocka 	BUG_ON(!md->internal_suspend_count);
336896b26c8cSMikulas Patocka 
336996b26c8cSMikulas Patocka 	if (--md->internal_suspend_count)
3370ffcc3936SMike Snitzer 		return; /* resume from nested internal suspend */
3371ffcc3936SMike Snitzer 
3372ffcc3936SMike Snitzer 	if (dm_suspended_md(md))
3373ffcc3936SMike Snitzer 		goto done; /* resume from nested suspend */
3374ffcc3936SMike Snitzer 
3375ffcc3936SMike Snitzer 	/*
3376ffcc3936SMike Snitzer 	 * NOTE: existing callers don't need to call dm_table_resume_targets
3377ffcc3936SMike Snitzer 	 * (which may fail -- so best to avoid it for now by passing NULL map)
3378ffcc3936SMike Snitzer 	 */
3379ffcc3936SMike Snitzer 	(void) __dm_resume(md, NULL);
3380ffcc3936SMike Snitzer 
3381ffcc3936SMike Snitzer done:
3382ffcc3936SMike Snitzer 	clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
3383ffcc3936SMike Snitzer 	smp_mb__after_atomic();
3384ffcc3936SMike Snitzer 	wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY);
3385ffcc3936SMike Snitzer }
3386ffcc3936SMike Snitzer 
3387ffcc3936SMike Snitzer void dm_internal_suspend_noflush(struct mapped_device *md)
3388fd2ed4d2SMikulas Patocka {
3389fd2ed4d2SMikulas Patocka 	mutex_lock(&md->suspend_lock);
3390ffcc3936SMike Snitzer 	__dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
3391ffcc3936SMike Snitzer 	mutex_unlock(&md->suspend_lock);
3392ffcc3936SMike Snitzer }
3393ffcc3936SMike Snitzer EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush);
3394ffcc3936SMike Snitzer 
3395ffcc3936SMike Snitzer void dm_internal_resume(struct mapped_device *md)
3396ffcc3936SMike Snitzer {
3397ffcc3936SMike Snitzer 	mutex_lock(&md->suspend_lock);
3398ffcc3936SMike Snitzer 	__dm_internal_resume(md);
3399ffcc3936SMike Snitzer 	mutex_unlock(&md->suspend_lock);
3400ffcc3936SMike Snitzer }
3401ffcc3936SMike Snitzer EXPORT_SYMBOL_GPL(dm_internal_resume);
3402ffcc3936SMike Snitzer 
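/*
 * Illustrative sketch (not an in-tree caller): internal suspend is
 * reference counted by internal_suspend_count above, so suspend/resume
 * pairs nest.  A hypothetical caller that suspends twice must resume
 * twice before I/O restarts:
 */
#if 0
static void example_nested_internal_suspend(struct mapped_device *md)
{
	dm_internal_suspend_noflush(md);	/* count 0 -> 1: device suspends */
	dm_internal_suspend_noflush(md);	/* count 1 -> 2: nested, returns early */

	dm_internal_resume(md);			/* count 2 -> 1: still suspended */
	dm_internal_resume(md);			/* count 1 -> 0: __dm_resume() runs */
}
#endif
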
3403ffcc3936SMike Snitzer /*
3404ffcc3936SMike Snitzer  * Fast variants of internal suspend/resume hold md->suspend_lock across the
3405ffcc3936SMike Snitzer  * pair (suspend_fast takes it, resume_fast drops it), preventing interaction with userspace-driven suspend.
3406ffcc3936SMike Snitzer  */
3407ffcc3936SMike Snitzer 
3408ffcc3936SMike Snitzer void dm_internal_suspend_fast(struct mapped_device *md)
3409ffcc3936SMike Snitzer {
3410ffcc3936SMike Snitzer 	mutex_lock(&md->suspend_lock);
3411ffcc3936SMike Snitzer 	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
3412fd2ed4d2SMikulas Patocka 		return;
3413fd2ed4d2SMikulas Patocka 
3414fd2ed4d2SMikulas Patocka 	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
3415fd2ed4d2SMikulas Patocka 	synchronize_srcu(&md->io_barrier);
3416fd2ed4d2SMikulas Patocka 	flush_workqueue(md->wq);
3417fd2ed4d2SMikulas Patocka 	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
3418fd2ed4d2SMikulas Patocka }
3419b735fedeSMikulas Patocka EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);
3420fd2ed4d2SMikulas Patocka 
3421ffcc3936SMike Snitzer void dm_internal_resume_fast(struct mapped_device *md)
3422fd2ed4d2SMikulas Patocka {
3423ffcc3936SMike Snitzer 	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
3424fd2ed4d2SMikulas Patocka 		goto done;
3425fd2ed4d2SMikulas Patocka 
3426fd2ed4d2SMikulas Patocka 	dm_queue_flush(md);
3427fd2ed4d2SMikulas Patocka 
3428fd2ed4d2SMikulas Patocka done:
3429fd2ed4d2SMikulas Patocka 	mutex_unlock(&md->suspend_lock);
3430fd2ed4d2SMikulas Patocka }
3431b735fedeSMikulas Patocka EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
3432fd2ed4d2SMikulas Patocka 
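/*
 * Illustrative sketch (not an in-tree caller): the fast variants are meant
 * to bracket a short critical section.  dm_internal_suspend_fast() returns
 * with md->suspend_lock held -- on the early-return path above as well --
 * and dm_internal_resume_fast() is what releases it:
 */
#if 0
static void example_fast_suspend_section(struct mapped_device *md)
{
	dm_internal_suspend_fast(md);	/* takes suspend_lock, quiesces I/O */
	/* ... operate on the quiesced device ... */
	dm_internal_resume_fast(md);	/* restarts I/O, drops suspend_lock */
}
#endif
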
34331da177e4SLinus Torvalds /*-----------------------------------------------------------------
34341da177e4SLinus Torvalds  * Event notification.
34351da177e4SLinus Torvalds  *---------------------------------------------------------------*/
34363abf85b5SPeter Rajnoha int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
343760935eb2SMilan Broz 		       unsigned cookie)
343869267a30SAlasdair G Kergon {
343960935eb2SMilan Broz 	char udev_cookie[DM_COOKIE_LENGTH];
344060935eb2SMilan Broz 	char *envp[] = { udev_cookie, NULL };
344160935eb2SMilan Broz 
344260935eb2SMilan Broz 	if (!cookie)
34433abf85b5SPeter Rajnoha 		return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
344460935eb2SMilan Broz 	else {
344560935eb2SMilan Broz 		snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
344660935eb2SMilan Broz 			 DM_COOKIE_ENV_VAR_NAME, cookie);
34473abf85b5SPeter Rajnoha 		return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
34483abf85b5SPeter Rajnoha 					  action, envp);
344960935eb2SMilan Broz 	}
345069267a30SAlasdair G Kergon }
345169267a30SAlasdair G Kergon 
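/*
 * Worked example of the cookie path above: for cookie == 4000000000 the
 * snprintf() produces the single environment string
 * "DM_COOKIE=4000000000" -- at most 9 + 1 + 10 characters plus the
 * terminating NUL, which always fits DM_COOKIE_LENGTH (24).
 */
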
34527a8c3d3bSMike Anderson uint32_t dm_next_uevent_seq(struct mapped_device *md)
34537a8c3d3bSMike Anderson {
34547a8c3d3bSMike Anderson 	return atomic_add_return(1, &md->uevent_seq);
34557a8c3d3bSMike Anderson }
34567a8c3d3bSMike Anderson 
34571da177e4SLinus Torvalds uint32_t dm_get_event_nr(struct mapped_device *md)
34581da177e4SLinus Torvalds {
34591da177e4SLinus Torvalds 	return atomic_read(&md->event_nr);
34601da177e4SLinus Torvalds }
34611da177e4SLinus Torvalds 
34621da177e4SLinus Torvalds int dm_wait_event(struct mapped_device *md, int event_nr)
34631da177e4SLinus Torvalds {
34641da177e4SLinus Torvalds 	return wait_event_interruptible(md->eventq,
34651da177e4SLinus Torvalds 			(event_nr != atomic_read(&md->event_nr)));
34661da177e4SLinus Torvalds }
34671da177e4SLinus Torvalds 
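/*
 * Illustrative sketch (assumption, mirroring how an ioctl-driven waiter
 * might combine the two helpers above): snapshot the event counter, then
 * block until a target raises a new event.  dm_wait_event() returns
 * -ERESTARTSYS if the sleep is interrupted by a signal.
 */
#if 0
static int example_wait_for_next_event(struct mapped_device *md)
{
	int event_nr = dm_get_event_nr(md);

	return dm_wait_event(md, event_nr);	/* 0 once event_nr changes */
}
#endif
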
34687a8c3d3bSMike Anderson void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
34697a8c3d3bSMike Anderson {
34707a8c3d3bSMike Anderson 	unsigned long flags;
34717a8c3d3bSMike Anderson 
34727a8c3d3bSMike Anderson 	spin_lock_irqsave(&md->uevent_lock, flags);
34737a8c3d3bSMike Anderson 	list_add(elist, &md->uevent_list);
34747a8c3d3bSMike Anderson 	spin_unlock_irqrestore(&md->uevent_lock, flags);
34757a8c3d3bSMike Anderson }
34767a8c3d3bSMike Anderson 
34771da177e4SLinus Torvalds /*
34781da177e4SLinus Torvalds  * The gendisk is only valid as long as you hold a reference
34791da177e4SLinus Torvalds  * on 'md'.
34801da177e4SLinus Torvalds  */
34811da177e4SLinus Torvalds struct gendisk *dm_disk(struct mapped_device *md)
34821da177e4SLinus Torvalds {
34831da177e4SLinus Torvalds 	return md->disk;
34841da177e4SLinus Torvalds }
348565ff5b7dSSami Tolvanen EXPORT_SYMBOL_GPL(dm_disk);
34861da177e4SLinus Torvalds 
3487784aae73SMilan Broz struct kobject *dm_kobject(struct mapped_device *md)
3488784aae73SMilan Broz {
34892995fa78SMikulas Patocka 	return &md->kobj_holder.kobj;
3490784aae73SMilan Broz }
3491784aae73SMilan Broz 
3492784aae73SMilan Broz struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
3493784aae73SMilan Broz {
3494784aae73SMilan Broz 	struct mapped_device *md;
3495784aae73SMilan Broz 
34962995fa78SMikulas Patocka 	md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
3497784aae73SMilan Broz 
34984d89b7b4SMilan Broz 	if (test_bit(DMF_FREEING, &md->flags) ||
3499432a212cSMike Anderson 	    dm_deleting_md(md))
35004d89b7b4SMilan Broz 		return NULL;
35014d89b7b4SMilan Broz 
3502784aae73SMilan Broz 	dm_get(md);
3503784aae73SMilan Broz 	return md;
3504784aae73SMilan Broz }
3505784aae73SMilan Broz 
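/*
 * Illustrative sketch (hypothetical sysfs show callback): resolve the
 * kobject back to its mapped_device and drop the reference that
 * dm_get_from_kobject() took.  dm_device_name() and dm_put() are declared
 * in the dm headers.
 */
#if 0
static ssize_t example_name_show(struct kobject *kobj, char *buf)
{
	struct mapped_device *md = dm_get_from_kobject(kobj);
	ssize_t r;

	if (!md)
		return -EINVAL;	/* device is being freed or deleted */

	r = sprintf(buf, "%s\n", dm_device_name(md));
	dm_put(md);
	return r;
}
#endif
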
35064f186f8bSKiyoshi Ueda int dm_suspended_md(struct mapped_device *md)
35071da177e4SLinus Torvalds {
35081da177e4SLinus Torvalds 	return test_bit(DMF_SUSPENDED, &md->flags);
35091da177e4SLinus Torvalds }
35101da177e4SLinus Torvalds 
3511ffcc3936SMike Snitzer int dm_suspended_internally_md(struct mapped_device *md)
3512ffcc3936SMike Snitzer {
3513ffcc3936SMike Snitzer 	return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
3514ffcc3936SMike Snitzer }
3515ffcc3936SMike Snitzer 
35162c140a24SMikulas Patocka int dm_test_deferred_remove_flag(struct mapped_device *md)
35172c140a24SMikulas Patocka {
35182c140a24SMikulas Patocka 	return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
35192c140a24SMikulas Patocka }
35202c140a24SMikulas Patocka 
352164dbce58SKiyoshi Ueda int dm_suspended(struct dm_target *ti)
352264dbce58SKiyoshi Ueda {
3523ecdb2e25SKiyoshi Ueda 	return dm_suspended_md(dm_table_get_md(ti->table));
352464dbce58SKiyoshi Ueda }
352564dbce58SKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_suspended);
352664dbce58SKiyoshi Ueda 
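/*
 * Illustrative sketch (hypothetical target worker): use the helper above
 * to avoid issuing new I/O while the device is suspended.
 */
#if 0
static void example_do_deferred_work(struct dm_target *ti)
{
	if (dm_suspended(ti))
		return;	/* quiesced; the work is retried after resume */
	/* ... issue deferred I/O ... */
}
#endif
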
35272e93ccc1SKiyoshi Ueda int dm_noflush_suspending(struct dm_target *ti)
35282e93ccc1SKiyoshi Ueda {
3529ecdb2e25SKiyoshi Ueda 	return __noflush_suspending(dm_table_get_md(ti->table));
35302e93ccc1SKiyoshi Ueda }
35312e93ccc1SKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_noflush_suspending);
35322e93ccc1SKiyoshi Ueda 
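/*
 * Illustrative sketch (the conventional target-side use of the helper
 * above, not code from this file): on error during a noflush suspend a
 * target asks the core to requeue the bio rather than fail it, so it is
 * retried after resume.  DM_ENDIO_REQUEUE comes from
 * include/linux/device-mapper.h.
 */
#if 0
static int example_end_io(struct dm_target *ti, struct bio *bio, int error)
{
	if (error == -EIO && dm_noflush_suspending(ti))
		return DM_ENDIO_REQUEUE;	/* retry after resume */

	return error;	/* pass other results through unchanged */
}
#endif
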
353378d8e58aSMike Snitzer struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned type,
353430187e1dSMike Snitzer 					    unsigned integrity, unsigned per_io_data_size)
3535e6ee8c0bSKiyoshi Ueda {
3536115485e8SMike Snitzer 	struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
353778d8e58aSMike Snitzer 	struct kmem_cache *cachep = NULL;
353878d8e58aSMike Snitzer 	unsigned int pool_size = 0;
35395f015204SJun'ichi Nomura 	unsigned int front_pad;
3540e6ee8c0bSKiyoshi Ueda 
3541e6ee8c0bSKiyoshi Ueda 	if (!pools)
35424e6e36c3SMike Snitzer 		return NULL;
3543e6ee8c0bSKiyoshi Ueda 
354478d8e58aSMike Snitzer 	type = filter_md_type(type, md);
354517e149b8SMike Snitzer 
354678d8e58aSMike Snitzer 	switch (type) {
354778d8e58aSMike Snitzer 	case DM_TYPE_BIO_BASED:
354878d8e58aSMike Snitzer 		cachep = _io_cache;
354978d8e58aSMike Snitzer 		pool_size = dm_get_reserved_bio_based_ios();
355030187e1dSMike Snitzer 		front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
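		/*
		 * Worked example (illustrative numbers): with per_io_data_size == 192
		 * and __alignof__(struct dm_target_io) == 8, roundup() leaves 192, so
		 * each bio from pools->bs is preceded by 192 bytes of per-I/O data
		 * plus the dm_target_io fields up to its embedded clone bio.
		 */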
355178d8e58aSMike Snitzer 		break;
355278d8e58aSMike Snitzer 	case DM_TYPE_REQUEST_BASED:
355378d8e58aSMike Snitzer 		cachep = _rq_tio_cache;
355478d8e58aSMike Snitzer 		pool_size = dm_get_reserved_rq_based_ios();
355578d8e58aSMike Snitzer 		pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache);
355678d8e58aSMike Snitzer 		if (!pools->rq_pool)
355778d8e58aSMike Snitzer 			goto out;
355878d8e58aSMike Snitzer 		/* fall through to setup remaining rq-based pools */
355978d8e58aSMike Snitzer 	case DM_TYPE_MQ_REQUEST_BASED:
356078d8e58aSMike Snitzer 		if (!pool_size)
356178d8e58aSMike Snitzer 			pool_size = dm_get_reserved_rq_based_ios();
356278d8e58aSMike Snitzer 		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
3563591ddcfcSMike Snitzer 		/* per_io_data_size is used for the blk-mq pdu at queue allocation */
356478d8e58aSMike Snitzer 		break;
356578d8e58aSMike Snitzer 	default:
356678d8e58aSMike Snitzer 		BUG();
356778d8e58aSMike Snitzer 	}
356878d8e58aSMike Snitzer 
356978d8e58aSMike Snitzer 	if (cachep) {
357078d8e58aSMike Snitzer 		pools->io_pool = mempool_create_slab_pool(pool_size, cachep);
3571e6ee8c0bSKiyoshi Ueda 		if (!pools->io_pool)
35725f015204SJun'ichi Nomura 			goto out;
357378d8e58aSMike Snitzer 	}
3574e6ee8c0bSKiyoshi Ueda 
35753d8aab2dSJunichi Nomura 	pools->bs = bioset_create_nobvec(pool_size, front_pad);
3576e6ee8c0bSKiyoshi Ueda 	if (!pools->bs)
35775f015204SJun'ichi Nomura 		goto out;
3578e6ee8c0bSKiyoshi Ueda 
3579a91a2785SMartin K. Petersen 	if (integrity && bioset_integrity_create(pools->bs, pool_size))
35805f015204SJun'ichi Nomura 		goto out;
3581a91a2785SMartin K. Petersen 
3582e6ee8c0bSKiyoshi Ueda 	return pools;
358378d8e58aSMike Snitzer 
35845f015204SJun'ichi Nomura out:
35855f015204SJun'ichi Nomura 	dm_free_md_mempools(pools);
3586e6ee8c0bSKiyoshi Ueda 
35874e6e36c3SMike Snitzer 	return NULL;
3588e6ee8c0bSKiyoshi Ueda }
3589e6ee8c0bSKiyoshi Ueda 
3590e6ee8c0bSKiyoshi Ueda void dm_free_md_mempools(struct dm_md_mempools *pools)
3591e6ee8c0bSKiyoshi Ueda {
3592e6ee8c0bSKiyoshi Ueda 	if (!pools)
3593e6ee8c0bSKiyoshi Ueda 		return;
3594e6ee8c0bSKiyoshi Ueda 
3595e6ee8c0bSKiyoshi Ueda 	mempool_destroy(pools->io_pool);
35961ae49ea2SMike Snitzer 	mempool_destroy(pools->rq_pool);
35971ae49ea2SMike Snitzer 
3598e6ee8c0bSKiyoshi Ueda 	if (pools->bs)
3599e6ee8c0bSKiyoshi Ueda 		bioset_free(pools->bs);
3600e6ee8c0bSKiyoshi Ueda 
3601e6ee8c0bSKiyoshi Ueda 	kfree(pools);
3602e6ee8c0bSKiyoshi Ueda }
3603e6ee8c0bSKiyoshi Ueda 
360471cdb697SChristoph Hellwig static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
360571cdb697SChristoph Hellwig 			  u32 flags)
360671cdb697SChristoph Hellwig {
360771cdb697SChristoph Hellwig 	struct mapped_device *md = bdev->bd_disk->private_data;
360871cdb697SChristoph Hellwig 	const struct pr_ops *ops;
360971cdb697SChristoph Hellwig 	fmode_t mode;
3610956a4025SMike Snitzer 	int r;
361171cdb697SChristoph Hellwig 
3612956a4025SMike Snitzer 	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
361371cdb697SChristoph Hellwig 	if (r < 0)
361471cdb697SChristoph Hellwig 		return r;
361571cdb697SChristoph Hellwig 
361671cdb697SChristoph Hellwig 	ops = bdev->bd_disk->fops->pr_ops;
361771cdb697SChristoph Hellwig 	if (ops && ops->pr_register)
361871cdb697SChristoph Hellwig 		r = ops->pr_register(bdev, old_key, new_key, flags);
361971cdb697SChristoph Hellwig 	else
362071cdb697SChristoph Hellwig 		r = -EOPNOTSUPP;
362171cdb697SChristoph Hellwig 
3622956a4025SMike Snitzer 	bdput(bdev);
362371cdb697SChristoph Hellwig 	return r;
362471cdb697SChristoph Hellwig }
362571cdb697SChristoph Hellwig 
362671cdb697SChristoph Hellwig static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
362771cdb697SChristoph Hellwig 			 u32 flags)
362871cdb697SChristoph Hellwig {
362971cdb697SChristoph Hellwig 	struct mapped_device *md = bdev->bd_disk->private_data;
363071cdb697SChristoph Hellwig 	const struct pr_ops *ops;
363171cdb697SChristoph Hellwig 	fmode_t mode;
3632956a4025SMike Snitzer 	int r;
363371cdb697SChristoph Hellwig 
3634956a4025SMike Snitzer 	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
363571cdb697SChristoph Hellwig 	if (r < 0)
363671cdb697SChristoph Hellwig 		return r;
363771cdb697SChristoph Hellwig 
363871cdb697SChristoph Hellwig 	ops = bdev->bd_disk->fops->pr_ops;
363971cdb697SChristoph Hellwig 	if (ops && ops->pr_reserve)
364071cdb697SChristoph Hellwig 		r = ops->pr_reserve(bdev, key, type, flags);
364171cdb697SChristoph Hellwig 	else
364271cdb697SChristoph Hellwig 		r = -EOPNOTSUPP;
364371cdb697SChristoph Hellwig 
3644956a4025SMike Snitzer 	bdput(bdev);
364571cdb697SChristoph Hellwig 	return r;
364671cdb697SChristoph Hellwig }
364771cdb697SChristoph Hellwig 
364871cdb697SChristoph Hellwig static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
364971cdb697SChristoph Hellwig {
365071cdb697SChristoph Hellwig 	struct mapped_device *md = bdev->bd_disk->private_data;
365171cdb697SChristoph Hellwig 	const struct pr_ops *ops;
365271cdb697SChristoph Hellwig 	fmode_t mode;
3653956a4025SMike Snitzer 	int r;
365471cdb697SChristoph Hellwig 
3655956a4025SMike Snitzer 	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
365671cdb697SChristoph Hellwig 	if (r < 0)
365771cdb697SChristoph Hellwig 		return r;
365871cdb697SChristoph Hellwig 
365971cdb697SChristoph Hellwig 	ops = bdev->bd_disk->fops->pr_ops;
366071cdb697SChristoph Hellwig 	if (ops && ops->pr_release)
366171cdb697SChristoph Hellwig 		r = ops->pr_release(bdev, key, type);
366271cdb697SChristoph Hellwig 	else
366371cdb697SChristoph Hellwig 		r = -EOPNOTSUPP;
366471cdb697SChristoph Hellwig 
3665956a4025SMike Snitzer 	bdput(bdev);
366671cdb697SChristoph Hellwig 	return r;
366771cdb697SChristoph Hellwig }
366871cdb697SChristoph Hellwig 
366971cdb697SChristoph Hellwig static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
367071cdb697SChristoph Hellwig 			 enum pr_type type, bool abort)
367171cdb697SChristoph Hellwig {
367271cdb697SChristoph Hellwig 	struct mapped_device *md = bdev->bd_disk->private_data;
367371cdb697SChristoph Hellwig 	const struct pr_ops *ops;
367471cdb697SChristoph Hellwig 	fmode_t mode;
3675956a4025SMike Snitzer 	int r;
367671cdb697SChristoph Hellwig 
3677956a4025SMike Snitzer 	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
367871cdb697SChristoph Hellwig 	if (r < 0)
367971cdb697SChristoph Hellwig 		return r;
368071cdb697SChristoph Hellwig 
368171cdb697SChristoph Hellwig 	ops = bdev->bd_disk->fops->pr_ops;
368271cdb697SChristoph Hellwig 	if (ops && ops->pr_preempt)
368371cdb697SChristoph Hellwig 		r = ops->pr_preempt(bdev, old_key, new_key, type, abort);
368471cdb697SChristoph Hellwig 	else
368571cdb697SChristoph Hellwig 		r = -EOPNOTSUPP;
368671cdb697SChristoph Hellwig 
3687956a4025SMike Snitzer 	bdput(bdev);
368871cdb697SChristoph Hellwig 	return r;
368971cdb697SChristoph Hellwig }
369071cdb697SChristoph Hellwig 
369171cdb697SChristoph Hellwig static int dm_pr_clear(struct block_device *bdev, u64 key)
369271cdb697SChristoph Hellwig {
369371cdb697SChristoph Hellwig 	struct mapped_device *md = bdev->bd_disk->private_data;
369471cdb697SChristoph Hellwig 	const struct pr_ops *ops;
369571cdb697SChristoph Hellwig 	fmode_t mode;
3696956a4025SMike Snitzer 	int r;
369771cdb697SChristoph Hellwig 
3698956a4025SMike Snitzer 	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
369971cdb697SChristoph Hellwig 	if (r < 0)
370071cdb697SChristoph Hellwig 		return r;
370171cdb697SChristoph Hellwig 
370271cdb697SChristoph Hellwig 	ops = bdev->bd_disk->fops->pr_ops;
370371cdb697SChristoph Hellwig 	if (ops && ops->pr_clear)
370471cdb697SChristoph Hellwig 		r = ops->pr_clear(bdev, key);
370571cdb697SChristoph Hellwig 	else
370671cdb697SChristoph Hellwig 		r = -EOPNOTSUPP;
370771cdb697SChristoph Hellwig 
3708956a4025SMike Snitzer 	bdput(bdev);
370971cdb697SChristoph Hellwig 	return r;
371071cdb697SChristoph Hellwig }
371171cdb697SChristoph Hellwig 
371271cdb697SChristoph Hellwig static const struct pr_ops dm_pr_ops = {
371371cdb697SChristoph Hellwig 	.pr_register	= dm_pr_register,
371471cdb697SChristoph Hellwig 	.pr_reserve	= dm_pr_reserve,
371571cdb697SChristoph Hellwig 	.pr_release	= dm_pr_release,
371671cdb697SChristoph Hellwig 	.pr_preempt	= dm_pr_preempt,
371771cdb697SChristoph Hellwig 	.pr_clear	= dm_pr_clear,
371871cdb697SChristoph Hellwig };
371971cdb697SChristoph Hellwig 
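/*
 * Illustrative sketch (hypothetical refactor, not part of this revision):
 * the five pr_ops wrappers above share one shape -- grab the underlying
 * bdev, dispatch to its pr_ops method if one exists, then drop the
 * reference.  A helper taking a callback could factor that out:
 */
#if 0
static int example_call_pr(struct block_device *bdev,
			   int (*fn)(struct block_device *bdev, void *data),
			   void *data)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	fmode_t mode;
	int r;

	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
	if (r < 0)
		return r;

	r = fn(bdev, data);	/* callback checks for the pr_ops method */

	bdput(bdev);
	return r;
}
#endif
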
372083d5cde4SAlexey Dobriyan static const struct block_device_operations dm_blk_dops = {
37211da177e4SLinus Torvalds 	.open = dm_blk_open,
37221da177e4SLinus Torvalds 	.release = dm_blk_close,
3723aa129a22SMilan Broz 	.ioctl = dm_blk_ioctl,
37243ac51e74SDarrick J. Wong 	.getgeo = dm_blk_getgeo,
372571cdb697SChristoph Hellwig 	.pr_ops = &dm_pr_ops,
37261da177e4SLinus Torvalds 	.owner = THIS_MODULE
37271da177e4SLinus Torvalds };
37281da177e4SLinus Torvalds 
37291da177e4SLinus Torvalds /*
37301da177e4SLinus Torvalds  * module hooks
37311da177e4SLinus Torvalds  */
37321da177e4SLinus Torvalds module_init(dm_init);
37331da177e4SLinus Torvalds module_exit(dm_exit);
37341da177e4SLinus Torvalds 
37351da177e4SLinus Torvalds module_param(major, uint, 0);
37361da177e4SLinus Torvalds MODULE_PARM_DESC(major, "The major number of the device mapper");
3737f4790826SMike Snitzer 
3738e8603136SMike Snitzer module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
3739e8603136SMike Snitzer MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
3740e8603136SMike Snitzer 
3741f4790826SMike Snitzer module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
3742f4790826SMike Snitzer MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");
3743f4790826SMike Snitzer 
374417e149b8SMike Snitzer module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
374517e149b8SMike Snitzer MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");
374617e149b8SMike Snitzer 
3747faad87dfSMike Snitzer module_param(dm_mq_nr_hw_queues, uint, S_IRUGO | S_IWUSR);
3748faad87dfSMike Snitzer MODULE_PARM_DESC(dm_mq_nr_hw_queues, "Number of hardware queues for request-based dm-mq devices");
3749faad87dfSMike Snitzer 
3750faad87dfSMike Snitzer module_param(dm_mq_queue_depth, uint, S_IRUGO | S_IWUSR);
3751faad87dfSMike Snitzer MODULE_PARM_DESC(dm_mq_queue_depth, "Queue depth for request-based dm-mq devices");
3752faad87dfSMike Snitzer 
3753115485e8SMike Snitzer module_param(dm_numa_node, int, S_IRUGO | S_IWUSR);
3754115485e8SMike Snitzer MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");
3755115485e8SMike Snitzer 
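/*
 * Illustrative usage of the parameters above (assuming dm is built as the
 * dm_mod module): they may be given at load time, and the S_IWUSR ones may
 * also be changed at runtime through sysfs, e.g.:
 *
 *   modprobe dm_mod major=240 reserved_bio_based_ios=32
 *   echo 512 > /sys/module/dm_mod/parameters/dm_mq_queue_depth
 */
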
37561da177e4SLinus Torvalds MODULE_DESCRIPTION(DM_NAME " driver");
37571da177e4SLinus Torvalds MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
37581da177e4SLinus Torvalds MODULE_LICENSE("GPL");
3759