/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/smp_lock.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/delay.h>

#include <trace/events/block.h>

#define DM_MSG_PREFIX "core"

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_SPINLOCK(_minor_lock);
/*
 * For bio-based dm.
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	atomic_t io_count;
	struct bio *bio;
	unsigned long start_time;
	spinlock_t endio_lock;
};

/*
 * For bio-based dm.
 * One of these is allocated per target within a bio.  Hopefully
 * this will be simplified out one day.
 */
struct dm_target_io {
	struct dm_io *io;
	struct dm_target *ti;
	union map_info info;
};

/*
 * For request-based dm.
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
	struct mapped_device *md;
	struct dm_target *ti;
	struct request *orig, clone;
	int error;
	union map_info info;
};

/*
 * For request-based dm.
 * One of these is allocated per bio.
 */
struct dm_rq_clone_bio_info {
	struct bio *orig;
	struct dm_rq_target_io *tio;
};

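/*
 * Return the map_info of a bio owned by bio-based dm, or NULL if the
 * bio carries no dm target io.
 */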
union map_info *dm_get_mapinfo(struct bio *bio)
{
	if (bio && bio->bi_private)
		return &((struct dm_target_io *)bio->bi_private)->info;
	return NULL;
}

union map_info *dm_get_rq_mapinfo(struct request *rq)
{
	if (rq && rq->end_io_data)
		return &((struct dm_rq_target_io *)rq->end_io_data)->info;
	return NULL;
}
EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);

#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_QUEUE_IO_TO_THREAD 6

/*
 * Work processed by per-device workqueue.
 */
struct mapped_device {
	struct rw_semaphore io_lock;
	struct mutex suspend_lock;
	rwlock_t map_lock;
	atomic_t holders;
	atomic_t open_count;

	unsigned long flags;

	struct request_queue *queue;
	unsigned type;
	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;

	struct gendisk *disk;
	char name[16];

	void *interface_ptr;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	atomic_t pending[2];
	wait_queue_head_t wait;
	struct work_struct work;
	struct bio_list deferred;
	spinlock_t deferred_lock;

	/*
	 * An error from the barrier request currently being processed.
	 */
	int barrier_error;

	/*
	 * Protect barrier_error from concurrent endio processing
	 * in request-based dm.
	 */
	spinlock_t barrier_error_lock;

	/*
	 * Processing queue (flush/barriers)
	 */
	struct workqueue_struct *wq;
	struct work_struct barrier_work;

	/* A pointer to the currently processing pre/post flush request */
	struct request *flush_request;

	/*
	 * The current mapping.
	 */
	struct dm_table *map;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;
	mempool_t *tio_pool;

	struct bio_set *bs;

	/*
	 * Event handling.
	 */
	atomic_t event_nr;
	wait_queue_head_t eventq;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/*
	 * freeze/thaw support requires holding onto a super block
	 */
	struct super_block *frozen_sb;
	struct block_device *bdev;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/* For saving the address of __make_request for request-based dm */
	make_request_fn *saved_make_request_fn;

	/* sysfs handle */
	struct kobject kobj;

	/* zero-length barrier that will be cloned and submitted to targets */
	struct bio barrier_bio;
};

/*
 * Mempools pre-allocated at table load time.
 */
struct dm_md_mempools {
	mempool_t *io_pool;
	mempool_t *tio_pool;
	struct bio_set *bs;
};

#define MIN_IOS 256
static struct kmem_cache *_io_cache;
static struct kmem_cache *_tio_cache;
static struct kmem_cache *_rq_tio_cache;
static struct kmem_cache *_rq_bio_info_cache;

static int __init local_init(void)
{
	int r = -ENOMEM;

	/* allocate a slab for the dm_ios */
	_io_cache = KMEM_CACHE(dm_io, 0);
	if (!_io_cache)
		return r;

	/* allocate a slab for the target ios */
	_tio_cache = KMEM_CACHE(dm_target_io, 0);
	if (!_tio_cache)
		goto out_free_io_cache;

	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
	if (!_rq_tio_cache)
		goto out_free_tio_cache;

	_rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0);
	if (!_rq_bio_info_cache)
		goto out_free_rq_tio_cache;

	r = dm_uevent_init();
	if (r)
		goto out_free_rq_bio_info_cache;

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_uevent_exit;

	if (!_major)
		_major = r;

	return 0;

out_uevent_exit:
	dm_uevent_exit();
out_free_rq_bio_info_cache:
	kmem_cache_destroy(_rq_bio_info_cache);
out_free_rq_tio_cache:
	kmem_cache_destroy(_rq_tio_cache);
out_free_tio_cache:
	kmem_cache_destroy(_tio_cache);
out_free_io_cache:
	kmem_cache_destroy(_io_cache);

	return r;
}

static void local_exit(void)
{
	kmem_cache_destroy(_rq_bio_info_cache);
	kmem_cache_destroy(_rq_tio_cache);
	kmem_cache_destroy(_tio_cache);
	kmem_cache_destroy(_io_cache);
	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_io_init,
	dm_kcopyd_init,
	dm_interface_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_io_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

      bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();
}

/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{
	return test_bit(DMF_DELETING, &md->flags);
}

static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	lock_kernel();
	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);

out:
	spin_unlock(&_minor_lock);
	unlock_kernel();

	return md ? 0 : -ENXIO;
}

static int dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md = disk->private_data;

	lock_kernel();
	atomic_dec(&md->open_count);
	dm_put(md);
	unlock_kernel();

	return 0;
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md))
		r = -EBUSY;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

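/*
 * Forward an ioctl to the device's single target, provided a live
 * table exists and the device is not suspended.
 */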
static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	struct dm_table *map = dm_get_live_table(md);
	struct dm_target *tgt;
	int r = -ENOTTY;

	if (!map || !dm_table_get_size(map))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		goto out;

	tgt = dm_table_get_target(map, 0);

	if (dm_suspended_md(md)) {
		r = -EAGAIN;
		goto out;
	}

	if (tgt->type->ioctl)
		r = tgt->type->ioctl(tgt, cmd, arg);

out:
	dm_table_put(map);

	return r;
}

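/*
 * Allocation helpers: ios, target ios and clone bio infos come from
 * the per-device mempools set up at table load time.
 */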
static struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
	mempool_free(tio, md->tio_pool);
}

static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
					    gfp_t gfp_mask)
{
	return mempool_alloc(md->tio_pool, gfp_mask);
}

static void free_rq_tio(struct dm_rq_target_io *tio)
{
	mempool_free(tio, tio->md->tio_pool);
}

static struct dm_rq_clone_bio_info *alloc_bio_info(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_ATOMIC);
}

static void free_bio_info(struct dm_rq_clone_bio_info *info)
{
	mempool_free(info, info->tio->md->io_pool);
}

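/* Total number of in-flight ios (reads plus writes) on the device. */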
static int md_in_flight(struct mapped_device *md)
{
	return atomic_read(&md->pending[READ]) +
	       atomic_read(&md->pending[WRITE]);
}

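/*
 * Disk statistics accounting: start_io_acct() charges an io against the
 * in-flight counters when it is submitted; end_io_acct() credits it back
 * and accumulates the duration when it completes.
 */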
static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	int cpu;
	int rw = bio_data_dir(io->bio);

	io->start_time = jiffies;

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_unlock();
	dm_disk(md)->part0.in_flight[rw] = atomic_inc_return(&md->pending[rw]);
}

static void end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	unsigned long duration = jiffies - io->start_time;
	int pending, cpu;
	int rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
	part_stat_unlock();

	/*
	 * After this is decremented the bio must not be touched if it is
	 * a barrier.
	 */
	dm_disk(md)->part0.in_flight[rw] = pending =
		atomic_dec_return(&md->pending[rw]);
	pending += atomic_read(&md->pending[rw^0x1]);

	/* nudge anyone waiting on suspend queue */
	if (!pending)
		wake_up(&md->wait);
}

/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	down_write(&md->io_lock);

	spin_lock_irq(&md->deferred_lock);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irq(&md->deferred_lock);

	if (!test_and_set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags))
		queue_work(md->wq, &md->work);

	up_write(&md->io_lock);
}

/*
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_table_put() when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md)
{
	struct dm_table *t;
	unsigned long flags;

	read_lock_irqsave(&md->map_lock, flags);
	t = md->map;
	if (t)
		dm_table_get(t);
	read_unlock_irqrestore(&md->map_lock, flags);

	return t;
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

/*-----------------------------------------------------------------
 * CRUD START:
 *   A more elegant solution is in the works that uses the queue
 *   merge fn, unfortunately there are a couple of changes to
 *   the block layer that I want to make for this.  So in the
 *   interests of getting something for people to use I give
 *   you this clearly demarcated crap.
 *---------------------------------------------------------------*/

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, int error)
{
	unsigned long flags;
	int io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;

	/* Push-back supersedes any I/O errors */
	if (unlikely(error)) {
		spin_lock_irqsave(&io->endio_lock, flags);
		if (!(io->error > 0 && __noflush_suspending(md)))
			io->error = error;
		spin_unlock_irqrestore(&io->endio_lock, flags);
	}

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->error == DM_ENDIO_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md)) {
				if (!(io->bio->bi_rw & REQ_HARDBARRIER))
					bio_list_add_head(&md->deferred,
							  io->bio);
			} else
				/* noflush suspend was interrupted. */
				io->error = -EIO;
			spin_unlock_irqrestore(&md->deferred_lock, flags);
		}

		io_error = io->error;
		bio = io->bio;

		if (bio->bi_rw & REQ_HARDBARRIER) {
			/*
			 * There can be just one barrier request so we use
			 * a per-device variable for error reporting.
			 * Note that you can't touch the bio after end_io_acct.
			 *
			 * We ignore -EOPNOTSUPP for an empty flush reported by
			 * underlying devices. We assume that if the device
			 * doesn't support empty barriers, it doesn't need
			 * cache flushing commands.
			 */
			if (!md->barrier_error &&
			    !(bio_empty_barrier(bio) && io_error == -EOPNOTSUPP))
				md->barrier_error = io_error;
			end_io_acct(io);
			free_io(md, io);
		} else {
			end_io_acct(io);
			free_io(md, io);

			if (io_error != DM_ENDIO_REQUEUE) {
				trace_block_bio_complete(md->queue, bio);

				bio_endio(bio, io_error);
			}
		}
	}
}

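/*
 * Completion handler for clones of bio-based ios.  Gives the target's
 * end_io hook a chance to intervene before the original io is
 * completed via dec_pending().
 */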
static void clone_endio(struct bio *bio, int error)
{
	int r = 0;
	struct dm_target_io *tio = bio->bi_private;
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		error = -EIO;

	if (endio) {
		r = endio(tio->ti, bio, error, &tio->info);
		if (r < 0 || r == DM_ENDIO_REQUEUE)
			/*
			 * error and requeue request are handled
			 * in dec_pending().
			 */
			error = r;
		else if (r == DM_ENDIO_INCOMPLETE)
			/* The target will handle the io */
			return;
		else if (r) {
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	/*
	 * Store md for cleanup instead of tio which is about to get freed.
	 */
	bio->bi_private = md->bs;

	free_tio(md, tio);
	bio_put(bio);
	dec_pending(io, error);
}

/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone, int error)
{
	struct dm_rq_clone_bio_info *info = clone->bi_private;
	struct dm_rq_target_io *tio = info->tio;
	struct bio *bio = info->orig;
	unsigned int nr_bytes = info->orig->bi_size;

	bio_put(clone);

	if (tio->error)
		/*
		 * An error has already been detected on the request.
		 * Once an error has occurred, just let clone->end_io()
		 * handle the remainder.
		 */
		return;
	else if (error) {
		/*
		 * Don't report the error to the upper layer yet.
		 * The error handling decision is made by the target driver,
		 * when the request is completed.
		 */
		tio->error = error;
		return;
	}

	/*
	 * I/O for the bio successfully completed.
	 * Report the data completion to the upper layer.
	 */

	/*
	 * bios are processed from the head of the list.
	 * So the completing bio should always be rq->bio.
	 * If it's not, something is wrong.
	 */
	if (tio->orig->bio != bio)
		DMERR("bio completion is going in the middle of the request");

	/*
	 * Update the original request.
	 * Do not use blk_end_request() here, because it may complete
	 * the original request before the clone, and break the ordering.
	 */
	blk_update_request(tio->orig, 0, nr_bytes);
}

static void store_barrier_error(struct mapped_device *md, int error)
{
	unsigned long flags;

	spin_lock_irqsave(&md->barrier_error_lock, flags);
	/*
	 * Basically, the first error is taken, but:
	 *   -EOPNOTSUPP supersedes any I/O error.
	 *   Requeue request supersedes any I/O error but -EOPNOTSUPP.
	 */
	if (!md->barrier_error || error == -EOPNOTSUPP ||
	    (md->barrier_error != -EOPNOTSUPP &&
	     error == DM_ENDIO_REQUEUE))
		md->barrier_error = error;
	spin_unlock_irqrestore(&md->barrier_error_lock, flags);
}

/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Or do dm_get() before calling this function and dm_put() later.
 */
static void rq_completed(struct mapped_device *md, int rw, int run_queue)
{
	atomic_dec(&md->pending[rw]);

	/* nudge anyone waiting on suspend queue */
	if (!md_in_flight(md))
		wake_up(&md->wait);

	if (run_queue)
		blk_run_queue(md->queue);

	/*
	 * dm_put() must be at the end of this function. See the comment above.
	 */
	dm_put(md);
}

static void free_rq_clone(struct request *clone)
{
	struct dm_rq_target_io *tio = clone->end_io_data;

	blk_rq_unprep_clone(clone);
	free_rq_tio(tio);
}

/*
 * Complete the clone and the original request.
 * Must be called without queue lock.
 */
static void dm_end_request(struct request *clone, int error)
{
	int rw = rq_data_dir(clone);
	int run_queue = 1;
	bool is_barrier = clone->cmd_flags & REQ_HARDBARRIER;
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;

	if (rq->cmd_type == REQ_TYPE_BLOCK_PC && !is_barrier) {
		rq->errors = clone->errors;
		rq->resid_len = clone->resid_len;

		if (rq->sense)
			/*
			 * We are using the sense buffer of the original
			 * request.
			 * So setting the length of the sense data is enough.
			 */
			rq->sense_len = clone->sense_len;
	}

	free_rq_clone(clone);

	if (unlikely(is_barrier)) {
		if (unlikely(error))
			store_barrier_error(md, error);
		run_queue = 0;
	} else
		blk_end_request_all(rq, error);

	rq_completed(md, rw, run_queue);
}

static void dm_unprep_request(struct request *rq)
{
	struct request *clone = rq->special;

	rq->special = NULL;
	rq->cmd_flags &= ~REQ_DONTPREP;

	free_rq_clone(clone);
}

/*
 * Requeue the original request of a clone.
 */
void dm_requeue_unmapped_request(struct request *clone)
{
	int rw = rq_data_dir(clone);
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	struct request_queue *q = rq->q;
	unsigned long flags;

	if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) {
		/*
		 * Barrier clones share an original request.
		 * Leave it to dm_end_request(), which handles this special
		 * case.
		 */
		dm_end_request(clone, DM_ENDIO_REQUEUE);
		return;
	}

	dm_unprep_request(rq);

	spin_lock_irqsave(q->queue_lock, flags);
	if (elv_queue_empty(q))
		blk_plug_device(q);
	blk_requeue_request(q, rq);
	spin_unlock_irqrestore(q->queue_lock, flags);

	rq_completed(md, rw, 0);
}
EXPORT_SYMBOL_GPL(dm_requeue_unmapped_request);

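/*
 * Stop/start the underlying request queue.  The __-prefixed variants
 * expect q->queue_lock to be held; the plain ones take it themselves.
 */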
static void __stop_queue(struct request_queue *q)
{
	blk_stop_queue(q);
}

static void stop_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__stop_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static void __start_queue(struct request_queue *q)
{
	if (blk_queue_stopped(q))
		blk_start_queue(q);
}

static void start_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

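/*
 * Finish a clone: consult the target's rq_end_io hook (if the request
 * was mapped), then complete, requeue or leave the request to the
 * target according to the return value.
 */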
static void dm_done(struct request *clone, int error, bool mapped)
{
	int r = error;
	struct dm_rq_target_io *tio = clone->end_io_data;
	dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io;

	if (mapped && rq_end_io)
		r = rq_end_io(tio->ti, clone, error, &tio->info);

	if (r <= 0)
		/* The target wants to complete the I/O */
		dm_end_request(clone, r);
	else if (r == DM_ENDIO_INCOMPLETE)
		/* The target will handle the I/O */
		return;
	else if (r == DM_ENDIO_REQUEUE)
		/* The target wants to requeue the I/O */
		dm_requeue_unmapped_request(clone);
	else {
		DMWARN("unimplemented target endio return value: %d", r);
		BUG();
	}
}

/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
{
	bool mapped = true;
	struct request *clone = rq->completion_data;
	struct dm_rq_target_io *tio = clone->end_io_data;

	if (rq->cmd_flags & REQ_FAILED)
		mapped = false;

	dm_done(clone, tio->error, mapped);
}

/*
 * Complete the clone and the original request with the error status
 * through softirq context.
 */
static void dm_complete_request(struct request *clone, int error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct request *rq = tio->orig;

	if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) {
		/*
		 * Barrier clones share an original request, so we can't use
		 * softirq_done with the original.
		 * Pass the clone to dm_done() directly in this special case.
		 * It is safe (even if clone->q->queue_lock is held here)
		 * because there is no I/O dispatching during the completion
		 * of a barrier clone.
		 */
		dm_done(clone, error, true);
		return;
	}

	tio->error = error;
	rq->completion_data = clone;
	blk_complete_request(rq);
}

/*
 * Complete the not-mapped clone and the original request with the error status
 * through softirq context.
 * The target's rq_end_io() function isn't called.
 * This may be used when the target's map_rq() function fails.
 */
void dm_kill_unmapped_request(struct request *clone, int error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct request *rq = tio->orig;

	if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) {
		/*
		 * Barrier clones share an original request.
		 * Leave it to dm_end_request(), which handles this special
		 * case.
		 */
		BUG_ON(error > 0);
		dm_end_request(clone, error);
		return;
	}

	rq->cmd_flags |= REQ_FAILED;
	dm_complete_request(clone, error);
}
EXPORT_SYMBOL_GPL(dm_kill_unmapped_request);

/*
 * Called with the queue lock held
 */
static void end_clone_request(struct request *clone, int error)
{
	/*
	 * This just cleans up the information of the queue in which
	 * the clone was dispatched.
	 * The clone is *NOT* actually freed here because it is allocated
	 * from dm's own mempool and REQ_ALLOCED isn't set in
	 * clone->cmd_flags.
	 */
	__blk_put_request(clone->q, clone);

	/*
	 * Actual request completion is done in a softirq context which doesn't
	 * hold the queue lock.  Otherwise, deadlock could occur because:
	 *     - another request may be submitted by the upper level driver
	 *       of the stacking during the completion
	 *     - the submission which requires queue lock may be done
	 *       against this queue
	 */
	dm_complete_request(clone, error);
}

static sector_t max_io_len(struct mapped_device *md,
			   sector_t sector, struct dm_target *ti)
{
	sector_t offset = sector - ti->begin;
	sector_t len = ti->len - offset;

	/*
	 * Does the target need to split even further?
	 */
	if (ti->split_io) {
		sector_t boundary;
		boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
			   - offset;
		if (len > boundary)
			len = boundary;
	}

	return len;
}

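/*
 * Hand a clone to the target's map function and dispatch, requeue or
 * error it according to the return value.
 */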
static void __map_bio(struct dm_target *ti, struct bio *clone,
		      struct dm_target_io *tio)
{
	int r;
	sector_t sector;
	struct mapped_device *md;

	clone->bi_end_io = clone_endio;
	clone->bi_private = tio;

	/*
	 * Map the clone.  If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&tio->io->io_count);
	sector = clone->bi_sector;
	r = ti->type->map(ti, clone, &tio->info);
	if (r == DM_MAPIO_REMAPPED) {
		/* the bio has been remapped so dispatch it */

		trace_block_remap(bdev_get_queue(clone->bi_bdev), clone,
				    tio->io->bio->bi_bdev->bd_dev, sector);

		generic_make_request(clone);
	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
		/* error the io and bail out, or requeue it if needed */
		md = tio->io->md;
		dec_pending(tio->io, r);
		/*
		 * Store bio_set for cleanup.
		 */
		clone->bi_private = md->bs;
		bio_put(clone);
		free_tio(md, tio);
	} else if (r) {
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}
}

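/*
 * Bookkeeping used while splitting one incoming bio into clones: the
 * remaining sector range, the current bvec index and the io being built.
 */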
struct clone_info {
	struct mapped_device *md;
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	sector_t sector_count;
	unsigned short idx;
};

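/*
 * Clones stash their bio_set in bi_private, so the destructor knows
 * where to return them.
 */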
11043676347aSPeter Osterlund static void dm_bio_destructor(struct bio *bio)
11053676347aSPeter Osterlund {
11069faf400fSStefan Bader 	struct bio_set *bs = bio->bi_private;
11079faf400fSStefan Bader 
11089faf400fSStefan Bader 	bio_free(bio, bs);
11093676347aSPeter Osterlund }
11103676347aSPeter Osterlund 
11111da177e4SLinus Torvalds /*
11121da177e4SLinus Torvalds  * Creates a little bio that is just does part of a bvec.
11131da177e4SLinus Torvalds  */
11141da177e4SLinus Torvalds static struct bio *split_bvec(struct bio *bio, sector_t sector,
11151da177e4SLinus Torvalds 			      unsigned short idx, unsigned int offset,
11169faf400fSStefan Bader 			      unsigned int len, struct bio_set *bs)
11171da177e4SLinus Torvalds {
11181da177e4SLinus Torvalds 	struct bio *clone;
11191da177e4SLinus Torvalds 	struct bio_vec *bv = bio->bi_io_vec + idx;
11201da177e4SLinus Torvalds 
11219faf400fSStefan Bader 	clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
11223676347aSPeter Osterlund 	clone->bi_destructor = dm_bio_destructor;
11231da177e4SLinus Torvalds 	*clone->bi_io_vec = *bv;
11241da177e4SLinus Torvalds 
11251da177e4SLinus Torvalds 	clone->bi_sector = sector;
11261da177e4SLinus Torvalds 	clone->bi_bdev = bio->bi_bdev;
11277b6d91daSChristoph Hellwig 	clone->bi_rw = bio->bi_rw & ~REQ_HARDBARRIER;
11281da177e4SLinus Torvalds 	clone->bi_vcnt = 1;
11291da177e4SLinus Torvalds 	clone->bi_size = to_bytes(len);
11301da177e4SLinus Torvalds 	clone->bi_io_vec->bv_offset = offset;
11311da177e4SLinus Torvalds 	clone->bi_io_vec->bv_len = clone->bi_size;
1132f3e1d26eSMartin K. Petersen 	clone->bi_flags |= 1 << BIO_CLONED;
11331da177e4SLinus Torvalds 
11349c47008dSMartin K. Petersen 	if (bio_integrity(bio)) {
11357878cba9SMartin K. Petersen 		bio_integrity_clone(clone, bio, GFP_NOIO, bs);
11369c47008dSMartin K. Petersen 		bio_integrity_trim(clone,
11379c47008dSMartin K. Petersen 				   bio_sector_offset(bio, idx, offset), len);
11389c47008dSMartin K. Petersen 	}
11399c47008dSMartin K. Petersen 
11401da177e4SLinus Torvalds 	return clone;
11411da177e4SLinus Torvalds }
11421da177e4SLinus Torvalds 
11431da177e4SLinus Torvalds /*
11441da177e4SLinus Torvalds  * Creates a bio that consists of range of complete bvecs.
11451da177e4SLinus Torvalds  */
11461da177e4SLinus Torvalds static struct bio *clone_bio(struct bio *bio, sector_t sector,
11471da177e4SLinus Torvalds 			     unsigned short idx, unsigned short bv_count,
11489faf400fSStefan Bader 			     unsigned int len, struct bio_set *bs)
11491da177e4SLinus Torvalds {
11501da177e4SLinus Torvalds 	struct bio *clone;
11511da177e4SLinus Torvalds 
11529faf400fSStefan Bader 	clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
11539faf400fSStefan Bader 	__bio_clone(clone, bio);
11547b6d91daSChristoph Hellwig 	clone->bi_rw &= ~REQ_HARDBARRIER;
11559faf400fSStefan Bader 	clone->bi_destructor = dm_bio_destructor;
11561da177e4SLinus Torvalds 	clone->bi_sector = sector;
11571da177e4SLinus Torvalds 	clone->bi_idx = idx;
11581da177e4SLinus Torvalds 	clone->bi_vcnt = idx + bv_count;
11591da177e4SLinus Torvalds 	clone->bi_size = to_bytes(len);
11601da177e4SLinus Torvalds 	clone->bi_flags &= ~(1 << BIO_SEG_VALID);
11611da177e4SLinus Torvalds 
11629c47008dSMartin K. Petersen 	if (bio_integrity(bio)) {
11637878cba9SMartin K. Petersen 		bio_integrity_clone(clone, bio, GFP_NOIO, bs);
11649c47008dSMartin K. Petersen 
11659c47008dSMartin K. Petersen 		if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
11669c47008dSMartin K. Petersen 			bio_integrity_trim(clone,
11679c47008dSMartin K. Petersen 					   bio_sector_offset(bio, idx, 0), len);
11689c47008dSMartin K. Petersen 	}
11699c47008dSMartin K. Petersen 
11701da177e4SLinus Torvalds 	return clone;
11711da177e4SLinus Torvalds }
11721da177e4SLinus Torvalds 
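/*
 * Allocate a per-target io from the md's mempool and associate it
 * with the current dm_io and target.
 */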
11739015df24SAlasdair G Kergon static struct dm_target_io *alloc_tio(struct clone_info *ci,
11749015df24SAlasdair G Kergon 				      struct dm_target *ti)
1175f9ab94ceSMikulas Patocka {
11769015df24SAlasdair G Kergon 	struct dm_target_io *tio = mempool_alloc(ci->md->tio_pool, GFP_NOIO);
1177f9ab94ceSMikulas Patocka 
1178f9ab94ceSMikulas Patocka 	tio->io = ci->io;
1179f9ab94ceSMikulas Patocka 	tio->ti = ti;
1180f9ab94ceSMikulas Patocka 	memset(&tio->info, 0, sizeof(tio->info));
11819015df24SAlasdair G Kergon 
11829015df24SAlasdair G Kergon 	return tio;
11839015df24SAlasdair G Kergon }
11849015df24SAlasdair G Kergon 
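/*
 * Clone ci->bio in its entirety and map the clone to the given target,
 * tagging it with request_nr so targets that accept several flush or
 * discard requests can tell the copies apart.
 */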
1185*06a426ceSMike Snitzer static void __issue_target_request(struct clone_info *ci, struct dm_target *ti,
118657cba5d3SMike Snitzer 				   unsigned request_nr)
11879015df24SAlasdair G Kergon {
11889015df24SAlasdair G Kergon 	struct dm_target_io *tio = alloc_tio(ci, ti);
11899015df24SAlasdair G Kergon 	struct bio *clone;
11909015df24SAlasdair G Kergon 
119157cba5d3SMike Snitzer 	tio->info.target_request_nr = request_nr;
1192f9ab94ceSMikulas Patocka 
1193*06a426ceSMike Snitzer 	/*
1194*06a426ceSMike Snitzer 	 * Discard requests require the bio's inline iovecs be initialized.
1195*06a426ceSMike Snitzer 	 * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush
1196*06a426ceSMike Snitzer 	 * and discard, so no need for concern about wasted bvec allocations.
1197*06a426ceSMike Snitzer 	 */
1198*06a426ceSMike Snitzer 	clone = bio_alloc_bioset(GFP_NOIO, ci->bio->bi_max_vecs, ci->md->bs);
1199f9ab94ceSMikulas Patocka 	__bio_clone(clone, ci->bio);
1200f9ab94ceSMikulas Patocka 	clone->bi_destructor = dm_bio_destructor;
1201f9ab94ceSMikulas Patocka 
1202f9ab94ceSMikulas Patocka 	__map_bio(ti, clone, tio);
1203f9ab94ceSMikulas Patocka }
1204f9ab94ceSMikulas Patocka 
1205*06a426ceSMike Snitzer static void __issue_target_requests(struct clone_info *ci, struct dm_target *ti,
1206*06a426ceSMike Snitzer 				    unsigned num_requests)
1207*06a426ceSMike Snitzer {
1208*06a426ceSMike Snitzer 	unsigned request_nr;
1209*06a426ceSMike Snitzer 
1210*06a426ceSMike Snitzer 	for (request_nr = 0; request_nr < num_requests; request_nr++)
1211*06a426ceSMike Snitzer 		__issue_target_request(ci, ti, request_nr);
1212*06a426ceSMike Snitzer }
1213*06a426ceSMike Snitzer 
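/*
 * Send an empty barrier to every target in the table, issuing
 * ti->num_flush_requests clones per target.
 */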
1214f9ab94ceSMikulas Patocka static int __clone_and_map_empty_barrier(struct clone_info *ci)
1215f9ab94ceSMikulas Patocka {
1216*06a426ceSMike Snitzer 	unsigned target_nr = 0;
1217f9ab94ceSMikulas Patocka 	struct dm_target *ti;
1218f9ab94ceSMikulas Patocka 
1219f9ab94ceSMikulas Patocka 	while ((ti = dm_table_get_target(ci->map, target_nr++)))
1220*06a426ceSMike Snitzer 		__issue_target_requests(ci, ti, ti->num_flush_requests);
1221f9ab94ceSMikulas Patocka 
1222f9ab94ceSMikulas Patocka 	ci->sector_count = 0;
1223f9ab94ceSMikulas Patocka 
1224f9ab94ceSMikulas Patocka 	return 0;
1225f9ab94ceSMikulas Patocka }
1226f9ab94ceSMikulas Patocka 
12275ae89a87SMike Snitzer /*
12285ae89a87SMike Snitzer  * Perform all io with a single clone.
12295ae89a87SMike Snitzer  */
12305ae89a87SMike Snitzer static void __clone_and_map_simple(struct clone_info *ci, struct dm_target *ti)
12315ae89a87SMike Snitzer {
12325ae89a87SMike Snitzer 	struct bio *clone, *bio = ci->bio;
12335ae89a87SMike Snitzer 	struct dm_target_io *tio;
12345ae89a87SMike Snitzer 
12355ae89a87SMike Snitzer 	tio = alloc_tio(ci, ti);
12365ae89a87SMike Snitzer 	clone = clone_bio(bio, ci->sector, ci->idx,
12375ae89a87SMike Snitzer 			  bio->bi_vcnt - ci->idx, ci->sector_count,
12385ae89a87SMike Snitzer 			  ci->md->bs);
12395ae89a87SMike Snitzer 	__map_bio(ti, clone, tio);
12405ae89a87SMike Snitzer 	ci->sector_count = 0;
12415ae89a87SMike Snitzer }
12425ae89a87SMike Snitzer 
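/*
 * Map a discard: find the target for ci->sector, re-check that it
 * still accepts discards, then issue ti->num_discard_requests clones.
 */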
12435ae89a87SMike Snitzer static int __clone_and_map_discard(struct clone_info *ci)
12445ae89a87SMike Snitzer {
12455ae89a87SMike Snitzer 	struct dm_target *ti;
12465ae89a87SMike Snitzer 	sector_t max;
12475ae89a87SMike Snitzer 
12485ae89a87SMike Snitzer 	ti = dm_table_find_target(ci->map, ci->sector);
12495ae89a87SMike Snitzer 	if (!dm_target_is_valid(ti))
12505ae89a87SMike Snitzer 		return -EIO;
12515ae89a87SMike Snitzer 
12525ae89a87SMike Snitzer 	/*
12535ae89a87SMike Snitzer 	 * Even though the device advertised discard support,
12545ae89a87SMike Snitzer 	 * reconfiguration might have changed that since the
12555ae89a87SMike Snitzer 	 * check was performed.
12565ae89a87SMike Snitzer 	 */
12575ae89a87SMike Snitzer 
12585ae89a87SMike Snitzer 	if (!ti->num_discard_requests)
12595ae89a87SMike Snitzer 		return -EOPNOTSUPP;
12605ae89a87SMike Snitzer 
12615ae89a87SMike Snitzer 	max = max_io_len(ci->md, ci->sector, ti);
12625ae89a87SMike Snitzer 
12635ae89a87SMike Snitzer 	if (ci->sector_count > max)
12645ae89a87SMike Snitzer 		/*
12655ae89a87SMike Snitzer 		 * FIXME: Handle a discard that spans two or more targets.
12665ae89a87SMike Snitzer 		 */
12675ae89a87SMike Snitzer 		return -EOPNOTSUPP;
12685ae89a87SMike Snitzer 
1269*06a426ceSMike Snitzer 	__issue_target_requests(ci, ti, ti->num_discard_requests);
1270*06a426ceSMike Snitzer 
1271*06a426ceSMike Snitzer 	ci->sector_count = 0;
12725ae89a87SMike Snitzer 
12735ae89a87SMike Snitzer 	return 0;
12745ae89a87SMike Snitzer }
12755ae89a87SMike Snitzer 
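/*
 * Clone as much of the remaining io as the target for ci->sector can
 * take: the whole remainder, a run of complete bvecs, or part of a
 * bvec that must be split across targets.
 */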
1276512875bdSJun'ichi Nomura static int __clone_and_map(struct clone_info *ci)
12771da177e4SLinus Torvalds {
12781da177e4SLinus Torvalds 	struct bio *clone, *bio = ci->bio;
1279512875bdSJun'ichi Nomura 	struct dm_target *ti;
1280512875bdSJun'ichi Nomura 	sector_t len = 0, max;
1281028867acSAlasdair G Kergon 	struct dm_target_io *tio;
12821da177e4SLinus Torvalds 
1283f9ab94ceSMikulas Patocka 	if (unlikely(bio_empty_barrier(bio)))
1284f9ab94ceSMikulas Patocka 		return __clone_and_map_empty_barrier(ci);
1285f9ab94ceSMikulas Patocka 
12865ae89a87SMike Snitzer 	if (unlikely(bio->bi_rw & REQ_DISCARD))
12875ae89a87SMike Snitzer 		return __clone_and_map_discard(ci);
12885ae89a87SMike Snitzer 
1289512875bdSJun'ichi Nomura 	ti = dm_table_find_target(ci->map, ci->sector);
1290512875bdSJun'ichi Nomura 	if (!dm_target_is_valid(ti))
1291512875bdSJun'ichi Nomura 		return -EIO;
1292512875bdSJun'ichi Nomura 
1293512875bdSJun'ichi Nomura 	max = max_io_len(ci->md, ci->sector, ti);
1294512875bdSJun'ichi Nomura 
12951da177e4SLinus Torvalds 	if (ci->sector_count <= max) {
12961da177e4SLinus Torvalds 		/*
12971da177e4SLinus Torvalds 		 * Optimise for the simple case where we can do all of
12981da177e4SLinus Torvalds 		 * the remaining io with a single clone.
12991da177e4SLinus Torvalds 		 */
13005ae89a87SMike Snitzer 		__clone_and_map_simple(ci, ti);
13011da177e4SLinus Torvalds 
13021da177e4SLinus Torvalds 	} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
13031da177e4SLinus Torvalds 		/*
13041da177e4SLinus Torvalds 		 * There are some bvecs that don't span targets.
13051da177e4SLinus Torvalds 		 * Do as many of these as possible.
13061da177e4SLinus Torvalds 		 */
13071da177e4SLinus Torvalds 		int i;
13081da177e4SLinus Torvalds 		sector_t remaining = max;
13091da177e4SLinus Torvalds 		sector_t bv_len;
13101da177e4SLinus Torvalds 
13111da177e4SLinus Torvalds 		for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
13121da177e4SLinus Torvalds 			bv_len = to_sector(bio->bi_io_vec[i].bv_len);
13131da177e4SLinus Torvalds 
13141da177e4SLinus Torvalds 			if (bv_len > remaining)
13151da177e4SLinus Torvalds 				break;
13161da177e4SLinus Torvalds 
13171da177e4SLinus Torvalds 			remaining -= bv_len;
13181da177e4SLinus Torvalds 			len += bv_len;
13191da177e4SLinus Torvalds 		}
13201da177e4SLinus Torvalds 
13215ae89a87SMike Snitzer 		tio = alloc_tio(ci, ti);
13229faf400fSStefan Bader 		clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len,
13239faf400fSStefan Bader 				  ci->md->bs);
13241da177e4SLinus Torvalds 		__map_bio(ti, clone, tio);
13251da177e4SLinus Torvalds 
13261da177e4SLinus Torvalds 		ci->sector += len;
13271da177e4SLinus Torvalds 		ci->sector_count -= len;
13281da177e4SLinus Torvalds 		ci->idx = i;
13291da177e4SLinus Torvalds 
13301da177e4SLinus Torvalds 	} else {
13311da177e4SLinus Torvalds 		/*
1332d2044a94SAlasdair G Kergon 		 * Handle a bvec that must be split between two or more targets.
13331da177e4SLinus Torvalds 		 */
13341da177e4SLinus Torvalds 		struct bio_vec *bv = bio->bi_io_vec + ci->idx;
1335d2044a94SAlasdair G Kergon 		sector_t remaining = to_sector(bv->bv_len);
1336d2044a94SAlasdair G Kergon 		unsigned int offset = 0;
13371da177e4SLinus Torvalds 
1338d2044a94SAlasdair G Kergon 		do {
1339d2044a94SAlasdair G Kergon 			if (offset) {
13401da177e4SLinus Torvalds 				ti = dm_table_find_target(ci->map, ci->sector);
1341512875bdSJun'ichi Nomura 				if (!dm_target_is_valid(ti))
1342512875bdSJun'ichi Nomura 					return -EIO;
1343512875bdSJun'ichi Nomura 
1344d2044a94SAlasdair G Kergon 				max = max_io_len(ci->md, ci->sector, ti);
1345d2044a94SAlasdair G Kergon 			}
1346d2044a94SAlasdair G Kergon 
1347d2044a94SAlasdair G Kergon 			len = min(remaining, max);
1348d2044a94SAlasdair G Kergon 
13495ae89a87SMike Snitzer 			tio = alloc_tio(ci, ti);
1350d2044a94SAlasdair G Kergon 			clone = split_bvec(bio, ci->sector, ci->idx,
13519faf400fSStefan Bader 					   bv->bv_offset + offset, len,
13529faf400fSStefan Bader 					   ci->md->bs);
1353d2044a94SAlasdair G Kergon 
13541da177e4SLinus Torvalds 			__map_bio(ti, clone, tio);
13551da177e4SLinus Torvalds 
13561da177e4SLinus Torvalds 			ci->sector += len;
13571da177e4SLinus Torvalds 			ci->sector_count -= len;
1358d2044a94SAlasdair G Kergon 			offset += to_bytes(len);
1359d2044a94SAlasdair G Kergon 		} while (remaining -= len);
1360d2044a94SAlasdair G Kergon 
13611da177e4SLinus Torvalds 		ci->idx++;
13621da177e4SLinus Torvalds 	}
1363512875bdSJun'ichi Nomura 
1364512875bdSJun'ichi Nomura 	return 0;
13651da177e4SLinus Torvalds }
13661da177e4SLinus Torvalds 
13671da177e4SLinus Torvalds /*
13688a53c28dSMikulas Patocka  * Split the bio into several clones and submit it to targets.
13691da177e4SLinus Torvalds  */
1370f0b9a450SMikulas Patocka static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
13711da177e4SLinus Torvalds {
13721da177e4SLinus Torvalds 	struct clone_info ci;
1373512875bdSJun'ichi Nomura 	int error = 0;
13741da177e4SLinus Torvalds 
13757c666411SAlasdair G Kergon 	ci.map = dm_get_live_table(md);
1376f0b9a450SMikulas Patocka 	if (unlikely(!ci.map)) {
13777b6d91daSChristoph Hellwig 		if (!(bio->bi_rw & REQ_HARDBARRIER))
1378f0b9a450SMikulas Patocka 			bio_io_error(bio);
1379af7e466aSMikulas Patocka 		else
13805aa2781dSMikulas Patocka 			if (!md->barrier_error)
1381af7e466aSMikulas Patocka 				md->barrier_error = -EIO;
1382f0b9a450SMikulas Patocka 		return;
1383f0b9a450SMikulas Patocka 	}
1384692d0eb9SMikulas Patocka 
13851da177e4SLinus Torvalds 	ci.md = md;
13861da177e4SLinus Torvalds 	ci.bio = bio;
13871da177e4SLinus Torvalds 	ci.io = alloc_io(md);
13881da177e4SLinus Torvalds 	ci.io->error = 0;
13891da177e4SLinus Torvalds 	atomic_set(&ci.io->io_count, 1);
13901da177e4SLinus Torvalds 	ci.io->bio = bio;
13911da177e4SLinus Torvalds 	ci.io->md = md;
1392f88fb981SKiyoshi Ueda 	spin_lock_init(&ci.io->endio_lock);
13931da177e4SLinus Torvalds 	ci.sector = bio->bi_sector;
13941da177e4SLinus Torvalds 	ci.sector_count = bio_sectors(bio);
1395f9ab94ceSMikulas Patocka 	if (unlikely(bio_empty_barrier(bio)))
1396f9ab94ceSMikulas Patocka 		ci.sector_count = 1;
13971da177e4SLinus Torvalds 	ci.idx = bio->bi_idx;
13981da177e4SLinus Torvalds 
13993eaf840eSJun'ichi "Nick" Nomura 	start_io_acct(ci.io);
1400512875bdSJun'ichi Nomura 	while (ci.sector_count && !error)
1401512875bdSJun'ichi Nomura 		error = __clone_and_map(&ci);
14021da177e4SLinus Torvalds 
14031da177e4SLinus Torvalds 	/* drop the extra reference count */
1404512875bdSJun'ichi Nomura 	dec_pending(ci.io, error);
14051da177e4SLinus Torvalds 	dm_table_put(ci.map);
14061da177e4SLinus Torvalds }
14071da177e4SLinus Torvalds /*-----------------------------------------------------------------
14081da177e4SLinus Torvalds  * CRUD END
14091da177e4SLinus Torvalds  *---------------------------------------------------------------*/
14101da177e4SLinus Torvalds 
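/*
 * The queue's merge_bvec_fn: report how many bytes may be added to
 * the bio at this offset without later forcing a split across target
 * boundaries.
 */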
1411f6fccb12SMilan Broz static int dm_merge_bvec(struct request_queue *q,
1412f6fccb12SMilan Broz 			 struct bvec_merge_data *bvm,
1413f6fccb12SMilan Broz 			 struct bio_vec *biovec)
1414f6fccb12SMilan Broz {
1415f6fccb12SMilan Broz 	struct mapped_device *md = q->queuedata;
14167c666411SAlasdair G Kergon 	struct dm_table *map = dm_get_live_table(md);
1417f6fccb12SMilan Broz 	struct dm_target *ti;
1418f6fccb12SMilan Broz 	sector_t max_sectors;
14195037108aSMikulas Patocka 	int max_size = 0;
1420f6fccb12SMilan Broz 
1421f6fccb12SMilan Broz 	if (unlikely(!map))
14225037108aSMikulas Patocka 		goto out;
1423f6fccb12SMilan Broz 
1424f6fccb12SMilan Broz 	ti = dm_table_find_target(map, bvm->bi_sector);
1425b01cd5acSMikulas Patocka 	if (!dm_target_is_valid(ti))
1426b01cd5acSMikulas Patocka 		goto out_table;
1427f6fccb12SMilan Broz 
1428f6fccb12SMilan Broz 	/*
1429f6fccb12SMilan Broz 	 * Find maximum amount of I/O that won't need splitting
1430f6fccb12SMilan Broz 	 */
1431f6fccb12SMilan Broz 	max_sectors = min(max_io_len(md, bvm->bi_sector, ti),
1432f6fccb12SMilan Broz 			  (sector_t) BIO_MAX_SECTORS);
1433f6fccb12SMilan Broz 	max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
1434f6fccb12SMilan Broz 	if (max_size < 0)
1435f6fccb12SMilan Broz 		max_size = 0;
1436f6fccb12SMilan Broz 
1437f6fccb12SMilan Broz 	/*
1438f6fccb12SMilan Broz 	 * merge_bvec_fn() returns the number of bytes
1439f6fccb12SMilan Broz 	 * it can accept at this offset;
1440f6fccb12SMilan Broz 	 * max_size is the precomputed maximal io size.
1441f6fccb12SMilan Broz 	 */
1442f6fccb12SMilan Broz 	if (max_size && ti->type->merge)
1443f6fccb12SMilan Broz 		max_size = ti->type->merge(ti, bvm, biovec, max_size);
14448cbeb67aSMikulas Patocka 	/*
14458cbeb67aSMikulas Patocka 	 * If the target doesn't support the merge method and some of the devices
14468cbeb67aSMikulas Patocka 	 * provided their merge_bvec method (we know this by looking at
14478cbeb67aSMikulas Patocka 	 * queue_max_hw_sectors), then we can't allow bios with multiple vector
14488cbeb67aSMikulas Patocka 	 * entries.  So always set max_size to 0, and the code below allows
14498cbeb67aSMikulas Patocka 	 * just one page.
14508cbeb67aSMikulas Patocka 	 */
14518cbeb67aSMikulas Patocka 	else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
14538cbeb67aSMikulas Patocka 		max_size = 0;
1454f6fccb12SMilan Broz 
1455b01cd5acSMikulas Patocka out_table:
14565037108aSMikulas Patocka 	dm_table_put(map);
14575037108aSMikulas Patocka 
14585037108aSMikulas Patocka out:
1459f6fccb12SMilan Broz 	/*
1460f6fccb12SMilan Broz 	 * Always allow an entire first page
1461f6fccb12SMilan Broz 	 */
1462f6fccb12SMilan Broz 	if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
1463f6fccb12SMilan Broz 		max_size = biovec->bv_len;
1464f6fccb12SMilan Broz 
1465f6fccb12SMilan Broz 	return max_size;
1466f6fccb12SMilan Broz }
1467f6fccb12SMilan Broz 
14681da177e4SLinus Torvalds /*
14691da177e4SLinus Torvalds  * The request function that just remaps the bio built up by
14701da177e4SLinus Torvalds  * dm_merge_bvec.
14711da177e4SLinus Torvalds  */
1472cec47e3dSKiyoshi Ueda static int _dm_request(struct request_queue *q, struct bio *bio)
14731da177e4SLinus Torvalds {
147412f03a49SKevin Corry 	int rw = bio_data_dir(bio);
14751da177e4SLinus Torvalds 	struct mapped_device *md = q->queuedata;
1476c9959059STejun Heo 	int cpu;
14771da177e4SLinus Torvalds 
14782ca3310eSAlasdair G Kergon 	down_read(&md->io_lock);
14791da177e4SLinus Torvalds 
1480074a7acaSTejun Heo 	cpu = part_stat_lock();
1481074a7acaSTejun Heo 	part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
1482074a7acaSTejun Heo 	part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
1483074a7acaSTejun Heo 	part_stat_unlock();
148412f03a49SKevin Corry 
14851da177e4SLinus Torvalds 	/*
14861eb787ecSAlasdair G Kergon 	 * If we're suspended or the thread is processing barriers
14871eb787ecSAlasdair G Kergon 	 * we have to queue this io for later.
14881da177e4SLinus Torvalds 	 */
1489af7e466aSMikulas Patocka 	if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) ||
14907b6d91daSChristoph Hellwig 	    unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
14912ca3310eSAlasdair G Kergon 		up_read(&md->io_lock);
14921da177e4SLinus Torvalds 
149354d9a1b4SAlasdair G Kergon 		if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) &&
149454d9a1b4SAlasdair G Kergon 		    bio_rw(bio) == READA) {
149554d9a1b4SAlasdair G Kergon 			bio_io_error(bio);
149654d9a1b4SAlasdair G Kergon 			return 0;
149754d9a1b4SAlasdair G Kergon 		}
14981da177e4SLinus Torvalds 
149992c63902SMikulas Patocka 		queue_io(md, bio);
15001da177e4SLinus Torvalds 
150192c63902SMikulas Patocka 		return 0;
15021da177e4SLinus Torvalds 	}
15031da177e4SLinus Torvalds 
1504f0b9a450SMikulas Patocka 	__split_and_process_bio(md, bio);
15052ca3310eSAlasdair G Kergon 	up_read(&md->io_lock);
1506f0b9a450SMikulas Patocka 	return 0;
15071da177e4SLinus Torvalds }
15081da177e4SLinus Torvalds 
1509cec47e3dSKiyoshi Ueda static int dm_make_request(struct request_queue *q, struct bio *bio)
1510cec47e3dSKiyoshi Ueda {
1511cec47e3dSKiyoshi Ueda 	struct mapped_device *md = q->queuedata;
1512cec47e3dSKiyoshi Ueda 
1513cec47e3dSKiyoshi Ueda 	return md->saved_make_request_fn(q, bio); /* call __make_request() */
1514cec47e3dSKiyoshi Ueda }
1515cec47e3dSKiyoshi Ueda 
1516cec47e3dSKiyoshi Ueda static int dm_request_based(struct mapped_device *md)
1517cec47e3dSKiyoshi Ueda {
1518cec47e3dSKiyoshi Ueda 	return blk_queue_stackable(md->queue);
1519cec47e3dSKiyoshi Ueda }
1520cec47e3dSKiyoshi Ueda 
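/*
 * Top-level request function: route the bio down the request-based or
 * bio-based path according to the queue type.
 */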
1521cec47e3dSKiyoshi Ueda static int dm_request(struct request_queue *q, struct bio *bio)
1522cec47e3dSKiyoshi Ueda {
1523cec47e3dSKiyoshi Ueda 	struct mapped_device *md = q->queuedata;
1524cec47e3dSKiyoshi Ueda 
1525cec47e3dSKiyoshi Ueda 	if (dm_request_based(md))
1526cec47e3dSKiyoshi Ueda 		return dm_make_request(q, bio);
1527cec47e3dSKiyoshi Ueda 
1528cec47e3dSKiyoshi Ueda 	return _dm_request(q, bio);
1529cec47e3dSKiyoshi Ueda }
1530cec47e3dSKiyoshi Ueda 
1531d0bcb878SKiyoshi Ueda static bool dm_rq_is_flush_request(struct request *rq)
1532d0bcb878SKiyoshi Ueda {
1533144d6ed5SFUJITA Tomonori 	return rq->cmd_flags & REQ_FLUSH;
1537d0bcb878SKiyoshi Ueda }
1538d0bcb878SKiyoshi Ueda 
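/*
 * Dispatch a clone to the underlying device's queue, completing the
 * request with an error if the insertion fails.
 */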
1539cec47e3dSKiyoshi Ueda void dm_dispatch_request(struct request *rq)
1540cec47e3dSKiyoshi Ueda {
1541cec47e3dSKiyoshi Ueda 	int r;
1542cec47e3dSKiyoshi Ueda 
1543cec47e3dSKiyoshi Ueda 	if (blk_queue_io_stat(rq->q))
1544cec47e3dSKiyoshi Ueda 		rq->cmd_flags |= REQ_IO_STAT;
1545cec47e3dSKiyoshi Ueda 
1546cec47e3dSKiyoshi Ueda 	rq->start_time = jiffies;
1547cec47e3dSKiyoshi Ueda 	r = blk_insert_cloned_request(rq->q, rq);
1548cec47e3dSKiyoshi Ueda 	if (r)
1549cec47e3dSKiyoshi Ueda 		dm_complete_request(rq, r);
1550cec47e3dSKiyoshi Ueda }
1551cec47e3dSKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_dispatch_request);
1552cec47e3dSKiyoshi Ueda 
1553cec47e3dSKiyoshi Ueda static void dm_rq_bio_destructor(struct bio *bio)
1554cec47e3dSKiyoshi Ueda {
1555cec47e3dSKiyoshi Ueda 	struct dm_rq_clone_bio_info *info = bio->bi_private;
1556cec47e3dSKiyoshi Ueda 	struct mapped_device *md = info->tio->md;
1557cec47e3dSKiyoshi Ueda 
1558cec47e3dSKiyoshi Ueda 	free_bio_info(info);
1559cec47e3dSKiyoshi Ueda 	bio_free(bio, md->bs);
1560cec47e3dSKiyoshi Ueda }
1561cec47e3dSKiyoshi Ueda 
1562cec47e3dSKiyoshi Ueda static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
1563cec47e3dSKiyoshi Ueda 				 void *data)
1564cec47e3dSKiyoshi Ueda {
1565cec47e3dSKiyoshi Ueda 	struct dm_rq_target_io *tio = data;
1566cec47e3dSKiyoshi Ueda 	struct mapped_device *md = tio->md;
1567cec47e3dSKiyoshi Ueda 	struct dm_rq_clone_bio_info *info = alloc_bio_info(md);
1568cec47e3dSKiyoshi Ueda 
1569cec47e3dSKiyoshi Ueda 	if (!info)
1570cec47e3dSKiyoshi Ueda 		return -ENOMEM;
1571cec47e3dSKiyoshi Ueda 
1572cec47e3dSKiyoshi Ueda 	info->orig = bio_orig;
1573cec47e3dSKiyoshi Ueda 	info->tio = tio;
1574cec47e3dSKiyoshi Ueda 	bio->bi_end_io = end_clone_bio;
1575cec47e3dSKiyoshi Ueda 	bio->bi_private = info;
1576cec47e3dSKiyoshi Ueda 	bio->bi_destructor = dm_rq_bio_destructor;
1577cec47e3dSKiyoshi Ueda 
1578cec47e3dSKiyoshi Ueda 	return 0;
1579cec47e3dSKiyoshi Ueda }
1580cec47e3dSKiyoshi Ueda 
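/*
 * Initialise the clone request: flush requests are built from scratch
 * as barrier writes, everything else is bio-cloned from the original.
 */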
1581cec47e3dSKiyoshi Ueda static int setup_clone(struct request *clone, struct request *rq,
1582cec47e3dSKiyoshi Ueda 		       struct dm_rq_target_io *tio)
1583cec47e3dSKiyoshi Ueda {
1584d0bcb878SKiyoshi Ueda 	int r;
1585cec47e3dSKiyoshi Ueda 
1586d0bcb878SKiyoshi Ueda 	if (dm_rq_is_flush_request(rq)) {
1587d0bcb878SKiyoshi Ueda 		blk_rq_init(NULL, clone);
1588d0bcb878SKiyoshi Ueda 		clone->cmd_type = REQ_TYPE_FS;
1589d0bcb878SKiyoshi Ueda 		clone->cmd_flags |= (REQ_HARDBARRIER | WRITE);
1590d0bcb878SKiyoshi Ueda 	} else {
1591d0bcb878SKiyoshi Ueda 		r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC,
1592d0bcb878SKiyoshi Ueda 				      dm_rq_bio_constructor, tio);
1593cec47e3dSKiyoshi Ueda 		if (r)
1594cec47e3dSKiyoshi Ueda 			return r;
1595cec47e3dSKiyoshi Ueda 
1596cec47e3dSKiyoshi Ueda 		clone->cmd = rq->cmd;
1597cec47e3dSKiyoshi Ueda 		clone->cmd_len = rq->cmd_len;
1598cec47e3dSKiyoshi Ueda 		clone->sense = rq->sense;
1599cec47e3dSKiyoshi Ueda 		clone->buffer = rq->buffer;
1600d0bcb878SKiyoshi Ueda 	}
1601d0bcb878SKiyoshi Ueda 
1602cec47e3dSKiyoshi Ueda 	clone->end_io = end_clone_request;
1603cec47e3dSKiyoshi Ueda 	clone->end_io_data = tio;
1604cec47e3dSKiyoshi Ueda 
1605cec47e3dSKiyoshi Ueda 	return 0;
1606cec47e3dSKiyoshi Ueda }
1607cec47e3dSKiyoshi Ueda 
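/*
 * Allocate a dm_rq_target_io and set up the clone request embedded in
 * it.  Returns NULL on allocation failure.
 */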
16086facdaffSKiyoshi Ueda static struct request *clone_rq(struct request *rq, struct mapped_device *md,
16096facdaffSKiyoshi Ueda 				gfp_t gfp_mask)
16106facdaffSKiyoshi Ueda {
16116facdaffSKiyoshi Ueda 	struct request *clone;
16126facdaffSKiyoshi Ueda 	struct dm_rq_target_io *tio;
16136facdaffSKiyoshi Ueda 
16146facdaffSKiyoshi Ueda 	tio = alloc_rq_tio(md, gfp_mask);
16156facdaffSKiyoshi Ueda 	if (!tio)
16166facdaffSKiyoshi Ueda 		return NULL;
16176facdaffSKiyoshi Ueda 
16186facdaffSKiyoshi Ueda 	tio->md = md;
16196facdaffSKiyoshi Ueda 	tio->ti = NULL;
16206facdaffSKiyoshi Ueda 	tio->orig = rq;
16216facdaffSKiyoshi Ueda 	tio->error = 0;
16226facdaffSKiyoshi Ueda 	memset(&tio->info, 0, sizeof(tio->info));
16236facdaffSKiyoshi Ueda 
16246facdaffSKiyoshi Ueda 	clone = &tio->clone;
16256facdaffSKiyoshi Ueda 	if (setup_clone(clone, rq, tio)) {
16266facdaffSKiyoshi Ueda 		/* -ENOMEM */
16276facdaffSKiyoshi Ueda 		free_rq_tio(tio);
16286facdaffSKiyoshi Ueda 		return NULL;
16296facdaffSKiyoshi Ueda 	}
16306facdaffSKiyoshi Ueda 
16316facdaffSKiyoshi Ueda 	return clone;
16326facdaffSKiyoshi Ueda }
16336facdaffSKiyoshi Ueda 
1634cec47e3dSKiyoshi Ueda /*
1635cec47e3dSKiyoshi Ueda  * Called with the queue lock held.
1636cec47e3dSKiyoshi Ueda  */
1637cec47e3dSKiyoshi Ueda static int dm_prep_fn(struct request_queue *q, struct request *rq)
1638cec47e3dSKiyoshi Ueda {
1639cec47e3dSKiyoshi Ueda 	struct mapped_device *md = q->queuedata;
1640cec47e3dSKiyoshi Ueda 	struct request *clone;
1641cec47e3dSKiyoshi Ueda 
1642d0bcb878SKiyoshi Ueda 	if (unlikely(dm_rq_is_flush_request(rq)))
1643d0bcb878SKiyoshi Ueda 		return BLKPREP_OK;
1644d0bcb878SKiyoshi Ueda 
1645cec47e3dSKiyoshi Ueda 	if (unlikely(rq->special)) {
1646cec47e3dSKiyoshi Ueda 		DMWARN("Already has something in rq->special.");
1647cec47e3dSKiyoshi Ueda 		return BLKPREP_KILL;
1648cec47e3dSKiyoshi Ueda 	}
1649cec47e3dSKiyoshi Ueda 
16506facdaffSKiyoshi Ueda 	clone = clone_rq(rq, md, GFP_ATOMIC);
16516facdaffSKiyoshi Ueda 	if (!clone)
1652cec47e3dSKiyoshi Ueda 		return BLKPREP_DEFER;
1653cec47e3dSKiyoshi Ueda 
1654cec47e3dSKiyoshi Ueda 	rq->special = clone;
1655cec47e3dSKiyoshi Ueda 	rq->cmd_flags |= REQ_DONTPREP;
1656cec47e3dSKiyoshi Ueda 
1657cec47e3dSKiyoshi Ueda 	return BLKPREP_OK;
1658cec47e3dSKiyoshi Ueda }
1659cec47e3dSKiyoshi Ueda 
16609eef87daSKiyoshi Ueda /*
16619eef87daSKiyoshi Ueda  * Returns:
16629eef87daSKiyoshi Ueda  * 0  : the request has been processed (not requeued)
16639eef87daSKiyoshi Ueda  * !0 : the request has been requeued
16649eef87daSKiyoshi Ueda  */
16659eef87daSKiyoshi Ueda static int map_request(struct dm_target *ti, struct request *clone,
1666cec47e3dSKiyoshi Ueda 		       struct mapped_device *md)
1667cec47e3dSKiyoshi Ueda {
16689eef87daSKiyoshi Ueda 	int r, requeued = 0;
1669cec47e3dSKiyoshi Ueda 	struct dm_rq_target_io *tio = clone->end_io_data;
1670cec47e3dSKiyoshi Ueda 
1671cec47e3dSKiyoshi Ueda 	/*
1672cec47e3dSKiyoshi Ueda 	 * Hold the md reference here for the in-flight I/O.
1673cec47e3dSKiyoshi Ueda 	 * We can't rely on the reference count by device opener,
1674cec47e3dSKiyoshi Ueda 	 * because the device may be closed during the request completion
1675cec47e3dSKiyoshi Ueda 	 * when all bios are completed.
1676cec47e3dSKiyoshi Ueda 	 * See the comment in rq_completed() too.
1677cec47e3dSKiyoshi Ueda 	 */
1678cec47e3dSKiyoshi Ueda 	dm_get(md);
1679cec47e3dSKiyoshi Ueda 
1680cec47e3dSKiyoshi Ueda 	tio->ti = ti;
1681cec47e3dSKiyoshi Ueda 	r = ti->type->map_rq(ti, clone, &tio->info);
1682cec47e3dSKiyoshi Ueda 	switch (r) {
1683cec47e3dSKiyoshi Ueda 	case DM_MAPIO_SUBMITTED:
1684cec47e3dSKiyoshi Ueda 		/* The target has taken the I/O and will submit it itself later */
1685cec47e3dSKiyoshi Ueda 		break;
1686cec47e3dSKiyoshi Ueda 	case DM_MAPIO_REMAPPED:
1687cec47e3dSKiyoshi Ueda 		/* The target has remapped the I/O so dispatch it */
16886db4ccd6SJun'ichi Nomura 		trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
16896db4ccd6SJun'ichi Nomura 				     blk_rq_pos(tio->orig));
1690cec47e3dSKiyoshi Ueda 		dm_dispatch_request(clone);
1691cec47e3dSKiyoshi Ueda 		break;
1692cec47e3dSKiyoshi Ueda 	case DM_MAPIO_REQUEUE:
1693cec47e3dSKiyoshi Ueda 		/* The target wants to requeue the I/O */
1694cec47e3dSKiyoshi Ueda 		dm_requeue_unmapped_request(clone);
16959eef87daSKiyoshi Ueda 		requeued = 1;
1696cec47e3dSKiyoshi Ueda 		break;
1697cec47e3dSKiyoshi Ueda 	default:
1698cec47e3dSKiyoshi Ueda 		if (r > 0) {
1699cec47e3dSKiyoshi Ueda 			DMWARN("unimplemented target map return value: %d", r);
1700cec47e3dSKiyoshi Ueda 			BUG();
1701cec47e3dSKiyoshi Ueda 		}
1702cec47e3dSKiyoshi Ueda 
1703cec47e3dSKiyoshi Ueda 		/* The target wants to complete the I/O */
1704cec47e3dSKiyoshi Ueda 		dm_kill_unmapped_request(clone, r);
1705cec47e3dSKiyoshi Ueda 		break;
1706cec47e3dSKiyoshi Ueda 	}
17079eef87daSKiyoshi Ueda 
17089eef87daSKiyoshi Ueda 	return requeued;
1709cec47e3dSKiyoshi Ueda }
1710cec47e3dSKiyoshi Ueda 
1711cec47e3dSKiyoshi Ueda /*
1712cec47e3dSKiyoshi Ueda  * q->request_fn for request-based dm.
1713cec47e3dSKiyoshi Ueda  * Called with the queue lock held.
1714cec47e3dSKiyoshi Ueda  */
1715cec47e3dSKiyoshi Ueda static void dm_request_fn(struct request_queue *q)
1716cec47e3dSKiyoshi Ueda {
1717cec47e3dSKiyoshi Ueda 	struct mapped_device *md = q->queuedata;
17187c666411SAlasdair G Kergon 	struct dm_table *map = dm_get_live_table(md);
1719cec47e3dSKiyoshi Ueda 	struct dm_target *ti;
1720b4324feeSKiyoshi Ueda 	struct request *rq, *clone;
1721cec47e3dSKiyoshi Ueda 
1722cec47e3dSKiyoshi Ueda 	/*
1723b4324feeSKiyoshi Ueda 	 * For suspend, check blk_queue_stopped() and increment
1724b4324feeSKiyoshi Ueda 	 * ->pending within a single queue_lock so as not to increment
1725b4324feeSKiyoshi Ueda 	 * the number of in-flight I/Os after the queue is stopped in
1726b4324feeSKiyoshi Ueda 	 * dm_suspend().
1727cec47e3dSKiyoshi Ueda 	 */
1728cec47e3dSKiyoshi Ueda 	while (!blk_queue_plugged(q) && !blk_queue_stopped(q)) {
1729cec47e3dSKiyoshi Ueda 		rq = blk_peek_request(q);
1730cec47e3dSKiyoshi Ueda 		if (!rq)
1731cec47e3dSKiyoshi Ueda 			goto plug_and_out;
1732cec47e3dSKiyoshi Ueda 
1733d0bcb878SKiyoshi Ueda 		if (unlikely(dm_rq_is_flush_request(rq))) {
1734d0bcb878SKiyoshi Ueda 			BUG_ON(md->flush_request);
1735d0bcb878SKiyoshi Ueda 			md->flush_request = rq;
1736d0bcb878SKiyoshi Ueda 			blk_start_request(rq);
1737d0bcb878SKiyoshi Ueda 			queue_work(md->wq, &md->barrier_work);
1738d0bcb878SKiyoshi Ueda 			goto out;
1739d0bcb878SKiyoshi Ueda 		}
1740d0bcb878SKiyoshi Ueda 
1741cec47e3dSKiyoshi Ueda 		ti = dm_table_find_target(map, blk_rq_pos(rq));
1742cec47e3dSKiyoshi Ueda 		if (ti->type->busy && ti->type->busy(ti))
1743cec47e3dSKiyoshi Ueda 			goto plug_and_out;
1744cec47e3dSKiyoshi Ueda 
1745cec47e3dSKiyoshi Ueda 		blk_start_request(rq);
1746b4324feeSKiyoshi Ueda 		clone = rq->special;
1747b4324feeSKiyoshi Ueda 		atomic_inc(&md->pending[rq_data_dir(clone)]);
1748b4324feeSKiyoshi Ueda 
1749cec47e3dSKiyoshi Ueda 		spin_unlock(q->queue_lock);
17509eef87daSKiyoshi Ueda 		if (map_request(ti, clone, md))
17519eef87daSKiyoshi Ueda 			goto requeued;
17529eef87daSKiyoshi Ueda 
1753cec47e3dSKiyoshi Ueda 		spin_lock_irq(q->queue_lock);
1754cec47e3dSKiyoshi Ueda 	}
1755cec47e3dSKiyoshi Ueda 
1756cec47e3dSKiyoshi Ueda 	goto out;
1757cec47e3dSKiyoshi Ueda 
17589eef87daSKiyoshi Ueda requeued:
17599eef87daSKiyoshi Ueda 	spin_lock_irq(q->queue_lock);
17609eef87daSKiyoshi Ueda 
1761cec47e3dSKiyoshi Ueda plug_and_out:
1762cec47e3dSKiyoshi Ueda 	if (!elv_queue_empty(q))
1763cec47e3dSKiyoshi Ueda 		/* Some requests still remain, retry later */
1764cec47e3dSKiyoshi Ueda 		blk_plug_device(q);
1765cec47e3dSKiyoshi Ueda 
1766cec47e3dSKiyoshi Ueda out:
1767cec47e3dSKiyoshi Ueda 	dm_table_put(map);
1768cec47e3dSKiyoshi Ueda 
1769cec47e3dSKiyoshi Ueda 	return;
1770cec47e3dSKiyoshi Ueda }
1771cec47e3dSKiyoshi Ueda 
1772cec47e3dSKiyoshi Ueda int dm_underlying_device_busy(struct request_queue *q)
1773cec47e3dSKiyoshi Ueda {
1774cec47e3dSKiyoshi Ueda 	return blk_lld_busy(q);
1775cec47e3dSKiyoshi Ueda }
1776cec47e3dSKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_underlying_device_busy);
1777cec47e3dSKiyoshi Ueda 
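/*
 * Report the device as busy whenever there is no live table or a
 * suspend is in progress; otherwise ask the targets.
 */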
1778cec47e3dSKiyoshi Ueda static int dm_lld_busy(struct request_queue *q)
1779cec47e3dSKiyoshi Ueda {
1780cec47e3dSKiyoshi Ueda 	int r;
1781cec47e3dSKiyoshi Ueda 	struct mapped_device *md = q->queuedata;
17827c666411SAlasdair G Kergon 	struct dm_table *map = dm_get_live_table(md);
1783cec47e3dSKiyoshi Ueda 
1784cec47e3dSKiyoshi Ueda 	if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))
1785cec47e3dSKiyoshi Ueda 		r = 1;
1786cec47e3dSKiyoshi Ueda 	else
1787cec47e3dSKiyoshi Ueda 		r = dm_table_any_busy_target(map);
1788cec47e3dSKiyoshi Ueda 
1789cec47e3dSKiyoshi Ueda 	dm_table_put(map);
1790cec47e3dSKiyoshi Ueda 
1791cec47e3dSKiyoshi Ueda 	return r;
1792cec47e3dSKiyoshi Ueda }
1793cec47e3dSKiyoshi Ueda 
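/*
 * Unplug the md's own queue (for request-based dm) and every device
 * in the live table.
 */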
1794165125e1SJens Axboe static void dm_unplug_all(struct request_queue *q)
17951da177e4SLinus Torvalds {
17961da177e4SLinus Torvalds 	struct mapped_device *md = q->queuedata;
17977c666411SAlasdair G Kergon 	struct dm_table *map = dm_get_live_table(md);
17981da177e4SLinus Torvalds 
17991da177e4SLinus Torvalds 	if (map) {
1800cec47e3dSKiyoshi Ueda 		if (dm_request_based(md))
1801cec47e3dSKiyoshi Ueda 			generic_unplug_device(q);
1802cec47e3dSKiyoshi Ueda 
18031da177e4SLinus Torvalds 		dm_table_unplug_all(map);
18041da177e4SLinus Torvalds 		dm_table_put(map);
18051da177e4SLinus Torvalds 	}
18061da177e4SLinus Torvalds }
18071da177e4SLinus Torvalds 
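/*
 * congested_fn for the backing_dev_info: while blocked for suspend
 * every queried bit is reported as set; otherwise the md's own queue
 * (request-based) or the targets (bio-based) are consulted.
 */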
18081da177e4SLinus Torvalds static int dm_any_congested(void *congested_data, int bdi_bits)
18091da177e4SLinus Torvalds {
18108a57dfc6SChandra Seetharaman 	int r = bdi_bits;
18118a57dfc6SChandra Seetharaman 	struct mapped_device *md = congested_data;
18128a57dfc6SChandra Seetharaman 	struct dm_table *map;
18131da177e4SLinus Torvalds 
18141eb787ecSAlasdair G Kergon 	if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
18157c666411SAlasdair G Kergon 		map = dm_get_live_table(md);
18168a57dfc6SChandra Seetharaman 		if (map) {
1817cec47e3dSKiyoshi Ueda 			/*
1818cec47e3dSKiyoshi Ueda 			 * Request-based dm cares only about its own queue
1819cec47e3dSKiyoshi Ueda 			 * when queried for request_queue congestion status
1820cec47e3dSKiyoshi Ueda 			 */
1821cec47e3dSKiyoshi Ueda 			if (dm_request_based(md))
1822cec47e3dSKiyoshi Ueda 				r = md->queue->backing_dev_info.state &
1823cec47e3dSKiyoshi Ueda 				    bdi_bits;
1824cec47e3dSKiyoshi Ueda 			else
18251da177e4SLinus Torvalds 				r = dm_table_any_congested(map, bdi_bits);
1826cec47e3dSKiyoshi Ueda 
18271da177e4SLinus Torvalds 			dm_table_put(map);
18288a57dfc6SChandra Seetharaman 		}
18298a57dfc6SChandra Seetharaman 	}
18308a57dfc6SChandra Seetharaman 
18311da177e4SLinus Torvalds 	return r;
18321da177e4SLinus Torvalds }
18331da177e4SLinus Torvalds 
18341da177e4SLinus Torvalds /*-----------------------------------------------------------------
18351da177e4SLinus Torvalds  * An IDR is used to keep track of allocated minor numbers.
18361da177e4SLinus Torvalds  *---------------------------------------------------------------*/
18371da177e4SLinus Torvalds static DEFINE_IDR(_minor_idr);
18381da177e4SLinus Torvalds 
18392b06cfffSAlasdair G Kergon static void free_minor(int minor)
18401da177e4SLinus Torvalds {
1841f32c10b0SJeff Mahoney 	spin_lock(&_minor_lock);
18421da177e4SLinus Torvalds 	idr_remove(&_minor_idr, minor);
1843f32c10b0SJeff Mahoney 	spin_unlock(&_minor_lock);
18441da177e4SLinus Torvalds }
18451da177e4SLinus Torvalds 
18461da177e4SLinus Torvalds /*
18471da177e4SLinus Torvalds  * See if the device with a specific minor # is free.
18481da177e4SLinus Torvalds  */
1849cf13ab8eSFrederik Deweerdt static int specific_minor(int minor)
18501da177e4SLinus Torvalds {
18511da177e4SLinus Torvalds 	int r, m;
18521da177e4SLinus Torvalds 
18531da177e4SLinus Torvalds 	if (minor >= (1 << MINORBITS))
18541da177e4SLinus Torvalds 		return -EINVAL;
18551da177e4SLinus Torvalds 
185662f75c2fSJeff Mahoney 	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
185762f75c2fSJeff Mahoney 	if (!r)
185862f75c2fSJeff Mahoney 		return -ENOMEM;
185962f75c2fSJeff Mahoney 
1860f32c10b0SJeff Mahoney 	spin_lock(&_minor_lock);
18611da177e4SLinus Torvalds 
18621da177e4SLinus Torvalds 	if (idr_find(&_minor_idr, minor)) {
18631da177e4SLinus Torvalds 		r = -EBUSY;
18641da177e4SLinus Torvalds 		goto out;
18651da177e4SLinus Torvalds 	}
18661da177e4SLinus Torvalds 
1867ba61fdd1SJeff Mahoney 	r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);
186862f75c2fSJeff Mahoney 	if (r)
18691da177e4SLinus Torvalds 		goto out;
18701da177e4SLinus Torvalds 
18711da177e4SLinus Torvalds 	if (m != minor) {
18721da177e4SLinus Torvalds 		idr_remove(&_minor_idr, m);
18731da177e4SLinus Torvalds 		r = -EBUSY;
18741da177e4SLinus Torvalds 		goto out;
18751da177e4SLinus Torvalds 	}
18761da177e4SLinus Torvalds 
18771da177e4SLinus Torvalds out:
1878f32c10b0SJeff Mahoney 	spin_unlock(&_minor_lock);
18791da177e4SLinus Torvalds 	return r;
18801da177e4SLinus Torvalds }
18811da177e4SLinus Torvalds 
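/*
 * Allocate the next available minor number.
 */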
1882cf13ab8eSFrederik Deweerdt static int next_free_minor(int *minor)
18831da177e4SLinus Torvalds {
18842b06cfffSAlasdair G Kergon 	int r, m;
18851da177e4SLinus Torvalds 
18861da177e4SLinus Torvalds 	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
188762f75c2fSJeff Mahoney 	if (!r)
188862f75c2fSJeff Mahoney 		return -ENOMEM;
188962f75c2fSJeff Mahoney 
1890f32c10b0SJeff Mahoney 	spin_lock(&_minor_lock);
18911da177e4SLinus Torvalds 
1892ba61fdd1SJeff Mahoney 	r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
1893cf13ab8eSFrederik Deweerdt 	if (r)
18941da177e4SLinus Torvalds 		goto out;
18951da177e4SLinus Torvalds 
18961da177e4SLinus Torvalds 	if (m >= (1 << MINORBITS)) {
18971da177e4SLinus Torvalds 		idr_remove(&_minor_idr, m);
18981da177e4SLinus Torvalds 		r = -ENOSPC;
18991da177e4SLinus Torvalds 		goto out;
19001da177e4SLinus Torvalds 	}
19011da177e4SLinus Torvalds 
19021da177e4SLinus Torvalds 	*minor = m;
19031da177e4SLinus Torvalds 
19041da177e4SLinus Torvalds out:
1905f32c10b0SJeff Mahoney 	spin_unlock(&_minor_lock);
19061da177e4SLinus Torvalds 	return r;
19071da177e4SLinus Torvalds }
19081da177e4SLinus Torvalds 
190983d5cde4SAlexey Dobriyan static const struct block_device_operations dm_blk_dops;
19101da177e4SLinus Torvalds 
191153d5914fSMikulas Patocka static void dm_wq_work(struct work_struct *work);
1912d0bcb878SKiyoshi Ueda static void dm_rq_barrier_work(struct work_struct *work);
191353d5914fSMikulas Patocka 
19144a0b4ddfSMike Snitzer static void dm_init_md_queue(struct mapped_device *md)
19154a0b4ddfSMike Snitzer {
19164a0b4ddfSMike Snitzer 	/*
19174a0b4ddfSMike Snitzer 	 * Request-based dm devices cannot be stacked on top of bio-based dm
19184a0b4ddfSMike Snitzer 	 * devices.  The type of this dm device has not been decided yet.
19194a0b4ddfSMike Snitzer 	 * The type is decided at the first table loading time.
19204a0b4ddfSMike Snitzer 	 * To prevent problematic device stacking, clear the queue flag
19214a0b4ddfSMike Snitzer 	 * for request stacking support until then.
19224a0b4ddfSMike Snitzer 	 *
19234a0b4ddfSMike Snitzer 	 * This queue is new, so no concurrency on the queue_flags.
19244a0b4ddfSMike Snitzer 	 */
19254a0b4ddfSMike Snitzer 	queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
19264a0b4ddfSMike Snitzer 
19274a0b4ddfSMike Snitzer 	md->queue->queuedata = md;
19284a0b4ddfSMike Snitzer 	md->queue->backing_dev_info.congested_fn = dm_any_congested;
19294a0b4ddfSMike Snitzer 	md->queue->backing_dev_info.congested_data = md;
19304a0b4ddfSMike Snitzer 	blk_queue_make_request(md->queue, dm_request);
19314a0b4ddfSMike Snitzer 	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
19324a0b4ddfSMike Snitzer 	md->queue->unplug_fn = dm_unplug_all;
19334a0b4ddfSMike Snitzer 	blk_queue_merge_bvec(md->queue, dm_merge_bvec);
19344a0b4ddfSMike Snitzer }
19354a0b4ddfSMike Snitzer 
19361da177e4SLinus Torvalds /*
19371da177e4SLinus Torvalds  * Allocate and initialise a blank device with a given minor.
19381da177e4SLinus Torvalds  */
19392b06cfffSAlasdair G Kergon static struct mapped_device *alloc_dev(int minor)
19401da177e4SLinus Torvalds {
19411da177e4SLinus Torvalds 	int r;
1942cf13ab8eSFrederik Deweerdt 	struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
1943ba61fdd1SJeff Mahoney 	void *old_md;
19441da177e4SLinus Torvalds 
19451da177e4SLinus Torvalds 	if (!md) {
19461da177e4SLinus Torvalds 		DMWARN("unable to allocate device, out of memory.");
19471da177e4SLinus Torvalds 		return NULL;
19481da177e4SLinus Torvalds 	}
19491da177e4SLinus Torvalds 
195010da4f79SJeff Mahoney 	if (!try_module_get(THIS_MODULE))
19516ed7ade8SMilan Broz 		goto bad_module_get;
195210da4f79SJeff Mahoney 
19531da177e4SLinus Torvalds 	/* get a minor number for the dev */
19542b06cfffSAlasdair G Kergon 	if (minor == DM_ANY_MINOR)
1955cf13ab8eSFrederik Deweerdt 		r = next_free_minor(&minor);
19562b06cfffSAlasdair G Kergon 	else
1957cf13ab8eSFrederik Deweerdt 		r = specific_minor(minor);
19581da177e4SLinus Torvalds 	if (r < 0)
19596ed7ade8SMilan Broz 		goto bad_minor;
19601da177e4SLinus Torvalds 
1961a5664dadSMike Snitzer 	md->type = DM_TYPE_NONE;
19622ca3310eSAlasdair G Kergon 	init_rwsem(&md->io_lock);
1963e61290a4SDaniel Walker 	mutex_init(&md->suspend_lock);
1964a5664dadSMike Snitzer 	mutex_init(&md->type_lock);
1965022c2611SMikulas Patocka 	spin_lock_init(&md->deferred_lock);
1966d0bcb878SKiyoshi Ueda 	spin_lock_init(&md->barrier_error_lock);
19671da177e4SLinus Torvalds 	rwlock_init(&md->map_lock);
19681da177e4SLinus Torvalds 	atomic_set(&md->holders, 1);
19695c6bd75dSAlasdair G Kergon 	atomic_set(&md->open_count, 0);
19701da177e4SLinus Torvalds 	atomic_set(&md->event_nr, 0);
19717a8c3d3bSMike Anderson 	atomic_set(&md->uevent_seq, 0);
19727a8c3d3bSMike Anderson 	INIT_LIST_HEAD(&md->uevent_list);
19737a8c3d3bSMike Anderson 	spin_lock_init(&md->uevent_lock);
19741da177e4SLinus Torvalds 
19754a0b4ddfSMike Snitzer 	md->queue = blk_alloc_queue(GFP_KERNEL);
19761da177e4SLinus Torvalds 	if (!md->queue)
19776ed7ade8SMilan Broz 		goto bad_queue;
19781da177e4SLinus Torvalds 
19794a0b4ddfSMike Snitzer 	dm_init_md_queue(md);
19809faf400fSStefan Bader 
19811da177e4SLinus Torvalds 	md->disk = alloc_disk(1);
19821da177e4SLinus Torvalds 	if (!md->disk)
19836ed7ade8SMilan Broz 		goto bad_disk;
19841da177e4SLinus Torvalds 
1985316d315bSNikanth Karthikesan 	atomic_set(&md->pending[0], 0);
1986316d315bSNikanth Karthikesan 	atomic_set(&md->pending[1], 0);
1987f0b04115SJeff Mahoney 	init_waitqueue_head(&md->wait);
198853d5914fSMikulas Patocka 	INIT_WORK(&md->work, dm_wq_work);
1989d0bcb878SKiyoshi Ueda 	INIT_WORK(&md->barrier_work, dm_rq_barrier_work);
1990f0b04115SJeff Mahoney 	init_waitqueue_head(&md->eventq);
1991f0b04115SJeff Mahoney 
19921da177e4SLinus Torvalds 	md->disk->major = _major;
19931da177e4SLinus Torvalds 	md->disk->first_minor = minor;
19941da177e4SLinus Torvalds 	md->disk->fops = &dm_blk_dops;
19951da177e4SLinus Torvalds 	md->disk->queue = md->queue;
19961da177e4SLinus Torvalds 	md->disk->private_data = md;
19971da177e4SLinus Torvalds 	sprintf(md->disk->disk_name, "dm-%d", minor);
19981da177e4SLinus Torvalds 	add_disk(md->disk);
19997e51f257SMike Anderson 	format_dev_t(md->name, MKDEV(_major, minor));
20001da177e4SLinus Torvalds 
2001304f3f6aSMilan Broz 	md->wq = create_singlethread_workqueue("kdmflush");
2002304f3f6aSMilan Broz 	if (!md->wq)
2003304f3f6aSMilan Broz 		goto bad_thread;
2004304f3f6aSMilan Broz 
200532a926daSMikulas Patocka 	md->bdev = bdget_disk(md->disk, 0);
200632a926daSMikulas Patocka 	if (!md->bdev)
200732a926daSMikulas Patocka 		goto bad_bdev;
200832a926daSMikulas Patocka 
2009ba61fdd1SJeff Mahoney 	/* Populate the mapping, nobody knows we exist yet */
2010f32c10b0SJeff Mahoney 	spin_lock(&_minor_lock);
2011ba61fdd1SJeff Mahoney 	old_md = idr_replace(&_minor_idr, md, minor);
2012f32c10b0SJeff Mahoney 	spin_unlock(&_minor_lock);
2013ba61fdd1SJeff Mahoney 
2014ba61fdd1SJeff Mahoney 	BUG_ON(old_md != MINOR_ALLOCED);
2015ba61fdd1SJeff Mahoney 
20161da177e4SLinus Torvalds 	return md;
20171da177e4SLinus Torvalds 
201832a926daSMikulas Patocka bad_bdev:
201932a926daSMikulas Patocka 	destroy_workqueue(md->wq);
2020304f3f6aSMilan Broz bad_thread:
202103022c54SZdenek Kabelac 	del_gendisk(md->disk);
2022304f3f6aSMilan Broz 	put_disk(md->disk);
20236ed7ade8SMilan Broz bad_disk:
20241312f40eSAl Viro 	blk_cleanup_queue(md->queue);
20256ed7ade8SMilan Broz bad_queue:
20261da177e4SLinus Torvalds 	free_minor(minor);
20276ed7ade8SMilan Broz bad_minor:
202810da4f79SJeff Mahoney 	module_put(THIS_MODULE);
20296ed7ade8SMilan Broz bad_module_get:
20301da177e4SLinus Torvalds 	kfree(md);
20311da177e4SLinus Torvalds 	return NULL;
20321da177e4SLinus Torvalds }
20331da177e4SLinus Torvalds 
2034ae9da83fSJun'ichi Nomura static void unlock_fs(struct mapped_device *md);
2035ae9da83fSJun'ichi Nomura 
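/*
 * Tear down everything alloc_dev() set up and drop the module
 * reference.
 */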
20361da177e4SLinus Torvalds static void free_dev(struct mapped_device *md)
20371da177e4SLinus Torvalds {
2038f331c029STejun Heo 	int minor = MINOR(disk_devt(md->disk));
203963d94e48SJun'ichi Nomura 
2040ae9da83fSJun'ichi Nomura 	unlock_fs(md);
2041db8fef4fSMikulas Patocka 	bdput(md->bdev);
2042304f3f6aSMilan Broz 	destroy_workqueue(md->wq);
2043e6ee8c0bSKiyoshi Ueda 	if (md->tio_pool)
20441da177e4SLinus Torvalds 		mempool_destroy(md->tio_pool);
2045e6ee8c0bSKiyoshi Ueda 	if (md->io_pool)
20461da177e4SLinus Torvalds 		mempool_destroy(md->io_pool);
2047e6ee8c0bSKiyoshi Ueda 	if (md->bs)
20489faf400fSStefan Bader 		bioset_free(md->bs);
20499c47008dSMartin K. Petersen 	blk_integrity_unregister(md->disk);
20501da177e4SLinus Torvalds 	del_gendisk(md->disk);
205163d94e48SJun'ichi Nomura 	free_minor(minor);
2052fba9f90eSJeff Mahoney 
2053fba9f90eSJeff Mahoney 	spin_lock(&_minor_lock);
2054fba9f90eSJeff Mahoney 	md->disk->private_data = NULL;
2055fba9f90eSJeff Mahoney 	spin_unlock(&_minor_lock);
2056fba9f90eSJeff Mahoney 
20571da177e4SLinus Torvalds 	put_disk(md->disk);
20581312f40eSAl Viro 	blk_cleanup_queue(md->queue);
205910da4f79SJeff Mahoney 	module_put(THIS_MODULE);
20601da177e4SLinus Torvalds 	kfree(md);
20611da177e4SLinus Torvalds }
20621da177e4SLinus Torvalds 
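/*
 * Take ownership of the mempools carried by the table, unless the md
 * already has a full set.
 */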
2063e6ee8c0bSKiyoshi Ueda static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
2064e6ee8c0bSKiyoshi Ueda {
2065e6ee8c0bSKiyoshi Ueda 	struct dm_md_mempools *p;
2066e6ee8c0bSKiyoshi Ueda 
2067e6ee8c0bSKiyoshi Ueda 	if (md->io_pool && md->tio_pool && md->bs)
2068e6ee8c0bSKiyoshi Ueda 		/* the md already has necessary mempools */
2069e6ee8c0bSKiyoshi Ueda 		goto out;
2070e6ee8c0bSKiyoshi Ueda 
2071e6ee8c0bSKiyoshi Ueda 	p = dm_table_get_md_mempools(t);
2072e6ee8c0bSKiyoshi Ueda 	BUG_ON(!p || md->io_pool || md->tio_pool || md->bs);
2073e6ee8c0bSKiyoshi Ueda 
2074e6ee8c0bSKiyoshi Ueda 	md->io_pool = p->io_pool;
2075e6ee8c0bSKiyoshi Ueda 	p->io_pool = NULL;
2076e6ee8c0bSKiyoshi Ueda 	md->tio_pool = p->tio_pool;
2077e6ee8c0bSKiyoshi Ueda 	p->tio_pool = NULL;
2078e6ee8c0bSKiyoshi Ueda 	md->bs = p->bs;
2079e6ee8c0bSKiyoshi Ueda 	p->bs = NULL;
2080e6ee8c0bSKiyoshi Ueda 
2081e6ee8c0bSKiyoshi Ueda out:
2082e6ee8c0bSKiyoshi Ueda 	/* mempool bind completed, now no need any mempools in the table */
2083e6ee8c0bSKiyoshi Ueda 	/* mempool bind completed; the table no longer needs any mempools */
2084e6ee8c0bSKiyoshi Ueda }
2085e6ee8c0bSKiyoshi Ueda 
20861da177e4SLinus Torvalds /*
20871da177e4SLinus Torvalds  * Table event callback: send queued uevents and wake any event waiters.
20881da177e4SLinus Torvalds  */
20891da177e4SLinus Torvalds static void event_callback(void *context)
20901da177e4SLinus Torvalds {
20917a8c3d3bSMike Anderson 	unsigned long flags;
20927a8c3d3bSMike Anderson 	LIST_HEAD(uevents);
20931da177e4SLinus Torvalds 	struct mapped_device *md = (struct mapped_device *) context;
20941da177e4SLinus Torvalds 
20957a8c3d3bSMike Anderson 	spin_lock_irqsave(&md->uevent_lock, flags);
20967a8c3d3bSMike Anderson 	list_splice_init(&md->uevent_list, &uevents);
20977a8c3d3bSMike Anderson 	spin_unlock_irqrestore(&md->uevent_lock, flags);
20987a8c3d3bSMike Anderson 
2099ed9e1982STejun Heo 	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
21007a8c3d3bSMike Anderson 
21011da177e4SLinus Torvalds 	atomic_inc(&md->event_nr);
21021da177e4SLinus Torvalds 	wake_up(&md->eventq);
21031da177e4SLinus Torvalds }
21041da177e4SLinus Torvalds 
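/*
 * Propagate a new device size to the gendisk and to the block device
 * inode.
 */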
21054e90188bSAlasdair G Kergon static void __set_size(struct mapped_device *md, sector_t size)
21061da177e4SLinus Torvalds {
21074e90188bSAlasdair G Kergon 	set_capacity(md->disk, size);
21081da177e4SLinus Torvalds 
2109db8fef4fSMikulas Patocka 	mutex_lock(&md->bdev->bd_inode->i_mutex);
2110db8fef4fSMikulas Patocka 	i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
2111db8fef4fSMikulas Patocka 	mutex_unlock(&md->bdev->bd_inode->i_mutex);
21121da177e4SLinus Torvalds }
21131da177e4SLinus Torvalds 
2114042d2a9bSAlasdair G Kergon /*
2115042d2a9bSAlasdair G Kergon  * Returns old map, which caller must destroy.
2116042d2a9bSAlasdair G Kergon  */
2117042d2a9bSAlasdair G Kergon static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
2118754c5fc7SMike Snitzer 			       struct queue_limits *limits)
21191da177e4SLinus Torvalds {
2120042d2a9bSAlasdair G Kergon 	struct dm_table *old_map;
2121165125e1SJens Axboe 	struct request_queue *q = md->queue;
21221da177e4SLinus Torvalds 	sector_t size;
2123523d9297SKiyoshi Ueda 	unsigned long flags;
21241da177e4SLinus Torvalds 
21251da177e4SLinus Torvalds 	size = dm_table_get_size(t);
21263ac51e74SDarrick J. Wong 
21273ac51e74SDarrick J. Wong 	/*
21283ac51e74SDarrick J. Wong 	 * Wipe any geometry if the size of the table changed.
21293ac51e74SDarrick J. Wong 	 */
21303ac51e74SDarrick J. Wong 	if (size != get_capacity(md->disk))
21313ac51e74SDarrick J. Wong 		memset(&md->geometry, 0, sizeof(md->geometry));
21323ac51e74SDarrick J. Wong 
21334e90188bSAlasdair G Kergon 	__set_size(md, size);
21341da177e4SLinus Torvalds 
2135cf222b37SAlasdair G Kergon 	dm_table_event_callback(t, event_callback, md);
21362ca3310eSAlasdair G Kergon 
2137e6ee8c0bSKiyoshi Ueda 	/*
2138e6ee8c0bSKiyoshi Ueda 	 * If the old table type wasn't request-based, the queue hasn't
2139e6ee8c0bSKiyoshi Ueda 	 * been stopped during suspension, so stop it now to prevent
2140e6ee8c0bSKiyoshi Ueda 	 * I/O from being mapped before resume.
2141e6ee8c0bSKiyoshi Ueda 	 * This must be done before setting the queue restrictions,
2142e6ee8c0bSKiyoshi Ueda 	 * because request-based dm may start running as soon as they are set.
2143e6ee8c0bSKiyoshi Ueda 	 */
2144e6ee8c0bSKiyoshi Ueda 	if (dm_table_request_based(t) && !blk_queue_stopped(q))
2145e6ee8c0bSKiyoshi Ueda 		stop_queue(q);
2146e6ee8c0bSKiyoshi Ueda 
2147e6ee8c0bSKiyoshi Ueda 	__bind_mempools(md, t);
2148e6ee8c0bSKiyoshi Ueda 
2149523d9297SKiyoshi Ueda 	write_lock_irqsave(&md->map_lock, flags);
2150042d2a9bSAlasdair G Kergon 	old_map = md->map;
21512ca3310eSAlasdair G Kergon 	md->map = t;
2152754c5fc7SMike Snitzer 	dm_table_set_restrictions(t, q, limits);
2153523d9297SKiyoshi Ueda 	write_unlock_irqrestore(&md->map_lock, flags);
21542ca3310eSAlasdair G Kergon 
2155042d2a9bSAlasdair G Kergon 	return old_map;
21561da177e4SLinus Torvalds }
21571da177e4SLinus Torvalds 
2158a7940155SAlasdair G Kergon /*
2159a7940155SAlasdair G Kergon  * Returns unbound table for the caller to free.
2160a7940155SAlasdair G Kergon  */
2161a7940155SAlasdair G Kergon static struct dm_table *__unbind(struct mapped_device *md)
21621da177e4SLinus Torvalds {
21631da177e4SLinus Torvalds 	struct dm_table *map = md->map;
2164523d9297SKiyoshi Ueda 	unsigned long flags;
21651da177e4SLinus Torvalds 
21661da177e4SLinus Torvalds 	if (!map)
2167a7940155SAlasdair G Kergon 		return NULL;
21681da177e4SLinus Torvalds 
21691da177e4SLinus Torvalds 	dm_table_event_callback(map, NULL, NULL);
2170523d9297SKiyoshi Ueda 	write_lock_irqsave(&md->map_lock, flags);
21711da177e4SLinus Torvalds 	md->map = NULL;
2172523d9297SKiyoshi Ueda 	write_unlock_irqrestore(&md->map_lock, flags);
2173a7940155SAlasdair G Kergon 
2174a7940155SAlasdair G Kergon 	return map;
21751da177e4SLinus Torvalds }
21761da177e4SLinus Torvalds 
21771da177e4SLinus Torvalds /*
21781da177e4SLinus Torvalds  * Constructor for a new device.
21791da177e4SLinus Torvalds  */
21802b06cfffSAlasdair G Kergon int dm_create(int minor, struct mapped_device **result)
21811da177e4SLinus Torvalds {
21821da177e4SLinus Torvalds 	struct mapped_device *md;
21831da177e4SLinus Torvalds 
21842b06cfffSAlasdair G Kergon 	md = alloc_dev(minor);
21851da177e4SLinus Torvalds 	if (!md)
21861da177e4SLinus Torvalds 		return -ENXIO;
21871da177e4SLinus Torvalds 
2188784aae73SMilan Broz 	dm_sysfs_init(md);
2189784aae73SMilan Broz 
21901da177e4SLinus Torvalds 	*result = md;
21911da177e4SLinus Torvalds 	return 0;
21921da177e4SLinus Torvalds }
21931da177e4SLinus Torvalds 
2194a5664dadSMike Snitzer /*
2195a5664dadSMike Snitzer  * Functions to manage md->type.
2196a5664dadSMike Snitzer  * All are required to hold md->type_lock.
2197a5664dadSMike Snitzer  */
2198a5664dadSMike Snitzer void dm_lock_md_type(struct mapped_device *md)
2199a5664dadSMike Snitzer {
2200a5664dadSMike Snitzer 	mutex_lock(&md->type_lock);
2201a5664dadSMike Snitzer }
2202a5664dadSMike Snitzer 
2203a5664dadSMike Snitzer void dm_unlock_md_type(struct mapped_device *md)
2204a5664dadSMike Snitzer {
2205a5664dadSMike Snitzer 	mutex_unlock(&md->type_lock);
2206a5664dadSMike Snitzer }
2207a5664dadSMike Snitzer 
2208a5664dadSMike Snitzer void dm_set_md_type(struct mapped_device *md, unsigned type)
2209a5664dadSMike Snitzer {
2210a5664dadSMike Snitzer 	md->type = type;
2211a5664dadSMike Snitzer }
2212a5664dadSMike Snitzer 
2213a5664dadSMike Snitzer unsigned dm_get_md_type(struct mapped_device *md)
2214a5664dadSMike Snitzer {
2215a5664dadSMike Snitzer 	return md->type;
2216a5664dadSMike Snitzer }
2217a5664dadSMike Snitzer 
22184a0b4ddfSMike Snitzer /*
22194a0b4ddfSMike Snitzer  * Fully initialize a request-based queue (->elevator, ->request_fn, etc).
22204a0b4ddfSMike Snitzer  */
22214a0b4ddfSMike Snitzer static int dm_init_request_based_queue(struct mapped_device *md)
22224a0b4ddfSMike Snitzer {
22234a0b4ddfSMike Snitzer 	struct request_queue *q = NULL;
22244a0b4ddfSMike Snitzer 
22254a0b4ddfSMike Snitzer 	if (md->queue->elevator)
22264a0b4ddfSMike Snitzer 		return 1;
22274a0b4ddfSMike Snitzer 
22284a0b4ddfSMike Snitzer 	/* Fully initialize the queue */
22294a0b4ddfSMike Snitzer 	q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL);
22304a0b4ddfSMike Snitzer 	if (!q)
22314a0b4ddfSMike Snitzer 		return 0;
22324a0b4ddfSMike Snitzer 
22334a0b4ddfSMike Snitzer 	md->queue = q;
22344a0b4ddfSMike Snitzer 	md->saved_make_request_fn = md->queue->make_request_fn;
22354a0b4ddfSMike Snitzer 	dm_init_md_queue(md);
22364a0b4ddfSMike Snitzer 	blk_queue_softirq_done(md->queue, dm_softirq_done);
22374a0b4ddfSMike Snitzer 	blk_queue_prep_rq(md->queue, dm_prep_fn);
22384a0b4ddfSMike Snitzer 	blk_queue_lld_busy(md->queue, dm_lld_busy);
22394a0b4ddfSMike Snitzer 	blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN_FLUSH);
22404a0b4ddfSMike Snitzer 
22414a0b4ddfSMike Snitzer 	elv_register_queue(md->queue);
22424a0b4ddfSMike Snitzer 
22434a0b4ddfSMike Snitzer 	return 1;
22444a0b4ddfSMike Snitzer }
22454a0b4ddfSMike Snitzer 
22464a0b4ddfSMike Snitzer /*
22474a0b4ddfSMike Snitzer  * Setup the DM device's queue based on md's type
22484a0b4ddfSMike Snitzer  */
22494a0b4ddfSMike Snitzer int dm_setup_md_queue(struct mapped_device *md)
22504a0b4ddfSMike Snitzer {
22514a0b4ddfSMike Snitzer 	if ((dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) &&
22524a0b4ddfSMike Snitzer 	    !dm_init_request_based_queue(md)) {
22534a0b4ddfSMike Snitzer 		DMWARN("Cannot initialize queue for request-based mapped device");
22544a0b4ddfSMike Snitzer 		return -EINVAL;
22554a0b4ddfSMike Snitzer 	}
22564a0b4ddfSMike Snitzer 
22574a0b4ddfSMike Snitzer 	return 0;
22584a0b4ddfSMike Snitzer }
22594a0b4ddfSMike Snitzer 
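/*
 * Look up a mapped_device by dev_t, refusing devices that are being
 * deleted or freed.
 */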
2260637842cfSDavid Teigland static struct mapped_device *dm_find_md(dev_t dev)
22611da177e4SLinus Torvalds {
22621da177e4SLinus Torvalds 	struct mapped_device *md;
22631da177e4SLinus Torvalds 	unsigned minor = MINOR(dev);
22641da177e4SLinus Torvalds 
22651da177e4SLinus Torvalds 	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
22661da177e4SLinus Torvalds 		return NULL;
22671da177e4SLinus Torvalds 
2268f32c10b0SJeff Mahoney 	spin_lock(&_minor_lock);
22691da177e4SLinus Torvalds 
22701da177e4SLinus Torvalds 	md = idr_find(&_minor_idr, minor);
2271fba9f90eSJeff Mahoney 	if (md && (md == MINOR_ALLOCED ||
2272f331c029STejun Heo 		   (MINOR(disk_devt(dm_disk(md))) != minor) ||
2273abdc568bSKiyoshi Ueda 		   dm_deleting_md(md) ||
2274fba9f90eSJeff Mahoney 		   test_bit(DMF_FREEING, &md->flags))) {
2275637842cfSDavid Teigland 		md = NULL;
2276fba9f90eSJeff Mahoney 		goto out;
2277fba9f90eSJeff Mahoney 	}
22781da177e4SLinus Torvalds 
2279fba9f90eSJeff Mahoney out:
2280f32c10b0SJeff Mahoney 	spin_unlock(&_minor_lock);
22811da177e4SLinus Torvalds 
2282637842cfSDavid Teigland 	return md;
2283637842cfSDavid Teigland }
2284637842cfSDavid Teigland 
2285d229a958SDavid Teigland struct mapped_device *dm_get_md(dev_t dev)
2286d229a958SDavid Teigland {
2287d229a958SDavid Teigland 	struct mapped_device *md = dm_find_md(dev);
2288d229a958SDavid Teigland 
2289d229a958SDavid Teigland 	if (md)
2290d229a958SDavid Teigland 		dm_get(md);
2291d229a958SDavid Teigland 
2292d229a958SDavid Teigland 	return md;
2293d229a958SDavid Teigland }
2294d229a958SDavid Teigland 
22959ade92a9SAlasdair G Kergon void *dm_get_mdptr(struct mapped_device *md)
2296637842cfSDavid Teigland {
22979ade92a9SAlasdair G Kergon 	return md->interface_ptr;
22981da177e4SLinus Torvalds }
22991da177e4SLinus Torvalds 
23001da177e4SLinus Torvalds void dm_set_mdptr(struct mapped_device *md, void *ptr)
23011da177e4SLinus Torvalds {
23021da177e4SLinus Torvalds 	md->interface_ptr = ptr;
23031da177e4SLinus Torvalds }
23041da177e4SLinus Torvalds 
23051da177e4SLinus Torvalds void dm_get(struct mapped_device *md)
23061da177e4SLinus Torvalds {
23071da177e4SLinus Torvalds 	atomic_inc(&md->holders);
23083f77316dSKiyoshi Ueda 	BUG_ON(test_bit(DMF_FREEING, &md->flags));
23091da177e4SLinus Torvalds }
23101da177e4SLinus Torvalds 
231172d94861SAlasdair G Kergon const char *dm_device_name(struct mapped_device *md)
231272d94861SAlasdair G Kergon {
231372d94861SAlasdair G Kergon 	return md->name;
231472d94861SAlasdair G Kergon }
231572d94861SAlasdair G Kergon EXPORT_SYMBOL_GPL(dm_device_name);
231672d94861SAlasdair G Kergon 
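/*
 * Common teardown for dm_destroy() and dm_destroy_immediate(): suspend
 * the targets, optionally wait for all holders to drop their
 * references, then unbind the table and free the device.
 */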
23173f77316dSKiyoshi Ueda static void __dm_destroy(struct mapped_device *md, bool wait)
23181da177e4SLinus Torvalds {
23191134e5aeSMike Anderson 	struct dm_table *map;
23201da177e4SLinus Torvalds 
23213f77316dSKiyoshi Ueda 	might_sleep();
2322fba9f90eSJeff Mahoney 
23233f77316dSKiyoshi Ueda 	spin_lock(&_minor_lock);
23247c666411SAlasdair G Kergon 	map = dm_get_live_table(md);
23253f77316dSKiyoshi Ueda 	idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
2326fba9f90eSJeff Mahoney 	set_bit(DMF_FREEING, &md->flags);
2327f32c10b0SJeff Mahoney 	spin_unlock(&_minor_lock);
23283f77316dSKiyoshi Ueda 
23294f186f8bSKiyoshi Ueda 	if (!dm_suspended_md(md)) {
23301da177e4SLinus Torvalds 		dm_table_presuspend_targets(map);
23311da177e4SLinus Torvalds 		dm_table_postsuspend_targets(map);
23321da177e4SLinus Torvalds 	}
23333f77316dSKiyoshi Ueda 
23343f77316dSKiyoshi Ueda 	/*
23353f77316dSKiyoshi Ueda 	 * Rare, but there may still be I/O requests in flight that have
23363f77316dSKiyoshi Ueda 	 * yet to complete.  Wait for all references to disappear.
23373f77316dSKiyoshi Ueda 	 * No one may increment the reference count of the mapped_device
23383f77316dSKiyoshi Ueda 	 * once its state becomes DMF_FREEING.
23393f77316dSKiyoshi Ueda 	 */
23403f77316dSKiyoshi Ueda 	if (wait)
23413f77316dSKiyoshi Ueda 		while (atomic_read(&md->holders))
23423f77316dSKiyoshi Ueda 			msleep(1);
23433f77316dSKiyoshi Ueda 	else if (atomic_read(&md->holders))
23443f77316dSKiyoshi Ueda 		DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
23453f77316dSKiyoshi Ueda 		       dm_device_name(md), atomic_read(&md->holders));
23463f77316dSKiyoshi Ueda 
2347784aae73SMilan Broz 	dm_sysfs_exit(md);
23481134e5aeSMike Anderson 	dm_table_put(map);
2349a7940155SAlasdair G Kergon 	dm_table_destroy(__unbind(md));
23501da177e4SLinus Torvalds 	free_dev(md);
23511da177e4SLinus Torvalds }
23523f77316dSKiyoshi Ueda 
23533f77316dSKiyoshi Ueda void dm_destroy(struct mapped_device *md)
23543f77316dSKiyoshi Ueda {
23553f77316dSKiyoshi Ueda 	__dm_destroy(md, true);
23563f77316dSKiyoshi Ueda }
23573f77316dSKiyoshi Ueda 
23583f77316dSKiyoshi Ueda void dm_destroy_immediate(struct mapped_device *md)
23593f77316dSKiyoshi Ueda {
23603f77316dSKiyoshi Ueda 	__dm_destroy(md, false);
23613f77316dSKiyoshi Ueda }
23623f77316dSKiyoshi Ueda 
23633f77316dSKiyoshi Ueda void dm_put(struct mapped_device *md)
23643f77316dSKiyoshi Ueda {
23653f77316dSKiyoshi Ueda 	atomic_dec(&md->holders);
23661da177e4SLinus Torvalds }
236779eb885cSEdward Goggin EXPORT_SYMBOL_GPL(dm_put);
23681da177e4SLinus Torvalds 
2369401600dfSMikulas Patocka static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
237046125c1cSMilan Broz {
237146125c1cSMilan Broz 	int r = 0;
2372b44ebeb0SMikulas Patocka 	DECLARE_WAITQUEUE(wait, current);
2373b44ebeb0SMikulas Patocka 
2374b44ebeb0SMikulas Patocka 	dm_unplug_all(md->queue);
2375b44ebeb0SMikulas Patocka 
2376b44ebeb0SMikulas Patocka 	add_wait_queue(&md->wait, &wait);
237746125c1cSMilan Broz 
237846125c1cSMilan Broz 	while (1) {
2379401600dfSMikulas Patocka 		set_current_state(interruptible);
238046125c1cSMilan Broz 
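		/*
		 * Ensure the in-flight counters are sampled after the task
		 * state is set, so a completion that wakes us between the
		 * check and io_schedule() is not lost.
		 */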
238146125c1cSMilan Broz 		smp_mb();
2382b4324feeSKiyoshi Ueda 		if (!md_in_flight(md))
238346125c1cSMilan Broz 			break;
238446125c1cSMilan Broz 
2385401600dfSMikulas Patocka 		if (interruptible == TASK_INTERRUPTIBLE &&
2386401600dfSMikulas Patocka 		    signal_pending(current)) {
238746125c1cSMilan Broz 			r = -EINTR;
238846125c1cSMilan Broz 			break;
238946125c1cSMilan Broz 		}
239046125c1cSMilan Broz 
239146125c1cSMilan Broz 		io_schedule();
239246125c1cSMilan Broz 	}
239346125c1cSMilan Broz 	set_current_state(TASK_RUNNING);
239446125c1cSMilan Broz 
2395b44ebeb0SMikulas Patocka 	remove_wait_queue(&md->wait, &wait);
2396b44ebeb0SMikulas Patocka 
239746125c1cSMilan Broz 	return r;
239846125c1cSMilan Broz }
239946125c1cSMilan Broz 
2400531fe963SMikulas Patocka static void dm_flush(struct mapped_device *md)
2401af7e466aSMikulas Patocka {
2402af7e466aSMikulas Patocka 	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
240352b1fd5aSMikulas Patocka 
240452b1fd5aSMikulas Patocka 	bio_init(&md->barrier_bio);
240552b1fd5aSMikulas Patocka 	md->barrier_bio.bi_bdev = md->bdev;
240652b1fd5aSMikulas Patocka 	md->barrier_bio.bi_rw = WRITE_BARRIER;
240752b1fd5aSMikulas Patocka 	__split_and_process_bio(md, &md->barrier_bio);
240852b1fd5aSMikulas Patocka 
240952b1fd5aSMikulas Patocka 	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
2410af7e466aSMikulas Patocka }
2411af7e466aSMikulas Patocka 
2412af7e466aSMikulas Patocka static void process_barrier(struct mapped_device *md, struct bio *bio)
2413af7e466aSMikulas Patocka {
24145aa2781dSMikulas Patocka 	md->barrier_error = 0;
24155aa2781dSMikulas Patocka 
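	/*
	 * Barrier ordering: flush everything submitted before the barrier,
	 * then process the barrier payload (if any), then flush again so
	 * the payload itself is committed before the barrier completes.
	 */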
2416531fe963SMikulas Patocka 	dm_flush(md);
2417af7e466aSMikulas Patocka 
24185aa2781dSMikulas Patocka 	if (!bio_empty_barrier(bio)) {
2419af7e466aSMikulas Patocka 		__split_and_process_bio(md, bio);
2420708e9295SMikulas Patocka 		/*
2421708e9295SMikulas Patocka 		 * If the request isn't supported, don't waste time with
2422708e9295SMikulas Patocka 		 * the second flush.
2423708e9295SMikulas Patocka 		 */
2424708e9295SMikulas Patocka 		if (md->barrier_error != -EOPNOTSUPP)
2425531fe963SMikulas Patocka 			dm_flush(md);
24265aa2781dSMikulas Patocka 	}
2427af7e466aSMikulas Patocka 
2428af7e466aSMikulas Patocka 	if (md->barrier_error != DM_ENDIO_REQUEUE)
2429531fe963SMikulas Patocka 		bio_endio(bio, md->barrier_error);
24302761e95fSMikulas Patocka 	else {
24312761e95fSMikulas Patocka 		spin_lock_irq(&md->deferred_lock);
24322761e95fSMikulas Patocka 		bio_list_add_head(&md->deferred, bio);
24332761e95fSMikulas Patocka 		spin_unlock_irq(&md->deferred_lock);
24342761e95fSMikulas Patocka 	}
2435af7e466aSMikulas Patocka }
2436af7e466aSMikulas Patocka 
24371da177e4SLinus Torvalds /*
24381da177e4SLinus Torvalds  * Process the deferred bios
24391da177e4SLinus Torvalds  */
2440ef208587SMikulas Patocka static void dm_wq_work(struct work_struct *work)
24411da177e4SLinus Torvalds {
2442ef208587SMikulas Patocka 	struct mapped_device *md = container_of(work, struct mapped_device,
2443ef208587SMikulas Patocka 						work);
24446d6f10dfSMilan Broz 	struct bio *c;
24451da177e4SLinus Torvalds 
2446ef208587SMikulas Patocka 	down_write(&md->io_lock);
2447ef208587SMikulas Patocka 
24483b00b203SMikulas Patocka 	while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
2449022c2611SMikulas Patocka 		spin_lock_irq(&md->deferred_lock);
2450022c2611SMikulas Patocka 		c = bio_list_pop(&md->deferred);
2451022c2611SMikulas Patocka 		spin_unlock_irq(&md->deferred_lock);
2452022c2611SMikulas Patocka 
2453df12ee99SAlasdair G Kergon 		if (!c) {
24541eb787ecSAlasdair G Kergon 			clear_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags);
2455df12ee99SAlasdair G Kergon 			break;
2456022c2611SMikulas Patocka 		}
245773d410c0SMilan Broz 
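		/*
		 * Drop io_lock while the bio is processed: the target's map
		 * function may block, and dm_request() must still be able to
		 * make progress meanwhile.
		 */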
24583b00b203SMikulas Patocka 		up_write(&md->io_lock);
24593b00b203SMikulas Patocka 
2460e6ee8c0bSKiyoshi Ueda 		if (dm_request_based(md))
2461e6ee8c0bSKiyoshi Ueda 			generic_make_request(c);
2462e6ee8c0bSKiyoshi Ueda 		else {
24637b6d91daSChristoph Hellwig 			if (c->bi_rw & REQ_HARDBARRIER)
2464af7e466aSMikulas Patocka 				process_barrier(md, c);
2465af7e466aSMikulas Patocka 			else
2466df12ee99SAlasdair G Kergon 				__split_and_process_bio(md, c);
2467e6ee8c0bSKiyoshi Ueda 		}
24683b00b203SMikulas Patocka 
24693b00b203SMikulas Patocka 		down_write(&md->io_lock);
2470df12ee99SAlasdair G Kergon 	}
2471ef208587SMikulas Patocka 
2472ef208587SMikulas Patocka 	up_write(&md->io_lock);
24731da177e4SLinus Torvalds }
24741da177e4SLinus Torvalds 
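/*
 * Producer-side sketch (illustrative and simplified, not part of dm.c):
 * while DMF_QUEUE_IO_TO_THREAD is set, dm_request() defers bios roughly
 * like this; dm_queue_flush() below later kicks md->work so dm_wq_work()
 * replays them.  The real helper is queue_io() earlier in this file.
 */
static void __maybe_unused example_defer_bio(struct mapped_device *md,
					     struct bio *bio)
{
	spin_lock_irq(&md->deferred_lock);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irq(&md->deferred_lock);
}
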
24759a1fb464SMikulas Patocka static void dm_queue_flush(struct mapped_device *md)
2476304f3f6aSMilan Broz {
24773b00b203SMikulas Patocka 	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
24783b00b203SMikulas Patocka 	smp_mb__after_clear_bit();
247953d5914fSMikulas Patocka 	queue_work(md->wq, &md->work);
2480304f3f6aSMilan Broz }
2481304f3f6aSMilan Broz 
248257cba5d3SMike Snitzer static void dm_rq_set_target_request_nr(struct request *clone, unsigned request_nr)
2483d0bcb878SKiyoshi Ueda {
2484d0bcb878SKiyoshi Ueda 	struct dm_rq_target_io *tio = clone->end_io_data;
2485d0bcb878SKiyoshi Ueda 
248657cba5d3SMike Snitzer 	tio->info.target_request_nr = request_nr;
2487d0bcb878SKiyoshi Ueda }
2488d0bcb878SKiyoshi Ueda 
2489d0bcb878SKiyoshi Ueda /* Issue barrier requests to targets and wait for their completion. */
2490d0bcb878SKiyoshi Ueda static int dm_rq_barrier(struct mapped_device *md)
2491d0bcb878SKiyoshi Ueda {
2492d0bcb878SKiyoshi Ueda 	int i, j;
24937c666411SAlasdair G Kergon 	struct dm_table *map = dm_get_live_table(md);
2494d0bcb878SKiyoshi Ueda 	unsigned num_targets = dm_table_get_num_targets(map);
2495d0bcb878SKiyoshi Ueda 	struct dm_target *ti;
2496d0bcb878SKiyoshi Ueda 	struct request *clone;
2497d0bcb878SKiyoshi Ueda 
2498d0bcb878SKiyoshi Ueda 	md->barrier_error = 0;
2499d0bcb878SKiyoshi Ueda 
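	/*
	 * Send one clone of the original flush request to each target,
	 * once per flush request slot (num_flush_requests), so every
	 * underlying device sees its own barrier.
	 */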
2500d0bcb878SKiyoshi Ueda 	for (i = 0; i < num_targets; i++) {
2501d0bcb878SKiyoshi Ueda 		ti = dm_table_get_target(map, i);
2502d0bcb878SKiyoshi Ueda 		for (j = 0; j < ti->num_flush_requests; j++) {
2503d0bcb878SKiyoshi Ueda 			clone = clone_rq(md->flush_request, md, GFP_NOIO);
250457cba5d3SMike Snitzer 			dm_rq_set_target_request_nr(clone, j);
2505d0bcb878SKiyoshi Ueda 			atomic_inc(&md->pending[rq_data_dir(clone)]);
2506d0bcb878SKiyoshi Ueda 			map_request(ti, clone, md);
2507d0bcb878SKiyoshi Ueda 		}
2508d0bcb878SKiyoshi Ueda 	}
2509d0bcb878SKiyoshi Ueda 
2510d0bcb878SKiyoshi Ueda 	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
2511d0bcb878SKiyoshi Ueda 	dm_table_put(map);
2512d0bcb878SKiyoshi Ueda 
2513d0bcb878SKiyoshi Ueda 	return md->barrier_error;
2514d0bcb878SKiyoshi Ueda }
2515d0bcb878SKiyoshi Ueda 
2516d0bcb878SKiyoshi Ueda static void dm_rq_barrier_work(struct work_struct *work)
2517d0bcb878SKiyoshi Ueda {
2518d0bcb878SKiyoshi Ueda 	int error;
2519d0bcb878SKiyoshi Ueda 	struct mapped_device *md = container_of(work, struct mapped_device,
2520d0bcb878SKiyoshi Ueda 						barrier_work);
2521d0bcb878SKiyoshi Ueda 	struct request_queue *q = md->queue;
2522d0bcb878SKiyoshi Ueda 	struct request *rq;
2523d0bcb878SKiyoshi Ueda 	unsigned long flags;
2524d0bcb878SKiyoshi Ueda 
2525d0bcb878SKiyoshi Ueda 	/*
2526d0bcb878SKiyoshi Ueda 	 * Hold the md reference here and release it only at the very end,
2527d0bcb878SKiyoshi Ueda 	 * so that the md cannot be deleted by a device opener while the
2528d0bcb878SKiyoshi Ueda 	 * barrier request is completing.
2529d0bcb878SKiyoshi Ueda 	 */
2530d0bcb878SKiyoshi Ueda 	dm_get(md);
2531d0bcb878SKiyoshi Ueda 
2532d0bcb878SKiyoshi Ueda 	error = dm_rq_barrier(md);
2533d0bcb878SKiyoshi Ueda 
2534d0bcb878SKiyoshi Ueda 	rq = md->flush_request;
2535d0bcb878SKiyoshi Ueda 	md->flush_request = NULL;
2536d0bcb878SKiyoshi Ueda 
2537d0bcb878SKiyoshi Ueda 	if (error == DM_ENDIO_REQUEUE) {
2538d0bcb878SKiyoshi Ueda 		spin_lock_irqsave(q->queue_lock, flags);
2539d0bcb878SKiyoshi Ueda 		blk_requeue_request(q, rq);
2540d0bcb878SKiyoshi Ueda 		spin_unlock_irqrestore(q->queue_lock, flags);
2541d0bcb878SKiyoshi Ueda 	} else
2542d0bcb878SKiyoshi Ueda 		blk_end_request_all(rq, error);
2543d0bcb878SKiyoshi Ueda 
2544d0bcb878SKiyoshi Ueda 	blk_run_queue(q);
2545d0bcb878SKiyoshi Ueda 
2546d0bcb878SKiyoshi Ueda 	dm_put(md);
2547d0bcb878SKiyoshi Ueda }
2548d0bcb878SKiyoshi Ueda 
25491da177e4SLinus Torvalds /*
2550042d2a9bSAlasdair G Kergon  * Swap in a new table, returning the old one for the caller to destroy.
25511da177e4SLinus Torvalds  */
2552042d2a9bSAlasdair G Kergon struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
25531da177e4SLinus Torvalds {
2554042d2a9bSAlasdair G Kergon 	struct dm_table *map = ERR_PTR(-EINVAL);
2555754c5fc7SMike Snitzer 	struct queue_limits limits;
2556042d2a9bSAlasdair G Kergon 	int r;
25571da177e4SLinus Torvalds 
2558e61290a4SDaniel Walker 	mutex_lock(&md->suspend_lock);
25591da177e4SLinus Torvalds 
25601da177e4SLinus Torvalds 	/* device must be suspended */
25614f186f8bSKiyoshi Ueda 	if (!dm_suspended_md(md))
256293c534aeSAlasdair G Kergon 		goto out;
25631da177e4SLinus Torvalds 
2564754c5fc7SMike Snitzer 	r = dm_calculate_queue_limits(table, &limits);
2565042d2a9bSAlasdair G Kergon 	if (r) {
2566042d2a9bSAlasdair G Kergon 		map = ERR_PTR(r);
2567754c5fc7SMike Snitzer 		goto out;
2568042d2a9bSAlasdair G Kergon 	}
2569754c5fc7SMike Snitzer 
2570042d2a9bSAlasdair G Kergon 	map = __bind(md, table, &limits);
25711da177e4SLinus Torvalds 
257293c534aeSAlasdair G Kergon out:
2573e61290a4SDaniel Walker 	mutex_unlock(&md->suspend_lock);
2574042d2a9bSAlasdair G Kergon 	return map;
25751da177e4SLinus Torvalds }
25761da177e4SLinus Torvalds 
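/*
 * Caller-side sketch (illustrative, not part of dm.c): replacing a live
 * table.  "new_table" is assumed to have been built and validated
 * already, e.g. by the table-load ioctl path; error handling is
 * abbreviated.
 */
static int __maybe_unused example_replace_table(struct mapped_device *md,
						struct dm_table *new_table)
{
	struct dm_table *old_map;
	int r;

	r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
	if (r)
		return r;

	old_map = dm_swap_table(md, new_table);
	if (IS_ERR(old_map)) {
		dm_resume(md);
		return PTR_ERR(old_map);
	}

	/* NULL means there was no previous table. */
	if (old_map)
		dm_table_destroy(old_map);

	return dm_resume(md);
}
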
25771da177e4SLinus Torvalds /*
25781da177e4SLinus Torvalds  * Functions to lock and unlock any filesystem running on the
25791da177e4SLinus Torvalds  * device.
25801da177e4SLinus Torvalds  */
25812ca3310eSAlasdair G Kergon static int lock_fs(struct mapped_device *md)
25821da177e4SLinus Torvalds {
2583e39e2e95SAlasdair G Kergon 	int r;
25841da177e4SLinus Torvalds 
25851da177e4SLinus Torvalds 	WARN_ON(md->frozen_sb);
2586dfbe03f6SAlasdair G Kergon 
2587db8fef4fSMikulas Patocka 	md->frozen_sb = freeze_bdev(md->bdev);
2588dfbe03f6SAlasdair G Kergon 	if (IS_ERR(md->frozen_sb)) {
2589cf222b37SAlasdair G Kergon 		r = PTR_ERR(md->frozen_sb);
2590e39e2e95SAlasdair G Kergon 		md->frozen_sb = NULL;
2591e39e2e95SAlasdair G Kergon 		return r;
2592dfbe03f6SAlasdair G Kergon 	}
2593dfbe03f6SAlasdair G Kergon 
2594aa8d7c2fSAlasdair G Kergon 	set_bit(DMF_FROZEN, &md->flags);
2595aa8d7c2fSAlasdair G Kergon 
25961da177e4SLinus Torvalds 	return 0;
25971da177e4SLinus Torvalds }
25981da177e4SLinus Torvalds 
25992ca3310eSAlasdair G Kergon static void unlock_fs(struct mapped_device *md)
26001da177e4SLinus Torvalds {
2601aa8d7c2fSAlasdair G Kergon 	if (!test_bit(DMF_FROZEN, &md->flags))
2602aa8d7c2fSAlasdair G Kergon 		return;
2603aa8d7c2fSAlasdair G Kergon 
2604db8fef4fSMikulas Patocka 	thaw_bdev(md->bdev, md->frozen_sb);
26051da177e4SLinus Torvalds 	md->frozen_sb = NULL;
2606aa8d7c2fSAlasdair G Kergon 	clear_bit(DMF_FROZEN, &md->flags);
26071da177e4SLinus Torvalds }
26081da177e4SLinus Torvalds 
26091da177e4SLinus Torvalds /*
26101da177e4SLinus Torvalds  * We need to be able to change a mapping table under a mounted
26111da177e4SLinus Torvalds  * filesystem.  For example, we might want to move some data in
26121da177e4SLinus Torvalds  * the background.  Before the table can be swapped with
26131da177e4SLinus Torvalds  * dm_swap_table, dm_suspend must be called to flush any in-flight
26141da177e4SLinus Torvalds  * bios and ensure that any further I/O gets deferred.
26151da177e4SLinus Torvalds  */
2616cec47e3dSKiyoshi Ueda /*
2617cec47e3dSKiyoshi Ueda  * Suspend mechanism in request-based dm.
2618cec47e3dSKiyoshi Ueda  *
26199f518b27SKiyoshi Ueda  * 1. Flush all I/Os by lock_fs() if needed.
26209f518b27SKiyoshi Ueda  * 2. Stop dispatching any I/O by stopping the request_queue.
26219f518b27SKiyoshi Ueda  * 3. Wait for all in-flight I/Os to be completed or requeued.
2622cec47e3dSKiyoshi Ueda  *
26239f518b27SKiyoshi Ueda  * To abort suspend, start the request_queue.
2624cec47e3dSKiyoshi Ueda  */
2625a3d77d35SKiyoshi Ueda int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
26261da177e4SLinus Torvalds {
26272ca3310eSAlasdair G Kergon 	struct dm_table *map = NULL;
262846125c1cSMilan Broz 	int r = 0;
2629a3d77d35SKiyoshi Ueda 	int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
26302e93ccc1SKiyoshi Ueda 	int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;
26311da177e4SLinus Torvalds 
2632e61290a4SDaniel Walker 	mutex_lock(&md->suspend_lock);
26332ca3310eSAlasdair G Kergon 
26344f186f8bSKiyoshi Ueda 	if (dm_suspended_md(md)) {
263573d410c0SMilan Broz 		r = -EINVAL;
2636d287483dSAlasdair G Kergon 		goto out_unlock;
263773d410c0SMilan Broz 	}
26381da177e4SLinus Torvalds 
26397c666411SAlasdair G Kergon 	map = dm_get_live_table(md);
2640cf222b37SAlasdair G Kergon 
26412e93ccc1SKiyoshi Ueda 	/*
26422e93ccc1SKiyoshi Ueda 	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
26432e93ccc1SKiyoshi Ueda 	 * This flag is cleared before dm_suspend returns.
26442e93ccc1SKiyoshi Ueda 	 */
26452e93ccc1SKiyoshi Ueda 	if (noflush)
26462e93ccc1SKiyoshi Ueda 		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
26472e93ccc1SKiyoshi Ueda 
2648436d4108SAlasdair G Kergon 	/* This does not get reverted if there's an error later. */
26491da177e4SLinus Torvalds 	dm_table_presuspend_targets(map);
26501da177e4SLinus Torvalds 
26512e93ccc1SKiyoshi Ueda 	/*
26529f518b27SKiyoshi Ueda 	 * Flush I/O to the device.
26539f518b27SKiyoshi Ueda 	 * Any I/O submitted after lock_fs() may not be flushed.
26549f518b27SKiyoshi Ueda 	 * noflush takes precedence over do_lockfs.
26559f518b27SKiyoshi Ueda 	 * (lock_fs() flushes I/Os and waits for them to complete.)
26562e93ccc1SKiyoshi Ueda 	 */
265732a926daSMikulas Patocka 	if (!noflush && do_lockfs) {
26582ca3310eSAlasdair G Kergon 		r = lock_fs(md);
26592ca3310eSAlasdair G Kergon 		if (r)
26602ca3310eSAlasdair G Kergon 			goto out;
2661aa8d7c2fSAlasdair G Kergon 	}
26621da177e4SLinus Torvalds 
26631da177e4SLinus Torvalds 	/*
26643b00b203SMikulas Patocka 	 * Here we must make sure that no processes are submitting requests
26653b00b203SMikulas Patocka 	 * to target drivers, i.e. no one may be executing
26663b00b203SMikulas Patocka 	 * __split_and_process_bio. This is called from dm_request and
26673b00b203SMikulas Patocka 	 * dm_wq_work.
26683b00b203SMikulas Patocka 	 *
26693b00b203SMikulas Patocka 	 * To get all processes out of __split_and_process_bio in dm_request,
26703b00b203SMikulas Patocka 	 * we take the write lock. To prevent any process from reentering
26713b00b203SMikulas Patocka 	 * __split_and_process_bio from dm_request, we set
26723b00b203SMikulas Patocka 	 * DMF_QUEUE_IO_TO_THREAD.
26733b00b203SMikulas Patocka 	 *
26743b00b203SMikulas Patocka 	 * To quiesce the thread (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND
26753b00b203SMikulas Patocka 	 * and call flush_workqueue(md->wq). flush_workqueue will wait until
26763b00b203SMikulas Patocka 	 * dm_wq_work exits and DMF_BLOCK_IO_FOR_SUSPEND will prevent any
26773b00b203SMikulas Patocka 	 * further calls to __split_and_process_bio from dm_wq_work.
26781da177e4SLinus Torvalds 	 */
26792ca3310eSAlasdair G Kergon 	down_write(&md->io_lock);
26801eb787ecSAlasdair G Kergon 	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
26811eb787ecSAlasdair G Kergon 	set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags);
26822ca3310eSAlasdair G Kergon 	up_write(&md->io_lock);
26831da177e4SLinus Torvalds 
2684d0bcb878SKiyoshi Ueda 	/*
2685d0bcb878SKiyoshi Ueda 	 * Request-based dm uses md->wq for barrier (dm_rq_barrier_work) which
2686d0bcb878SKiyoshi Ueda 	 * can be kicked until md->queue is stopped.  So stop md->queue before
2687d0bcb878SKiyoshi Ueda 	 * flushing md->wq.
2688d0bcb878SKiyoshi Ueda 	 */
2689cec47e3dSKiyoshi Ueda 	if (dm_request_based(md))
26909f518b27SKiyoshi Ueda 		stop_queue(md->queue);
2691cec47e3dSKiyoshi Ueda 
2692d0bcb878SKiyoshi Ueda 	flush_workqueue(md->wq);
2693d0bcb878SKiyoshi Ueda 
26941da177e4SLinus Torvalds 	/*
26953b00b203SMikulas Patocka 	 * At this point no more requests are entering target request routines.
26963b00b203SMikulas Patocka 	 * We call dm_wait_for_completion to wait for all existing requests
26973b00b203SMikulas Patocka 	 * to finish.
26981da177e4SLinus Torvalds 	 */
2699401600dfSMikulas Patocka 	r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE);
27001da177e4SLinus Torvalds 
27012ca3310eSAlasdair G Kergon 	down_write(&md->io_lock);
27026d6f10dfSMilan Broz 	if (noflush)
2703022c2611SMikulas Patocka 		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
270494d6351eSMilan Broz 	up_write(&md->io_lock);
27052e93ccc1SKiyoshi Ueda 
27061da177e4SLinus Torvalds 	/* were we interrupted? */
270746125c1cSMilan Broz 	if (r < 0) {
27089a1fb464SMikulas Patocka 		dm_queue_flush(md);
270973d410c0SMilan Broz 
2710cec47e3dSKiyoshi Ueda 		if (dm_request_based(md))
27119f518b27SKiyoshi Ueda 			start_queue(md->queue);
2712cec47e3dSKiyoshi Ueda 
27132ca3310eSAlasdair G Kergon 		unlock_fs(md);
27142e93ccc1SKiyoshi Ueda 		goto out; /* pushback list is already flushed, so skip flush */
27152ca3310eSAlasdair G Kergon 	}
27162ca3310eSAlasdair G Kergon 
27173b00b203SMikulas Patocka 	/*
27183b00b203SMikulas Patocka 	 * If dm_wait_for_completion returned 0, the device is completely
27193b00b203SMikulas Patocka 	 * quiescent now. There is no request-processing activity. All new
27203b00b203SMikulas Patocka 	 * requests are being added to md->deferred list.
27213b00b203SMikulas Patocka 	 */
27223b00b203SMikulas Patocka 
27231da177e4SLinus Torvalds 	set_bit(DMF_SUSPENDED, &md->flags);
27241da177e4SLinus Torvalds 
27254d4471cbSKiyoshi Ueda 	dm_table_postsuspend_targets(map);
27264d4471cbSKiyoshi Ueda 
27272ca3310eSAlasdair G Kergon out:
27281da177e4SLinus Torvalds 	dm_table_put(map);
2729d287483dSAlasdair G Kergon 
2730d287483dSAlasdair G Kergon out_unlock:
2731e61290a4SDaniel Walker 	mutex_unlock(&md->suspend_lock);
2732cf222b37SAlasdair G Kergon 	return r;
27331da177e4SLinus Torvalds }
27341da177e4SLinus Torvalds 
27351da177e4SLinus Torvalds int dm_resume(struct mapped_device *md)
27361da177e4SLinus Torvalds {
2737cf222b37SAlasdair G Kergon 	int r = -EINVAL;
2738cf222b37SAlasdair G Kergon 	struct dm_table *map = NULL;
27391da177e4SLinus Torvalds 
2740e61290a4SDaniel Walker 	mutex_lock(&md->suspend_lock);
27414f186f8bSKiyoshi Ueda 	if (!dm_suspended_md(md))
2742cf222b37SAlasdair G Kergon 		goto out;
2743cf222b37SAlasdair G Kergon 
27447c666411SAlasdair G Kergon 	map = dm_get_live_table(md);
27452ca3310eSAlasdair G Kergon 	if (!map || !dm_table_get_size(map))
2746cf222b37SAlasdair G Kergon 		goto out;
27471da177e4SLinus Torvalds 
27488757b776SMilan Broz 	r = dm_table_resume_targets(map);
27498757b776SMilan Broz 	if (r)
27508757b776SMilan Broz 		goto out;
27512ca3310eSAlasdair G Kergon 
27529a1fb464SMikulas Patocka 	dm_queue_flush(md);
27532ca3310eSAlasdair G Kergon 
2754cec47e3dSKiyoshi Ueda 	/*
2755cec47e3dSKiyoshi Ueda 	 * Flushing deferred I/Os must be done after targets are resumed
2756cec47e3dSKiyoshi Ueda 	 * so that mapping by the targets can work correctly.
2757cec47e3dSKiyoshi Ueda 	 * Request-based dm queues the deferred I/Os in its request_queue.
2758cec47e3dSKiyoshi Ueda 	 */
2759cec47e3dSKiyoshi Ueda 	if (dm_request_based(md))
2760cec47e3dSKiyoshi Ueda 		start_queue(md->queue);
2761cec47e3dSKiyoshi Ueda 
27622ca3310eSAlasdair G Kergon 	unlock_fs(md);
27632ca3310eSAlasdair G Kergon 
27642ca3310eSAlasdair G Kergon 	clear_bit(DMF_SUSPENDED, &md->flags);
27652ca3310eSAlasdair G Kergon 
27661da177e4SLinus Torvalds 	dm_table_unplug_all(map);
2767cf222b37SAlasdair G Kergon 	r = 0;
2768cf222b37SAlasdair G Kergon out:
2769cf222b37SAlasdair G Kergon 	dm_table_put(map);
2770e61290a4SDaniel Walker 	mutex_unlock(&md->suspend_lock);
27712ca3310eSAlasdair G Kergon 
2772cf222b37SAlasdair G Kergon 	return r;
27731da177e4SLinus Torvalds }
27741da177e4SLinus Torvalds 
27751da177e4SLinus Torvalds /*-----------------------------------------------------------------
27761da177e4SLinus Torvalds  * Event notification.
27771da177e4SLinus Torvalds  *---------------------------------------------------------------*/
27783abf85b5SPeter Rajnoha int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
277960935eb2SMilan Broz 		       unsigned cookie)
278069267a30SAlasdair G Kergon {
278160935eb2SMilan Broz 	char udev_cookie[DM_COOKIE_LENGTH];
278260935eb2SMilan Broz 	char *envp[] = { udev_cookie, NULL };
278360935eb2SMilan Broz 
278460935eb2SMilan Broz 	if (!cookie)
27853abf85b5SPeter Rajnoha 		return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
278660935eb2SMilan Broz 	else {
278760935eb2SMilan Broz 		snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
278860935eb2SMilan Broz 			 DM_COOKIE_ENV_VAR_NAME, cookie);
27893abf85b5SPeter Rajnoha 		return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
27903abf85b5SPeter Rajnoha 					  action, envp);
279160935eb2SMilan Broz 	}
279269267a30SAlasdair G Kergon }
279369267a30SAlasdair G Kergon 
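/*
 * Sketch (illustrative, not part of dm.c): emitting a CHANGE uevent
 * that carries a udev cookie so userspace can synchronize on the event.
 * The cookie would normally come from the ioctl that triggered the
 * change.
 */
static void __maybe_unused example_notify_change(struct mapped_device *md,
						 unsigned cookie)
{
	if (dm_kobject_uevent(md, KOBJ_CHANGE, cookie))
		DMWARN("%s: could not send CHANGE uevent",
		       dm_device_name(md));
}
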
27947a8c3d3bSMike Anderson uint32_t dm_next_uevent_seq(struct mapped_device *md)
27957a8c3d3bSMike Anderson {
27967a8c3d3bSMike Anderson 	return atomic_add_return(1, &md->uevent_seq);
27977a8c3d3bSMike Anderson }
27987a8c3d3bSMike Anderson 
27991da177e4SLinus Torvalds uint32_t dm_get_event_nr(struct mapped_device *md)
28001da177e4SLinus Torvalds {
28011da177e4SLinus Torvalds 	return atomic_read(&md->event_nr);
28021da177e4SLinus Torvalds }
28031da177e4SLinus Torvalds 
28041da177e4SLinus Torvalds int dm_wait_event(struct mapped_device *md, int event_nr)
28051da177e4SLinus Torvalds {
28061da177e4SLinus Torvalds 	return wait_event_interruptible(md->eventq,
28071da177e4SLinus Torvalds 			(event_nr != atomic_read(&md->event_nr)));
28081da177e4SLinus Torvalds }
28091da177e4SLinus Torvalds 
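/*
 * Sketch (illustrative, not part of dm.c): the DM_DEV_WAIT-style
 * pattern.  Sample the event counter first, then block until it moves
 * past the sampled value or a signal arrives.
 */
static int __maybe_unused example_wait_for_event(struct mapped_device *md)
{
	int event_nr = dm_get_event_nr(md);

	return dm_wait_event(md, event_nr);
}
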
28107a8c3d3bSMike Anderson void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
28117a8c3d3bSMike Anderson {
28127a8c3d3bSMike Anderson 	unsigned long flags;
28137a8c3d3bSMike Anderson 
28147a8c3d3bSMike Anderson 	spin_lock_irqsave(&md->uevent_lock, flags);
28157a8c3d3bSMike Anderson 	list_add(elist, &md->uevent_list);
28167a8c3d3bSMike Anderson 	spin_unlock_irqrestore(&md->uevent_lock, flags);
28177a8c3d3bSMike Anderson }
28187a8c3d3bSMike Anderson 
28191da177e4SLinus Torvalds /*
28201da177e4SLinus Torvalds  * The gendisk is only valid as long as you hold a reference
28211da177e4SLinus Torvalds  * on 'md'.
28221da177e4SLinus Torvalds  */
28231da177e4SLinus Torvalds struct gendisk *dm_disk(struct mapped_device *md)
28241da177e4SLinus Torvalds {
28251da177e4SLinus Torvalds 	return md->disk;
28261da177e4SLinus Torvalds }
28271da177e4SLinus Torvalds 
2828784aae73SMilan Broz struct kobject *dm_kobject(struct mapped_device *md)
2829784aae73SMilan Broz {
2830784aae73SMilan Broz 	return &md->kobj;
2831784aae73SMilan Broz }
2832784aae73SMilan Broz 
2833784aae73SMilan Broz /*
2834784aae73SMilan Broz  * struct mapped_device should not be exported outside of dm.c,
2835784aae73SMilan Broz  * so use this check to verify that kobj is part of the md structure.
2836784aae73SMilan Broz  */
2837784aae73SMilan Broz struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
2838784aae73SMilan Broz {
2839784aae73SMilan Broz 	struct mapped_device *md;
2840784aae73SMilan Broz 
2841784aae73SMilan Broz 	md = container_of(kobj, struct mapped_device, kobj);
2842784aae73SMilan Broz 	if (&md->kobj != kobj)
2843784aae73SMilan Broz 		return NULL;
2844784aae73SMilan Broz 
28454d89b7b4SMilan Broz 	if (test_bit(DMF_FREEING, &md->flags) ||
2846432a212cSMike Anderson 	    dm_deleting_md(md))
28474d89b7b4SMilan Broz 		return NULL;
28484d89b7b4SMilan Broz 
2849784aae73SMilan Broz 	dm_get(md);
2850784aae73SMilan Broz 	return md;
2851784aae73SMilan Broz }
2852784aae73SMilan Broz 
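/*
 * Sketch (illustrative, not part of dm.c): a sysfs attribute handler
 * resolving its kobject back to the owning mapped_device, the pattern
 * dm-sysfs.c uses.  The reference taken by dm_get_from_kobject() must
 * be dropped with dm_put().
 */
static ssize_t __maybe_unused example_attr_show(struct kobject *kobj, char *buf)
{
	struct mapped_device *md = dm_get_from_kobject(kobj);
	ssize_t len;

	if (!md)
		return -EINVAL;

	len = scnprintf(buf, PAGE_SIZE, "%s\n", dm_device_name(md));
	dm_put(md);

	return len;
}
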
28534f186f8bSKiyoshi Ueda int dm_suspended_md(struct mapped_device *md)
28541da177e4SLinus Torvalds {
28551da177e4SLinus Torvalds 	return test_bit(DMF_SUSPENDED, &md->flags);
28561da177e4SLinus Torvalds }
28571da177e4SLinus Torvalds 
285864dbce58SKiyoshi Ueda int dm_suspended(struct dm_target *ti)
285964dbce58SKiyoshi Ueda {
2860ecdb2e25SKiyoshi Ueda 	return dm_suspended_md(dm_table_get_md(ti->table));
286164dbce58SKiyoshi Ueda }
286264dbce58SKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_suspended);
286364dbce58SKiyoshi Ueda 
28642e93ccc1SKiyoshi Ueda int dm_noflush_suspending(struct dm_target *ti)
28652e93ccc1SKiyoshi Ueda {
2866ecdb2e25SKiyoshi Ueda 	return __noflush_suspending(dm_table_get_md(ti->table));
28672e93ccc1SKiyoshi Ueda }
28682e93ccc1SKiyoshi Ueda EXPORT_SYMBOL_GPL(dm_noflush_suspending);
28692e93ccc1SKiyoshi Ueda 
2870e6ee8c0bSKiyoshi Ueda struct dm_md_mempools *dm_alloc_md_mempools(unsigned type)
2871e6ee8c0bSKiyoshi Ueda {
2872e6ee8c0bSKiyoshi Ueda 	struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL);
2873e6ee8c0bSKiyoshi Ueda 
2874e6ee8c0bSKiyoshi Ueda 	if (!pools)
2875e6ee8c0bSKiyoshi Ueda 		return NULL;
2876e6ee8c0bSKiyoshi Ueda 
2877e6ee8c0bSKiyoshi Ueda 	pools->io_pool = (type == DM_TYPE_BIO_BASED) ?
2878e6ee8c0bSKiyoshi Ueda 			 mempool_create_slab_pool(MIN_IOS, _io_cache) :
2879e6ee8c0bSKiyoshi Ueda 			 mempool_create_slab_pool(MIN_IOS, _rq_bio_info_cache);
2880e6ee8c0bSKiyoshi Ueda 	if (!pools->io_pool)
2881e6ee8c0bSKiyoshi Ueda 		goto free_pools_and_out;
2882e6ee8c0bSKiyoshi Ueda 
2883e6ee8c0bSKiyoshi Ueda 	pools->tio_pool = (type == DM_TYPE_BIO_BASED) ?
2884e6ee8c0bSKiyoshi Ueda 			  mempool_create_slab_pool(MIN_IOS, _tio_cache) :
2885e6ee8c0bSKiyoshi Ueda 			  mempool_create_slab_pool(MIN_IOS, _rq_tio_cache);
2886e6ee8c0bSKiyoshi Ueda 	if (!pools->tio_pool)
2887e6ee8c0bSKiyoshi Ueda 		goto free_io_pool_and_out;
2888e6ee8c0bSKiyoshi Ueda 
2889e6ee8c0bSKiyoshi Ueda 	pools->bs = (type == DM_TYPE_BIO_BASED) ?
2890e6ee8c0bSKiyoshi Ueda 		    bioset_create(16, 0) : bioset_create(MIN_IOS, 0);
2891e6ee8c0bSKiyoshi Ueda 	if (!pools->bs)
2892e6ee8c0bSKiyoshi Ueda 		goto free_tio_pool_and_out;
2893e6ee8c0bSKiyoshi Ueda 
2894e6ee8c0bSKiyoshi Ueda 	return pools;
2895e6ee8c0bSKiyoshi Ueda 
2896e6ee8c0bSKiyoshi Ueda free_tio_pool_and_out:
2897e6ee8c0bSKiyoshi Ueda 	mempool_destroy(pools->tio_pool);
2898e6ee8c0bSKiyoshi Ueda 
2899e6ee8c0bSKiyoshi Ueda free_io_pool_and_out:
2900e6ee8c0bSKiyoshi Ueda 	mempool_destroy(pools->io_pool);
2901e6ee8c0bSKiyoshi Ueda 
2902e6ee8c0bSKiyoshi Ueda free_pools_and_out:
2903e6ee8c0bSKiyoshi Ueda 	kfree(pools);
2904e6ee8c0bSKiyoshi Ueda 
2905e6ee8c0bSKiyoshi Ueda 	return NULL;
2906e6ee8c0bSKiyoshi Ueda }
2907e6ee8c0bSKiyoshi Ueda 
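/*
 * Sketch (illustrative, not part of dm.c): mempool lifetime.  The
 * table-load path allocates pools matching the table's type and, if
 * they cannot be handed over to the md, releases them with the
 * symmetric dm_free_md_mempools() below.
 */
static void __maybe_unused example_pools_roundtrip(void)
{
	struct dm_md_mempools *pools = dm_alloc_md_mempools(DM_TYPE_BIO_BASED);

	if (!pools)
		return;		/* treated as -ENOMEM by real callers */

	/* ... the table-load path would attach these to the md here ... */

	dm_free_md_mempools(pools);
}
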
2908e6ee8c0bSKiyoshi Ueda void dm_free_md_mempools(struct dm_md_mempools *pools)
2909e6ee8c0bSKiyoshi Ueda {
2910e6ee8c0bSKiyoshi Ueda 	if (!pools)
2911e6ee8c0bSKiyoshi Ueda 		return;
2912e6ee8c0bSKiyoshi Ueda 
2913e6ee8c0bSKiyoshi Ueda 	if (pools->io_pool)
2914e6ee8c0bSKiyoshi Ueda 		mempool_destroy(pools->io_pool);
2915e6ee8c0bSKiyoshi Ueda 
2916e6ee8c0bSKiyoshi Ueda 	if (pools->tio_pool)
2917e6ee8c0bSKiyoshi Ueda 		mempool_destroy(pools->tio_pool);
2918e6ee8c0bSKiyoshi Ueda 
2919e6ee8c0bSKiyoshi Ueda 	if (pools->bs)
2920e6ee8c0bSKiyoshi Ueda 		bioset_free(pools->bs);
2921e6ee8c0bSKiyoshi Ueda 
2922e6ee8c0bSKiyoshi Ueda 	kfree(pools);
2923e6ee8c0bSKiyoshi Ueda }
2924e6ee8c0bSKiyoshi Ueda 
292583d5cde4SAlexey Dobriyan static const struct block_device_operations dm_blk_dops = {
29261da177e4SLinus Torvalds 	.open = dm_blk_open,
29271da177e4SLinus Torvalds 	.release = dm_blk_close,
2928aa129a22SMilan Broz 	.ioctl = dm_blk_ioctl,
29293ac51e74SDarrick J. Wong 	.getgeo = dm_blk_getgeo,
29301da177e4SLinus Torvalds 	.owner = THIS_MODULE
29311da177e4SLinus Torvalds };
29321da177e4SLinus Torvalds 
29331da177e4SLinus Torvalds EXPORT_SYMBOL(dm_get_mapinfo);
29341da177e4SLinus Torvalds 
29351da177e4SLinus Torvalds /*
29361da177e4SLinus Torvalds  * module hooks
29371da177e4SLinus Torvalds  */
29381da177e4SLinus Torvalds module_init(dm_init);
29391da177e4SLinus Torvalds module_exit(dm_exit);
29401da177e4SLinus Torvalds 
29411da177e4SLinus Torvalds module_param(major, uint, 0);
29421da177e4SLinus Torvalds MODULE_PARM_DESC(major, "The major number of the device mapper");
29431da177e4SLinus Torvalds MODULE_DESCRIPTION(DM_NAME " driver");
29441da177e4SLinus Torvalds MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
29451da177e4SLinus Torvalds MODULE_LICENSE("GPL");
2946