xref: /openbmc/linux/drivers/md/dm.c (revision 92b19ff5)
1 /*
2  * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
3  * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
4  *
5  * This file is released under the GPL.
6  */
7 
8 #include "dm.h"
9 #include "dm-uevent.h"
10 
11 #include <linux/init.h>
12 #include <linux/module.h>
13 #include <linux/mutex.h>
14 #include <linux/moduleparam.h>
15 #include <linux/blkpg.h>
16 #include <linux/bio.h>
17 #include <linux/mempool.h>
18 #include <linux/slab.h>
19 #include <linux/idr.h>
20 #include <linux/hdreg.h>
21 #include <linux/delay.h>
22 #include <linux/wait.h>
23 #include <linux/kthread.h>
24 #include <linux/ktime.h>
25 #include <linux/elevator.h> /* for rq_end_sector() */
26 #include <linux/blk-mq.h>
27 
28 #include <trace/events/block.h>
29 
30 #define DM_MSG_PREFIX "core"
31 
32 #ifdef CONFIG_PRINTK
33 /*
34  * ratelimit state to be used in DMXXX_LIMIT().
35  */
36 DEFINE_RATELIMIT_STATE(dm_ratelimit_state,
37 		       DEFAULT_RATELIMIT_INTERVAL,
38 		       DEFAULT_RATELIMIT_BURST);
39 EXPORT_SYMBOL(dm_ratelimit_state);
40 #endif
41 
42 /*
43  * Cookies are numeric values sent with CHANGE and REMOVE
44  * uevents while resuming, removing or renaming the device.
45  */
46 #define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
47 #define DM_COOKIE_LENGTH 24
48 
49 static const char *_name = DM_NAME;
50 
51 static unsigned int major = 0;
52 static unsigned int _major = 0;
53 
54 static DEFINE_IDR(_minor_idr);
55 
56 static DEFINE_SPINLOCK(_minor_lock);
57 
58 static void do_deferred_remove(struct work_struct *w);
59 
60 static DECLARE_WORK(deferred_remove_work, do_deferred_remove);
61 
62 static struct workqueue_struct *deferred_remove_workqueue;
63 
64 /*
65  * For bio-based dm.
66  * One of these is allocated per bio.
67  */
68 struct dm_io {
69 	struct mapped_device *md;
70 	int error;
71 	atomic_t io_count;
72 	struct bio *bio;
73 	unsigned long start_time;
74 	spinlock_t endio_lock;
75 	struct dm_stats_aux stats_aux;
76 };
77 
78 /*
79  * For request-based dm.
80  * One of these is allocated per request.
81  */
82 struct dm_rq_target_io {
83 	struct mapped_device *md;
84 	struct dm_target *ti;
85 	struct request *orig, *clone;
86 	struct kthread_work work;
87 	int error;
88 	union map_info info;
89 	struct dm_stats_aux stats_aux;
90 	unsigned long duration_jiffies;
91 	unsigned n_sectors;
92 };
93 
94 /*
95  * For request-based dm - the bio clones we allocate are embedded in these
96  * structs.
97  *
98  * We allocate these with bio_alloc_bioset, using the front_pad parameter when
99  * the bioset is created - this means the bio has to come at the end of the
100  * struct.
101  */
102 struct dm_rq_clone_bio_info {
103 	struct bio *orig;
104 	struct dm_rq_target_io *tio;
105 	struct bio clone;
106 };
107 
108 union map_info *dm_get_rq_mapinfo(struct request *rq)
109 {
110 	if (rq && rq->end_io_data)
111 		return &((struct dm_rq_target_io *)rq->end_io_data)->info;
112 	return NULL;
113 }
114 EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);
115 
116 #define MINOR_ALLOCED ((void *)-1)
117 
118 /*
119  * Bits for the md->flags field.
120  */
121 #define DMF_BLOCK_IO_FOR_SUSPEND 0
122 #define DMF_SUSPENDED 1
123 #define DMF_FROZEN 2
124 #define DMF_FREEING 3
125 #define DMF_DELETING 4
126 #define DMF_NOFLUSH_SUSPENDING 5
127 #define DMF_MERGE_IS_OPTIONAL 6
128 #define DMF_DEFERRED_REMOVE 7
129 #define DMF_SUSPENDED_INTERNALLY 8
130 
131 /*
132  * A dummy definition to make RCU happy.
133  * struct dm_table should never be dereferenced in this file.
134  */
135 struct dm_table {
136 	int undefined__;
137 };
138 
139 /*
140  * Work processed by per-device workqueue.
141  */
142 struct mapped_device {
143 	struct srcu_struct io_barrier;
144 	struct mutex suspend_lock;
145 	atomic_t holders;
146 	atomic_t open_count;
147 
148 	/*
149 	 * The current mapping.
150 	 * Use dm_get_live_table{_fast} or take suspend_lock for
151 	 * dereference.
152 	 */
153 	struct dm_table __rcu *map;
154 
155 	struct list_head table_devices;
156 	struct mutex table_devices_lock;
157 
158 	unsigned long flags;
159 
160 	struct request_queue *queue;
161 	unsigned type;
162 	/* Protect queue and type against concurrent access. */
163 	struct mutex type_lock;
164 
165 	struct target_type *immutable_target_type;
166 
167 	struct gendisk *disk;
168 	char name[16];
169 
170 	void *interface_ptr;
171 
172 	/*
173 	 * A list of ios that arrived while we were suspended.
174 	 */
175 	atomic_t pending[2];
176 	wait_queue_head_t wait;
177 	struct work_struct work;
178 	struct bio_list deferred;
179 	spinlock_t deferred_lock;
180 
181 	/*
182 	 * Processing queue (flush)
183 	 */
184 	struct workqueue_struct *wq;
185 
186 	/*
187 	 * io objects are allocated from here.
188 	 */
189 	mempool_t *io_pool;
190 	mempool_t *rq_pool;
191 
192 	struct bio_set *bs;
193 
194 	/*
195 	 * Event handling.
196 	 */
197 	atomic_t event_nr;
198 	wait_queue_head_t eventq;
199 	atomic_t uevent_seq;
200 	struct list_head uevent_list;
201 	spinlock_t uevent_lock; /* Protect access to uevent_list */
202 
203 	/*
204 	 * freeze/thaw support requires holding onto a super block
205 	 */
206 	struct super_block *frozen_sb;
207 	struct block_device *bdev;
208 
209 	/* forced geometry settings */
210 	struct hd_geometry geometry;
211 
212 	/* kobject and completion */
213 	struct dm_kobject_holder kobj_holder;
214 
215 	/* zero-length flush that will be cloned and submitted to targets */
216 	struct bio flush_bio;
217 
218 	/* the number of internal suspends */
219 	unsigned internal_suspend_count;
220 
221 	struct dm_stats stats;
222 
223 	struct kthread_worker kworker;
224 	struct task_struct *kworker_task;
225 
226 	/* for request-based merge heuristic in dm_request_fn() */
227 	unsigned seq_rq_merge_deadline_usecs;
228 	int last_rq_rw;
229 	sector_t last_rq_pos;
230 	ktime_t last_rq_start_time;
231 
232 	/* for blk-mq request-based DM support */
233 	struct blk_mq_tag_set tag_set;
234 	bool use_blk_mq;
235 };
236 
237 #ifdef CONFIG_DM_MQ_DEFAULT
238 static bool use_blk_mq = true;
239 #else
240 static bool use_blk_mq = false;
241 #endif
242 
243 bool dm_use_blk_mq(struct mapped_device *md)
244 {
245 	return md->use_blk_mq;
246 }
247 
248 /*
249  * For mempools pre-allocation at the table loading time.
250  */
251 struct dm_md_mempools {
252 	mempool_t *io_pool;
253 	mempool_t *rq_pool;
254 	struct bio_set *bs;
255 };
256 
257 struct table_device {
258 	struct list_head list;
259 	atomic_t count;
260 	struct dm_dev dm_dev;
261 };
262 
263 #define RESERVED_BIO_BASED_IOS		16
264 #define RESERVED_REQUEST_BASED_IOS	256
265 #define RESERVED_MAX_IOS		1024
266 static struct kmem_cache *_io_cache;
267 static struct kmem_cache *_rq_tio_cache;
268 static struct kmem_cache *_rq_cache;
269 
270 /*
271  * Bio-based DM's mempools' reserved IOs set by the user.
272  */
273 static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
274 
275 /*
276  * Request-based DM's mempools' reserved IOs set by the user.
277  */
278 static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;
279 
280 static unsigned __dm_get_module_param(unsigned *module_param,
281 				      unsigned def, unsigned max)
282 {
283 	unsigned param = ACCESS_ONCE(*module_param);
284 	unsigned modified_param = 0;
285 
286 	if (!param)
287 		modified_param = def;
288 	else if (param > max)
289 		modified_param = max;
290 
291 	if (modified_param) {
292 		(void)cmpxchg(module_param, param, modified_param);
293 		param = modified_param;
294 	}
295 
296 	return param;
297 }
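
/*
 * Worked example of the clamping above: dm_get_reserved_bio_based_ios()
 * below passes def == RESERVED_BIO_BASED_IOS (16) and max == RESERVED_MAX_IOS
 * (1024), so a reserved_bio_based_ios value of 0 yields 16 and a value such
 * as 4096 is clamped to 1024.  The cmpxchg() writes the corrected value back
 * only if the module parameter has not been changed concurrently.
 */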
298 
299 unsigned dm_get_reserved_bio_based_ios(void)
300 {
301 	return __dm_get_module_param(&reserved_bio_based_ios,
302 				     RESERVED_BIO_BASED_IOS, RESERVED_MAX_IOS);
303 }
304 EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);
305 
306 unsigned dm_get_reserved_rq_based_ios(void)
307 {
308 	return __dm_get_module_param(&reserved_rq_based_ios,
309 				     RESERVED_REQUEST_BASED_IOS, RESERVED_MAX_IOS);
310 }
311 EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);
312 
313 static int __init local_init(void)
314 {
315 	int r = -ENOMEM;
316 
317 	/* allocate a slab for the dm_ios */
318 	_io_cache = KMEM_CACHE(dm_io, 0);
319 	if (!_io_cache)
320 		return r;
321 
322 	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
323 	if (!_rq_tio_cache)
324 		goto out_free_io_cache;
325 
326 	_rq_cache = kmem_cache_create("dm_clone_request", sizeof(struct request),
327 				      __alignof__(struct request), 0, NULL);
328 	if (!_rq_cache)
329 		goto out_free_rq_tio_cache;
330 
331 	r = dm_uevent_init();
332 	if (r)
333 		goto out_free_rq_cache;
334 
335 	deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
336 	if (!deferred_remove_workqueue) {
337 		r = -ENOMEM;
338 		goto out_uevent_exit;
339 	}
340 
341 	_major = major;
342 	r = register_blkdev(_major, _name);
343 	if (r < 0)
344 		goto out_free_workqueue;
345 
346 	if (!_major)
347 		_major = r;
348 
349 	return 0;
350 
351 out_free_workqueue:
352 	destroy_workqueue(deferred_remove_workqueue);
353 out_uevent_exit:
354 	dm_uevent_exit();
355 out_free_rq_cache:
356 	kmem_cache_destroy(_rq_cache);
357 out_free_rq_tio_cache:
358 	kmem_cache_destroy(_rq_tio_cache);
359 out_free_io_cache:
360 	kmem_cache_destroy(_io_cache);
361 
362 	return r;
363 }
364 
365 static void local_exit(void)
366 {
367 	flush_scheduled_work();
368 	destroy_workqueue(deferred_remove_workqueue);
369 
370 	kmem_cache_destroy(_rq_cache);
371 	kmem_cache_destroy(_rq_tio_cache);
372 	kmem_cache_destroy(_io_cache);
373 	unregister_blkdev(_major, _name);
374 	dm_uevent_exit();
375 
376 	_major = 0;
377 
378 	DMINFO("cleaned up");
379 }
380 
381 static int (*_inits[])(void) __initdata = {
382 	local_init,
383 	dm_target_init,
384 	dm_linear_init,
385 	dm_stripe_init,
386 	dm_io_init,
387 	dm_kcopyd_init,
388 	dm_interface_init,
389 	dm_statistics_init,
390 };
391 
392 static void (*_exits[])(void) = {
393 	local_exit,
394 	dm_target_exit,
395 	dm_linear_exit,
396 	dm_stripe_exit,
397 	dm_io_exit,
398 	dm_kcopyd_exit,
399 	dm_interface_exit,
400 	dm_statistics_exit,
401 };
402 
403 static int __init dm_init(void)
404 {
405 	const int count = ARRAY_SIZE(_inits);
406 
407 	int r, i;
408 
409 	for (i = 0; i < count; i++) {
410 		r = _inits[i]();
411 		if (r)
412 			goto bad;
413 	}
414 
415 	return 0;
416 
417       bad:
418 	while (i--)
419 		_exits[i]();
420 
421 	return r;
422 }
423 
424 static void __exit dm_exit(void)
425 {
426 	int i = ARRAY_SIZE(_exits);
427 
428 	while (i--)
429 		_exits[i]();
430 
431 	/*
432 	 * Should be empty by this point.
433 	 */
434 	idr_destroy(&_minor_idr);
435 }
436 
437 /*
438  * Block device functions
439  */
440 int dm_deleting_md(struct mapped_device *md)
441 {
442 	return test_bit(DMF_DELETING, &md->flags);
443 }
444 
445 static int dm_blk_open(struct block_device *bdev, fmode_t mode)
446 {
447 	struct mapped_device *md;
448 
449 	spin_lock(&_minor_lock);
450 
451 	md = bdev->bd_disk->private_data;
452 	if (!md)
453 		goto out;
454 
455 	if (test_bit(DMF_FREEING, &md->flags) ||
456 	    dm_deleting_md(md)) {
457 		md = NULL;
458 		goto out;
459 	}
460 
461 	dm_get(md);
462 	atomic_inc(&md->open_count);
463 out:
464 	spin_unlock(&_minor_lock);
465 
466 	return md ? 0 : -ENXIO;
467 }
468 
469 static void dm_blk_close(struct gendisk *disk, fmode_t mode)
470 {
471 	struct mapped_device *md;
472 
473 	spin_lock(&_minor_lock);
474 
475 	md = disk->private_data;
476 	if (WARN_ON(!md))
477 		goto out;
478 
479 	if (atomic_dec_and_test(&md->open_count) &&
480 	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
481 		queue_work(deferred_remove_workqueue, &deferred_remove_work);
482 
483 	dm_put(md);
484 out:
485 	spin_unlock(&_minor_lock);
486 }
487 
488 int dm_open_count(struct mapped_device *md)
489 {
490 	return atomic_read(&md->open_count);
491 }
492 
493 /*
494  * Guarantees nothing is using the device before it's deleted.
495  */
496 int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
497 {
498 	int r = 0;
499 
500 	spin_lock(&_minor_lock);
501 
502 	if (dm_open_count(md)) {
503 		r = -EBUSY;
504 		if (mark_deferred)
505 			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
506 	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
507 		r = -EEXIST;
508 	else
509 		set_bit(DMF_DELETING, &md->flags);
510 
511 	spin_unlock(&_minor_lock);
512 
513 	return r;
514 }
515 
516 int dm_cancel_deferred_remove(struct mapped_device *md)
517 {
518 	int r = 0;
519 
520 	spin_lock(&_minor_lock);
521 
522 	if (test_bit(DMF_DELETING, &md->flags))
523 		r = -EBUSY;
524 	else
525 		clear_bit(DMF_DEFERRED_REMOVE, &md->flags);
526 
527 	spin_unlock(&_minor_lock);
528 
529 	return r;
530 }
531 
532 static void do_deferred_remove(struct work_struct *w)
533 {
534 	dm_deferred_remove();
535 }
536 
537 sector_t dm_get_size(struct mapped_device *md)
538 {
539 	return get_capacity(md->disk);
540 }
541 
542 struct request_queue *dm_get_md_queue(struct mapped_device *md)
543 {
544 	return md->queue;
545 }
546 
547 struct dm_stats *dm_get_stats(struct mapped_device *md)
548 {
549 	return &md->stats;
550 }
551 
552 static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
553 {
554 	struct mapped_device *md = bdev->bd_disk->private_data;
555 
556 	return dm_get_geometry(md, geo);
557 }
558 
559 static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
560 			unsigned int cmd, unsigned long arg)
561 {
562 	struct mapped_device *md = bdev->bd_disk->private_data;
563 	int srcu_idx;
564 	struct dm_table *map;
565 	struct dm_target *tgt;
566 	int r = -ENOTTY;
567 
568 retry:
569 	map = dm_get_live_table(md, &srcu_idx);
570 
571 	if (!map || !dm_table_get_size(map))
572 		goto out;
573 
574 	/* We only support devices that have a single target */
575 	if (dm_table_get_num_targets(map) != 1)
576 		goto out;
577 
578 	tgt = dm_table_get_target(map, 0);
579 	if (!tgt->type->ioctl)
580 		goto out;
581 
582 	if (dm_suspended_md(md)) {
583 		r = -EAGAIN;
584 		goto out;
585 	}
586 
587 	r = tgt->type->ioctl(tgt, cmd, arg);
588 
589 out:
590 	dm_put_live_table(md, srcu_idx);
591 
592 	if (r == -ENOTCONN) {
593 		msleep(10);
594 		goto retry;
595 	}
596 
597 	return r;
598 }
599 
600 static struct dm_io *alloc_io(struct mapped_device *md)
601 {
602 	return mempool_alloc(md->io_pool, GFP_NOIO);
603 }
604 
605 static void free_io(struct mapped_device *md, struct dm_io *io)
606 {
607 	mempool_free(io, md->io_pool);
608 }
609 
610 static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
611 {
612 	bio_put(&tio->clone);
613 }
614 
615 static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
616 					    gfp_t gfp_mask)
617 {
618 	return mempool_alloc(md->io_pool, gfp_mask);
619 }
620 
621 static void free_rq_tio(struct dm_rq_target_io *tio)
622 {
623 	mempool_free(tio, tio->md->io_pool);
624 }
625 
626 static struct request *alloc_clone_request(struct mapped_device *md,
627 					   gfp_t gfp_mask)
628 {
629 	return mempool_alloc(md->rq_pool, gfp_mask);
630 }
631 
632 static void free_clone_request(struct mapped_device *md, struct request *rq)
633 {
634 	mempool_free(rq, md->rq_pool);
635 }
636 
637 static int md_in_flight(struct mapped_device *md)
638 {
639 	return atomic_read(&md->pending[READ]) +
640 	       atomic_read(&md->pending[WRITE]);
641 }
642 
643 static void start_io_acct(struct dm_io *io)
644 {
645 	struct mapped_device *md = io->md;
646 	struct bio *bio = io->bio;
647 	int cpu;
648 	int rw = bio_data_dir(bio);
649 
650 	io->start_time = jiffies;
651 
652 	cpu = part_stat_lock();
653 	part_round_stats(cpu, &dm_disk(md)->part0);
654 	part_stat_unlock();
655 	atomic_set(&dm_disk(md)->part0.in_flight[rw],
656 		atomic_inc_return(&md->pending[rw]));
657 
658 	if (unlikely(dm_stats_used(&md->stats)))
659 		dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
660 				    bio_sectors(bio), false, 0, &io->stats_aux);
661 }
662 
663 static void end_io_acct(struct dm_io *io)
664 {
665 	struct mapped_device *md = io->md;
666 	struct bio *bio = io->bio;
667 	unsigned long duration = jiffies - io->start_time;
668 	int pending;
669 	int rw = bio_data_dir(bio);
670 
671 	generic_end_io_acct(rw, &dm_disk(md)->part0, io->start_time);
672 
673 	if (unlikely(dm_stats_used(&md->stats)))
674 		dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
675 				    bio_sectors(bio), true, duration, &io->stats_aux);
676 
677 	/*
678 	 * After this is decremented the bio must not be touched if it is
679 	 * a flush.
680 	 */
681 	pending = atomic_dec_return(&md->pending[rw]);
682 	atomic_set(&dm_disk(md)->part0.in_flight[rw], pending);
683 	pending += atomic_read(&md->pending[rw^0x1]);
684 
685 	/* nudge anyone waiting on suspend queue */
686 	if (!pending)
687 		wake_up(&md->wait);
688 }
689 
690 /*
691  * Add the bio to the list of deferred io.
692  */
693 static void queue_io(struct mapped_device *md, struct bio *bio)
694 {
695 	unsigned long flags;
696 
697 	spin_lock_irqsave(&md->deferred_lock, flags);
698 	bio_list_add(&md->deferred, bio);
699 	spin_unlock_irqrestore(&md->deferred_lock, flags);
700 	queue_work(md->wq, &md->work);
701 }
702 
703 /*
704  * Everyone (including functions in this file) should use this
705  * function to access the md->map field, and make sure they call
706  * dm_put_live_table() when finished.
707  */
708 struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
709 {
710 	*srcu_idx = srcu_read_lock(&md->io_barrier);
711 
712 	return srcu_dereference(md->map, &md->io_barrier);
713 }
714 
715 void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
716 {
717 	srcu_read_unlock(&md->io_barrier, srcu_idx);
718 }
719 
720 void dm_sync_table(struct mapped_device *md)
721 {
722 	synchronize_srcu(&md->io_barrier);
723 	synchronize_rcu_expedited();
724 }
725 
726 /*
727  * A fast alternative to dm_get_live_table/dm_put_live_table.
728  * The caller must not block between these two functions.
729  */
730 static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
731 {
732 	rcu_read_lock();
733 	return rcu_dereference(md->map);
734 }
735 
736 static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
737 {
738 	rcu_read_unlock();
739 }
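
/*
 * Typical usage of the helpers above, for a hypothetical caller (the
 * do_something_with() name is illustrative only):
 *
 *	int srcu_idx;
 *	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 *
 *	if (map)
 *		do_something_with(map);
 *	dm_put_live_table(md, srcu_idx);
 *
 * The _fast variants may be used instead only when the caller does not
 * block between taking and dropping the reference.
 */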
740 
741 /*
742  * Open a table device so we can use it as a map destination.
743  */
744 static int open_table_device(struct table_device *td, dev_t dev,
745 			     struct mapped_device *md)
746 {
747 	static char *_claim_ptr = "I belong to device-mapper";
748 	struct block_device *bdev;
749 
750 	int r;
751 
752 	BUG_ON(td->dm_dev.bdev);
753 
754 	bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _claim_ptr);
755 	if (IS_ERR(bdev))
756 		return PTR_ERR(bdev);
757 
758 	r = bd_link_disk_holder(bdev, dm_disk(md));
759 	if (r) {
760 		blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
761 		return r;
762 	}
763 
764 	td->dm_dev.bdev = bdev;
765 	return 0;
766 }
767 
768 /*
769  * Close a table device that we've been using.
770  */
771 static void close_table_device(struct table_device *td, struct mapped_device *md)
772 {
773 	if (!td->dm_dev.bdev)
774 		return;
775 
776 	bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
777 	blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
778 	td->dm_dev.bdev = NULL;
779 }
780 
781 static struct table_device *find_table_device(struct list_head *l, dev_t dev,
782 					      fmode_t mode) {
783 	struct table_device *td;
784 
785 	list_for_each_entry(td, l, list)
786 		if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
787 			return td;
788 
789 	return NULL;
790 }
791 
792 int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
793 			struct dm_dev **result) {
794 	int r;
795 	struct table_device *td;
796 
797 	mutex_lock(&md->table_devices_lock);
798 	td = find_table_device(&md->table_devices, dev, mode);
799 	if (!td) {
800 		td = kmalloc(sizeof(*td), GFP_KERNEL);
801 		if (!td) {
802 			mutex_unlock(&md->table_devices_lock);
803 			return -ENOMEM;
804 		}
805 
806 		td->dm_dev.mode = mode;
807 		td->dm_dev.bdev = NULL;
808 
809 		if ((r = open_table_device(td, dev, md))) {
810 			mutex_unlock(&md->table_devices_lock);
811 			kfree(td);
812 			return r;
813 		}
814 
815 		format_dev_t(td->dm_dev.name, dev);
816 
817 		atomic_set(&td->count, 0);
818 		list_add(&td->list, &md->table_devices);
819 	}
820 	atomic_inc(&td->count);
821 	mutex_unlock(&md->table_devices_lock);
822 
823 	*result = &td->dm_dev;
824 	return 0;
825 }
826 EXPORT_SYMBOL_GPL(dm_get_table_device);
827 
828 void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
829 {
830 	struct table_device *td = container_of(d, struct table_device, dm_dev);
831 
832 	mutex_lock(&md->table_devices_lock);
833 	if (atomic_dec_and_test(&td->count)) {
834 		close_table_device(td, md);
835 		list_del(&td->list);
836 		kfree(td);
837 	}
838 	mutex_unlock(&md->table_devices_lock);
839 }
840 EXPORT_SYMBOL(dm_put_table_device);
841 
842 static void free_table_devices(struct list_head *devices)
843 {
844 	struct list_head *tmp, *next;
845 
846 	list_for_each_safe(tmp, next, devices) {
847 		struct table_device *td = list_entry(tmp, struct table_device, list);
848 
849 		DMWARN("dm_destroy: %s still exists with %d references",
850 		       td->dm_dev.name, atomic_read(&td->count));
851 		kfree(td);
852 	}
853 }
854 
855 /*
856  * Get the geometry associated with a dm device
857  */
858 int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
859 {
860 	*geo = md->geometry;
861 
862 	return 0;
863 }
864 
865 /*
866  * Set the geometry of a device.
867  */
868 int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
869 {
870 	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;
871 
872 	if (geo->start > sz) {
873 		DMWARN("Start sector is beyond the geometry limits.");
874 		return -EINVAL;
875 	}
876 
877 	md->geometry = *geo;
878 
879 	return 0;
880 }
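
/*
 * For example, a geometry of 1024 cylinders, 255 heads and 63 sectors per
 * track describes 1024 * 255 * 63 = 16450560 sectors (roughly 7.8 GiB), so
 * any geo->start beyond that value is rejected with -EINVAL.
 */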
881 
882 /*-----------------------------------------------------------------
883  * CRUD START:
884  *   A more elegant soln is in the works that uses the queue
885  *   merge fn, unfortunately there are a couple of changes to
886  *   the block layer that I want to make for this.  So in the
887  *   interests of getting something for people to use I give
888  *   you this clearly demarcated crap.
889  *---------------------------------------------------------------*/
890 
891 static int __noflush_suspending(struct mapped_device *md)
892 {
893 	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
894 }
895 
896 /*
897  * Decrements the number of outstanding ios that a bio has been
898  * cloned into, completing the original io if necessary.
899  */
900 static void dec_pending(struct dm_io *io, int error)
901 {
902 	unsigned long flags;
903 	int io_error;
904 	struct bio *bio;
905 	struct mapped_device *md = io->md;
906 
907 	/* Push-back supersedes any I/O errors */
908 	if (unlikely(error)) {
909 		spin_lock_irqsave(&io->endio_lock, flags);
910 		if (!(io->error > 0 && __noflush_suspending(md)))
911 			io->error = error;
912 		spin_unlock_irqrestore(&io->endio_lock, flags);
913 	}
914 
915 	if (atomic_dec_and_test(&io->io_count)) {
916 		if (io->error == DM_ENDIO_REQUEUE) {
917 			/*
918 			 * Target requested pushing back the I/O.
919 			 */
920 			spin_lock_irqsave(&md->deferred_lock, flags);
921 			if (__noflush_suspending(md))
922 				bio_list_add_head(&md->deferred, io->bio);
923 			else
924 				/* noflush suspend was interrupted. */
925 				io->error = -EIO;
926 			spin_unlock_irqrestore(&md->deferred_lock, flags);
927 		}
928 
929 		io_error = io->error;
930 		bio = io->bio;
931 		end_io_acct(io);
932 		free_io(md, io);
933 
934 		if (io_error == DM_ENDIO_REQUEUE)
935 			return;
936 
937 		if ((bio->bi_rw & REQ_FLUSH) && bio->bi_iter.bi_size) {
938 			/*
939 			 * Preflush done for flush with data, reissue
940 			 * without REQ_FLUSH.
941 			 */
942 			bio->bi_rw &= ~REQ_FLUSH;
943 			queue_io(md, bio);
944 		} else {
945 			/* done with normal IO or empty flush */
946 			trace_block_bio_complete(md->queue, bio, io_error);
947 			bio_endio(bio, io_error);
948 		}
949 	}
950 }
951 
952 static void disable_write_same(struct mapped_device *md)
953 {
954 	struct queue_limits *limits = dm_get_queue_limits(md);
955 
956 	/* device doesn't really support WRITE SAME, disable it */
957 	limits->max_write_same_sectors = 0;
958 }
959 
960 static void clone_endio(struct bio *bio, int error)
961 {
962 	int r = error;
963 	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
964 	struct dm_io *io = tio->io;
965 	struct mapped_device *md = tio->io->md;
966 	dm_endio_fn endio = tio->ti->type->end_io;
967 
968 	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
969 		error = -EIO;
970 
971 	if (endio) {
972 		r = endio(tio->ti, bio, error);
973 		if (r < 0 || r == DM_ENDIO_REQUEUE)
974 			/*
975 			 * error and requeue request are handled
976 			 * in dec_pending().
977 			 */
978 			error = r;
979 		else if (r == DM_ENDIO_INCOMPLETE)
980 			/* The target will handle the io */
981 			return;
982 		else if (r) {
983 			DMWARN("unimplemented target endio return value: %d", r);
984 			BUG();
985 		}
986 	}
987 
988 	if (unlikely(r == -EREMOTEIO && (bio->bi_rw & REQ_WRITE_SAME) &&
989 		     !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors))
990 		disable_write_same(md);
991 
992 	free_tio(md, tio);
993 	dec_pending(io, error);
994 }
995 
996 /*
997  * Partial completion handling for request-based dm
998  */
999 static void end_clone_bio(struct bio *clone, int error)
1000 {
1001 	struct dm_rq_clone_bio_info *info =
1002 		container_of(clone, struct dm_rq_clone_bio_info, clone);
1003 	struct dm_rq_target_io *tio = info->tio;
1004 	struct bio *bio = info->orig;
1005 	unsigned int nr_bytes = info->orig->bi_iter.bi_size;
1006 
1007 	bio_put(clone);
1008 
1009 	if (tio->error)
1010 		/*
1011 		 * An error has already been detected on the request.
1012 		 * Once error occurred, just let clone->end_io() handle
1013 		 * the remainder.
1014 		 */
1015 		return;
1016 	else if (error) {
1017 		/*
1018 		 * Don't report the error to the upper layer yet.
1019 		 * The error handling decision is made by the target driver
1020 		 * when the request is completed.
1021 		 */
1022 		tio->error = error;
1023 		return;
1024 	}
1025 
1026 	/*
1027 	 * I/O for the bio successfully completed.
1028 	 * Report the data completion to the upper layer.
1029 	 */
1030 
1031 	/*
1032 	 * bios are processed from the head of the list.
1033 	 * So the completing bio should always be rq->bio.
1034 	 * If it's not, something is wrong.
1035 	 */
1036 	if (tio->orig->bio != bio)
1037 		DMERR("bio completion is going in the middle of the request");
1038 
1039 	/*
1040 	 * Update the original request.
1041 	 * Do not use blk_end_request() here, because it may complete
1042 	 * the original request before the clone, and break the ordering.
1043 	 */
1044 	blk_update_request(tio->orig, 0, nr_bytes);
1045 }
1046 
1047 static struct dm_rq_target_io *tio_from_request(struct request *rq)
1048 {
1049 	return (rq->q->mq_ops ? blk_mq_rq_to_pdu(rq) : rq->special);
1050 }
1051 
1052 static void rq_end_stats(struct mapped_device *md, struct request *orig)
1053 {
1054 	if (unlikely(dm_stats_used(&md->stats))) {
1055 		struct dm_rq_target_io *tio = tio_from_request(orig);
1056 		tio->duration_jiffies = jiffies - tio->duration_jiffies;
1057 		dm_stats_account_io(&md->stats, orig->cmd_flags, blk_rq_pos(orig),
1058 				    tio->n_sectors, true, tio->duration_jiffies,
1059 				    &tio->stats_aux);
1060 	}
1061 }
1062 
1063 /*
1064  * Don't touch any member of the md after calling this function because
1065  * the md may be freed in dm_put() at the end of this function.
1066  * Or do dm_get() before calling this function and dm_put() later.
1067  */
1068 static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
1069 {
1070 	atomic_dec(&md->pending[rw]);
1071 
1072 	/* nudge anyone waiting on suspend queue */
1073 	if (!md_in_flight(md))
1074 		wake_up(&md->wait);
1075 
1076 	/*
1077 	 * Run this off this callpath, as drivers could invoke end_io while
1078 	 * inside their request_fn (and holding the queue lock). Calling
1079 	 * back into ->request_fn() could deadlock attempting to grab the
1080 	 * queue lock again.
1081 	 */
1082 	if (run_queue) {
1083 		if (md->queue->mq_ops)
1084 			blk_mq_run_hw_queues(md->queue, true);
1085 		else
1086 			blk_run_queue_async(md->queue);
1087 	}
1088 
1089 	/*
1090 	 * dm_put() must be at the end of this function. See the comment above
1091 	 */
1092 	dm_put(md);
1093 }
1094 
1095 static void free_rq_clone(struct request *clone)
1096 {
1097 	struct dm_rq_target_io *tio = clone->end_io_data;
1098 	struct mapped_device *md = tio->md;
1099 
1100 	blk_rq_unprep_clone(clone);
1101 
1102 	if (md->type == DM_TYPE_MQ_REQUEST_BASED)
1103 		/* stacked on blk-mq queue(s) */
1104 		tio->ti->type->release_clone_rq(clone);
1105 	else if (!md->queue->mq_ops)
1106 		/* request_fn queue stacked on request_fn queue(s) */
1107 		free_clone_request(md, clone);
1108 	/*
1109 	 * NOTE: for the blk-mq queue stacked on request_fn queue(s) case:
1110 	 * no need to call free_clone_request() because we leverage blk-mq by
1111 	 * allocating the clone at the end of the blk-mq pdu (see: clone_rq)
1112 	 */
1113 
1114 	if (!md->queue->mq_ops)
1115 		free_rq_tio(tio);
1116 }
1117 
1118 /*
1119  * Complete the clone and the original request.
1120  * Must be called without clone's queue lock held,
1121  * see end_clone_request() for more details.
1122  */
1123 static void dm_end_request(struct request *clone, int error)
1124 {
1125 	int rw = rq_data_dir(clone);
1126 	struct dm_rq_target_io *tio = clone->end_io_data;
1127 	struct mapped_device *md = tio->md;
1128 	struct request *rq = tio->orig;
1129 
1130 	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
1131 		rq->errors = clone->errors;
1132 		rq->resid_len = clone->resid_len;
1133 
1134 		if (rq->sense)
1135 			/*
1136 			 * We are using the sense buffer of the original
1137 			 * request.
1138 			 * So setting the length of the sense data is enough.
1139 			 */
1140 			rq->sense_len = clone->sense_len;
1141 	}
1142 
1143 	free_rq_clone(clone);
1144 	rq_end_stats(md, rq);
1145 	if (!rq->q->mq_ops)
1146 		blk_end_request_all(rq, error);
1147 	else
1148 		blk_mq_end_request(rq, error);
1149 	rq_completed(md, rw, true);
1150 }
1151 
1152 static void dm_unprep_request(struct request *rq)
1153 {
1154 	struct dm_rq_target_io *tio = tio_from_request(rq);
1155 	struct request *clone = tio->clone;
1156 
1157 	if (!rq->q->mq_ops) {
1158 		rq->special = NULL;
1159 		rq->cmd_flags &= ~REQ_DONTPREP;
1160 	}
1161 
1162 	if (clone)
1163 		free_rq_clone(clone);
1164 }
1165 
1166 /*
1167  * Requeue the original request of a clone.
1168  */
1169 static void old_requeue_request(struct request *rq)
1170 {
1171 	struct request_queue *q = rq->q;
1172 	unsigned long flags;
1173 
1174 	spin_lock_irqsave(q->queue_lock, flags);
1175 	blk_requeue_request(q, rq);
1176 	blk_run_queue_async(q);
1177 	spin_unlock_irqrestore(q->queue_lock, flags);
1178 }
1179 
1180 static void dm_requeue_original_request(struct mapped_device *md,
1181 					struct request *rq)
1182 {
1183 	int rw = rq_data_dir(rq);
1184 
1185 	dm_unprep_request(rq);
1186 
1187 	rq_end_stats(md, rq);
1188 	if (!rq->q->mq_ops)
1189 		old_requeue_request(rq);
1190 	else {
1191 		blk_mq_requeue_request(rq);
1192 		blk_mq_kick_requeue_list(rq->q);
1193 	}
1194 
1195 	rq_completed(md, rw, false);
1196 }
1197 
1198 static void old_stop_queue(struct request_queue *q)
1199 {
1200 	unsigned long flags;
1201 
1202 	if (blk_queue_stopped(q))
1203 		return;
1204 
1205 	spin_lock_irqsave(q->queue_lock, flags);
1206 	blk_stop_queue(q);
1207 	spin_unlock_irqrestore(q->queue_lock, flags);
1208 }
1209 
1210 static void stop_queue(struct request_queue *q)
1211 {
1212 	if (!q->mq_ops)
1213 		old_stop_queue(q);
1214 	else
1215 		blk_mq_stop_hw_queues(q);
1216 }
1217 
1218 static void old_start_queue(struct request_queue *q)
1219 {
1220 	unsigned long flags;
1221 
1222 	spin_lock_irqsave(q->queue_lock, flags);
1223 	if (blk_queue_stopped(q))
1224 		blk_start_queue(q);
1225 	spin_unlock_irqrestore(q->queue_lock, flags);
1226 }
1227 
1228 static void start_queue(struct request_queue *q)
1229 {
1230 	if (!q->mq_ops)
1231 		old_start_queue(q);
1232 	else
1233 		blk_mq_start_stopped_hw_queues(q, true);
1234 }
1235 
1236 static void dm_done(struct request *clone, int error, bool mapped)
1237 {
1238 	int r = error;
1239 	struct dm_rq_target_io *tio = clone->end_io_data;
1240 	dm_request_endio_fn rq_end_io = NULL;
1241 
1242 	if (tio->ti) {
1243 		rq_end_io = tio->ti->type->rq_end_io;
1244 
1245 		if (mapped && rq_end_io)
1246 			r = rq_end_io(tio->ti, clone, error, &tio->info);
1247 	}
1248 
1249 	if (unlikely(r == -EREMOTEIO && (clone->cmd_flags & REQ_WRITE_SAME) &&
1250 		     !clone->q->limits.max_write_same_sectors))
1251 		disable_write_same(tio->md);
1252 
1253 	if (r <= 0)
1254 		/* The target wants to complete the I/O */
1255 		dm_end_request(clone, r);
1256 	else if (r == DM_ENDIO_INCOMPLETE)
1257 		/* The target will handle the I/O */
1258 		return;
1259 	else if (r == DM_ENDIO_REQUEUE)
1260 		/* The target wants to requeue the I/O */
1261 		dm_requeue_original_request(tio->md, tio->orig);
1262 	else {
1263 		DMWARN("unimplemented target endio return value: %d", r);
1264 		BUG();
1265 	}
1266 }
1267 
1268 /*
1269  * Request completion handler for request-based dm
1270  */
1271 static void dm_softirq_done(struct request *rq)
1272 {
1273 	bool mapped = true;
1274 	struct dm_rq_target_io *tio = tio_from_request(rq);
1275 	struct request *clone = tio->clone;
1276 	int rw;
1277 
1278 	if (!clone) {
1279 		rq_end_stats(tio->md, rq);
1280 		rw = rq_data_dir(rq);
1281 		if (!rq->q->mq_ops) {
1282 			blk_end_request_all(rq, tio->error);
1283 			rq_completed(tio->md, rw, false);
1284 			free_rq_tio(tio);
1285 		} else {
1286 			blk_mq_end_request(rq, tio->error);
1287 			rq_completed(tio->md, rw, false);
1288 		}
1289 		return;
1290 	}
1291 
1292 	if (rq->cmd_flags & REQ_FAILED)
1293 		mapped = false;
1294 
1295 	dm_done(clone, tio->error, mapped);
1296 }
1297 
1298 /*
1299  * Complete the clone and the original request with the error status
1300  * through softirq context.
1301  */
1302 static void dm_complete_request(struct request *rq, int error)
1303 {
1304 	struct dm_rq_target_io *tio = tio_from_request(rq);
1305 
1306 	tio->error = error;
1307 	blk_complete_request(rq);
1308 }
1309 
1310 /*
1311  * Complete the not-mapped clone and the original request with the error status
1312  * through softirq context.
1313  * Target's rq_end_io() function isn't called.
1314  * This may be used when the target's map_rq() or clone_and_map_rq() functions fail.
1315  */
1316 static void dm_kill_unmapped_request(struct request *rq, int error)
1317 {
1318 	rq->cmd_flags |= REQ_FAILED;
1319 	dm_complete_request(rq, error);
1320 }
1321 
1322 /*
1323  * Called with the clone's queue lock held (for non-blk-mq)
1324  */
1325 static void end_clone_request(struct request *clone, int error)
1326 {
1327 	struct dm_rq_target_io *tio = clone->end_io_data;
1328 
1329 	if (!clone->q->mq_ops) {
1330 		/*
1331 		 * Just clean up the information of the queue in which the
1332 		 * clone was dispatched.
1333 		 * The clone is *NOT* actually freed here because it was allocated
1334 		 * from dm's own mempool (REQ_ALLOCED isn't set).
1335 		 */
1336 		__blk_put_request(clone->q, clone);
1337 	}
1338 
1339 	/*
1340 	 * Actual request completion is done in a softirq context which doesn't
1341 	 * hold the clone's queue lock.  Otherwise, deadlock could occur because:
1342 	 *     - another request may be submitted by the upper level driver
1343 	 *       of the stacking during the completion
1344 	 *     - the submission which requires queue lock may be done
1345 	 *       against this clone's queue
1346 	 */
1347 	dm_complete_request(tio->orig, error);
1348 }
1349 
1350 /*
1351  * Return maximum size of I/O possible at the supplied sector up to the current
1352  * target boundary.
1353  */
1354 static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
1355 {
1356 	sector_t target_offset = dm_target_offset(ti, sector);
1357 
1358 	return ti->len - target_offset;
1359 }
1360 
1361 static sector_t max_io_len(sector_t sector, struct dm_target *ti)
1362 {
1363 	sector_t len = max_io_len_target_boundary(sector, ti);
1364 	sector_t offset, max_len;
1365 
1366 	/*
1367 	 * Does the target need to split even further?
1368 	 */
1369 	if (ti->max_io_len) {
1370 		offset = dm_target_offset(ti, sector);
1371 		if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
1372 			max_len = sector_div(offset, ti->max_io_len);
1373 		else
1374 			max_len = offset & (ti->max_io_len - 1);
1375 		max_len = ti->max_io_len - max_len;
1376 
1377 		if (len > max_len)
1378 			len = max_len;
1379 	}
1380 
1381 	return len;
1382 }
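
/*
 * Worked example of the chunk arithmetic above: with ti->max_io_len == 8 and
 * an I/O starting 3 sectors into the target, offset & (8 - 1) == 3, so
 * max_len becomes 8 - 3 == 5 and the clone is capped at 5 sectors, ending
 * exactly on the next 8-sector boundary (and never beyond the target
 * boundary computed by max_io_len_target_boundary()).
 */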
1383 
1384 int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
1385 {
1386 	if (len > UINT_MAX) {
1387 		DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
1388 		      (unsigned long long)len, UINT_MAX);
1389 		ti->error = "Maximum size of target IO is too large";
1390 		return -EINVAL;
1391 	}
1392 
1393 	ti->max_io_len = (uint32_t) len;
1394 
1395 	return 0;
1396 }
1397 EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
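
/*
 * A target that must not cross its own chunk boundaries would typically call
 * this from its constructor; a hypothetical target using 8-sector (4KiB)
 * chunks might do:
 *
 *	r = dm_set_target_max_io_len(ti, 8);
 *	if (r)
 *		return r;
 *
 * max_io_len() above then caps each data bio clone so that it never spans
 * an 8-sector boundary within the target.
 */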
1398 
1399 /*
1400  * A target may call dm_accept_partial_bio only from the map routine.  It is
1401  * allowed for all bio types except REQ_FLUSH.
1402  *
1403  * dm_accept_partial_bio informs the dm that the target only wants to process
1404  * additional n_sectors sectors of the bio and the rest of the data should be
1405  * sent in a subsequent bio.
1406  *
1407  * A diagram that explains the arithmetic:
1408  * +--------------------+---------------+-------+
1409  * |         1          |       2       |   3   |
1410  * +--------------------+---------------+-------+
1411  *
1412  * <-------------- *tio->len_ptr --------------->
1413  *                      <------- bi_size ------->
1414  *                      <-- n_sectors -->
1415  *
1416  * Region 1 was already iterated over with bio_advance or similar function.
1417  *	(it may be empty if the target doesn't use bio_advance)
1418  * Region 2 is the remaining bio size that the target wants to process.
1419  *	(it may be empty if region 1 is non-empty, although there is no reason
1420  *	 to make it empty)
1421  * The target requires that region 3 is to be sent in the next bio.
1422  *
1423  * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
1424  * the partially processed part (the sum of regions 1+2) must be the same for all
1425  * copies of the bio.
1426  */
1427 void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
1428 {
1429 	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
1430 	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
1431 	BUG_ON(bio->bi_rw & REQ_FLUSH);
1432 	BUG_ON(bi_size > *tio->len_ptr);
1433 	BUG_ON(n_sectors > bi_size);
1434 	*tio->len_ptr -= bi_size - n_sectors;
1435 	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
1436 }
1437 EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
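
/*
 * Sketch of how a target might use this, with a hypothetical map function
 * (example_map, example_dev and max_sectors are illustrative only): a target
 * that can only handle the first max_sectors of a bio accepts that much and
 * the dm core sends the remainder in a further clone.
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		if (bio_sectors(bio) > max_sectors)
 *			dm_accept_partial_bio(bio, max_sectors);
 *		bio->bi_bdev = example_dev->bdev;
 *		return DM_MAPIO_REMAPPED;
 *	}
 */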
1438 
1439 static void __map_bio(struct dm_target_io *tio)
1440 {
1441 	int r;
1442 	sector_t sector;
1443 	struct mapped_device *md;
1444 	struct bio *clone = &tio->clone;
1445 	struct dm_target *ti = tio->ti;
1446 
1447 	clone->bi_end_io = clone_endio;
1448 
1449 	/*
1450 	 * Map the clone.  If r == 0 we don't need to do
1451 	 * anything, the target has assumed ownership of
1452 	 * this io.
1453 	 */
1454 	atomic_inc(&tio->io->io_count);
1455 	sector = clone->bi_iter.bi_sector;
1456 	r = ti->type->map(ti, clone);
1457 	if (r == DM_MAPIO_REMAPPED) {
1458 		/* the bio has been remapped so dispatch it */
1459 
1460 		trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone,
1461 				      tio->io->bio->bi_bdev->bd_dev, sector);
1462 
1463 		generic_make_request(clone);
1464 	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
1465 		/* error the io and bail out, or requeue it if needed */
1466 		md = tio->io->md;
1467 		dec_pending(tio->io, r);
1468 		free_tio(md, tio);
1469 	} else if (r) {
1470 		DMWARN("unimplemented target map return value: %d", r);
1471 		BUG();
1472 	}
1473 }
1474 
1475 struct clone_info {
1476 	struct mapped_device *md;
1477 	struct dm_table *map;
1478 	struct bio *bio;
1479 	struct dm_io *io;
1480 	sector_t sector;
1481 	unsigned sector_count;
1482 };
1483 
1484 static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
1485 {
1486 	bio->bi_iter.bi_sector = sector;
1487 	bio->bi_iter.bi_size = to_bytes(len);
1488 }
1489 
1490 /*
1491  * Creates a bio that consists of range of complete bvecs.
1492  */
1493 static void clone_bio(struct dm_target_io *tio, struct bio *bio,
1494 		      sector_t sector, unsigned len)
1495 {
1496 	struct bio *clone = &tio->clone;
1497 
1498 	__bio_clone_fast(clone, bio);
1499 
1500 	if (bio_integrity(bio))
1501 		bio_integrity_clone(clone, bio, GFP_NOIO);
1502 
1503 	bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
1504 	clone->bi_iter.bi_size = to_bytes(len);
1505 
1506 	if (bio_integrity(bio))
1507 		bio_integrity_trim(clone, 0, len);
1508 }
1509 
1510 static struct dm_target_io *alloc_tio(struct clone_info *ci,
1511 				      struct dm_target *ti,
1512 				      unsigned target_bio_nr)
1513 {
1514 	struct dm_target_io *tio;
1515 	struct bio *clone;
1516 
1517 	clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs);
1518 	tio = container_of(clone, struct dm_target_io, clone);
1519 
1520 	tio->io = ci->io;
1521 	tio->ti = ti;
1522 	tio->target_bio_nr = target_bio_nr;
1523 
1524 	return tio;
1525 }
1526 
1527 static void __clone_and_map_simple_bio(struct clone_info *ci,
1528 				       struct dm_target *ti,
1529 				       unsigned target_bio_nr, unsigned *len)
1530 {
1531 	struct dm_target_io *tio = alloc_tio(ci, ti, target_bio_nr);
1532 	struct bio *clone = &tio->clone;
1533 
1534 	tio->len_ptr = len;
1535 
1536 	__bio_clone_fast(clone, ci->bio);
1537 	if (len)
1538 		bio_setup_sector(clone, ci->sector, *len);
1539 
1540 	__map_bio(tio);
1541 }
1542 
1543 static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
1544 				  unsigned num_bios, unsigned *len)
1545 {
1546 	unsigned target_bio_nr;
1547 
1548 	for (target_bio_nr = 0; target_bio_nr < num_bios; target_bio_nr++)
1549 		__clone_and_map_simple_bio(ci, ti, target_bio_nr, len);
1550 }
1551 
1552 static int __send_empty_flush(struct clone_info *ci)
1553 {
1554 	unsigned target_nr = 0;
1555 	struct dm_target *ti;
1556 
1557 	BUG_ON(bio_has_data(ci->bio));
1558 	while ((ti = dm_table_get_target(ci->map, target_nr++)))
1559 		__send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
1560 
1561 	return 0;
1562 }
1563 
1564 static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
1565 				     sector_t sector, unsigned *len)
1566 {
1567 	struct bio *bio = ci->bio;
1568 	struct dm_target_io *tio;
1569 	unsigned target_bio_nr;
1570 	unsigned num_target_bios = 1;
1571 
1572 	/*
1573 	 * Does the target want to receive duplicate copies of the bio?
1574 	 */
1575 	if (bio_data_dir(bio) == WRITE && ti->num_write_bios)
1576 		num_target_bios = ti->num_write_bios(ti, bio);
1577 
1578 	for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) {
1579 		tio = alloc_tio(ci, ti, target_bio_nr);
1580 		tio->len_ptr = len;
1581 		clone_bio(tio, bio, sector, *len);
1582 		__map_bio(tio);
1583 	}
1584 }
1585 
1586 typedef unsigned (*get_num_bios_fn)(struct dm_target *ti);
1587 
1588 static unsigned get_num_discard_bios(struct dm_target *ti)
1589 {
1590 	return ti->num_discard_bios;
1591 }
1592 
1593 static unsigned get_num_write_same_bios(struct dm_target *ti)
1594 {
1595 	return ti->num_write_same_bios;
1596 }
1597 
1598 typedef bool (*is_split_required_fn)(struct dm_target *ti);
1599 
1600 static bool is_split_required_for_discard(struct dm_target *ti)
1601 {
1602 	return ti->split_discard_bios;
1603 }
1604 
1605 static int __send_changing_extent_only(struct clone_info *ci,
1606 				       get_num_bios_fn get_num_bios,
1607 				       is_split_required_fn is_split_required)
1608 {
1609 	struct dm_target *ti;
1610 	unsigned len;
1611 	unsigned num_bios;
1612 
1613 	do {
1614 		ti = dm_table_find_target(ci->map, ci->sector);
1615 		if (!dm_target_is_valid(ti))
1616 			return -EIO;
1617 
1618 		/*
1619 		 * Even though the device advertised support for this type of
1620 		 * request, that does not mean every target supports it, and
1621 		 * reconfiguration might also have changed that since the
1622 		 * check was performed.
1623 		 */
1624 		num_bios = get_num_bios ? get_num_bios(ti) : 0;
1625 		if (!num_bios)
1626 			return -EOPNOTSUPP;
1627 
1628 		if (is_split_required && !is_split_required(ti))
1629 			len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
1630 		else
1631 			len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti));
1632 
1633 		__send_duplicate_bios(ci, ti, num_bios, &len);
1634 
1635 		ci->sector += len;
1636 	} while (ci->sector_count -= len);
1637 
1638 	return 0;
1639 }
1640 
1641 static int __send_discard(struct clone_info *ci)
1642 {
1643 	return __send_changing_extent_only(ci, get_num_discard_bios,
1644 					   is_split_required_for_discard);
1645 }
1646 
1647 static int __send_write_same(struct clone_info *ci)
1648 {
1649 	return __send_changing_extent_only(ci, get_num_write_same_bios, NULL);
1650 }
1651 
1652 /*
1653  * Select the correct strategy for processing a non-flush bio.
1654  */
1655 static int __split_and_process_non_flush(struct clone_info *ci)
1656 {
1657 	struct bio *bio = ci->bio;
1658 	struct dm_target *ti;
1659 	unsigned len;
1660 
1661 	if (unlikely(bio->bi_rw & REQ_DISCARD))
1662 		return __send_discard(ci);
1663 	else if (unlikely(bio->bi_rw & REQ_WRITE_SAME))
1664 		return __send_write_same(ci);
1665 
1666 	ti = dm_table_find_target(ci->map, ci->sector);
1667 	if (!dm_target_is_valid(ti))
1668 		return -EIO;
1669 
1670 	len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);
1671 
1672 	__clone_and_map_data_bio(ci, ti, ci->sector, &len);
1673 
1674 	ci->sector += len;
1675 	ci->sector_count -= len;
1676 
1677 	return 0;
1678 }
1679 
1680 /*
1681  * Entry point to split a bio into clones and submit them to the targets.
1682  */
1683 static void __split_and_process_bio(struct mapped_device *md,
1684 				    struct dm_table *map, struct bio *bio)
1685 {
1686 	struct clone_info ci;
1687 	int error = 0;
1688 
1689 	if (unlikely(!map)) {
1690 		bio_io_error(bio);
1691 		return;
1692 	}
1693 
1694 	ci.map = map;
1695 	ci.md = md;
1696 	ci.io = alloc_io(md);
1697 	ci.io->error = 0;
1698 	atomic_set(&ci.io->io_count, 1);
1699 	ci.io->bio = bio;
1700 	ci.io->md = md;
1701 	spin_lock_init(&ci.io->endio_lock);
1702 	ci.sector = bio->bi_iter.bi_sector;
1703 
1704 	start_io_acct(ci.io);
1705 
1706 	if (bio->bi_rw & REQ_FLUSH) {
1707 		ci.bio = &ci.md->flush_bio;
1708 		ci.sector_count = 0;
1709 		error = __send_empty_flush(&ci);
1710 		/* dec_pending submits any data associated with flush */
1711 	} else {
1712 		ci.bio = bio;
1713 		ci.sector_count = bio_sectors(bio);
1714 		while (ci.sector_count && !error)
1715 			error = __split_and_process_non_flush(&ci);
1716 	}
1717 
1718 	/* drop the extra reference count */
1719 	dec_pending(ci.io, error);
1720 }
1721 /*-----------------------------------------------------------------
1722  * CRUD END
1723  *---------------------------------------------------------------*/
1724 
1725 static int dm_merge_bvec(struct request_queue *q,
1726 			 struct bvec_merge_data *bvm,
1727 			 struct bio_vec *biovec)
1728 {
1729 	struct mapped_device *md = q->queuedata;
1730 	struct dm_table *map = dm_get_live_table_fast(md);
1731 	struct dm_target *ti;
1732 	sector_t max_sectors, max_size = 0;
1733 
1734 	if (unlikely(!map))
1735 		goto out;
1736 
1737 	ti = dm_table_find_target(map, bvm->bi_sector);
1738 	if (!dm_target_is_valid(ti))
1739 		goto out;
1740 
1741 	/*
1742 	 * Find maximum amount of I/O that won't need splitting
1743 	 */
1744 	max_sectors = min(max_io_len(bvm->bi_sector, ti),
1745 			  (sector_t) queue_max_sectors(q));
1746 	max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
1747 
1748 	/*
1749 	 * FIXME: this stop-gap fix _must_ be cleaned up (by passing a sector_t
1750 	 * to the targets' merge function since it holds sectors not bytes).
1751 	 * Just doing this as an interim fix for stable@ because the more
1752 	 * comprehensive cleanup of switching to sector_t will impact every
1753 	 * DM target that implements a ->merge hook.
1754 	 */
1755 	if (max_size > INT_MAX)
1756 		max_size = INT_MAX;
1757 
1758 	/*
1759 	 * merge_bvec_fn() returns number of bytes
1760 	 * it can accept at this offset
1761 	 * max is precomputed maximal io size
1762 	 */
1763 	if (max_size && ti->type->merge)
1764 		max_size = ti->type->merge(ti, bvm, biovec, (int) max_size);
1765 	/*
1766 	 * If the target doesn't support the merge method and some of the devices
1767 	 * provided their merge_bvec method (we know this by looking for the
1768 	 * max_hw_sectors that dm_set_device_limits may set), then we can't
1769 	 * allow bios with multiple vector entries.  So always set max_size
1770 	 * to 0, and the code below allows just one page.
1771 	 */
1772 	else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
1773 		max_size = 0;
1774 
1775 out:
1776 	dm_put_live_table_fast(md);
1777 	/*
1778 	 * Always allow an entire first page
1779 	 */
1780 	if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
1781 		max_size = biovec->bv_len;
1782 
1783 	return max_size;
1784 }
1785 
1786 /*
1787  * The request function that just remaps the bio built up by
1788  * dm_merge_bvec.
1789  */
1790 static void dm_make_request(struct request_queue *q, struct bio *bio)
1791 {
1792 	int rw = bio_data_dir(bio);
1793 	struct mapped_device *md = q->queuedata;
1794 	int srcu_idx;
1795 	struct dm_table *map;
1796 
1797 	map = dm_get_live_table(md, &srcu_idx);
1798 
1799 	generic_start_io_acct(rw, bio_sectors(bio), &dm_disk(md)->part0);
1800 
1801 	/* if we're suspended, we have to queue this io for later */
1802 	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
1803 		dm_put_live_table(md, srcu_idx);
1804 
1805 		if (bio_rw(bio) != READA)
1806 			queue_io(md, bio);
1807 		else
1808 			bio_io_error(bio);
1809 		return;
1810 	}
1811 
1812 	__split_and_process_bio(md, map, bio);
1813 	dm_put_live_table(md, srcu_idx);
1814 	return;
1815 }
1816 
1817 int dm_request_based(struct mapped_device *md)
1818 {
1819 	return blk_queue_stackable(md->queue);
1820 }
1821 
1822 static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
1823 {
1824 	int r;
1825 
1826 	if (blk_queue_io_stat(clone->q))
1827 		clone->cmd_flags |= REQ_IO_STAT;
1828 
1829 	clone->start_time = jiffies;
1830 	r = blk_insert_cloned_request(clone->q, clone);
1831 	if (r)
1832 		/* must complete clone in terms of original request */
1833 		dm_complete_request(rq, r);
1834 }
1835 
1836 static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
1837 				 void *data)
1838 {
1839 	struct dm_rq_target_io *tio = data;
1840 	struct dm_rq_clone_bio_info *info =
1841 		container_of(bio, struct dm_rq_clone_bio_info, clone);
1842 
1843 	info->orig = bio_orig;
1844 	info->tio = tio;
1845 	bio->bi_end_io = end_clone_bio;
1846 
1847 	return 0;
1848 }
1849 
1850 static int setup_clone(struct request *clone, struct request *rq,
1851 		       struct dm_rq_target_io *tio, gfp_t gfp_mask)
1852 {
1853 	int r;
1854 
1855 	r = blk_rq_prep_clone(clone, rq, tio->md->bs, gfp_mask,
1856 			      dm_rq_bio_constructor, tio);
1857 	if (r)
1858 		return r;
1859 
1860 	clone->cmd = rq->cmd;
1861 	clone->cmd_len = rq->cmd_len;
1862 	clone->sense = rq->sense;
1863 	clone->end_io = end_clone_request;
1864 	clone->end_io_data = tio;
1865 
1866 	tio->clone = clone;
1867 
1868 	return 0;
1869 }
1870 
1871 static struct request *clone_rq(struct request *rq, struct mapped_device *md,
1872 				struct dm_rq_target_io *tio, gfp_t gfp_mask)
1873 {
1874 	/*
1875 	 * Do not allocate a clone if tio->clone was already set
1876 	 * (see: dm_mq_queue_rq).
1877 	 */
1878 	bool alloc_clone = !tio->clone;
1879 	struct request *clone;
1880 
1881 	if (alloc_clone) {
1882 		clone = alloc_clone_request(md, gfp_mask);
1883 		if (!clone)
1884 			return NULL;
1885 	} else
1886 		clone = tio->clone;
1887 
1888 	blk_rq_init(NULL, clone);
1889 	if (setup_clone(clone, rq, tio, gfp_mask)) {
1890 		/* -ENOMEM */
1891 		if (alloc_clone)
1892 			free_clone_request(md, clone);
1893 		return NULL;
1894 	}
1895 
1896 	return clone;
1897 }
1898 
1899 static void map_tio_request(struct kthread_work *work);
1900 
1901 static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
1902 		     struct mapped_device *md)
1903 {
1904 	tio->md = md;
1905 	tio->ti = NULL;
1906 	tio->clone = NULL;
1907 	tio->orig = rq;
1908 	tio->error = 0;
1909 	memset(&tio->info, 0, sizeof(tio->info));
1910 	if (md->kworker_task)
1911 		init_kthread_work(&tio->work, map_tio_request);
1912 }
1913 
1914 static struct dm_rq_target_io *prep_tio(struct request *rq,
1915 					struct mapped_device *md, gfp_t gfp_mask)
1916 {
1917 	struct dm_rq_target_io *tio;
1918 	int srcu_idx;
1919 	struct dm_table *table;
1920 
1921 	tio = alloc_rq_tio(md, gfp_mask);
1922 	if (!tio)
1923 		return NULL;
1924 
1925 	init_tio(tio, rq, md);
1926 
1927 	table = dm_get_live_table(md, &srcu_idx);
1928 	if (!dm_table_mq_request_based(table)) {
1929 		if (!clone_rq(rq, md, tio, gfp_mask)) {
1930 			dm_put_live_table(md, srcu_idx);
1931 			free_rq_tio(tio);
1932 			return NULL;
1933 		}
1934 	}
1935 	dm_put_live_table(md, srcu_idx);
1936 
1937 	return tio;
1938 }
1939 
1940 /*
1941  * Called with the queue lock held.
1942  */
1943 static int dm_prep_fn(struct request_queue *q, struct request *rq)
1944 {
1945 	struct mapped_device *md = q->queuedata;
1946 	struct dm_rq_target_io *tio;
1947 
1948 	if (unlikely(rq->special)) {
1949 		DMWARN("Already has something in rq->special.");
1950 		return BLKPREP_KILL;
1951 	}
1952 
1953 	tio = prep_tio(rq, md, GFP_ATOMIC);
1954 	if (!tio)
1955 		return BLKPREP_DEFER;
1956 
1957 	rq->special = tio;
1958 	rq->cmd_flags |= REQ_DONTPREP;
1959 
1960 	return BLKPREP_OK;
1961 }
1962 
1963 /*
1964  * Returns:
1965  * 0                : the request has been processed
1966  * DM_MAPIO_REQUEUE : the original request needs to be requeued
1967  * < 0              : the request was completed due to failure
1968  */
1969 static int map_request(struct dm_rq_target_io *tio, struct request *rq,
1970 		       struct mapped_device *md)
1971 {
1972 	int r;
1973 	struct dm_target *ti = tio->ti;
1974 	struct request *clone = NULL;
1975 
1976 	if (tio->clone) {
1977 		clone = tio->clone;
1978 		r = ti->type->map_rq(ti, clone, &tio->info);
1979 	} else {
1980 		r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
1981 		if (r < 0) {
1982 			/* The target wants to complete the I/O */
1983 			dm_kill_unmapped_request(rq, r);
1984 			return r;
1985 		}
1986 		if (r != DM_MAPIO_REMAPPED)
1987 			return r;
1988 		if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
1989 			/* -ENOMEM */
1990 			ti->type->release_clone_rq(clone);
1991 			return DM_MAPIO_REQUEUE;
1992 		}
1993 	}
1994 
1995 	switch (r) {
1996 	case DM_MAPIO_SUBMITTED:
1997 		/* The target has taken the I/O to submit by itself later */
1998 		break;
1999 	case DM_MAPIO_REMAPPED:
2000 		/* The target has remapped the I/O so dispatch it */
2001 		trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
2002 				     blk_rq_pos(rq));
2003 		dm_dispatch_clone_request(clone, rq);
2004 		break;
2005 	case DM_MAPIO_REQUEUE:
2006 		/* The target wants to requeue the I/O */
2007 		dm_requeue_original_request(md, tio->orig);
2008 		break;
2009 	default:
2010 		if (r > 0) {
2011 			DMWARN("unimplemented target map return value: %d", r);
2012 			BUG();
2013 		}
2014 
2015 		/* The target wants to complete the I/O */
2016 		dm_kill_unmapped_request(rq, r);
2017 		return r;
2018 	}
2019 
2020 	return 0;
2021 }
2022 
2023 static void map_tio_request(struct kthread_work *work)
2024 {
2025 	struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);
2026 	struct request *rq = tio->orig;
2027 	struct mapped_device *md = tio->md;
2028 
2029 	if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE)
2030 		dm_requeue_original_request(md, rq);
2031 }
2032 
2033 static void dm_start_request(struct mapped_device *md, struct request *orig)
2034 {
2035 	if (!orig->q->mq_ops)
2036 		blk_start_request(orig);
2037 	else
2038 		blk_mq_start_request(orig);
2039 	atomic_inc(&md->pending[rq_data_dir(orig)]);
2040 
2041 	if (md->seq_rq_merge_deadline_usecs) {
2042 		md->last_rq_pos = rq_end_sector(orig);
2043 		md->last_rq_rw = rq_data_dir(orig);
2044 		md->last_rq_start_time = ktime_get();
2045 	}
2046 
2047 	if (unlikely(dm_stats_used(&md->stats))) {
2048 		struct dm_rq_target_io *tio = tio_from_request(orig);
2049 		tio->duration_jiffies = jiffies;
2050 		tio->n_sectors = blk_rq_sectors(orig);
2051 		dm_stats_account_io(&md->stats, orig->cmd_flags, blk_rq_pos(orig),
2052 				    tio->n_sectors, false, 0, &tio->stats_aux);
2053 	}
2054 
2055 	/*
2056 	 * Hold the md reference here for the in-flight I/O.
2057 	 * We can't rely on the reference count taken by the device opener,
2058 	 * because the device may be closed while the request is completing,
2059 	 * once all of its bios have completed.
2060 	 * See the comment in rq_completed() too.
2061 	 */
2062 	dm_get(md);
2063 }
2064 
2065 #define MAX_SEQ_RQ_MERGE_DEADLINE_USECS 100000
2066 
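/*
 * These two helpers back the per-device "rq_based_seq_io_merge_deadline"
 * sysfs attribute (hooked up from dm-sysfs.c).  The value is in
 * microseconds and is clamped to MAX_SEQ_RQ_MERGE_DEADLINE_USECS; writing
 * 0 disables the merge heuristic, and writes are silently ignored for
 * devices that are not request-based or that use blk-mq.
 */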
2067 ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
2068 {
2069 	return sprintf(buf, "%u\n", md->seq_rq_merge_deadline_usecs);
2070 }
2071 
2072 ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
2073 						     const char *buf, size_t count)
2074 {
2075 	unsigned deadline;
2076 
2077 	if (!dm_request_based(md) || md->use_blk_mq)
2078 		return count;
2079 
2080 	if (kstrtouint(buf, 10, &deadline))
2081 		return -EINVAL;
2082 
2083 	if (deadline > MAX_SEQ_RQ_MERGE_DEADLINE_USECS)
2084 		deadline = MAX_SEQ_RQ_MERGE_DEADLINE_USECS;
2085 
2086 	md->seq_rq_merge_deadline_usecs = deadline;
2087 
2088 	return count;
2089 }
2090 
2091 static bool dm_request_peeked_before_merge_deadline(struct mapped_device *md)
2092 {
2093 	ktime_t kt_deadline;
2094 
2095 	if (!md->seq_rq_merge_deadline_usecs)
2096 		return false;
2097 
2098 	kt_deadline = ns_to_ktime((u64)md->seq_rq_merge_deadline_usecs * NSEC_PER_USEC);
2099 	kt_deadline = ktime_add_safe(md->last_rq_start_time, kt_deadline);
2100 
2101 	return !ktime_after(ktime_get(), kt_deadline);
2102 }
2103 
2104 /*
2105  * q->request_fn for request-based dm.
2106  * Called with the queue lock held.
2107  */
2108 static void dm_request_fn(struct request_queue *q)
2109 {
2110 	struct mapped_device *md = q->queuedata;
2111 	int srcu_idx;
2112 	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
2113 	struct dm_target *ti;
2114 	struct request *rq;
2115 	struct dm_rq_target_io *tio;
2116 	sector_t pos;
2117 
2118 	/*
2119 	 * For suspend, check blk_queue_stopped() and increment
2120 	 * ->pending within a single queue_lock so that the number of
2121 	 * in-flight I/Os is not incremented after the queue is stopped
2122 	 * in dm_suspend().
2123 	 */
2124 	while (!blk_queue_stopped(q)) {
2125 		rq = blk_peek_request(q);
2126 		if (!rq)
2127 			goto out;
2128 
2129 		/* always use block 0 to find the target for flushes for now */
2130 		pos = 0;
2131 		if (!(rq->cmd_flags & REQ_FLUSH))
2132 			pos = blk_rq_pos(rq);
2133 
2134 		ti = dm_table_find_target(map, pos);
2135 		if (!dm_target_is_valid(ti)) {
2136 			/*
2137 			 * Must perform the setup that rq_completed() requires
2138 			 * before calling dm_kill_unmapped_request()
2139 			 */
2140 			DMERR_LIMIT("request attempted access beyond the end of device");
2141 			dm_start_request(md, rq);
2142 			dm_kill_unmapped_request(rq, -EIO);
2143 			continue;
2144 		}
2145 
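		/*
		 * Merge heuristic: while I/O is still in flight, briefly hold
		 * back (see delay_and_out) a single-segment request that
		 * starts where the last dispatched request ended and goes in
		 * the same direction, as long as the configured merge
		 * deadline has not yet expired, so the elevator gets a
		 * chance to merge it.
		 */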
2146 		if (dm_request_peeked_before_merge_deadline(md) &&
2147 		    md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 &&
2148 		    md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq))
2149 			goto delay_and_out;
2150 
2151 		if (ti->type->busy && ti->type->busy(ti))
2152 			goto delay_and_out;
2153 
2154 		dm_start_request(md, rq);
2155 
2156 		tio = tio_from_request(rq);
2157 		/* Establish tio->ti before queuing work (map_tio_request) */
2158 		tio->ti = ti;
2159 		queue_kthread_work(&md->kworker, &tio->work);
2160 		BUG_ON(!irqs_disabled());
2161 	}
2162 
2163 	goto out;
2164 
2165 delay_and_out:
2166 	blk_delay_queue(q, HZ / 100);
2167 out:
2168 	dm_put_live_table(md, srcu_idx);
2169 }
2170 
2171 static int dm_any_congested(void *congested_data, int bdi_bits)
2172 {
2173 	int r = bdi_bits;
2174 	struct mapped_device *md = congested_data;
2175 	struct dm_table *map;
2176 
2177 	if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
2178 		map = dm_get_live_table_fast(md);
2179 		if (map) {
2180 			/*
2181 			 * Request-based dm cares only about its own queue for
2182 			 * queries about the congestion status of the request_queue
2183 			 */
2184 			if (dm_request_based(md))
2185 				r = md->queue->backing_dev_info.wb.state &
2186 				    bdi_bits;
2187 			else
2188 				r = dm_table_any_congested(map, bdi_bits);
2189 		}
2190 		dm_put_live_table_fast(md);
2191 	}
2192 
2193 	return r;
2194 }
2195 
2196 /*-----------------------------------------------------------------
2197  * An IDR is used to keep track of allocated minor numbers.
2198  *---------------------------------------------------------------*/
2199 static void free_minor(int minor)
2200 {
2201 	spin_lock(&_minor_lock);
2202 	idr_remove(&_minor_idr, minor);
2203 	spin_unlock(&_minor_lock);
2204 }
2205 
2206 /*
2207  * See if the device with a specific minor # is free.
2208  */
2209 static int specific_minor(int minor)
2210 {
2211 	int r;
2212 
2213 	if (minor >= (1 << MINORBITS))
2214 		return -EINVAL;
2215 
2216 	idr_preload(GFP_KERNEL);
2217 	spin_lock(&_minor_lock);
2218 
2219 	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);
2220 
2221 	spin_unlock(&_minor_lock);
2222 	idr_preload_end();
2223 	if (r < 0)
2224 		return r == -ENOSPC ? -EBUSY : r;
2225 	return 0;
2226 }
2227 
2228 static int next_free_minor(int *minor)
2229 {
2230 	int r;
2231 
2232 	idr_preload(GFP_KERNEL);
2233 	spin_lock(&_minor_lock);
2234 
2235 	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);
2236 
2237 	spin_unlock(&_minor_lock);
2238 	idr_preload_end();
2239 	if (r < 0)
2240 		return r;
2241 	*minor = r;
2242 	return 0;
2243 }
2244 
2245 static const struct block_device_operations dm_blk_dops;
2246 
2247 static void dm_wq_work(struct work_struct *work);
2248 
2249 static void dm_init_md_queue(struct mapped_device *md)
2250 {
2251 	/*
2252 	 * Request-based dm devices cannot be stacked on top of bio-based dm
2253 	 * devices.  The type of this dm device may not have been decided yet.
2254 	 * The type is decided at the first table loading time.
2255 	 * To prevent problematic device stacking, clear the queue flag
2256 	 * for request stacking support until then.
2257 	 *
2258 	 * This queue is new, so no concurrency on the queue_flags.
2259 	 */
2260 	queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
2261 }
2262 
2263 static void dm_init_old_md_queue(struct mapped_device *md)
2264 {
2265 	md->use_blk_mq = false;
2266 	dm_init_md_queue(md);
2267 
2268 	/*
2269 	 * Initialize aspects of queue that aren't relevant for blk-mq
2270 	 */
2271 	md->queue->queuedata = md;
2272 	md->queue->backing_dev_info.congested_fn = dm_any_congested;
2273 	md->queue->backing_dev_info.congested_data = md;
2274 
2275 	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
2276 }
2277 
2278 static void cleanup_mapped_device(struct mapped_device *md)
2279 {
2280 	if (md->wq)
2281 		destroy_workqueue(md->wq);
2282 	if (md->kworker_task)
2283 		kthread_stop(md->kworker_task);
2284 	if (md->io_pool)
2285 		mempool_destroy(md->io_pool);
2286 	if (md->rq_pool)
2287 		mempool_destroy(md->rq_pool);
2288 	if (md->bs)
2289 		bioset_free(md->bs);
2290 
2291 	cleanup_srcu_struct(&md->io_barrier);
2292 
2293 	if (md->disk) {
2294 		spin_lock(&_minor_lock);
2295 		md->disk->private_data = NULL;
2296 		spin_unlock(&_minor_lock);
2297 		if (blk_get_integrity(md->disk))
2298 			blk_integrity_unregister(md->disk);
2299 		del_gendisk(md->disk);
2300 		put_disk(md->disk);
2301 	}
2302 
2303 	if (md->queue)
2304 		blk_cleanup_queue(md->queue);
2305 
2306 	if (md->bdev) {
2307 		bdput(md->bdev);
2308 		md->bdev = NULL;
2309 	}
2310 }
2311 
2312 /*
2313  * Allocate and initialise a blank device with a given minor.
2314  */
2315 static struct mapped_device *alloc_dev(int minor)
2316 {
2317 	int r;
2318 	struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
2319 	void *old_md;
2320 
2321 	if (!md) {
2322 		DMWARN("unable to allocate device, out of memory.");
2323 		return NULL;
2324 	}
2325 
2326 	if (!try_module_get(THIS_MODULE))
2327 		goto bad_module_get;
2328 
2329 	/* get a minor number for the dev */
2330 	if (minor == DM_ANY_MINOR)
2331 		r = next_free_minor(&minor);
2332 	else
2333 		r = specific_minor(minor);
2334 	if (r < 0)
2335 		goto bad_minor;
2336 
2337 	r = init_srcu_struct(&md->io_barrier);
2338 	if (r < 0)
2339 		goto bad_io_barrier;
2340 
2341 	md->use_blk_mq = use_blk_mq;
2342 	md->type = DM_TYPE_NONE;
2343 	mutex_init(&md->suspend_lock);
2344 	mutex_init(&md->type_lock);
2345 	mutex_init(&md->table_devices_lock);
2346 	spin_lock_init(&md->deferred_lock);
2347 	atomic_set(&md->holders, 1);
2348 	atomic_set(&md->open_count, 0);
2349 	atomic_set(&md->event_nr, 0);
2350 	atomic_set(&md->uevent_seq, 0);
2351 	INIT_LIST_HEAD(&md->uevent_list);
2352 	INIT_LIST_HEAD(&md->table_devices);
2353 	spin_lock_init(&md->uevent_lock);
2354 
2355 	md->queue = blk_alloc_queue(GFP_KERNEL);
2356 	if (!md->queue)
2357 		goto bad;
2358 
2359 	dm_init_md_queue(md);
2360 
2361 	md->disk = alloc_disk(1);
2362 	if (!md->disk)
2363 		goto bad;
2364 
2365 	atomic_set(&md->pending[0], 0);
2366 	atomic_set(&md->pending[1], 0);
2367 	init_waitqueue_head(&md->wait);
2368 	INIT_WORK(&md->work, dm_wq_work);
2369 	init_waitqueue_head(&md->eventq);
2370 	init_completion(&md->kobj_holder.completion);
2371 	md->kworker_task = NULL;
2372 
2373 	md->disk->major = _major;
2374 	md->disk->first_minor = minor;
2375 	md->disk->fops = &dm_blk_dops;
2376 	md->disk->queue = md->queue;
2377 	md->disk->private_data = md;
2378 	sprintf(md->disk->disk_name, "dm-%d", minor);
2379 	add_disk(md->disk);
2380 	format_dev_t(md->name, MKDEV(_major, minor));
2381 
2382 	md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
2383 	if (!md->wq)
2384 		goto bad;
2385 
2386 	md->bdev = bdget_disk(md->disk, 0);
2387 	if (!md->bdev)
2388 		goto bad;
2389 
2390 	bio_init(&md->flush_bio);
2391 	md->flush_bio.bi_bdev = md->bdev;
2392 	md->flush_bio.bi_rw = WRITE_FLUSH;
2393 
2394 	dm_stats_init(&md->stats);
2395 
2396 	/* Populate the mapping, nobody knows we exist yet */
2397 	spin_lock(&_minor_lock);
2398 	old_md = idr_replace(&_minor_idr, md, minor);
2399 	spin_unlock(&_minor_lock);
2400 
2401 	BUG_ON(old_md != MINOR_ALLOCED);
2402 
2403 	return md;
2404 
2405 bad:
2406 	cleanup_mapped_device(md);
2407 bad_io_barrier:
2408 	free_minor(minor);
2409 bad_minor:
2410 	module_put(THIS_MODULE);
2411 bad_module_get:
2412 	kfree(md);
2413 	return NULL;
2414 }
2415 
2416 static void unlock_fs(struct mapped_device *md);
2417 
2418 static void free_dev(struct mapped_device *md)
2419 {
2420 	int minor = MINOR(disk_devt(md->disk));
2421 
2422 	unlock_fs(md);
2423 
2424 	cleanup_mapped_device(md);
2425 	if (md->use_blk_mq)
2426 		blk_mq_free_tag_set(&md->tag_set);
2427 
2428 	free_table_devices(&md->table_devices);
2429 	dm_stats_cleanup(&md->stats);
2430 	free_minor(minor);
2431 
2432 	module_put(THIS_MODULE);
2433 	kfree(md);
2434 }
2435 
2436 static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
2437 {
2438 	struct dm_md_mempools *p = dm_table_get_md_mempools(t);
2439 
2440 	if (md->bs) {
2441 		/* The md already has necessary mempools. */
2442 		if (dm_table_get_type(t) == DM_TYPE_BIO_BASED) {
2443 			/*
2444 			 * Reload the bioset because front_pad may have changed
2445 			 * if a different table was loaded.
2446 			 */
2447 			bioset_free(md->bs);
2448 			md->bs = p->bs;
2449 			p->bs = NULL;
2450 		}
2451 		/*
2452 		 * There's no need to reload with request-based dm
2453 		 * because the size of front_pad doesn't change.
2454 		 * Note for the future: if the bioset is ever reloaded here,
2455 		 * prepped requests in the queue may still refer
2456 		 * to bios from the old bioset, so the queue must be
2457 		 * walked to unprep them.
2458 		 */
2459 		goto out;
2460 	}
2461 
2462 	BUG_ON(!p || md->io_pool || md->rq_pool || md->bs);
2463 
2464 	md->io_pool = p->io_pool;
2465 	p->io_pool = NULL;
2466 	md->rq_pool = p->rq_pool;
2467 	p->rq_pool = NULL;
2468 	md->bs = p->bs;
2469 	p->bs = NULL;
2470 
2471 out:
2472 	/* mempool bind completed, no longer need any mempools in the table */
2473 	dm_table_free_md_mempools(t);
2474 }
2475 
2476 /*
2477  * Bind a table to the device.
2478  */
2479 static void event_callback(void *context)
2480 {
2481 	unsigned long flags;
2482 	LIST_HEAD(uevents);
2483 	struct mapped_device *md = (struct mapped_device *) context;
2484 
2485 	spin_lock_irqsave(&md->uevent_lock, flags);
2486 	list_splice_init(&md->uevent_list, &uevents);
2487 	spin_unlock_irqrestore(&md->uevent_lock, flags);
2488 
2489 	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
2490 
2491 	atomic_inc(&md->event_nr);
2492 	wake_up(&md->eventq);
2493 }
2494 
2495 /*
2496  * Protected by md->suspend_lock obtained by dm_swap_table().
2497  */
2498 static void __set_size(struct mapped_device *md, sector_t size)
2499 {
2500 	set_capacity(md->disk, size);
2501 
2502 	i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
2503 }
2504 
2505 /*
2506  * Return 1 if the queue has a compulsory merge_bvec_fn.
2507  *
2508  * If this function returns 0, then the device is either a non-dm
2509  * device without a merge_bvec_fn, or it is a dm device that is
2510  * able to split any bios it receives that are too big.
2511  */
2512 int dm_queue_merge_is_compulsory(struct request_queue *q)
2513 {
2514 	struct mapped_device *dev_md;
2515 
2516 	if (!q->merge_bvec_fn)
2517 		return 0;
2518 
2519 	if (q->make_request_fn == dm_make_request) {
2520 		dev_md = q->queuedata;
2521 		if (test_bit(DMF_MERGE_IS_OPTIONAL, &dev_md->flags))
2522 			return 0;
2523 	}
2524 
2525 	return 1;
2526 }
2527 
2528 static int dm_device_merge_is_compulsory(struct dm_target *ti,
2529 					 struct dm_dev *dev, sector_t start,
2530 					 sector_t len, void *data)
2531 {
2532 	struct block_device *bdev = dev->bdev;
2533 	struct request_queue *q = bdev_get_queue(bdev);
2534 
2535 	return dm_queue_merge_is_compulsory(q);
2536 }
2537 
2538 /*
2539  * Return 1 if it is acceptable to ignore merge_bvec_fn based
2540  * on the properties of the underlying devices.
2541  */
2542 static int dm_table_merge_is_optional(struct dm_table *table)
2543 {
2544 	unsigned i = 0;
2545 	struct dm_target *ti;
2546 
2547 	while (i < dm_table_get_num_targets(table)) {
2548 		ti = dm_table_get_target(table, i++);
2549 
2550 		if (ti->type->iterate_devices &&
2551 		    ti->type->iterate_devices(ti, dm_device_merge_is_compulsory, NULL))
2552 			return 0;
2553 	}
2554 
2555 	return 1;
2556 }
2557 
2558 /*
2559  * Returns old map, which caller must destroy.
2560  */
2561 static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
2562 			       struct queue_limits *limits)
2563 {
2564 	struct dm_table *old_map;
2565 	struct request_queue *q = md->queue;
2566 	sector_t size;
2567 	int merge_is_optional;
2568 
2569 	size = dm_table_get_size(t);
2570 
2571 	/*
2572 	 * Wipe any geometry if the size of the table changed.
2573 	 */
2574 	if (size != dm_get_size(md))
2575 		memset(&md->geometry, 0, sizeof(md->geometry));
2576 
2577 	__set_size(md, size);
2578 
2579 	dm_table_event_callback(t, event_callback, md);
2580 
2581 	/*
2582 	 * The queue hasn't been stopped yet if the old table type wasn't
2583 	 * request-based during suspension, so stop it now to prevent I/O
2584 	 * from being mapped before resume.
2585 	 * This must be done before setting the queue restrictions,
2586 	 * because request-based dm may start running as soon as they are set.
2587 	 */
2588 	if (dm_table_request_based(t))
2589 		stop_queue(q);
2590 
2591 	__bind_mempools(md, t);
2592 
2593 	merge_is_optional = dm_table_merge_is_optional(t);
2594 
2595 	old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2596 	rcu_assign_pointer(md->map, t);
2597 	md->immutable_target_type = dm_table_get_immutable_target_type(t);
2598 
2599 	dm_table_set_restrictions(t, q, limits);
2600 	if (merge_is_optional)
2601 		set_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
2602 	else
2603 		clear_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
2604 	if (old_map)
2605 		dm_sync_table(md);
2606 
2607 	return old_map;
2608 }
2609 
2610 /*
2611  * Returns unbound table for the caller to free.
2612  */
2613 static struct dm_table *__unbind(struct mapped_device *md)
2614 {
2615 	struct dm_table *map = rcu_dereference_protected(md->map, 1);
2616 
2617 	if (!map)
2618 		return NULL;
2619 
2620 	dm_table_event_callback(map, NULL, NULL);
2621 	RCU_INIT_POINTER(md->map, NULL);
2622 	dm_sync_table(md);
2623 
2624 	return map;
2625 }
2626 
2627 /*
2628  * Constructor for a new device.
2629  */
2630 int dm_create(int minor, struct mapped_device **result)
2631 {
2632 	struct mapped_device *md;
2633 
2634 	md = alloc_dev(minor);
2635 	if (!md)
2636 		return -ENXIO;
2637 
2638 	dm_sysfs_init(md);
2639 
2640 	*result = md;
2641 	return 0;
2642 }
2643 
2644 /*
2645  * Functions to manage md->type.
2646  * All are required to hold md->type_lock.
2647  */
2648 void dm_lock_md_type(struct mapped_device *md)
2649 {
2650 	mutex_lock(&md->type_lock);
2651 }
2652 
2653 void dm_unlock_md_type(struct mapped_device *md)
2654 {
2655 	mutex_unlock(&md->type_lock);
2656 }
2657 
2658 void dm_set_md_type(struct mapped_device *md, unsigned type)
2659 {
2660 	BUG_ON(!mutex_is_locked(&md->type_lock));
2661 	md->type = type;
2662 }
2663 
2664 unsigned dm_get_md_type(struct mapped_device *md)
2665 {
2666 	BUG_ON(!mutex_is_locked(&md->type_lock));
2667 	return md->type;
2668 }
2669 
2670 struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
2671 {
2672 	return md->immutable_target_type;
2673 }
2674 
2675 /*
2676  * The queue_limits are only valid as long as you have a reference
2677  * count on 'md'.
2678  */
2679 struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
2680 {
2681 	BUG_ON(!atomic_read(&md->holders));
2682 	return &md->queue->limits;
2683 }
2684 EXPORT_SYMBOL_GPL(dm_get_queue_limits);
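
/*
 * A minimal usage sketch for the helper above (illustrative only, not part
 * of this driver): the caller must hold a reference on the mapped_device
 * while inspecting the limits.  Looking the device up by dev_t is just one
 * assumed way of obtaining such a reference.
 */
#if 0
static unsigned int example_read_max_sectors(dev_t dev)
{
	struct mapped_device *md = dm_get_md(dev);	/* takes a reference */
	unsigned int max_sectors = 0;

	if (md) {
		max_sectors = dm_get_queue_limits(md)->max_sectors;
		dm_put(md);				/* drop the reference */
	}

	return max_sectors;
}
#endif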
2685 
2686 static void init_rq_based_worker_thread(struct mapped_device *md)
2687 {
2688 	/* Initialize the request-based DM worker thread */
2689 	init_kthread_worker(&md->kworker);
2690 	md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
2691 				       "kdmwork-%s", dm_device_name(md));
2692 }
2693 
2694 /*
2695  * Fully initialize a request-based queue (->elevator, ->request_fn, etc).
2696  */
2697 static int dm_init_request_based_queue(struct mapped_device *md)
2698 {
2699 	struct request_queue *q = NULL;
2700 
2701 	/* Fully initialize the queue */
2702 	q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL);
2703 	if (!q)
2704 		return -EINVAL;
2705 
2706 	/* disable dm_request_fn's merge heuristic by default */
2707 	md->seq_rq_merge_deadline_usecs = 0;
2708 
2709 	md->queue = q;
2710 	dm_init_old_md_queue(md);
2711 	blk_queue_softirq_done(md->queue, dm_softirq_done);
2712 	blk_queue_prep_rq(md->queue, dm_prep_fn);
2713 
2714 	init_rq_based_worker_thread(md);
2715 
2716 	elv_register_queue(md->queue);
2717 
2718 	return 0;
2719 }
2720 
2721 static int dm_mq_init_request(void *data, struct request *rq,
2722 			      unsigned int hctx_idx, unsigned int request_idx,
2723 			      unsigned int numa_node)
2724 {
2725 	struct mapped_device *md = data;
2726 	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
2727 
2728 	/*
2729 	 * Must initialize md member of tio, otherwise it won't
2730 	 * be available in dm_mq_queue_rq.
2731 	 */
2732 	tio->md = md;
2733 
2734 	return 0;
2735 }
2736 
2737 static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
2738 			  const struct blk_mq_queue_data *bd)
2739 {
2740 	struct request *rq = bd->rq;
2741 	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
2742 	struct mapped_device *md = tio->md;
2743 	int srcu_idx;
2744 	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
2745 	struct dm_target *ti;
2746 	sector_t pos;
2747 
2748 	/* always use block 0 to find the target for flushes for now */
2749 	pos = 0;
2750 	if (!(rq->cmd_flags & REQ_FLUSH))
2751 		pos = blk_rq_pos(rq);
2752 
2753 	ti = dm_table_find_target(map, pos);
2754 	if (!dm_target_is_valid(ti)) {
2755 		dm_put_live_table(md, srcu_idx);
2756 		DMERR_LIMIT("request attempted access beyond the end of device");
2757 		/*
2758 		 * Must perform the setup that rq_completed() requires
2759 		 * before returning BLK_MQ_RQ_QUEUE_ERROR
2760 		 */
2761 		dm_start_request(md, rq);
2762 		return BLK_MQ_RQ_QUEUE_ERROR;
2763 	}
2764 	dm_put_live_table(md, srcu_idx);
2765 
2766 	if (ti->type->busy && ti->type->busy(ti))
2767 		return BLK_MQ_RQ_QUEUE_BUSY;
2768 
2769 	dm_start_request(md, rq);
2770 
2771 	/* Init tio using md established in .init_request */
2772 	init_tio(tio, rq, md);
2773 
2774 	/*
2775 	 * Establish tio->ti before queuing work (map_tio_request)
2776 	 * or making direct call to map_request().
2777 	 */
2778 	tio->ti = ti;
2779 
2780 	/* Clone the request if underlying devices aren't blk-mq */
2781 	if (dm_table_get_type(map) == DM_TYPE_REQUEST_BASED) {
2782 		/* clone request is allocated at the end of the pdu */
2783 		tio->clone = (void *)blk_mq_rq_to_pdu(rq) + sizeof(struct dm_rq_target_io);
2784 		(void) clone_rq(rq, md, tio, GFP_ATOMIC);
2785 		queue_kthread_work(&md->kworker, &tio->work);
2786 	} else {
2787 		/* Direct call is fine since .queue_rq allows allocations */
2788 		if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) {
2789 			/* Undo dm_start_request() before requeuing */
2790 			rq_end_stats(md, rq);
2791 			rq_completed(md, rq_data_dir(rq), false);
2792 			return BLK_MQ_RQ_QUEUE_BUSY;
2793 		}
2794 	}
2795 
2796 	return BLK_MQ_RQ_QUEUE_OK;
2797 }
2798 
2799 static struct blk_mq_ops dm_mq_ops = {
2800 	.queue_rq = dm_mq_queue_rq,
2801 	.map_queue = blk_mq_map_queue,
2802 	.complete = dm_softirq_done,
2803 	.init_request = dm_mq_init_request,
2804 };
2805 
2806 static int dm_init_request_based_blk_mq_queue(struct mapped_device *md)
2807 {
2808 	unsigned md_type = dm_get_md_type(md);
2809 	struct request_queue *q;
2810 	int err;
2811 
2812 	memset(&md->tag_set, 0, sizeof(md->tag_set));
2813 	md->tag_set.ops = &dm_mq_ops;
2814 	md->tag_set.queue_depth = BLKDEV_MAX_RQ;
2815 	md->tag_set.numa_node = NUMA_NO_NODE;
2816 	md->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
2817 	md->tag_set.nr_hw_queues = 1;
2818 	if (md_type == DM_TYPE_REQUEST_BASED) {
2819 		/* make the memory for non-blk-mq clone part of the pdu */
2820 		md->tag_set.cmd_size = sizeof(struct dm_rq_target_io) + sizeof(struct request);
2821 	} else
2822 		md->tag_set.cmd_size = sizeof(struct dm_rq_target_io);
2823 	md->tag_set.driver_data = md;
2824 
2825 	err = blk_mq_alloc_tag_set(&md->tag_set);
2826 	if (err)
2827 		return err;
2828 
2829 	q = blk_mq_init_allocated_queue(&md->tag_set, md->queue);
2830 	if (IS_ERR(q)) {
2831 		err = PTR_ERR(q);
2832 		goto out_tag_set;
2833 	}
2834 	md->queue = q;
2835 	dm_init_md_queue(md);
2836 
2837 	/* backfill 'mq' sysfs registration normally done in blk_register_queue */
2838 	blk_mq_register_disk(md->disk);
2839 
2840 	if (md_type == DM_TYPE_REQUEST_BASED)
2841 		init_rq_based_worker_thread(md);
2842 
2843 	return 0;
2844 
2845 out_tag_set:
2846 	blk_mq_free_tag_set(&md->tag_set);
2847 	return err;
2848 }
2849 
2850 static unsigned filter_md_type(unsigned type, struct mapped_device *md)
2851 {
2852 	if (type == DM_TYPE_BIO_BASED)
2853 		return type;
2854 
2855 	return !md->use_blk_mq ? DM_TYPE_REQUEST_BASED : DM_TYPE_MQ_REQUEST_BASED;
2856 }
2857 
2858 /*
2859  * Setup the DM device's queue based on md's type
2860  */
2861 int dm_setup_md_queue(struct mapped_device *md)
2862 {
2863 	int r;
2864 	unsigned md_type = filter_md_type(dm_get_md_type(md), md);
2865 
2866 	switch (md_type) {
2867 	case DM_TYPE_REQUEST_BASED:
2868 		r = dm_init_request_based_queue(md);
2869 		if (r) {
2870 			DMWARN("Cannot initialize queue for request-based mapped device");
2871 			return r;
2872 		}
2873 		break;
2874 	case DM_TYPE_MQ_REQUEST_BASED:
2875 		r = dm_init_request_based_blk_mq_queue(md);
2876 		if (r) {
2877 			DMWARN("Cannot initialize queue for request-based blk-mq mapped device");
2878 			return r;
2879 		}
2880 		break;
2881 	case DM_TYPE_BIO_BASED:
2882 		dm_init_old_md_queue(md);
2883 		blk_queue_make_request(md->queue, dm_make_request);
2884 		blk_queue_merge_bvec(md->queue, dm_merge_bvec);
2885 		break;
2886 	}
2887 
2888 	return 0;
2889 }
2890 
2891 struct mapped_device *dm_get_md(dev_t dev)
2892 {
2893 	struct mapped_device *md;
2894 	unsigned minor = MINOR(dev);
2895 
2896 	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
2897 		return NULL;
2898 
2899 	spin_lock(&_minor_lock);
2900 
2901 	md = idr_find(&_minor_idr, minor);
2902 	if (md) {
2903 		if ((md == MINOR_ALLOCED ||
2904 		     (MINOR(disk_devt(dm_disk(md))) != minor) ||
2905 		     dm_deleting_md(md) ||
2906 		     test_bit(DMF_FREEING, &md->flags))) {
2907 			md = NULL;
2908 			goto out;
2909 		}
2910 		dm_get(md);
2911 	}
2912 
2913 out:
2914 	spin_unlock(&_minor_lock);
2915 
2916 	return md;
2917 }
2918 EXPORT_SYMBOL_GPL(dm_get_md);
2919 
2920 void *dm_get_mdptr(struct mapped_device *md)
2921 {
2922 	return md->interface_ptr;
2923 }
2924 
2925 void dm_set_mdptr(struct mapped_device *md, void *ptr)
2926 {
2927 	md->interface_ptr = ptr;
2928 }
2929 
2930 void dm_get(struct mapped_device *md)
2931 {
2932 	atomic_inc(&md->holders);
2933 	BUG_ON(test_bit(DMF_FREEING, &md->flags));
2934 }
2935 
2936 int dm_hold(struct mapped_device *md)
2937 {
2938 	spin_lock(&_minor_lock);
2939 	if (test_bit(DMF_FREEING, &md->flags)) {
2940 		spin_unlock(&_minor_lock);
2941 		return -EBUSY;
2942 	}
2943 	dm_get(md);
2944 	spin_unlock(&_minor_lock);
2945 	return 0;
2946 }
2947 EXPORT_SYMBOL_GPL(dm_hold);
2948 
2949 const char *dm_device_name(struct mapped_device *md)
2950 {
2951 	return md->name;
2952 }
2953 EXPORT_SYMBOL_GPL(dm_device_name);
2954 
2955 static void __dm_destroy(struct mapped_device *md, bool wait)
2956 {
2957 	struct dm_table *map;
2958 	int srcu_idx;
2959 
2960 	might_sleep();
2961 
2962 	map = dm_get_live_table(md, &srcu_idx);
2963 
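	/*
	 * Park MINOR_ALLOCED back in the idr so that dm_get_md() stops
	 * handing out new references while the device is being torn down;
	 * the minor itself stays reserved until free_dev() -> free_minor().
	 */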
2964 	spin_lock(&_minor_lock);
2965 	idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
2966 	set_bit(DMF_FREEING, &md->flags);
2967 	spin_unlock(&_minor_lock);
2968 
2969 	if (dm_request_based(md) && md->kworker_task)
2970 		flush_kthread_worker(&md->kworker);
2971 
2972 	/*
2973 	 * Take suspend_lock so that presuspend and postsuspend methods
2974 	 * do not race with internal suspend.
2975 	 */
2976 	mutex_lock(&md->suspend_lock);
2977 	if (!dm_suspended_md(md)) {
2978 		dm_table_presuspend_targets(map);
2979 		dm_table_postsuspend_targets(map);
2980 	}
2981 	mutex_unlock(&md->suspend_lock);
2982 
2983 	/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
2984 	dm_put_live_table(md, srcu_idx);
2985 
2986 	/*
2987 	 * Rare, but there may still be, for example, I/O requests waiting
2988 	 * to complete.  Wait for all references to disappear.
2989 	 * No one should increment the reference count of the mapped_device
2990 	 * after its state becomes DMF_FREEING.
2991 	 */
2992 	if (wait)
2993 		while (atomic_read(&md->holders))
2994 			msleep(1);
2995 	else if (atomic_read(&md->holders))
2996 		DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
2997 		       dm_device_name(md), atomic_read(&md->holders));
2998 
2999 	dm_sysfs_exit(md);
3000 	dm_table_destroy(__unbind(md));
3001 	free_dev(md);
3002 }
3003 
3004 void dm_destroy(struct mapped_device *md)
3005 {
3006 	__dm_destroy(md, true);
3007 }
3008 
3009 void dm_destroy_immediate(struct mapped_device *md)
3010 {
3011 	__dm_destroy(md, false);
3012 }
3013 
3014 void dm_put(struct mapped_device *md)
3015 {
3016 	atomic_dec(&md->holders);
3017 }
3018 EXPORT_SYMBOL_GPL(dm_put);
3019 
3020 static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
3021 {
3022 	int r = 0;
3023 	DECLARE_WAITQUEUE(wait, current);
3024 
3025 	add_wait_queue(&md->wait, &wait);
3026 
3027 	while (1) {
3028 		set_current_state(interruptible);
3029 
3030 		if (!md_in_flight(md))
3031 			break;
3032 
3033 		if (interruptible == TASK_INTERRUPTIBLE &&
3034 		    signal_pending(current)) {
3035 			r = -EINTR;
3036 			break;
3037 		}
3038 
3039 		io_schedule();
3040 	}
3041 	set_current_state(TASK_RUNNING);
3042 
3043 	remove_wait_queue(&md->wait, &wait);
3044 
3045 	return r;
3046 }
3047 
3048 /*
3049  * Process the deferred bios
3050  */
3051 static void dm_wq_work(struct work_struct *work)
3052 {
3053 	struct mapped_device *md = container_of(work, struct mapped_device,
3054 						work);
3055 	struct bio *c;
3056 	int srcu_idx;
3057 	struct dm_table *map;
3058 
3059 	map = dm_get_live_table(md, &srcu_idx);
3060 
3061 	while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
3062 		spin_lock_irq(&md->deferred_lock);
3063 		c = bio_list_pop(&md->deferred);
3064 		spin_unlock_irq(&md->deferred_lock);
3065 
3066 		if (!c)
3067 			break;
3068 
3069 		if (dm_request_based(md))
3070 			generic_make_request(c);
3071 		else
3072 			__split_and_process_bio(md, map, c);
3073 	}
3074 
3075 	dm_put_live_table(md, srcu_idx);
3076 }
3077 
3078 static void dm_queue_flush(struct mapped_device *md)
3079 {
3080 	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
3081 	smp_mb__after_atomic();
3082 	queue_work(md->wq, &md->work);
3083 }
3084 
3085 /*
3086  * Swap in a new table, returning the old one for the caller to destroy.
3087  */
3088 struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
3089 {
3090 	struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
3091 	struct queue_limits limits;
3092 	int r;
3093 
3094 	mutex_lock(&md->suspend_lock);
3095 
3096 	/* device must be suspended */
3097 	if (!dm_suspended_md(md))
3098 		goto out;
3099 
3100 	/*
3101 	 * If the new table has no data devices, retain the existing limits.
3102 	 * This helps multipath with queue_if_no_path: if all paths disappear,
3103 	 * new I/O is queued based on these limits until some paths
3104 	 * reappear.
3105 	 */
3106 	if (dm_table_has_no_data_devices(table)) {
3107 		live_map = dm_get_live_table_fast(md);
3108 		if (live_map)
3109 			limits = md->queue->limits;
3110 		dm_put_live_table_fast(md);
3111 	}
3112 
3113 	if (!live_map) {
3114 		r = dm_calculate_queue_limits(table, &limits);
3115 		if (r) {
3116 			map = ERR_PTR(r);
3117 			goto out;
3118 		}
3119 	}
3120 
3121 	map = __bind(md, table, &limits);
3122 
3123 out:
3124 	mutex_unlock(&md->suspend_lock);
3125 	return map;
3126 }
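
/*
 * Illustrative sketch of how a caller might drive a table swap (loosely
 * modelled on the ioctl path; not part of this driver and with simplified
 * error handling): the device has to be suspended around dm_swap_table(),
 * and the old table it returns is the caller's to destroy.
 */
#if 0
static int example_replace_table(struct mapped_device *md,
				 struct dm_table *new_table)
{
	struct dm_table *old_map;
	int r;

	r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
	if (r)
		return r;

	old_map = dm_swap_table(md, new_table);
	if (IS_ERR(old_map)) {
		(void) dm_resume(md);
		return PTR_ERR(old_map);
	}

	if (old_map)
		dm_table_destroy(old_map);

	return dm_resume(md);
}
#endif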
3127 
3128 /*
3129  * Functions to lock and unlock any filesystem running on the
3130  * device.
3131  */
3132 static int lock_fs(struct mapped_device *md)
3133 {
3134 	int r;
3135 
3136 	WARN_ON(md->frozen_sb);
3137 
3138 	md->frozen_sb = freeze_bdev(md->bdev);
3139 	if (IS_ERR(md->frozen_sb)) {
3140 		r = PTR_ERR(md->frozen_sb);
3141 		md->frozen_sb = NULL;
3142 		return r;
3143 	}
3144 
3145 	set_bit(DMF_FROZEN, &md->flags);
3146 
3147 	return 0;
3148 }
3149 
3150 static void unlock_fs(struct mapped_device *md)
3151 {
3152 	if (!test_bit(DMF_FROZEN, &md->flags))
3153 		return;
3154 
3155 	thaw_bdev(md->bdev, md->frozen_sb);
3156 	md->frozen_sb = NULL;
3157 	clear_bit(DMF_FROZEN, &md->flags);
3158 }
3159 
3160 /*
3161  * If __dm_suspend returns 0, the device is completely quiescent
3162  * now. There is no request-processing activity. All new requests
3163  * are being added to the md->deferred list.
3164  *
3165  * Caller must hold md->suspend_lock
3166  */
3167 static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
3168 			unsigned suspend_flags, int interruptible)
3169 {
3170 	bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
3171 	bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
3172 	int r;
3173 
3174 	/*
3175 	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
3176 	 * This flag is cleared before dm_suspend returns.
3177 	 */
3178 	if (noflush)
3179 		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
3180 
3181 	/*
3182 	 * This gets reverted if there's an error later and the targets
3183 	 * provide the .presuspend_undo hook.
3184 	 */
3185 	dm_table_presuspend_targets(map);
3186 
3187 	/*
3188 	 * Flush I/O to the device.
3189 	 * Any I/O submitted after lock_fs() may not be flushed.
3190 	 * noflush takes precedence over do_lockfs.
3191 	 * (lock_fs() flushes I/Os and waits for them to complete.)
3192 	 */
3193 	if (!noflush && do_lockfs) {
3194 		r = lock_fs(md);
3195 		if (r) {
3196 			dm_table_presuspend_undo_targets(map);
3197 			return r;
3198 		}
3199 	}
3200 
3201 	/*
3202 	 * Here we must make sure that no processes are submitting requests
3203 	 * to target drivers i.e. no one may be executing
3204 	 * __split_and_process_bio. This is called from dm_request and
3205 	 * dm_wq_work.
3206 	 *
3207 	 * To get all processes out of __split_and_process_bio in dm_request,
3208 	 * we take the write lock. To prevent any process from reentering
3209 	 * __split_and_process_bio from dm_request and quiesce the thread
3210 	 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
3211 	 * flush_workqueue(md->wq).
3212 	 */
3213 	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
3214 	if (map)
3215 		synchronize_srcu(&md->io_barrier);
3216 
3217 	/*
3218 	 * Stop md->queue before flushing md->wq in case request-based
3219 	 * dm defers requests to md->wq from md->queue.
3220 	 */
3221 	if (dm_request_based(md)) {
3222 		stop_queue(md->queue);
3223 		if (md->kworker_task)
3224 			flush_kthread_worker(&md->kworker);
3225 	}
3226 
3227 	flush_workqueue(md->wq);
3228 
3229 	/*
3230 	 * At this point no more requests are entering target request routines.
3231 	 * We call dm_wait_for_completion to wait for all existing requests
3232 	 * to finish.
3233 	 */
3234 	r = dm_wait_for_completion(md, interruptible);
3235 
3236 	if (noflush)
3237 		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
3238 	if (map)
3239 		synchronize_srcu(&md->io_barrier);
3240 
3241 	/* were we interrupted ? */
3242 	if (r < 0) {
3243 		dm_queue_flush(md);
3244 
3245 		if (dm_request_based(md))
3246 			start_queue(md->queue);
3247 
3248 		unlock_fs(md);
3249 		dm_table_presuspend_undo_targets(map);
3250 		/* pushback list is already flushed, so skip flush */
3251 	}
3252 
3253 	return r;
3254 }
3255 
3256 /*
3257  * We need to be able to change a mapping table under a mounted
3258  * filesystem.  For example we might want to move some data in
3259  * the background.  Before the table can be swapped with
3260  * dm_bind_table, dm_suspend must be called to flush any in
3261  * flight bios and ensure that any further io gets deferred.
3262  */
3263 /*
3264  * Suspend mechanism in request-based dm.
3265  *
3266  * 1. Flush all I/Os by lock_fs() if needed.
3267  * 2. Stop dispatching any I/O by stopping the request_queue.
3268  * 3. Wait for all in-flight I/Os to be completed or requeued.
3269  *
3270  * To abort suspend, start the request_queue.
3271  */
3272 int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
3273 {
3274 	struct dm_table *map = NULL;
3275 	int r = 0;
3276 
3277 retry:
3278 	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
3279 
3280 	if (dm_suspended_md(md)) {
3281 		r = -EINVAL;
3282 		goto out_unlock;
3283 	}
3284 
3285 	if (dm_suspended_internally_md(md)) {
3286 		/* already internally suspended, wait for internal resume */
3287 		mutex_unlock(&md->suspend_lock);
3288 		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
3289 		if (r)
3290 			return r;
3291 		goto retry;
3292 	}
3293 
3294 	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
3295 
3296 	r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE);
3297 	if (r)
3298 		goto out_unlock;
3299 
3300 	set_bit(DMF_SUSPENDED, &md->flags);
3301 
3302 	dm_table_postsuspend_targets(map);
3303 
3304 out_unlock:
3305 	mutex_unlock(&md->suspend_lock);
3306 	return r;
3307 }
3308 
3309 static int __dm_resume(struct mapped_device *md, struct dm_table *map)
3310 {
3311 	if (map) {
3312 		int r = dm_table_resume_targets(map);
3313 		if (r)
3314 			return r;
3315 	}
3316 
3317 	dm_queue_flush(md);
3318 
3319 	/*
3320 	 * Flushing deferred I/Os must be done after targets are resumed
3321 	 * so that mapping of targets can work correctly.
3322 	 * Request-based dm queues the deferred I/Os in its request_queue.
3323 	 */
3324 	if (dm_request_based(md))
3325 		start_queue(md->queue);
3326 
3327 	unlock_fs(md);
3328 
3329 	return 0;
3330 }
3331 
3332 int dm_resume(struct mapped_device *md)
3333 {
3334 	int r = -EINVAL;
3335 	struct dm_table *map = NULL;
3336 
3337 retry:
3338 	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
3339 
3340 	if (!dm_suspended_md(md))
3341 		goto out;
3342 
3343 	if (dm_suspended_internally_md(md)) {
3344 		/* already internally suspended, wait for internal resume */
3345 		mutex_unlock(&md->suspend_lock);
3346 		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
3347 		if (r)
3348 			return r;
3349 		goto retry;
3350 	}
3351 
3352 	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
3353 	if (!map || !dm_table_get_size(map))
3354 		goto out;
3355 
3356 	r = __dm_resume(md, map);
3357 	if (r)
3358 		goto out;
3359 
3360 	clear_bit(DMF_SUSPENDED, &md->flags);
3361 
3362 	r = 0;
3363 out:
3364 	mutex_unlock(&md->suspend_lock);
3365 
3366 	return r;
3367 }
3368 
3369 /*
3370  * Internal suspend/resume works like userspace-driven suspend. It waits
3371  * until all bios finish and prevents issuing new bios to the target drivers.
3372  * It may be used only from the kernel.
3373  */
3374 
3375 static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags)
3376 {
3377 	struct dm_table *map = NULL;
3378 
3379 	if (md->internal_suspend_count++)
3380 		return; /* nested internal suspend */
3381 
3382 	if (dm_suspended_md(md)) {
3383 		set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
3384 		return; /* nest suspend */
3385 	}
3386 
3387 	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
3388 
3389 	/*
3390 	 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is
3391 	 * supported.  Properly supporting a TASK_INTERRUPTIBLE internal suspend
3392 	 * would require changing .presuspend to return an error -- avoid this
3393 	 * until there is a need for more elaborate variants of internal suspend.
3394 	 */
3395 	(void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE);
3396 
3397 	set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
3398 
3399 	dm_table_postsuspend_targets(map);
3400 }
3401 
3402 static void __dm_internal_resume(struct mapped_device *md)
3403 {
3404 	BUG_ON(!md->internal_suspend_count);
3405 
3406 	if (--md->internal_suspend_count)
3407 		return; /* resume from nested internal suspend */
3408 
3409 	if (dm_suspended_md(md))
3410 		goto done; /* resume from nested suspend */
3411 
3412 	/*
3413 	 * NOTE: existing callers don't need to call dm_table_resume_targets
3414 	 * (which may fail -- so best to avoid it for now by passing NULL map)
3415 	 */
3416 	(void) __dm_resume(md, NULL);
3417 
3418 done:
3419 	clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
3420 	smp_mb__after_atomic();
3421 	wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY);
3422 }
3423 
3424 void dm_internal_suspend_noflush(struct mapped_device *md)
3425 {
3426 	mutex_lock(&md->suspend_lock);
3427 	__dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
3428 	mutex_unlock(&md->suspend_lock);
3429 }
3430 EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush);
3431 
3432 void dm_internal_resume(struct mapped_device *md)
3433 {
3434 	mutex_lock(&md->suspend_lock);
3435 	__dm_internal_resume(md);
3436 	mutex_unlock(&md->suspend_lock);
3437 }
3438 EXPORT_SYMBOL_GPL(dm_internal_resume);
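
/*
 * Illustrative sketch (not part of this driver): a kernel-side caller is
 * expected to pair the internal suspend and resume around whatever work it
 * needs to do while no bios are being issued to the targets.
 */
#if 0
static void example_kernel_maintenance(struct mapped_device *md)
{
	dm_internal_suspend_noflush(md);

	/* ... operate on the quiesced device here ... */

	dm_internal_resume(md);
}
#endif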
3439 
3440 /*
3441  * Fast variants of internal suspend/resume hold md->suspend_lock,
3442  * which prevents interaction with userspace-driven suspend.
3443  */
3444 
3445 void dm_internal_suspend_fast(struct mapped_device *md)
3446 {
3447 	mutex_lock(&md->suspend_lock);
3448 	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
3449 		return;
3450 
3451 	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
3452 	synchronize_srcu(&md->io_barrier);
3453 	flush_workqueue(md->wq);
3454 	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
3455 }
3456 EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);
3457 
3458 void dm_internal_resume_fast(struct mapped_device *md)
3459 {
3460 	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
3461 		goto done;
3462 
3463 	dm_queue_flush(md);
3464 
3465 done:
3466 	mutex_unlock(&md->suspend_lock);
3467 }
3468 EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
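
/*
 * Note that the _fast variants form a strict pair: dm_internal_suspend_fast()
 * returns with md->suspend_lock held (including on its early-return path
 * above), and dm_internal_resume_fast() is what releases the lock again.
 */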
3469 
3470 /*-----------------------------------------------------------------
3471  * Event notification.
3472  *---------------------------------------------------------------*/
3473 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
3474 		       unsigned cookie)
3475 {
3476 	char udev_cookie[DM_COOKIE_LENGTH];
3477 	char *envp[] = { udev_cookie, NULL };
3478 
3479 	if (!cookie)
3480 		return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
3481 	else {
3482 		snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
3483 			 DM_COOKIE_ENV_VAR_NAME, cookie);
3484 		return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
3485 					  action, envp);
3486 	}
3487 }
3488 
3489 uint32_t dm_next_uevent_seq(struct mapped_device *md)
3490 {
3491 	return atomic_add_return(1, &md->uevent_seq);
3492 }
3493 
3494 uint32_t dm_get_event_nr(struct mapped_device *md)
3495 {
3496 	return atomic_read(&md->event_nr);
3497 }
3498 
3499 int dm_wait_event(struct mapped_device *md, int event_nr)
3500 {
3501 	return wait_event_interruptible(md->eventq,
3502 			(event_nr != atomic_read(&md->event_nr)));
3503 }
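
/*
 * Illustrative sketch of the intended wait pattern (not part of this
 * driver): sample the event counter first, then sleep until it advances,
 * i.e. until event_callback() has fired or a signal interrupts the wait.
 */
#if 0
static int example_wait_for_next_event(struct mapped_device *md)
{
	uint32_t event_nr = dm_get_event_nr(md);

	return dm_wait_event(md, event_nr);
}
#endif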
3504 
3505 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
3506 {
3507 	unsigned long flags;
3508 
3509 	spin_lock_irqsave(&md->uevent_lock, flags);
3510 	list_add(elist, &md->uevent_list);
3511 	spin_unlock_irqrestore(&md->uevent_lock, flags);
3512 }
3513 
3514 /*
3515  * The gendisk is only valid as long as you have a reference
3516  * count on 'md'.
3517  */
3518 struct gendisk *dm_disk(struct mapped_device *md)
3519 {
3520 	return md->disk;
3521 }
3522 EXPORT_SYMBOL_GPL(dm_disk);
3523 
3524 struct kobject *dm_kobject(struct mapped_device *md)
3525 {
3526 	return &md->kobj_holder.kobj;
3527 }
3528 
3529 struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
3530 {
3531 	struct mapped_device *md;
3532 
3533 	md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
3534 
3535 	if (test_bit(DMF_FREEING, &md->flags) ||
3536 	    dm_deleting_md(md))
3537 		return NULL;
3538 
3539 	dm_get(md);
3540 	return md;
3541 }
3542 
3543 int dm_suspended_md(struct mapped_device *md)
3544 {
3545 	return test_bit(DMF_SUSPENDED, &md->flags);
3546 }
3547 
3548 int dm_suspended_internally_md(struct mapped_device *md)
3549 {
3550 	return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
3551 }
3552 
3553 int dm_test_deferred_remove_flag(struct mapped_device *md)
3554 {
3555 	return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
3556 }
3557 
3558 int dm_suspended(struct dm_target *ti)
3559 {
3560 	return dm_suspended_md(dm_table_get_md(ti->table));
3561 }
3562 EXPORT_SYMBOL_GPL(dm_suspended);
3563 
3564 int dm_noflush_suspending(struct dm_target *ti)
3565 {
3566 	return __noflush_suspending(dm_table_get_md(ti->table));
3567 }
3568 EXPORT_SYMBOL_GPL(dm_noflush_suspending);
3569 
3570 struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned type,
3571 					    unsigned integrity, unsigned per_bio_data_size)
3572 {
3573 	struct dm_md_mempools *pools = kzalloc(sizeof(*pools), GFP_KERNEL);
3574 	struct kmem_cache *cachep = NULL;
3575 	unsigned int pool_size = 0;
3576 	unsigned int front_pad;
3577 
3578 	if (!pools)
3579 		return NULL;
3580 
3581 	type = filter_md_type(type, md);
3582 
3583 	switch (type) {
3584 	case DM_TYPE_BIO_BASED:
3585 		cachep = _io_cache;
3586 		pool_size = dm_get_reserved_bio_based_ios();
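		/*
		 * Bio-based clone bios are embedded at the end of a struct
		 * dm_target_io, preceded by the target's per-bio data, so
		 * pad the bioset allocations accordingly.
		 */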
3587 		front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
3588 		break;
3589 	case DM_TYPE_REQUEST_BASED:
3590 		cachep = _rq_tio_cache;
3591 		pool_size = dm_get_reserved_rq_based_ios();
3592 		pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache);
3593 		if (!pools->rq_pool)
3594 			goto out;
3595 		/* fall through to setup remaining rq-based pools */
3596 	case DM_TYPE_MQ_REQUEST_BASED:
3597 		if (!pool_size)
3598 			pool_size = dm_get_reserved_rq_based_ios();
3599 		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
3600 		/* per_bio_data_size is not used. See __bind_mempools(). */
3601 		WARN_ON(per_bio_data_size != 0);
3602 		break;
3603 	default:
3604 		BUG();
3605 	}
3606 
3607 	if (cachep) {
3608 		pools->io_pool = mempool_create_slab_pool(pool_size, cachep);
3609 		if (!pools->io_pool)
3610 			goto out;
3611 	}
3612 
3613 	pools->bs = bioset_create_nobvec(pool_size, front_pad);
3614 	if (!pools->bs)
3615 		goto out;
3616 
3617 	if (integrity && bioset_integrity_create(pools->bs, pool_size))
3618 		goto out;
3619 
3620 	return pools;
3621 
3622 out:
3623 	dm_free_md_mempools(pools);
3624 
3625 	return NULL;
3626 }
3627 
3628 void dm_free_md_mempools(struct dm_md_mempools *pools)
3629 {
3630 	if (!pools)
3631 		return;
3632 
3633 	if (pools->io_pool)
3634 		mempool_destroy(pools->io_pool);
3635 
3636 	if (pools->rq_pool)
3637 		mempool_destroy(pools->rq_pool);
3638 
3639 	if (pools->bs)
3640 		bioset_free(pools->bs);
3641 
3642 	kfree(pools);
3643 }
3644 
3645 static const struct block_device_operations dm_blk_dops = {
3646 	.open = dm_blk_open,
3647 	.release = dm_blk_close,
3648 	.ioctl = dm_blk_ioctl,
3649 	.getgeo = dm_blk_getgeo,
3650 	.owner = THIS_MODULE
3651 };
3652 
3653 /*
3654  * module hooks
3655  */
3656 module_init(dm_init);
3657 module_exit(dm_exit);
3658 
3659 module_param(major, uint, 0);
3660 MODULE_PARM_DESC(major, "The major number of the device mapper");
3661 
3662 module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
3663 MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
3664 
3665 module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
3666 MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");
3667 
3668 module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
3669 MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");
3670 
3671 MODULE_DESCRIPTION(DM_NAME " driver");
3672 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
3673 MODULE_LICENSE("GPL");
3674