xref: /openbmc/linux/drivers/md/dm.c (revision 0c380187)
1 /*
2  * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
3  * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
4  *
5  * This file is released under the GPL.
6  */
7 
8 #include "dm-core.h"
9 #include "dm-rq.h"
10 #include "dm-uevent.h"
11 
12 #include <linux/init.h>
13 #include <linux/module.h>
14 #include <linux/mutex.h>
15 #include <linux/sched/signal.h>
16 #include <linux/blkpg.h>
17 #include <linux/bio.h>
18 #include <linux/mempool.h>
19 #include <linux/slab.h>
20 #include <linux/idr.h>
21 #include <linux/hdreg.h>
22 #include <linux/delay.h>
23 #include <linux/wait.h>
24 #include <linux/pr.h>
25 
26 #define DM_MSG_PREFIX "core"
27 
28 #ifdef CONFIG_PRINTK
29 /*
30  * ratelimit state to be used in DMXXX_LIMIT().
31  */
32 DEFINE_RATELIMIT_STATE(dm_ratelimit_state,
33 		       DEFAULT_RATELIMIT_INTERVAL,
34 		       DEFAULT_RATELIMIT_BURST);
35 EXPORT_SYMBOL(dm_ratelimit_state);
36 #endif
37 
38 /*
39  * Cookies are numeric values sent with CHANGE and REMOVE
40  * uevents while resuming, removing or renaming the device.
41  */
42 #define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
43 #define DM_COOKIE_LENGTH 24
44 
45 static const char *_name = DM_NAME;
46 
47 static unsigned int major = 0;
48 static unsigned int _major = 0;
49 
50 static DEFINE_IDR(_minor_idr);
51 
52 static DEFINE_SPINLOCK(_minor_lock);
53 
54 static void do_deferred_remove(struct work_struct *w);
55 
56 static DECLARE_WORK(deferred_remove_work, do_deferred_remove);
57 
58 static struct workqueue_struct *deferred_remove_workqueue;
59 
60 /*
61  * One of these is allocated per bio.
62  */
63 struct dm_io {
64 	struct mapped_device *md;
65 	int error;
66 	atomic_t io_count;
67 	struct bio *bio;
68 	unsigned long start_time;
69 	spinlock_t endio_lock;
70 	struct dm_stats_aux stats_aux;
71 };
72 
73 #define MINOR_ALLOCED ((void *)-1)
74 
75 /*
76  * Bits for the md->flags field.
77  */
78 #define DMF_BLOCK_IO_FOR_SUSPEND 0
79 #define DMF_SUSPENDED 1
80 #define DMF_FROZEN 2
81 #define DMF_FREEING 3
82 #define DMF_DELETING 4
83 #define DMF_NOFLUSH_SUSPENDING 5
84 #define DMF_DEFERRED_REMOVE 6
85 #define DMF_SUSPENDED_INTERNALLY 7
86 
87 #define DM_NUMA_NODE NUMA_NO_NODE
88 static int dm_numa_node = DM_NUMA_NODE;
89 
90 /*
91  * For mempools pre-allocation at the table loading time.
92  */
93 struct dm_md_mempools {
94 	mempool_t *io_pool;
95 	struct bio_set *bs;
96 };
97 
98 struct table_device {
99 	struct list_head list;
100 	atomic_t count;
101 	struct dm_dev dm_dev;
102 };
103 
104 static struct kmem_cache *_io_cache;
105 static struct kmem_cache *_rq_tio_cache;
106 static struct kmem_cache *_rq_cache;
107 
108 /*
109  * Bio-based DM's mempools' reserved IOs set by the user.
110  */
111 #define RESERVED_BIO_BASED_IOS		16
112 static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
113 
114 static int __dm_get_module_param_int(int *module_param, int min, int max)
115 {
116 	int param = ACCESS_ONCE(*module_param);
117 	int modified_param = 0;
118 	bool modified = true;
119 
120 	if (param < min)
121 		modified_param = min;
122 	else if (param > max)
123 		modified_param = max;
124 	else
125 		modified = false;
126 
127 	if (modified) {
128 		(void)cmpxchg(module_param, param, modified_param);
129 		param = modified_param;
130 	}
131 
132 	return param;
133 }
134 
135 unsigned __dm_get_module_param(unsigned *module_param,
136 			       unsigned def, unsigned max)
137 {
138 	unsigned param = ACCESS_ONCE(*module_param);
139 	unsigned modified_param = 0;
140 
141 	if (!param)
142 		modified_param = def;
143 	else if (param > max)
144 		modified_param = max;
145 
146 	if (modified_param) {
147 		(void)cmpxchg(module_param, param, modified_param);
148 		param = modified_param;
149 	}
150 
151 	return param;
152 }
153 
154 unsigned dm_get_reserved_bio_based_ios(void)
155 {
156 	return __dm_get_module_param(&reserved_bio_based_ios,
157 				     RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
158 }
159 EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);
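
/*
 * Reads of reserved_bio_based_ios go through __dm_get_module_param()
 * so that an out-of-range value written via sysfs (assuming the
 * parameter is registered with module_param() elsewhere, typically
 * visible as /sys/module/dm_mod/parameters/reserved_bio_based_ios) is
 * clamped on the next read rather than rejected at write time.
 */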
160 
161 static unsigned dm_get_numa_node(void)
162 {
163 	return __dm_get_module_param_int(&dm_numa_node,
164 					 DM_NUMA_NODE, num_online_nodes() - 1);
165 }
166 
167 static int __init local_init(void)
168 {
169 	int r = -ENOMEM;
170 
171 	/* allocate a slab for the dm_ios */
172 	_io_cache = KMEM_CACHE(dm_io, 0);
173 	if (!_io_cache)
174 		return r;
175 
176 	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
177 	if (!_rq_tio_cache)
178 		goto out_free_io_cache;
179 
180 	_rq_cache = kmem_cache_create("dm_old_clone_request", sizeof(struct request),
181 				      __alignof__(struct request), 0, NULL);
182 	if (!_rq_cache)
183 		goto out_free_rq_tio_cache;
184 
185 	r = dm_uevent_init();
186 	if (r)
187 		goto out_free_rq_cache;
188 
189 	deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
190 	if (!deferred_remove_workqueue) {
191 		r = -ENOMEM;
192 		goto out_uevent_exit;
193 	}
194 
195 	_major = major;
196 	r = register_blkdev(_major, _name);
197 	if (r < 0)
198 		goto out_free_workqueue;
199 
200 	if (!_major)
201 		_major = r;
202 
203 	return 0;
204 
205 out_free_workqueue:
206 	destroy_workqueue(deferred_remove_workqueue);
207 out_uevent_exit:
208 	dm_uevent_exit();
209 out_free_rq_cache:
210 	kmem_cache_destroy(_rq_cache);
211 out_free_rq_tio_cache:
212 	kmem_cache_destroy(_rq_tio_cache);
213 out_free_io_cache:
214 	kmem_cache_destroy(_io_cache);
215 
216 	return r;
217 }
218 
219 static void local_exit(void)
220 {
221 	flush_scheduled_work();
222 	destroy_workqueue(deferred_remove_workqueue);
223 
224 	kmem_cache_destroy(_rq_cache);
225 	kmem_cache_destroy(_rq_tio_cache);
226 	kmem_cache_destroy(_io_cache);
227 	unregister_blkdev(_major, _name);
228 	dm_uevent_exit();
229 
230 	_major = 0;
231 
232 	DMINFO("cleaned up");
233 }
234 
235 static int (*_inits[])(void) __initdata = {
236 	local_init,
237 	dm_target_init,
238 	dm_linear_init,
239 	dm_stripe_init,
240 	dm_io_init,
241 	dm_kcopyd_init,
242 	dm_interface_init,
243 	dm_statistics_init,
244 };
245 
246 static void (*_exits[])(void) = {
247 	local_exit,
248 	dm_target_exit,
249 	dm_linear_exit,
250 	dm_stripe_exit,
251 	dm_io_exit,
252 	dm_kcopyd_exit,
253 	dm_interface_exit,
254 	dm_statistics_exit,
255 };
256 
257 static int __init dm_init(void)
258 {
259 	const int count = ARRAY_SIZE(_inits);
260 
261 	int r, i;
262 
263 	for (i = 0; i < count; i++) {
264 		r = _inits[i]();
265 		if (r)
266 			goto bad;
267 	}
268 
269 	return 0;
270 
271       bad:
272 	while (i--)
273 		_exits[i]();
274 
275 	return r;
276 }
277 
278 static void __exit dm_exit(void)
279 {
280 	int i = ARRAY_SIZE(_exits);
281 
282 	while (i--)
283 		_exits[i]();
284 
285 	/*
286 	 * Should be empty by this point.
287 	 */
288 	idr_destroy(&_minor_idr);
289 }
290 
291 /*
292  * Block device functions
293  */
294 int dm_deleting_md(struct mapped_device *md)
295 {
296 	return test_bit(DMF_DELETING, &md->flags);
297 }
298 
299 static int dm_blk_open(struct block_device *bdev, fmode_t mode)
300 {
301 	struct mapped_device *md;
302 
303 	spin_lock(&_minor_lock);
304 
305 	md = bdev->bd_disk->private_data;
306 	if (!md)
307 		goto out;
308 
309 	if (test_bit(DMF_FREEING, &md->flags) ||
310 	    dm_deleting_md(md)) {
311 		md = NULL;
312 		goto out;
313 	}
314 
315 	dm_get(md);
316 	atomic_inc(&md->open_count);
317 out:
318 	spin_unlock(&_minor_lock);
319 
320 	return md ? 0 : -ENXIO;
321 }
322 
323 static void dm_blk_close(struct gendisk *disk, fmode_t mode)
324 {
325 	struct mapped_device *md;
326 
327 	spin_lock(&_minor_lock);
328 
329 	md = disk->private_data;
330 	if (WARN_ON(!md))
331 		goto out;
332 
333 	if (atomic_dec_and_test(&md->open_count) &&
334 	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
335 		queue_work(deferred_remove_workqueue, &deferred_remove_work);
336 
337 	dm_put(md);
338 out:
339 	spin_unlock(&_minor_lock);
340 }
341 
342 int dm_open_count(struct mapped_device *md)
343 {
344 	return atomic_read(&md->open_count);
345 }
346 
347 /*
348  * Guarantees nothing is using the device before it's deleted.
349  */
350 int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
351 {
352 	int r = 0;
353 
354 	spin_lock(&_minor_lock);
355 
356 	if (dm_open_count(md)) {
357 		r = -EBUSY;
358 		if (mark_deferred)
359 			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
360 	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
361 		r = -EEXIST;
362 	else
363 		set_bit(DMF_DELETING, &md->flags);
364 
365 	spin_unlock(&_minor_lock);
366 
367 	return r;
368 }
369 
370 int dm_cancel_deferred_remove(struct mapped_device *md)
371 {
372 	int r = 0;
373 
374 	spin_lock(&_minor_lock);
375 
376 	if (test_bit(DMF_DELETING, &md->flags))
377 		r = -EBUSY;
378 	else
379 		clear_bit(DMF_DEFERRED_REMOVE, &md->flags);
380 
381 	spin_unlock(&_minor_lock);
382 
383 	return r;
384 }
385 
386 static void do_deferred_remove(struct work_struct *w)
387 {
388 	dm_deferred_remove();
389 }
390 
391 sector_t dm_get_size(struct mapped_device *md)
392 {
393 	return get_capacity(md->disk);
394 }
395 
396 struct request_queue *dm_get_md_queue(struct mapped_device *md)
397 {
398 	return md->queue;
399 }
400 
401 struct dm_stats *dm_get_stats(struct mapped_device *md)
402 {
403 	return &md->stats;
404 }
405 
406 static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
407 {
408 	struct mapped_device *md = bdev->bd_disk->private_data;
409 
410 	return dm_get_geometry(md, geo);
411 }
412 
413 static int dm_grab_bdev_for_ioctl(struct mapped_device *md,
414 				  struct block_device **bdev,
415 				  fmode_t *mode)
416 {
417 	struct dm_target *tgt;
418 	struct dm_table *map;
419 	int srcu_idx, r;
420 
421 retry:
422 	r = -ENOTTY;
423 	map = dm_get_live_table(md, &srcu_idx);
424 	if (!map || !dm_table_get_size(map))
425 		goto out;
426 
427 	/* We only support devices that have a single target */
428 	if (dm_table_get_num_targets(map) != 1)
429 		goto out;
430 
431 	tgt = dm_table_get_target(map, 0);
432 	if (!tgt->type->prepare_ioctl)
433 		goto out;
434 
435 	if (dm_suspended_md(md)) {
436 		r = -EAGAIN;
437 		goto out;
438 	}
439 
440 	r = tgt->type->prepare_ioctl(tgt, bdev, mode);
441 	if (r < 0)
442 		goto out;
443 
444 	bdgrab(*bdev);
445 	dm_put_live_table(md, srcu_idx);
446 	return r;
447 
448 out:
449 	dm_put_live_table(md, srcu_idx);
450 	if (r == -ENOTCONN && !fatal_signal_pending(current)) {
451 		msleep(10);
452 		goto retry;
453 	}
454 	return r;
455 }
456 
457 static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
458 			unsigned int cmd, unsigned long arg)
459 {
460 	struct mapped_device *md = bdev->bd_disk->private_data;
461 	int r;
462 
463 	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
464 	if (r < 0)
465 		return r;
466 
467 	if (r > 0) {
468 		/*
469 		 * Target determined this ioctl is being issued against a
470 		 * subset of the parent bdev; require extra privileges.
471 		 */
472 		if (!capable(CAP_SYS_RAWIO)) {
473 			DMWARN_LIMIT(
474 	"%s: sending ioctl %x to DM device without required privilege.",
475 				current->comm, cmd);
476 			r = -ENOIOCTLCMD;
477 			goto out;
478 		}
479 	}
480 
481 	r =  __blkdev_driver_ioctl(bdev, mode, cmd, arg);
482 out:
483 	bdput(bdev);
484 	return r;
485 }
486 
487 static struct dm_io *alloc_io(struct mapped_device *md)
488 {
489 	return mempool_alloc(md->io_pool, GFP_NOIO);
490 }
491 
492 static void free_io(struct mapped_device *md, struct dm_io *io)
493 {
494 	mempool_free(io, md->io_pool);
495 }
496 
497 static void free_tio(struct dm_target_io *tio)
498 {
499 	bio_put(&tio->clone);
500 }
501 
502 int md_in_flight(struct mapped_device *md)
503 {
504 	return atomic_read(&md->pending[READ]) +
505 	       atomic_read(&md->pending[WRITE]);
506 }
507 
508 static void start_io_acct(struct dm_io *io)
509 {
510 	struct mapped_device *md = io->md;
511 	struct bio *bio = io->bio;
512 	int cpu;
513 	int rw = bio_data_dir(bio);
514 
515 	io->start_time = jiffies;
516 
517 	cpu = part_stat_lock();
518 	part_round_stats(cpu, &dm_disk(md)->part0);
519 	part_stat_unlock();
520 	atomic_set(&dm_disk(md)->part0.in_flight[rw],
521 		atomic_inc_return(&md->pending[rw]));
522 
523 	if (unlikely(dm_stats_used(&md->stats)))
524 		dm_stats_account_io(&md->stats, bio_data_dir(bio),
525 				    bio->bi_iter.bi_sector, bio_sectors(bio),
526 				    false, 0, &io->stats_aux);
527 }
528 
529 static void end_io_acct(struct dm_io *io)
530 {
531 	struct mapped_device *md = io->md;
532 	struct bio *bio = io->bio;
533 	unsigned long duration = jiffies - io->start_time;
534 	int pending;
535 	int rw = bio_data_dir(bio);
536 
537 	generic_end_io_acct(rw, &dm_disk(md)->part0, io->start_time);
538 
539 	if (unlikely(dm_stats_used(&md->stats)))
540 		dm_stats_account_io(&md->stats, bio_data_dir(bio),
541 				    bio->bi_iter.bi_sector, bio_sectors(bio),
542 				    true, duration, &io->stats_aux);
543 
544 	/*
545 	 * After this is decremented the bio must not be touched if it is
546 	 * a flush.
547 	 */
548 	pending = atomic_dec_return(&md->pending[rw]);
549 	atomic_set(&dm_disk(md)->part0.in_flight[rw], pending);
550 	pending += atomic_read(&md->pending[rw^0x1]);
551 
552 	/* nudge anyone waiting on suspend queue */
553 	if (!pending)
554 		wake_up(&md->wait);
555 }
556 
557 /*
558  * Add the bio to the list of deferred io.
559  */
560 static void queue_io(struct mapped_device *md, struct bio *bio)
561 {
562 	unsigned long flags;
563 
564 	spin_lock_irqsave(&md->deferred_lock, flags);
565 	bio_list_add(&md->deferred, bio);
566 	spin_unlock_irqrestore(&md->deferred_lock, flags);
567 	queue_work(md->wq, &md->work);
568 }
569 
570 /*
571  * Everyone (including functions in this file) should use this
572  * function to access the md->map field, and make sure they call
573  * dm_put_live_table() when finished.
574  */
575 struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
576 {
577 	*srcu_idx = srcu_read_lock(&md->io_barrier);
578 
579 	return srcu_dereference(md->map, &md->io_barrier);
580 }
581 
582 void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
583 {
584 	srcu_read_unlock(&md->io_barrier, srcu_idx);
585 }
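
/*
 * Illustrative usage of the live-table accessors above (a sketch, not
 * a real caller): dm_put_live_table() must be called even when the
 * returned table is NULL, because the SRCU read lock is always taken.
 *
 *	int srcu_idx;
 *	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 *
 *	if (map) {
 *		... look up targets, issue clones, etc ...
 *	}
 *	dm_put_live_table(md, srcu_idx);
 */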
586 
587 void dm_sync_table(struct mapped_device *md)
588 {
589 	synchronize_srcu(&md->io_barrier);
590 	synchronize_rcu_expedited();
591 }
592 
593 /*
594  * A fast alternative to dm_get_live_table/dm_put_live_table.
595  * The caller must not block between these two functions.
596  */
597 static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
598 {
599 	rcu_read_lock();
600 	return rcu_dereference(md->map);
601 }
602 
603 static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
604 {
605 	rcu_read_unlock();
606 }
607 
608 /*
609  * Open a table device so we can use it as a map destination.
610  */
611 static int open_table_device(struct table_device *td, dev_t dev,
612 			     struct mapped_device *md)
613 {
614 	static char *_claim_ptr = "I belong to device-mapper";
615 	struct block_device *bdev;
616 
617 	int r;
618 
619 	BUG_ON(td->dm_dev.bdev);
620 
621 	bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _claim_ptr);
622 	if (IS_ERR(bdev))
623 		return PTR_ERR(bdev);
624 
625 	r = bd_link_disk_holder(bdev, dm_disk(md));
626 	if (r) {
627 		blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
628 		return r;
629 	}
630 
631 	td->dm_dev.bdev = bdev;
632 	return 0;
633 }
634 
635 /*
636  * Close a table device that we've been using.
637  */
638 static void close_table_device(struct table_device *td, struct mapped_device *md)
639 {
640 	if (!td->dm_dev.bdev)
641 		return;
642 
643 	bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
644 	blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
645 	td->dm_dev.bdev = NULL;
646 }
647 
648 static struct table_device *find_table_device(struct list_head *l, dev_t dev,
649 					      fmode_t mode) {
650 	struct table_device *td;
651 
652 	list_for_each_entry(td, l, list)
653 		if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
654 			return td;
655 
656 	return NULL;
657 }
658 
659 int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
660 			struct dm_dev **result) {
661 	int r;
662 	struct table_device *td;
663 
664 	mutex_lock(&md->table_devices_lock);
665 	td = find_table_device(&md->table_devices, dev, mode);
666 	if (!td) {
667 		td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
668 		if (!td) {
669 			mutex_unlock(&md->table_devices_lock);
670 			return -ENOMEM;
671 		}
672 
673 		td->dm_dev.mode = mode;
674 		td->dm_dev.bdev = NULL;
675 
676 		if ((r = open_table_device(td, dev, md))) {
677 			mutex_unlock(&md->table_devices_lock);
678 			kfree(td);
679 			return r;
680 		}
681 
682 		format_dev_t(td->dm_dev.name, dev);
683 
684 		atomic_set(&td->count, 0);
685 		list_add(&td->list, &md->table_devices);
686 	}
687 	atomic_inc(&td->count);
688 	mutex_unlock(&md->table_devices_lock);
689 
690 	*result = &td->dm_dev;
691 	return 0;
692 }
693 EXPORT_SYMBOL_GPL(dm_get_table_device);
694 
695 void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
696 {
697 	struct table_device *td = container_of(d, struct table_device, dm_dev);
698 
699 	mutex_lock(&md->table_devices_lock);
700 	if (atomic_dec_and_test(&td->count)) {
701 		close_table_device(td, md);
702 		list_del(&td->list);
703 		kfree(td);
704 	}
705 	mutex_unlock(&md->table_devices_lock);
706 }
707 EXPORT_SYMBOL(dm_put_table_device);
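
/*
 * Illustrative pairing (a sketch; the usual caller is dm_get_device()
 * in dm-table.c): each successful dm_get_table_device() must be
 * balanced by dm_put_table_device().
 *
 *	struct dm_dev *dd;
 *
 *	if (!dm_get_table_device(md, dev, mode, &dd)) {
 *		... use dd->bdev ...
 *		dm_put_table_device(md, dd);
 *	}
 */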
708 
709 static void free_table_devices(struct list_head *devices)
710 {
711 	struct list_head *tmp, *next;
712 
713 	list_for_each_safe(tmp, next, devices) {
714 		struct table_device *td = list_entry(tmp, struct table_device, list);
715 
716 		DMWARN("dm_destroy: %s still exists with %d references",
717 		       td->dm_dev.name, atomic_read(&td->count));
718 		kfree(td);
719 	}
720 }
721 
722 /*
723  * Get the geometry associated with a dm device
724  */
725 int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
726 {
727 	*geo = md->geometry;
728 
729 	return 0;
730 }
731 
732 /*
733  * Set the geometry of a device.
734  */
735 int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
736 {
737 	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;
738 
739 	if (geo->start > sz) {
740 		DMWARN("Start sector is beyond the geometry limits.");
741 		return -EINVAL;
742 	}
743 
744 	md->geometry = *geo;
745 
746 	return 0;
747 }
748 
749 /*-----------------------------------------------------------------
750  * CRUD START:
751  *   A more elegant soln is in the works that uses the queue
752  *   merge fn, unfortunately there are a couple of changes to
753  *   the block layer that I want to make for this.  So in the
754  *   interests of getting something for people to use I give
755  *   you this clearly demarcated crap.
756  *---------------------------------------------------------------*/
757 
758 static int __noflush_suspending(struct mapped_device *md)
759 {
760 	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
761 }
762 
763 /*
764  * Decrements the number of outstanding ios that a bio has been
765  * cloned into, completing the original io if necessary.
766  */
767 static void dec_pending(struct dm_io *io, int error)
768 {
769 	unsigned long flags;
770 	int io_error;
771 	struct bio *bio;
772 	struct mapped_device *md = io->md;
773 
774 	/* Push-back supersedes any I/O errors */
775 	if (unlikely(error)) {
776 		spin_lock_irqsave(&io->endio_lock, flags);
777 		if (!(io->error > 0 && __noflush_suspending(md)))
778 			io->error = error;
779 		spin_unlock_irqrestore(&io->endio_lock, flags);
780 	}
781 
782 	if (atomic_dec_and_test(&io->io_count)) {
783 		if (io->error == DM_ENDIO_REQUEUE) {
784 			/*
785 			 * Target requested pushing back the I/O.
786 			 */
787 			spin_lock_irqsave(&md->deferred_lock, flags);
788 			if (__noflush_suspending(md))
789 				bio_list_add_head(&md->deferred, io->bio);
790 			else
791 				/* noflush suspend was interrupted. */
792 				io->error = -EIO;
793 			spin_unlock_irqrestore(&md->deferred_lock, flags);
794 		}
795 
796 		io_error = io->error;
797 		bio = io->bio;
798 		end_io_acct(io);
799 		free_io(md, io);
800 
801 		if (io_error == DM_ENDIO_REQUEUE)
802 			return;
803 
804 		if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
805 			/*
806 			 * Preflush done for flush with data, reissue
807 			 * without REQ_PREFLUSH.
808 			 */
809 			bio->bi_opf &= ~REQ_PREFLUSH;
810 			queue_io(md, bio);
811 		} else {
812 			/* done with normal IO or empty flush */
813 			trace_block_bio_complete(md->queue, bio, io_error);
814 			bio->bi_error = io_error;
815 			bio_endio(bio);
816 		}
817 	}
818 }
819 
820 void disable_write_same(struct mapped_device *md)
821 {
822 	struct queue_limits *limits = dm_get_queue_limits(md);
823 
824 	/* device doesn't really support WRITE SAME, disable it */
825 	limits->max_write_same_sectors = 0;
826 }
827 
828 static void clone_endio(struct bio *bio)
829 {
830 	int error = bio->bi_error;
831 	int r = error;
832 	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
833 	struct dm_io *io = tio->io;
834 	struct mapped_device *md = tio->io->md;
835 	dm_endio_fn endio = tio->ti->type->end_io;
836 
837 	if (endio) {
838 		r = endio(tio->ti, bio, error);
839 		if (r < 0 || r == DM_ENDIO_REQUEUE)
840 			/*
841 			 * error and requeue request are handled
842 			 * in dec_pending().
843 			 */
844 			error = r;
845 		else if (r == DM_ENDIO_INCOMPLETE)
846 			/* The target will handle the io */
847 			return;
848 		else if (r) {
849 			DMWARN("unimplemented target endio return value: %d", r);
850 			BUG();
851 		}
852 	}
853 
854 	if (unlikely(r == -EREMOTEIO && (bio_op(bio) == REQ_OP_WRITE_SAME) &&
855 		     !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors))
856 		disable_write_same(md);
857 
858 	free_tio(tio);
859 	dec_pending(io, error);
860 }
861 
862 /*
863  * Return maximum size of I/O possible at the supplied sector up to the current
864  * target boundary.
865  */
866 static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
867 {
868 	sector_t target_offset = dm_target_offset(ti, sector);
869 
870 	return ti->len - target_offset;
871 }
872 
873 static sector_t max_io_len(sector_t sector, struct dm_target *ti)
874 {
875 	sector_t len = max_io_len_target_boundary(sector, ti);
876 	sector_t offset, max_len;
877 
878 	/*
879 	 * Does the target need to split even further?
880 	 */
881 	if (ti->max_io_len) {
882 		offset = dm_target_offset(ti, sector);
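		/*
		 * Compute how far the I/O starts into the current
		 * max_io_len-sized chunk (a cheap mask when max_io_len
		 * is a power of two, sector_div() otherwise); the
		 * distance left to the chunk boundary then caps the
		 * I/O, e.g. with max_io_len == 8 and an in-chunk offset
		 * of 3, at most 5 sectors may be issued.
		 */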
883 		if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
884 			max_len = sector_div(offset, ti->max_io_len);
885 		else
886 			max_len = offset & (ti->max_io_len - 1);
887 		max_len = ti->max_io_len - max_len;
888 
889 		if (len > max_len)
890 			len = max_len;
891 	}
892 
893 	return len;
894 }
895 
896 int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
897 {
898 	if (len > UINT_MAX) {
899 		DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
900 		      (unsigned long long)len, UINT_MAX);
901 		ti->error = "Maximum size of target IO is too large";
902 		return -EINVAL;
903 	}
904 
905 	ti->max_io_len = (uint32_t) len;
906 
907 	return 0;
908 }
909 EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
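
/*
 * Illustrative use (a sketch with a hypothetical target, not code from
 * this file): a target constructor that wants bios split at chunk
 * boundaries would typically do
 *
 *	static int example_ctr(struct dm_target *ti, unsigned argc, char **argv)
 *	{
 *		...
 *		int r = dm_set_target_max_io_len(ti, chunk_sectors);
 *		if (r)
 *			return r;
 *		...
 *	}
 */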
910 
911 static long dm_blk_direct_access(struct block_device *bdev, sector_t sector,
912 				 void **kaddr, pfn_t *pfn, long size)
913 {
914 	struct mapped_device *md = bdev->bd_disk->private_data;
915 	struct dm_table *map;
916 	struct dm_target *ti;
917 	int srcu_idx;
918 	long len, ret = -EIO;
919 
920 	map = dm_get_live_table(md, &srcu_idx);
921 	if (!map)
922 		goto out;
923 
924 	ti = dm_table_find_target(map, sector);
925 	if (!dm_target_is_valid(ti))
926 		goto out;
927 
928 	len = max_io_len(sector, ti) << SECTOR_SHIFT;
929 	size = min(len, size);
930 
931 	if (ti->type->direct_access)
932 		ret = ti->type->direct_access(ti, sector, kaddr, pfn, size);
933 out:
934 	dm_put_live_table(md, srcu_idx);
935 	return min(ret, size);
936 }
937 
938 /*
939  * A target may call dm_accept_partial_bio only from the map routine.  It is
940  * allowed for all bio types except REQ_PREFLUSH.
941  *
942  * dm_accept_partial_bio informs the dm that the target only wants to process
943  * additional n_sectors sectors of the bio and the rest of the data should be
944  * sent in a next bio.
945  *
946  * A diagram that explains the arithmetic:
947  * +--------------------+---------------+-------+
948  * |         1          |       2       |   3   |
949  * +--------------------+---------------+-------+
950  *
951  * <-------------- *tio->len_ptr --------------->
952  *                      <------- bi_size ------->
953  *                      <-- n_sectors -->
954  *
955  * Region 1 was already iterated over with bio_advance or similar function.
956  *	(it may be empty if the target doesn't use bio_advance)
957  * Region 2 is the remaining bio size that the target wants to process.
958  *	(it may be empty if region 1 is non-empty, although there is no reason
959  *	 to make it empty)
960  * The target requires that region 3 is to be sent in the next bio.
961  *
962  * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
963  * the partially processed part (the sum of regions 1+2) must be the same for all
964  * copies of the bio.
965  */
966 void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
967 {
968 	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
969 	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
970 	BUG_ON(bio->bi_opf & REQ_PREFLUSH);
971 	BUG_ON(bi_size > *tio->len_ptr);
972 	BUG_ON(n_sectors > bi_size);
973 	*tio->len_ptr -= bi_size - n_sectors;
974 	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
975 }
976 EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
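
/*
 * Illustrative use (a sketch with a hypothetical target): a map method
 * that can only handle the first "max_sectors" of a bio accepts that
 * much and lets dm core send the rest (region 3 above) in a new bio:
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		if (bio_sectors(bio) > max_sectors)
 *			dm_accept_partial_bio(bio, max_sectors);
 *		bio->bi_bdev = ...;
 *		return DM_MAPIO_REMAPPED;
 *	}
 */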
977 
978 /*
979  * Flush current->bio_list when the target map method blocks.
980  * This fixes deadlocks in snapshot and possibly in other targets.
981  */
982 struct dm_offload {
983 	struct blk_plug plug;
984 	struct blk_plug_cb cb;
985 };
986 
987 static void flush_current_bio_list(struct blk_plug_cb *cb, bool from_schedule)
988 {
989 	struct dm_offload *o = container_of(cb, struct dm_offload, cb);
990 	struct bio_list list;
991 	struct bio *bio;
992 
993 	INIT_LIST_HEAD(&o->cb.list);
994 
995 	if (unlikely(!current->bio_list))
996 		return;
997 
998 	list = *current->bio_list;
999 	bio_list_init(current->bio_list);
1000 
1001 	while ((bio = bio_list_pop(&list))) {
1002 		struct bio_set *bs = bio->bi_pool;
1003 		if (unlikely(!bs) || bs == fs_bio_set) {
1004 			bio_list_add(current->bio_list, bio);
1005 			continue;
1006 		}
1007 
1008 		spin_lock(&bs->rescue_lock);
1009 		bio_list_add(&bs->rescue_list, bio);
1010 		queue_work(bs->rescue_workqueue, &bs->rescue_work);
1011 		spin_unlock(&bs->rescue_lock);
1012 	}
1013 }
1014 
1015 static void dm_offload_start(struct dm_offload *o)
1016 {
1017 	blk_start_plug(&o->plug);
1018 	o->cb.callback = flush_current_bio_list;
1019 	list_add(&o->cb.list, &current->plug->cb_list);
1020 }
1021 
1022 static void dm_offload_end(struct dm_offload *o)
1023 {
1024 	list_del(&o->cb.list);
1025 	blk_finish_plug(&o->plug);
1026 }
1027 
1028 static void __map_bio(struct dm_target_io *tio)
1029 {
1030 	int r;
1031 	sector_t sector;
1032 	struct dm_offload o;
1033 	struct bio *clone = &tio->clone;
1034 	struct dm_target *ti = tio->ti;
1035 
1036 	clone->bi_end_io = clone_endio;
1037 
1038 	/*
1039 	 * Map the clone.  If r == 0 we don't need to do
1040 	 * anything, the target has assumed ownership of
1041 	 * this io.
1042 	 */
1043 	atomic_inc(&tio->io->io_count);
1044 	sector = clone->bi_iter.bi_sector;
1045 
1046 	dm_offload_start(&o);
1047 	r = ti->type->map(ti, clone);
1048 	dm_offload_end(&o);
1049 
1050 	if (r == DM_MAPIO_REMAPPED) {
1051 		/* the bio has been remapped so dispatch it */
1052 
1053 		trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone,
1054 				      tio->io->bio->bi_bdev->bd_dev, sector);
1055 
1056 		generic_make_request(clone);
1057 	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
1058 		/* error the io and bail out, or requeue it if needed */
1059 		dec_pending(tio->io, r);
1060 		free_tio(tio);
1061 	} else if (r != DM_MAPIO_SUBMITTED) {
1062 		DMWARN("unimplemented target map return value: %d", r);
1063 		BUG();
1064 	}
1065 }
1066 
1067 struct clone_info {
1068 	struct mapped_device *md;
1069 	struct dm_table *map;
1070 	struct bio *bio;
1071 	struct dm_io *io;
1072 	sector_t sector;
1073 	unsigned sector_count;
1074 };
1075 
1076 static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
1077 {
1078 	bio->bi_iter.bi_sector = sector;
1079 	bio->bi_iter.bi_size = to_bytes(len);
1080 }
1081 
1082 /*
1083  * Creates a bio that consists of range of complete bvecs.
1084  */
1085 static int clone_bio(struct dm_target_io *tio, struct bio *bio,
1086 		     sector_t sector, unsigned len)
1087 {
1088 	struct bio *clone = &tio->clone;
1089 
1090 	__bio_clone_fast(clone, bio);
1091 
1092 	if (bio_integrity(bio)) {
1093 		int r = bio_integrity_clone(clone, bio, GFP_NOIO);
1094 		if (r < 0)
1095 			return r;
1096 	}
1097 
1098 	bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
1099 	clone->bi_iter.bi_size = to_bytes(len);
1100 
1101 	if (bio_integrity(bio))
1102 		bio_integrity_trim(clone, 0, len);
1103 
1104 	return 0;
1105 }
1106 
1107 static struct dm_target_io *alloc_tio(struct clone_info *ci,
1108 				      struct dm_target *ti,
1109 				      unsigned target_bio_nr)
1110 {
1111 	struct dm_target_io *tio;
1112 	struct bio *clone;
1113 
1114 	clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs);
1115 	tio = container_of(clone, struct dm_target_io, clone);
1116 
1117 	tio->io = ci->io;
1118 	tio->ti = ti;
1119 	tio->target_bio_nr = target_bio_nr;
1120 
1121 	return tio;
1122 }
1123 
1124 static void __clone_and_map_simple_bio(struct clone_info *ci,
1125 				       struct dm_target *ti,
1126 				       unsigned target_bio_nr, unsigned *len)
1127 {
1128 	struct dm_target_io *tio = alloc_tio(ci, ti, target_bio_nr);
1129 	struct bio *clone = &tio->clone;
1130 
1131 	tio->len_ptr = len;
1132 
1133 	__bio_clone_fast(clone, ci->bio);
1134 	if (len)
1135 		bio_setup_sector(clone, ci->sector, *len);
1136 
1137 	__map_bio(tio);
1138 }
1139 
1140 static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
1141 				  unsigned num_bios, unsigned *len)
1142 {
1143 	unsigned target_bio_nr;
1144 
1145 	for (target_bio_nr = 0; target_bio_nr < num_bios; target_bio_nr++)
1146 		__clone_and_map_simple_bio(ci, ti, target_bio_nr, len);
1147 }
1148 
1149 static int __send_empty_flush(struct clone_info *ci)
1150 {
1151 	unsigned target_nr = 0;
1152 	struct dm_target *ti;
1153 
1154 	BUG_ON(bio_has_data(ci->bio));
1155 	while ((ti = dm_table_get_target(ci->map, target_nr++)))
1156 		__send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
1157 
1158 	return 0;
1159 }
1160 
1161 static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
1162 				     sector_t sector, unsigned *len)
1163 {
1164 	struct bio *bio = ci->bio;
1165 	struct dm_target_io *tio;
1166 	unsigned target_bio_nr;
1167 	unsigned num_target_bios = 1;
1168 	int r = 0;
1169 
1170 	/*
1171 	 * Does the target want to receive duplicate copies of the bio?
1172 	 */
1173 	if (bio_data_dir(bio) == WRITE && ti->num_write_bios)
1174 		num_target_bios = ti->num_write_bios(ti, bio);
1175 
1176 	for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) {
1177 		tio = alloc_tio(ci, ti, target_bio_nr);
1178 		tio->len_ptr = len;
1179 		r = clone_bio(tio, bio, sector, *len);
1180 		if (r < 0) {
1181 			free_tio(tio);
1182 			break;
1183 		}
1184 		__map_bio(tio);
1185 	}
1186 
1187 	return r;
1188 }
1189 
1190 typedef unsigned (*get_num_bios_fn)(struct dm_target *ti);
1191 
1192 static unsigned get_num_discard_bios(struct dm_target *ti)
1193 {
1194 	return ti->num_discard_bios;
1195 }
1196 
1197 static unsigned get_num_write_same_bios(struct dm_target *ti)
1198 {
1199 	return ti->num_write_same_bios;
1200 }
1201 
1202 typedef bool (*is_split_required_fn)(struct dm_target *ti);
1203 
1204 static bool is_split_required_for_discard(struct dm_target *ti)
1205 {
1206 	return ti->split_discard_bios;
1207 }
1208 
1209 static int __send_changing_extent_only(struct clone_info *ci,
1210 				       get_num_bios_fn get_num_bios,
1211 				       is_split_required_fn is_split_required)
1212 {
1213 	struct dm_target *ti;
1214 	unsigned len;
1215 	unsigned num_bios;
1216 
1217 	do {
1218 		ti = dm_table_find_target(ci->map, ci->sector);
1219 		if (!dm_target_is_valid(ti))
1220 			return -EIO;
1221 
1222 		/*
1223 		 * Even though the device advertised support for this type of
1224 		 * request, that does not mean every target supports it, and
1225 		 * reconfiguration might also have changed that since the
1226 		 * check was performed.
1227 		 */
1228 		num_bios = get_num_bios ? get_num_bios(ti) : 0;
1229 		if (!num_bios)
1230 			return -EOPNOTSUPP;
1231 
1232 		if (is_split_required && !is_split_required(ti))
1233 			len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
1234 		else
1235 			len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti));
1236 
1237 		__send_duplicate_bios(ci, ti, num_bios, &len);
1238 
1239 		ci->sector += len;
1240 	} while (ci->sector_count -= len);
1241 
1242 	return 0;
1243 }
1244 
1245 static int __send_discard(struct clone_info *ci)
1246 {
1247 	return __send_changing_extent_only(ci, get_num_discard_bios,
1248 					   is_split_required_for_discard);
1249 }
1250 
1251 static int __send_write_same(struct clone_info *ci)
1252 {
1253 	return __send_changing_extent_only(ci, get_num_write_same_bios, NULL);
1254 }
1255 
1256 /*
1257  * Select the correct strategy for processing a non-flush bio.
1258  */
1259 static int __split_and_process_non_flush(struct clone_info *ci)
1260 {
1261 	struct bio *bio = ci->bio;
1262 	struct dm_target *ti;
1263 	unsigned len;
1264 	int r;
1265 
1266 	if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
1267 		return __send_discard(ci);
1268 	else if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
1269 		return __send_write_same(ci);
1270 
1271 	ti = dm_table_find_target(ci->map, ci->sector);
1272 	if (!dm_target_is_valid(ti))
1273 		return -EIO;
1274 
1275 	len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);
1276 
1277 	r = __clone_and_map_data_bio(ci, ti, ci->sector, &len);
1278 	if (r < 0)
1279 		return r;
1280 
1281 	ci->sector += len;
1282 	ci->sector_count -= len;
1283 
1284 	return 0;
1285 }
1286 
1287 /*
1288  * Entry point to split a bio into clones and submit them to the targets.
1289  */
1290 static void __split_and_process_bio(struct mapped_device *md,
1291 				    struct dm_table *map, struct bio *bio)
1292 {
1293 	struct clone_info ci;
1294 	int error = 0;
1295 
1296 	if (unlikely(!map)) {
1297 		bio_io_error(bio);
1298 		return;
1299 	}
1300 
1301 	ci.map = map;
1302 	ci.md = md;
1303 	ci.io = alloc_io(md);
1304 	ci.io->error = 0;
1305 	atomic_set(&ci.io->io_count, 1);
1306 	ci.io->bio = bio;
1307 	ci.io->md = md;
1308 	spin_lock_init(&ci.io->endio_lock);
1309 	ci.sector = bio->bi_iter.bi_sector;
1310 
1311 	start_io_acct(ci.io);
1312 
1313 	if (bio->bi_opf & REQ_PREFLUSH) {
1314 		ci.bio = &ci.md->flush_bio;
1315 		ci.sector_count = 0;
1316 		error = __send_empty_flush(&ci);
1317 		/* dec_pending submits any data associated with flush */
1318 	} else {
1319 		ci.bio = bio;
1320 		ci.sector_count = bio_sectors(bio);
1321 		while (ci.sector_count && !error)
1322 			error = __split_and_process_non_flush(&ci);
1323 	}
1324 
1325 	/* drop the extra reference count */
1326 	dec_pending(ci.io, error);
1327 }
1328 /*-----------------------------------------------------------------
1329  * CRUD END
1330  *---------------------------------------------------------------*/
1331 
1332 /*
1333  * The request function.  Queues the bio for later if the device is
1334  * suspended, otherwise splits it into clones and remaps them to the targets.
1335  */
1336 static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
1337 {
1338 	int rw = bio_data_dir(bio);
1339 	struct mapped_device *md = q->queuedata;
1340 	int srcu_idx;
1341 	struct dm_table *map;
1342 
1343 	map = dm_get_live_table(md, &srcu_idx);
1344 
1345 	generic_start_io_acct(rw, bio_sectors(bio), &dm_disk(md)->part0);
1346 
1347 	/* if we're suspended, we have to queue this io for later */
1348 	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
1349 		dm_put_live_table(md, srcu_idx);
1350 
1351 		if (!(bio->bi_opf & REQ_RAHEAD))
1352 			queue_io(md, bio);
1353 		else
1354 			bio_io_error(bio);
1355 		return BLK_QC_T_NONE;
1356 	}
1357 
1358 	__split_and_process_bio(md, map, bio);
1359 	dm_put_live_table(md, srcu_idx);
1360 	return BLK_QC_T_NONE;
1361 }
1362 
1363 static int dm_any_congested(void *congested_data, int bdi_bits)
1364 {
1365 	int r = bdi_bits;
1366 	struct mapped_device *md = congested_data;
1367 	struct dm_table *map;
1368 
1369 	if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
1370 		if (dm_request_based(md)) {
1371 			/*
1372 			 * With request-based DM we only need to check the
1373 			 * top-level queue for congestion.
1374 			 */
1375 			r = md->queue->backing_dev_info->wb.state & bdi_bits;
1376 		} else {
1377 			map = dm_get_live_table_fast(md);
1378 			if (map)
1379 				r = dm_table_any_congested(map, bdi_bits);
1380 			dm_put_live_table_fast(md);
1381 		}
1382 	}
1383 
1384 	return r;
1385 }
1386 
1387 /*-----------------------------------------------------------------
1388  * An IDR is used to keep track of allocated minor numbers.
1389  *---------------------------------------------------------------*/
1390 static void free_minor(int minor)
1391 {
1392 	spin_lock(&_minor_lock);
1393 	idr_remove(&_minor_idr, minor);
1394 	spin_unlock(&_minor_lock);
1395 }
1396 
1397 /*
1398  * See if the device with a specific minor # is free.
1399  */
1400 static int specific_minor(int minor)
1401 {
1402 	int r;
1403 
1404 	if (minor >= (1 << MINORBITS))
1405 		return -EINVAL;
1406 
1407 	idr_preload(GFP_KERNEL);
1408 	spin_lock(&_minor_lock);
1409 
1410 	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);
1411 
1412 	spin_unlock(&_minor_lock);
1413 	idr_preload_end();
1414 	if (r < 0)
1415 		return r == -ENOSPC ? -EBUSY : r;
1416 	return 0;
1417 }
1418 
1419 static int next_free_minor(int *minor)
1420 {
1421 	int r;
1422 
1423 	idr_preload(GFP_KERNEL);
1424 	spin_lock(&_minor_lock);
1425 
1426 	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);
1427 
1428 	spin_unlock(&_minor_lock);
1429 	idr_preload_end();
1430 	if (r < 0)
1431 		return r;
1432 	*minor = r;
1433 	return 0;
1434 }
1435 
1436 static const struct block_device_operations dm_blk_dops;
1437 
1438 static void dm_wq_work(struct work_struct *work);
1439 
1440 void dm_init_md_queue(struct mapped_device *md)
1441 {
1442 	/*
1443 	 * Request-based dm devices cannot be stacked on top of bio-based dm
1444 	 * devices.  The type of this dm device may not have been decided yet.
1445 	 * The type is decided at the first table loading time.
1446 	 * To prevent problematic device stacking, clear the queue flag
1447 	 * for request stacking support until then.
1448 	 *
1449 	 * This queue is new, so no concurrency on the queue_flags.
1450 	 */
1451 	queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
1452 
1453 	/*
1454 	 * Initialize data that will only be used by a non-blk-mq DM queue
1455 	 * - must do so here (in alloc_dev callchain) before queue is used
1456 	 */
1457 	md->queue->queuedata = md;
1458 	md->queue->backing_dev_info->congested_data = md;
1459 }
1460 
1461 void dm_init_normal_md_queue(struct mapped_device *md)
1462 {
1463 	md->use_blk_mq = false;
1464 	dm_init_md_queue(md);
1465 
1466 	/*
1467 	 * Initialize aspects of queue that aren't relevant for blk-mq
1468 	 */
1469 	md->queue->backing_dev_info->congested_fn = dm_any_congested;
1470 	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
1471 }
1472 
1473 static void cleanup_mapped_device(struct mapped_device *md)
1474 {
1475 	if (md->wq)
1476 		destroy_workqueue(md->wq);
1477 	if (md->kworker_task)
1478 		kthread_stop(md->kworker_task);
1479 	mempool_destroy(md->io_pool);
1480 	if (md->bs)
1481 		bioset_free(md->bs);
1482 
1483 	if (md->disk) {
1484 		spin_lock(&_minor_lock);
1485 		md->disk->private_data = NULL;
1486 		spin_unlock(&_minor_lock);
1487 		del_gendisk(md->disk);
1488 		put_disk(md->disk);
1489 	}
1490 
1491 	if (md->queue)
1492 		blk_cleanup_queue(md->queue);
1493 
1494 	cleanup_srcu_struct(&md->io_barrier);
1495 
1496 	if (md->bdev) {
1497 		bdput(md->bdev);
1498 		md->bdev = NULL;
1499 	}
1500 
1501 	dm_mq_cleanup_mapped_device(md);
1502 }
1503 
1504 /*
1505  * Allocate and initialise a blank device with a given minor.
1506  */
1507 static struct mapped_device *alloc_dev(int minor)
1508 {
1509 	int r, numa_node_id = dm_get_numa_node();
1510 	struct mapped_device *md;
1511 	void *old_md;
1512 
1513 	md = kzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
1514 	if (!md) {
1515 		DMWARN("unable to allocate device, out of memory.");
1516 		return NULL;
1517 	}
1518 
1519 	if (!try_module_get(THIS_MODULE))
1520 		goto bad_module_get;
1521 
1522 	/* get a minor number for the dev */
1523 	if (minor == DM_ANY_MINOR)
1524 		r = next_free_minor(&minor);
1525 	else
1526 		r = specific_minor(minor);
1527 	if (r < 0)
1528 		goto bad_minor;
1529 
1530 	r = init_srcu_struct(&md->io_barrier);
1531 	if (r < 0)
1532 		goto bad_io_barrier;
1533 
1534 	md->numa_node_id = numa_node_id;
1535 	md->use_blk_mq = dm_use_blk_mq_default();
1536 	md->init_tio_pdu = false;
1537 	md->type = DM_TYPE_NONE;
1538 	mutex_init(&md->suspend_lock);
1539 	mutex_init(&md->type_lock);
1540 	mutex_init(&md->table_devices_lock);
1541 	spin_lock_init(&md->deferred_lock);
1542 	atomic_set(&md->holders, 1);
1543 	atomic_set(&md->open_count, 0);
1544 	atomic_set(&md->event_nr, 0);
1545 	atomic_set(&md->uevent_seq, 0);
1546 	INIT_LIST_HEAD(&md->uevent_list);
1547 	INIT_LIST_HEAD(&md->table_devices);
1548 	spin_lock_init(&md->uevent_lock);
1549 
1550 	md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id);
1551 	if (!md->queue)
1552 		goto bad;
1553 
1554 	dm_init_md_queue(md);
1555 
1556 	md->disk = alloc_disk_node(1, numa_node_id);
1557 	if (!md->disk)
1558 		goto bad;
1559 
1560 	atomic_set(&md->pending[0], 0);
1561 	atomic_set(&md->pending[1], 0);
1562 	init_waitqueue_head(&md->wait);
1563 	INIT_WORK(&md->work, dm_wq_work);
1564 	init_waitqueue_head(&md->eventq);
1565 	init_completion(&md->kobj_holder.completion);
1566 	md->kworker_task = NULL;
1567 
1568 	md->disk->major = _major;
1569 	md->disk->first_minor = minor;
1570 	md->disk->fops = &dm_blk_dops;
1571 	md->disk->queue = md->queue;
1572 	md->disk->private_data = md;
1573 	sprintf(md->disk->disk_name, "dm-%d", minor);
1574 	add_disk(md->disk);
1575 	format_dev_t(md->name, MKDEV(_major, minor));
1576 
1577 	md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
1578 	if (!md->wq)
1579 		goto bad;
1580 
1581 	md->bdev = bdget_disk(md->disk, 0);
1582 	if (!md->bdev)
1583 		goto bad;
1584 
1585 	bio_init(&md->flush_bio, NULL, 0);
1586 	md->flush_bio.bi_bdev = md->bdev;
1587 	md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
1588 
1589 	dm_stats_init(&md->stats);
1590 
1591 	/* Populate the mapping, nobody knows we exist yet */
1592 	spin_lock(&_minor_lock);
1593 	old_md = idr_replace(&_minor_idr, md, minor);
1594 	spin_unlock(&_minor_lock);
1595 
1596 	BUG_ON(old_md != MINOR_ALLOCED);
1597 
1598 	return md;
1599 
1600 bad:
1601 	cleanup_mapped_device(md);
1602 bad_io_barrier:
1603 	free_minor(minor);
1604 bad_minor:
1605 	module_put(THIS_MODULE);
1606 bad_module_get:
1607 	kfree(md);
1608 	return NULL;
1609 }
1610 
1611 static void unlock_fs(struct mapped_device *md);
1612 
1613 static void free_dev(struct mapped_device *md)
1614 {
1615 	int minor = MINOR(disk_devt(md->disk));
1616 
1617 	unlock_fs(md);
1618 
1619 	cleanup_mapped_device(md);
1620 
1621 	free_table_devices(&md->table_devices);
1622 	dm_stats_cleanup(&md->stats);
1623 	free_minor(minor);
1624 
1625 	module_put(THIS_MODULE);
1626 	kfree(md);
1627 }
1628 
1629 static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
1630 {
1631 	struct dm_md_mempools *p = dm_table_get_md_mempools(t);
1632 
1633 	if (md->bs) {
1634 		/* The md already has necessary mempools. */
1635 		if (dm_table_bio_based(t)) {
1636 			/*
1637 			 * Reload the bioset because front_pad may have changed
1638 			 * when a different table was loaded.
1639 			 */
1640 			bioset_free(md->bs);
1641 			md->bs = p->bs;
1642 			p->bs = NULL;
1643 		}
1644 		/*
1645 		 * There's no need to reload with request-based dm
1646 		 * because the size of front_pad doesn't change.
1647 		 * Note for future: if you reload the bioset,
1648 		 * prep-ed requests in the queue may still refer
1649 		 * to bios from the old bioset, so you must walk
1650 		 * through the queue to unprep them.
1651 		 */
1652 		goto out;
1653 	}
1654 
1655 	BUG_ON(!p || md->io_pool || md->bs);
1656 
1657 	md->io_pool = p->io_pool;
1658 	p->io_pool = NULL;
1659 	md->bs = p->bs;
1660 	p->bs = NULL;
1661 
1662 out:
1663 	/* mempool bind completed, no longer need any mempools in the table */
1664 	dm_table_free_md_mempools(t);
1665 }
1666 
1667 /*
1668  * Bind a table to the device.
1669  */
1670 static void event_callback(void *context)
1671 {
1672 	unsigned long flags;
1673 	LIST_HEAD(uevents);
1674 	struct mapped_device *md = (struct mapped_device *) context;
1675 
1676 	spin_lock_irqsave(&md->uevent_lock, flags);
1677 	list_splice_init(&md->uevent_list, &uevents);
1678 	spin_unlock_irqrestore(&md->uevent_lock, flags);
1679 
1680 	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
1681 
1682 	atomic_inc(&md->event_nr);
1683 	wake_up(&md->eventq);
1684 }
1685 
1686 /*
1687  * Protected by md->suspend_lock obtained by dm_swap_table().
1688  */
1689 static void __set_size(struct mapped_device *md, sector_t size)
1690 {
1691 	set_capacity(md->disk, size);
1692 
1693 	i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
1694 }
1695 
1696 /*
1697  * Returns old map, which caller must destroy.
1698  */
1699 static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
1700 			       struct queue_limits *limits)
1701 {
1702 	struct dm_table *old_map;
1703 	struct request_queue *q = md->queue;
1704 	sector_t size;
1705 
1706 	lockdep_assert_held(&md->suspend_lock);
1707 
1708 	size = dm_table_get_size(t);
1709 
1710 	/*
1711 	 * Wipe any geometry if the size of the table changed.
1712 	 */
1713 	if (size != dm_get_size(md))
1714 		memset(&md->geometry, 0, sizeof(md->geometry));
1715 
1716 	__set_size(md, size);
1717 
1718 	dm_table_event_callback(t, event_callback, md);
1719 
1720 	/*
1721 	 * The queue hasn't been stopped yet, if the old table type wasn't
1722 	 * for request-based during suspension.  So stop it to prevent
1723 	 * I/O mapping before resume.
1724 	 * This must be done before setting the queue restrictions,
1725 	 * because request-based dm may be run just after the setting.
1726 	 */
1727 	if (dm_table_request_based(t)) {
1728 		dm_stop_queue(q);
1729 		/*
1730 		 * Leverage the fact that request-based DM targets are
1731 		 * immutable singletons and establish md->immutable_target
1732 		 * - used to optimize both dm_request_fn and dm_mq_queue_rq
1733 		 */
1734 		md->immutable_target = dm_table_get_immutable_target(t);
1735 	}
1736 
1737 	__bind_mempools(md, t);
1738 
1739 	old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
1740 	rcu_assign_pointer(md->map, (void *)t);
1741 	md->immutable_target_type = dm_table_get_immutable_target_type(t);
1742 
1743 	dm_table_set_restrictions(t, q, limits);
1744 	if (old_map)
1745 		dm_sync_table(md);
1746 
1747 	return old_map;
1748 }
1749 
1750 /*
1751  * Returns unbound table for the caller to free.
1752  */
1753 static struct dm_table *__unbind(struct mapped_device *md)
1754 {
1755 	struct dm_table *map = rcu_dereference_protected(md->map, 1);
1756 
1757 	if (!map)
1758 		return NULL;
1759 
1760 	dm_table_event_callback(map, NULL, NULL);
1761 	RCU_INIT_POINTER(md->map, NULL);
1762 	dm_sync_table(md);
1763 
1764 	return map;
1765 }
1766 
1767 /*
1768  * Constructor for a new device.
1769  */
1770 int dm_create(int minor, struct mapped_device **result)
1771 {
1772 	struct mapped_device *md;
1773 
1774 	md = alloc_dev(minor);
1775 	if (!md)
1776 		return -ENXIO;
1777 
1778 	dm_sysfs_init(md);
1779 
1780 	*result = md;
1781 	return 0;
1782 }
1783 
1784 /*
1785  * Functions to manage md->type.
1786  * All are required to hold md->type_lock.
1787  */
1788 void dm_lock_md_type(struct mapped_device *md)
1789 {
1790 	mutex_lock(&md->type_lock);
1791 }
1792 
1793 void dm_unlock_md_type(struct mapped_device *md)
1794 {
1795 	mutex_unlock(&md->type_lock);
1796 }
1797 
1798 void dm_set_md_type(struct mapped_device *md, unsigned type)
1799 {
1800 	BUG_ON(!mutex_is_locked(&md->type_lock));
1801 	md->type = type;
1802 }
1803 
1804 unsigned dm_get_md_type(struct mapped_device *md)
1805 {
1806 	return md->type;
1807 }
1808 
1809 struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
1810 {
1811 	return md->immutable_target_type;
1812 }
1813 
1814 /*
1815  * The queue_limits are only valid as long as you have a reference
1816  * count on 'md'.
1817  */
1818 struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
1819 {
1820 	BUG_ON(!atomic_read(&md->holders));
1821 	return &md->queue->limits;
1822 }
1823 EXPORT_SYMBOL_GPL(dm_get_queue_limits);
1824 
1825 /*
1826  * Setup the DM device's queue based on md's type
1827  */
1828 int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
1829 {
1830 	int r;
1831 	unsigned type = dm_get_md_type(md);
1832 
1833 	switch (type) {
1834 	case DM_TYPE_REQUEST_BASED:
1835 		r = dm_old_init_request_queue(md, t);
1836 		if (r) {
1837 			DMERR("Cannot initialize queue for request-based mapped device");
1838 			return r;
1839 		}
1840 		break;
1841 	case DM_TYPE_MQ_REQUEST_BASED:
1842 		r = dm_mq_init_request_queue(md, t);
1843 		if (r) {
1844 			DMERR("Cannot initialize queue for request-based dm-mq mapped device");
1845 			return r;
1846 		}
1847 		break;
1848 	case DM_TYPE_BIO_BASED:
1849 	case DM_TYPE_DAX_BIO_BASED:
1850 		dm_init_normal_md_queue(md);
1851 		blk_queue_make_request(md->queue, dm_make_request);
1852 		/*
1853 		 * DM handles splitting bios as needed.  Free the bio_split bioset
1854 		 * since it won't be used (saves 1 process per bio-based DM device).
1855 		 */
1856 		bioset_free(md->queue->bio_split);
1857 		md->queue->bio_split = NULL;
1858 
1859 		if (type == DM_TYPE_DAX_BIO_BASED)
1860 			queue_flag_set_unlocked(QUEUE_FLAG_DAX, md->queue);
1861 		break;
1862 	}
1863 
1864 	return 0;
1865 }
1866 
1867 struct mapped_device *dm_get_md(dev_t dev)
1868 {
1869 	struct mapped_device *md;
1870 	unsigned minor = MINOR(dev);
1871 
1872 	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
1873 		return NULL;
1874 
1875 	spin_lock(&_minor_lock);
1876 
1877 	md = idr_find(&_minor_idr, minor);
1878 	if (md) {
1879 		if ((md == MINOR_ALLOCED ||
1880 		     (MINOR(disk_devt(dm_disk(md))) != minor) ||
1881 		     dm_deleting_md(md) ||
1882 		     test_bit(DMF_FREEING, &md->flags))) {
1883 			md = NULL;
1884 			goto out;
1885 		}
1886 		dm_get(md);
1887 	}
1888 
1889 out:
1890 	spin_unlock(&_minor_lock);
1891 
1892 	return md;
1893 }
1894 EXPORT_SYMBOL_GPL(dm_get_md);
1895 
1896 void *dm_get_mdptr(struct mapped_device *md)
1897 {
1898 	return md->interface_ptr;
1899 }
1900 
1901 void dm_set_mdptr(struct mapped_device *md, void *ptr)
1902 {
1903 	md->interface_ptr = ptr;
1904 }
1905 
1906 void dm_get(struct mapped_device *md)
1907 {
1908 	atomic_inc(&md->holders);
1909 	BUG_ON(test_bit(DMF_FREEING, &md->flags));
1910 }
1911 
1912 int dm_hold(struct mapped_device *md)
1913 {
1914 	spin_lock(&_minor_lock);
1915 	if (test_bit(DMF_FREEING, &md->flags)) {
1916 		spin_unlock(&_minor_lock);
1917 		return -EBUSY;
1918 	}
1919 	dm_get(md);
1920 	spin_unlock(&_minor_lock);
1921 	return 0;
1922 }
1923 EXPORT_SYMBOL_GPL(dm_hold);
1924 
1925 const char *dm_device_name(struct mapped_device *md)
1926 {
1927 	return md->name;
1928 }
1929 EXPORT_SYMBOL_GPL(dm_device_name);
1930 
1931 static void __dm_destroy(struct mapped_device *md, bool wait)
1932 {
1933 	struct request_queue *q = dm_get_md_queue(md);
1934 	struct dm_table *map;
1935 	int srcu_idx;
1936 
1937 	might_sleep();
1938 
1939 	spin_lock(&_minor_lock);
1940 	idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
1941 	set_bit(DMF_FREEING, &md->flags);
1942 	spin_unlock(&_minor_lock);
1943 
1944 	blk_set_queue_dying(q);
1945 
1946 	if (dm_request_based(md) && md->kworker_task)
1947 		kthread_flush_worker(&md->kworker);
1948 
1949 	/*
1950 	 * Take suspend_lock so that presuspend and postsuspend methods
1951 	 * do not race with internal suspend.
1952 	 */
1953 	mutex_lock(&md->suspend_lock);
1954 	map = dm_get_live_table(md, &srcu_idx);
1955 	if (!dm_suspended_md(md)) {
1956 		dm_table_presuspend_targets(map);
1957 		dm_table_postsuspend_targets(map);
1958 	}
1959 	/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
1960 	dm_put_live_table(md, srcu_idx);
1961 	mutex_unlock(&md->suspend_lock);
1962 
1963 	/*
1964 	 * Rare, but there may be I/O requests still going to complete,
1965 	 * for example.  Wait for all references to disappear.
1966 	 * No one should increment the reference count of the mapped_device
1967 	 * after the mapped_device state becomes DMF_FREEING.
1968 	 */
1969 	if (wait)
1970 		while (atomic_read(&md->holders))
1971 			msleep(1);
1972 	else if (atomic_read(&md->holders))
1973 		DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
1974 		       dm_device_name(md), atomic_read(&md->holders));
1975 
1976 	dm_sysfs_exit(md);
1977 	dm_table_destroy(__unbind(md));
1978 	free_dev(md);
1979 }
1980 
1981 void dm_destroy(struct mapped_device *md)
1982 {
1983 	__dm_destroy(md, true);
1984 }
1985 
1986 void dm_destroy_immediate(struct mapped_device *md)
1987 {
1988 	__dm_destroy(md, false);
1989 }
1990 
1991 void dm_put(struct mapped_device *md)
1992 {
1993 	atomic_dec(&md->holders);
1994 }
1995 EXPORT_SYMBOL_GPL(dm_put);
1996 
1997 static int dm_wait_for_completion(struct mapped_device *md, long task_state)
1998 {
1999 	int r = 0;
2000 	DEFINE_WAIT(wait);
2001 
2002 	while (1) {
2003 		prepare_to_wait(&md->wait, &wait, task_state);
2004 
2005 		if (!md_in_flight(md))
2006 			break;
2007 
2008 		if (signal_pending_state(task_state, current)) {
2009 			r = -EINTR;
2010 			break;
2011 		}
2012 
2013 		io_schedule();
2014 	}
2015 	finish_wait(&md->wait, &wait);
2016 
2017 	return r;
2018 }
2019 
2020 /*
2021  * Process the deferred bios
2022  */
2023 static void dm_wq_work(struct work_struct *work)
2024 {
2025 	struct mapped_device *md = container_of(work, struct mapped_device,
2026 						work);
2027 	struct bio *c;
2028 	int srcu_idx;
2029 	struct dm_table *map;
2030 
2031 	map = dm_get_live_table(md, &srcu_idx);
2032 
2033 	while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
2034 		spin_lock_irq(&md->deferred_lock);
2035 		c = bio_list_pop(&md->deferred);
2036 		spin_unlock_irq(&md->deferred_lock);
2037 
2038 		if (!c)
2039 			break;
2040 
2041 		if (dm_request_based(md))
2042 			generic_make_request(c);
2043 		else
2044 			__split_and_process_bio(md, map, c);
2045 	}
2046 
2047 	dm_put_live_table(md, srcu_idx);
2048 }
2049 
2050 static void dm_queue_flush(struct mapped_device *md)
2051 {
2052 	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2053 	smp_mb__after_atomic();
2054 	queue_work(md->wq, &md->work);
2055 }
2056 
2057 /*
2058  * Swap in a new table, returning the old one for the caller to destroy.
2059  */
2060 struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
2061 {
2062 	struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
2063 	struct queue_limits limits;
2064 	int r;
2065 
2066 	mutex_lock(&md->suspend_lock);
2067 
2068 	/* device must be suspended */
2069 	if (!dm_suspended_md(md))
2070 		goto out;
2071 
2072 	/*
2073 	 * If the new table has no data devices, retain the existing limits.
2074 	 * This helps multipath with queue_if_no_path: if all paths disappear,
2075 	 * new I/O is queued based on these limits until some paths
2076 	 * reappear.
2077 	 */
2078 	if (dm_table_has_no_data_devices(table)) {
2079 		live_map = dm_get_live_table_fast(md);
2080 		if (live_map)
2081 			limits = md->queue->limits;
2082 		dm_put_live_table_fast(md);
2083 	}
2084 
2085 	if (!live_map) {
2086 		r = dm_calculate_queue_limits(table, &limits);
2087 		if (r) {
2088 			map = ERR_PTR(r);
2089 			goto out;
2090 		}
2091 	}
2092 
2093 	map = __bind(md, table, &limits);
2094 
2095 out:
2096 	mutex_unlock(&md->suspend_lock);
2097 	return map;
2098 }
2099 
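/*
 * Illustrative caller pattern for dm_swap_table() (a simplified sketch, not
 * part of dm.c; it approximates what the DM ioctl table-load/resume path
 * does).  The device must already be suspended, the old table returned by
 * dm_swap_table() is the caller's to destroy, and I/O restarts on
 * dm_resume():
 *
 *	struct dm_table *old_map;
 *	int r;
 *
 *	r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
 *	if (r)
 *		return r;
 *
 *	old_map = dm_swap_table(md, new_map);
 *	if (IS_ERR(old_map)) {
 *		dm_resume(md);
 *		return PTR_ERR(old_map);
 *	}
 *	if (old_map)
 *		dm_table_destroy(old_map);
 *
 *	return dm_resume(md);
 */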
2100 /*
2101  * Functions to lock and unlock any filesystem running on the
2102  * device.
2103  */
2104 static int lock_fs(struct mapped_device *md)
2105 {
2106 	int r;
2107 
2108 	WARN_ON(md->frozen_sb);
2109 
2110 	md->frozen_sb = freeze_bdev(md->bdev);
2111 	if (IS_ERR(md->frozen_sb)) {
2112 		r = PTR_ERR(md->frozen_sb);
2113 		md->frozen_sb = NULL;
2114 		return r;
2115 	}
2116 
2117 	set_bit(DMF_FROZEN, &md->flags);
2118 
2119 	return 0;
2120 }
2121 
2122 static void unlock_fs(struct mapped_device *md)
2123 {
2124 	if (!test_bit(DMF_FROZEN, &md->flags))
2125 		return;
2126 
2127 	thaw_bdev(md->bdev, md->frozen_sb);
2128 	md->frozen_sb = NULL;
2129 	clear_bit(DMF_FROZEN, &md->flags);
2130 }
2131 
2132 /*
2133  * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG
2134  * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE
2135  * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY
2136  *
2137  * If __dm_suspend returns 0, the device is completely quiescent
2138  * now. There is no request-processing activity. All new requests
2139 	 * are added to the md->deferred list.
2140  *
2141  * Caller must hold md->suspend_lock
2142  */
2143 static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
2144 			unsigned suspend_flags, long task_state,
2145 			int dmf_suspended_flag)
2146 {
2147 	bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
2148 	bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
2149 	int r;
2150 
2151 	lockdep_assert_held(&md->suspend_lock);
2152 
2153 	/*
2154 	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
2155 	 * This flag is cleared before dm_suspend returns.
2156 	 */
2157 	if (noflush)
2158 		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2159 
2160 	/*
2161 	 * This gets reverted if there's an error later and the targets
2162 	 * provide the .presuspend_undo hook.
2163 	 */
2164 	dm_table_presuspend_targets(map);
2165 
2166 	/*
2167 	 * Flush I/O to the device.
2168 	 * Any I/O submitted after lock_fs() may not be flushed.
2169 	 * noflush takes precedence over do_lockfs.
2170 	 * (lock_fs() flushes I/Os and waits for them to complete.)
2171 	 */
2172 	if (!noflush && do_lockfs) {
2173 		r = lock_fs(md);
2174 		if (r) {
2175 			dm_table_presuspend_undo_targets(map);
2176 			return r;
2177 		}
2178 	}
2179 
2180 	/*
2181 	 * Here we must make sure that no processes are submitting requests
2182 	 * to target drivers i.e. no one may be executing
2183 	 * __split_and_process_bio. This is called from dm_request and
2184 	 * dm_wq_work.
2185 	 *
2186 	 * To get all processes out of __split_and_process_bio in dm_request,
2187 	 * we take the write lock. To prevent any process from reentering
2188 	 * __split_and_process_bio from dm_request and to quiesce the thread
2189 	 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
2190 	 * flush_workqueue(md->wq).
2191 	 */
2192 	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2193 	if (map)
2194 		synchronize_srcu(&md->io_barrier);
2195 
2196 	/*
2197 	 * Stop md->queue before flushing md->wq in case request-based
2198 	 * dm defers requests to md->wq from md->queue.
2199 	 */
2200 	if (dm_request_based(md)) {
2201 		dm_stop_queue(md->queue);
2202 		if (md->kworker_task)
2203 			kthread_flush_worker(&md->kworker);
2204 	}
2205 
2206 	flush_workqueue(md->wq);
2207 
2208 	/*
2209 	 * At this point no more requests are entering target request routines.
2210 	 * We call dm_wait_for_completion to wait for all existing requests
2211 	 * to finish.
2212 	 */
2213 	r = dm_wait_for_completion(md, task_state);
2214 	if (!r)
2215 		set_bit(dmf_suspended_flag, &md->flags);
2216 
2217 	if (noflush)
2218 		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2219 	if (map)
2220 		synchronize_srcu(&md->io_barrier);
2221 
2222 	/* were we interrupted ? */
2223 	if (r < 0) {
2224 		dm_queue_flush(md);
2225 
2226 		if (dm_request_based(md))
2227 			dm_start_queue(md->queue);
2228 
2229 		unlock_fs(md);
2230 		dm_table_presuspend_undo_targets(map);
2231 		/* pushback list is already flushed, so skip flush */
2232 	}
2233 
2234 	return r;
2235 }
2236 
2237 /*
2238  * We need to be able to change a mapping table under a mounted
2239  * filesystem.  For example we might want to move some data in
2240  * filesystem.  For example, we might want to move some data in
2241  * the background.  Before the table can be swapped with
2242  * dm_bind_table, dm_suspend must be called to flush any in-flight
2243  * bios and ensure that any further I/O gets deferred.
2244 /*
2245  * Suspend mechanism in request-based dm.
2246  *
2247  * 1. Flush all I/Os by lock_fs() if needed.
2248  * 2. Stop dispatching any I/O by stopping the request_queue.
2249  * 3. Wait for all in-flight I/Os to be completed or requeued.
2250  *
2251  * To abort suspend, start the request_queue.
2252  */
2253 int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
2254 {
2255 	struct dm_table *map = NULL;
2256 	int r = 0;
2257 
2258 retry:
2259 	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
2260 
2261 	if (dm_suspended_md(md)) {
2262 		r = -EINVAL;
2263 		goto out_unlock;
2264 	}
2265 
2266 	if (dm_suspended_internally_md(md)) {
2267 		/* already internally suspended, wait for internal resume */
2268 		mutex_unlock(&md->suspend_lock);
2269 		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
2270 		if (r)
2271 			return r;
2272 		goto retry;
2273 	}
2274 
2275 	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2276 
2277 	r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);
2278 	if (r)
2279 		goto out_unlock;
2280 
2281 	dm_table_postsuspend_targets(map);
2282 
2283 out_unlock:
2284 	mutex_unlock(&md->suspend_lock);
2285 	return r;
2286 }
2287 
2288 static int __dm_resume(struct mapped_device *md, struct dm_table *map)
2289 {
2290 	if (map) {
2291 		int r = dm_table_resume_targets(map);
2292 		if (r)
2293 			return r;
2294 	}
2295 
2296 	dm_queue_flush(md);
2297 
2298 	/*
2299 	 * Flushing deferred I/Os must be done after targets are resumed
2300 	 * so that mapping of targets can work correctly.
2301 	 * Request-based dm is queueing the deferred I/Os in its request_queue.
2302 	 * Request-based dm queues the deferred I/Os in its request_queue.
2303 	if (dm_request_based(md))
2304 		dm_start_queue(md->queue);
2305 
2306 	unlock_fs(md);
2307 
2308 	return 0;
2309 }
2310 
2311 int dm_resume(struct mapped_device *md)
2312 {
2313 	int r;
2314 	struct dm_table *map = NULL;
2315 
2316 retry:
2317 	r = -EINVAL;
2318 	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
2319 
2320 	if (!dm_suspended_md(md))
2321 		goto out;
2322 
2323 	if (dm_suspended_internally_md(md)) {
2324 		/* already internally suspended, wait for internal resume */
2325 		mutex_unlock(&md->suspend_lock);
2326 		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
2327 		if (r)
2328 			return r;
2329 		goto retry;
2330 	}
2331 
2332 	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2333 	if (!map || !dm_table_get_size(map))
2334 		goto out;
2335 
2336 	r = __dm_resume(md, map);
2337 	if (r)
2338 		goto out;
2339 
2340 	clear_bit(DMF_SUSPENDED, &md->flags);
2341 out:
2342 	mutex_unlock(&md->suspend_lock);
2343 
2344 	return r;
2345 }
2346 
2347 /*
2348  * Internal suspend/resume works like userspace-driven suspend. It waits
2349  * until all bios finish and prevents issuing new bios to the target drivers.
2350  * It may be used only from the kernel.
2351  */
2352 
2353 static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags)
2354 {
2355 	struct dm_table *map = NULL;
2356 
2357 	if (md->internal_suspend_count++)
2358 		return; /* nested internal suspend */
2359 
2360 	if (dm_suspended_md(md)) {
2361 		set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2362 		return; /* nest suspend */
2363 		return; /* nested suspend */
2364 
2365 	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2366 
2367 	/*
2368 	 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is
2369 	 * supported.  Properly supporting a TASK_INTERRUPTIBLE internal suspend
2370 	 * would require changing .presuspend to return an error -- avoid this
2371 	 * until there is a need for more elaborate variants of internal suspend.
2372 	 */
2373 	(void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
2374 			    DMF_SUSPENDED_INTERNALLY);
2375 
2376 	dm_table_postsuspend_targets(map);
2377 }
2378 
2379 static void __dm_internal_resume(struct mapped_device *md)
2380 {
2381 	BUG_ON(!md->internal_suspend_count);
2382 
2383 	if (--md->internal_suspend_count)
2384 		return; /* resume from nested internal suspend */
2385 
2386 	if (dm_suspended_md(md))
2387 		goto done; /* resume from nested suspend */
2388 
2389 	/*
2390 	 * NOTE: existing callers don't need to call dm_table_resume_targets
2391 	 * (which may fail -- so best to avoid it for now by passing NULL map)
2392 	 */
2393 	(void) __dm_resume(md, NULL);
2394 
2395 done:
2396 	clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2397 	smp_mb__after_atomic();
2398 	wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY);
2399 }
2400 
2401 void dm_internal_suspend_noflush(struct mapped_device *md)
2402 {
2403 	mutex_lock(&md->suspend_lock);
2404 	__dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
2405 	mutex_unlock(&md->suspend_lock);
2406 }
2407 EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush);
2408 
2409 void dm_internal_resume(struct mapped_device *md)
2410 {
2411 	mutex_lock(&md->suspend_lock);
2412 	__dm_internal_resume(md);
2413 	mutex_unlock(&md->suspend_lock);
2414 }
2415 EXPORT_SYMBOL_GPL(dm_internal_resume);
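/*
 * Illustrative in-kernel use (a sketch, not part of dm.c): bracket work that
 * must not race with bios reaching the targets.  Calls nest, so the device
 * only resumes when the outermost caller resumes, and a device that was
 * already suspended by userspace stays suspended afterwards.
 *
 *	dm_internal_suspend_noflush(md);
 *	... operate on the quiesced device ...
 *	dm_internal_resume(md);
 */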
2416 
2417 /*
2418  * Fast variants of internal suspend/resume hold md->suspend_lock,
2419  * which prevents interaction with userspace-driven suspend.
2420  */
2421 
2422 void dm_internal_suspend_fast(struct mapped_device *md)
2423 {
2424 	mutex_lock(&md->suspend_lock);
2425 	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
2426 		return;
2427 
2428 	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2429 	synchronize_srcu(&md->io_barrier);
2430 	flush_workqueue(md->wq);
2431 	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
2432 }
2433 EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);
2434 
2435 void dm_internal_resume_fast(struct mapped_device *md)
2436 {
2437 	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
2438 		goto done;
2439 
2440 	dm_queue_flush(md);
2441 
2442 done:
2443 	mutex_unlock(&md->suspend_lock);
2444 }
2445 EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
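/*
 * Illustrative use of the fast variants (a sketch, not part of dm.c):
 * dm_internal_suspend_fast() returns with md->suspend_lock held and
 * dm_internal_resume_fast() drops it, so the pair brackets a short critical
 * section in the same context:
 *
 *	dm_internal_suspend_fast(md);
 *	... read or reset in-core state while no I/O is in flight ...
 *	dm_internal_resume_fast(md);
 */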
2446 
2447 /*-----------------------------------------------------------------
2448  * Event notification.
2449  *---------------------------------------------------------------*/
2450 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
2451 		       unsigned cookie)
2452 {
2453 	char udev_cookie[DM_COOKIE_LENGTH];
2454 	char *envp[] = { udev_cookie, NULL };
2455 
2456 	if (!cookie)
2457 		return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
2458 	else {
2459 		snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
2460 			 DM_COOKIE_ENV_VAR_NAME, cookie);
2461 		return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
2462 					  action, envp);
2463 	}
2464 }
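/*
 * With a non-zero cookie the uevent carries an environment variable of the
 * form DM_COOKIE=<decimal value>; userspace (libdevmapper's udev
 * synchronisation) uses it to match the uevent to the ioctl that requested
 * it.
 */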
2465 
2466 uint32_t dm_next_uevent_seq(struct mapped_device *md)
2467 {
2468 	return atomic_add_return(1, &md->uevent_seq);
2469 }
2470 
2471 uint32_t dm_get_event_nr(struct mapped_device *md)
2472 {
2473 	return atomic_read(&md->event_nr);
2474 }
2475 
2476 int dm_wait_event(struct mapped_device *md, int event_nr)
2477 {
2478 	return wait_event_interruptible(md->eventq,
2479 			(event_nr != atomic_read(&md->event_nr)));
2480 }
2481 
2482 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
2483 {
2484 	unsigned long flags;
2485 
2486 	spin_lock_irqsave(&md->uevent_lock, flags);
2487 	list_add(elist, &md->uevent_list);
2488 	spin_unlock_irqrestore(&md->uevent_lock, flags);
2489 }
2490 
2491 /*
2492  * The gendisk is only valid as long as you have a reference
2493  * count on 'md'.
2494  */
2495 struct gendisk *dm_disk(struct mapped_device *md)
2496 {
2497 	return md->disk;
2498 }
2499 EXPORT_SYMBOL_GPL(dm_disk);
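/*
 * Illustrative use (a sketch, not part of dm.c): hold a reference across any
 * use of the gendisk.
 *
 *	struct gendisk *disk;
 *
 *	dm_get(md);
 *	disk = dm_disk(md);
 *	... use disk ...
 *	dm_put(md);
 */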
2500 
2501 struct kobject *dm_kobject(struct mapped_device *md)
2502 {
2503 	return &md->kobj_holder.kobj;
2504 }
2505 
2506 struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
2507 {
2508 	struct mapped_device *md;
2509 
2510 	md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
2511 
2512 	if (test_bit(DMF_FREEING, &md->flags) ||
2513 	    dm_deleting_md(md))
2514 		return NULL;
2515 
2516 	dm_get(md);
2517 	return md;
2518 }
2519 
2520 int dm_suspended_md(struct mapped_device *md)
2521 {
2522 	return test_bit(DMF_SUSPENDED, &md->flags);
2523 }
2524 
2525 int dm_suspended_internally_md(struct mapped_device *md)
2526 {
2527 	return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2528 }
2529 
2530 int dm_test_deferred_remove_flag(struct mapped_device *md)
2531 {
2532 	return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
2533 }
2534 
2535 int dm_suspended(struct dm_target *ti)
2536 {
2537 	return dm_suspended_md(dm_table_get_md(ti->table));
2538 }
2539 EXPORT_SYMBOL_GPL(dm_suspended);
2540 
2541 int dm_noflush_suspending(struct dm_target *ti)
2542 {
2543 	return __noflush_suspending(dm_table_get_md(ti->table));
2544 }
2545 EXPORT_SYMBOL_GPL(dm_noflush_suspending);
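/*
 * Illustrative target-side use (a sketch, not part of dm.c; the callback name
 * and signature are only an example of the pattern): during a noflush suspend
 * a target such as multipath requeues failed I/O instead of completing it
 * with an error.
 *
 *	static int example_rq_end_io(struct dm_target *ti, struct request *clone,
 *				     int error, union map_info *map_context)
 *	{
 *		if (error && dm_noflush_suspending(ti))
 *			return DM_ENDIO_REQUEUE;
 *		return error;
 *	}
 */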
2546 
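/*
 * For bio-based tables the bio_set created below reserves front_pad bytes
 * ahead of each clone bio, so the clone sits embedded at the tail of a
 * struct dm_target_io preceded by the target's per-io data; the owning tio
 * can then be recovered from the bio pointer.
 */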
2547 struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned type,
2548 					    unsigned integrity, unsigned per_io_data_size)
2549 {
2550 	struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
2551 	unsigned int pool_size = 0;
2552 	unsigned int front_pad;
2553 
2554 	if (!pools)
2555 		return NULL;
2556 
2557 	switch (type) {
2558 	case DM_TYPE_BIO_BASED:
2559 	case DM_TYPE_DAX_BIO_BASED:
2560 		pool_size = dm_get_reserved_bio_based_ios();
2561 		front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
2562 
2563 		pools->io_pool = mempool_create_slab_pool(pool_size, _io_cache);
2564 		if (!pools->io_pool)
2565 			goto out;
2566 		break;
2567 	case DM_TYPE_REQUEST_BASED:
2568 	case DM_TYPE_MQ_REQUEST_BASED:
2569 		pool_size = dm_get_reserved_rq_based_ios();
2570 		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
2571 		/* per_io_data_size is used for blk-mq pdu at queue allocation */
2572 		break;
2573 	default:
2574 		BUG();
2575 	}
2576 
2577 	pools->bs = bioset_create_nobvec(pool_size, front_pad);
2578 	if (!pools->bs)
2579 		goto out;
2580 
2581 	if (integrity && bioset_integrity_create(pools->bs, pool_size))
2582 		goto out;
2583 
2584 	return pools;
2585 
2586 out:
2587 	dm_free_md_mempools(pools);
2588 
2589 	return NULL;
2590 }
2591 
2592 void dm_free_md_mempools(struct dm_md_mempools *pools)
2593 {
2594 	if (!pools)
2595 		return;
2596 
2597 	mempool_destroy(pools->io_pool);
2598 
2599 	if (pools->bs)
2600 		bioset_free(pools->bs);
2601 
2602 	kfree(pools);
2603 }
2604 
2605 struct dm_pr {
2606 	u64	old_key;
2607 	u64	new_key;
2608 	u32	flags;
2609 	bool	fail_early;
2610 };
2611 
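/*
 * Run a persistent-reservation callout against every underlying device of
 * the live table's single target.  Returns -ENOTTY if there is no usable
 * table or it has more than one target, and -EINVAL if the target cannot
 * iterate its devices.
 */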
2612 static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn,
2613 		      void *data)
2614 {
2615 	struct mapped_device *md = bdev->bd_disk->private_data;
2616 	struct dm_table *table;
2617 	struct dm_target *ti;
2618 	int ret = -ENOTTY, srcu_idx;
2619 
2620 	table = dm_get_live_table(md, &srcu_idx);
2621 	if (!table || !dm_table_get_size(table))
2622 		goto out;
2623 
2624 	/* We only support devices that have a single target */
2625 	if (dm_table_get_num_targets(table) != 1)
2626 		goto out;
2627 	ti = dm_table_get_target(table, 0);
2628 
2629 	ret = -EINVAL;
2630 	if (!ti->type->iterate_devices)
2631 		goto out;
2632 
2633 	ret = ti->type->iterate_devices(ti, fn, data);
2634 out:
2635 	dm_put_live_table(md, srcu_idx);
2636 	return ret;
2637 }
2638 
2639 /*
2640  * For register / unregister we need to manually call out to every path.
2641  */
2642 static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev,
2643 			    sector_t start, sector_t len, void *data)
2644 {
2645 	struct dm_pr *pr = data;
2646 	const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
2647 
2648 	if (!ops || !ops->pr_register)
2649 		return -EOPNOTSUPP;
2650 	return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags);
2651 }
2652 
2653 static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
2654 			  u32 flags)
2655 {
2656 	struct dm_pr pr = {
2657 		.old_key	= old_key,
2658 		.new_key	= new_key,
2659 		.flags		= flags,
2660 		.fail_early	= true,
2661 	};
2662 	int ret;
2663 
2664 	ret = dm_call_pr(bdev, __dm_pr_register, &pr);
2665 	if (ret && new_key) {
2666 		/* unregister all paths if we failed to register any path */
2667 		pr.old_key = new_key;
2668 		pr.new_key = 0;
2669 		pr.flags = 0;
2670 		pr.fail_early = false;
2671 		dm_call_pr(bdev, __dm_pr_register, &pr);
2672 	}
2673 
2674 	return ret;
2675 }
2676 
2677 static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
2678 			 u32 flags)
2679 {
2680 	struct mapped_device *md = bdev->bd_disk->private_data;
2681 	const struct pr_ops *ops;
2682 	fmode_t mode;
2683 	int r;
2684 
2685 	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
2686 	if (r < 0)
2687 		return r;
2688 
2689 	ops = bdev->bd_disk->fops->pr_ops;
2690 	if (ops && ops->pr_reserve)
2691 		r = ops->pr_reserve(bdev, key, type, flags);
2692 	else
2693 		r = -EOPNOTSUPP;
2694 
2695 	bdput(bdev);
2696 	return r;
2697 }
2698 
2699 static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
2700 {
2701 	struct mapped_device *md = bdev->bd_disk->private_data;
2702 	const struct pr_ops *ops;
2703 	fmode_t mode;
2704 	int r;
2705 
2706 	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
2707 	if (r < 0)
2708 		return r;
2709 
2710 	ops = bdev->bd_disk->fops->pr_ops;
2711 	if (ops && ops->pr_release)
2712 		r = ops->pr_release(bdev, key, type);
2713 	else
2714 		r = -EOPNOTSUPP;
2715 
2716 	bdput(bdev);
2717 	return r;
2718 }
2719 
2720 static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
2721 			 enum pr_type type, bool abort)
2722 {
2723 	struct mapped_device *md = bdev->bd_disk->private_data;
2724 	const struct pr_ops *ops;
2725 	fmode_t mode;
2726 	int r;
2727 
2728 	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
2729 	if (r < 0)
2730 		return r;
2731 
2732 	ops = bdev->bd_disk->fops->pr_ops;
2733 	if (ops && ops->pr_preempt)
2734 		r = ops->pr_preempt(bdev, old_key, new_key, type, abort);
2735 	else
2736 		r = -EOPNOTSUPP;
2737 
2738 	bdput(bdev);
2739 	return r;
2740 }
2741 
2742 static int dm_pr_clear(struct block_device *bdev, u64 key)
2743 {
2744 	struct mapped_device *md = bdev->bd_disk->private_data;
2745 	const struct pr_ops *ops;
2746 	fmode_t mode;
2747 	int r;
2748 
2749 	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
2750 	if (r < 0)
2751 		return r;
2752 
2753 	ops = bdev->bd_disk->fops->pr_ops;
2754 	if (ops && ops->pr_clear)
2755 		r = ops->pr_clear(bdev, key);
2756 	else
2757 		r = -EOPNOTSUPP;
2758 
2759 	bdput(bdev);
2760 	return r;
2761 }
2762 
2763 static const struct pr_ops dm_pr_ops = {
2764 	.pr_register	= dm_pr_register,
2765 	.pr_reserve	= dm_pr_reserve,
2766 	.pr_release	= dm_pr_release,
2767 	.pr_preempt	= dm_pr_preempt,
2768 	.pr_clear	= dm_pr_clear,
2769 };
2770 
2771 static const struct block_device_operations dm_blk_dops = {
2772 	.open = dm_blk_open,
2773 	.release = dm_blk_close,
2774 	.ioctl = dm_blk_ioctl,
2775 	.direct_access = dm_blk_direct_access,
2776 	.getgeo = dm_blk_getgeo,
2777 	.pr_ops = &dm_pr_ops,
2778 	.owner = THIS_MODULE
2779 };
2780 
2781 /*
2782  * module hooks
2783  */
2784 module_init(dm_init);
2785 module_exit(dm_exit);
2786 
2787 module_param(major, uint, 0);
2788 MODULE_PARM_DESC(major, "The major number of the device mapper");
2789 
2790 module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
2791 MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
2792 
2793 module_param(dm_numa_node, int, S_IRUGO | S_IWUSR);
2794 MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");
2795 
2796 MODULE_DESCRIPTION(DM_NAME " driver");
2797 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
2798 MODULE_LICENSE("GPL");
2799