xref: /openbmc/linux/drivers/md/dm.c (revision cb1aaebe)
1 /*
2  * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
3  * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
4  *
5  * This file is released under the GPL.
6  */
7 
8 #include "dm-core.h"
9 #include "dm-rq.h"
10 #include "dm-uevent.h"
11 
12 #include <linux/init.h>
13 #include <linux/module.h>
14 #include <linux/mutex.h>
15 #include <linux/sched/signal.h>
16 #include <linux/blkpg.h>
17 #include <linux/bio.h>
18 #include <linux/mempool.h>
19 #include <linux/dax.h>
20 #include <linux/slab.h>
21 #include <linux/idr.h>
22 #include <linux/uio.h>
23 #include <linux/hdreg.h>
24 #include <linux/delay.h>
25 #include <linux/wait.h>
26 #include <linux/pr.h>
27 #include <linux/refcount.h>
28 
29 #define DM_MSG_PREFIX "core"
30 
31 /*
32  * Cookies are numeric values sent with CHANGE and REMOVE
33  * uevents while resuming, removing or renaming the device.
34  */
35 #define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
36 #define DM_COOKIE_LENGTH 24
37 
38 static const char *_name = DM_NAME;
39 
40 static unsigned int major = 0;
41 static unsigned int _major = 0;
42 
43 static DEFINE_IDR(_minor_idr);
44 
45 static DEFINE_SPINLOCK(_minor_lock);
46 
47 static void do_deferred_remove(struct work_struct *w);
48 
49 static DECLARE_WORK(deferred_remove_work, do_deferred_remove);
50 
51 static struct workqueue_struct *deferred_remove_workqueue;
52 
53 atomic_t dm_global_event_nr = ATOMIC_INIT(0);
54 DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);
55 
56 void dm_issue_global_event(void)
57 {
58 	atomic_inc(&dm_global_event_nr);
59 	wake_up(&dm_global_eventq);
60 }
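
/*
 * A minimal consumer sketch (illustrative, not taken from this file):
 * waiters snapshot the counter and sleep until it moves, e.g.
 *
 *	unsigned int seen = atomic_read(&dm_global_event_nr);
 *	wait_event_interruptible(dm_global_eventq,
 *			atomic_read(&dm_global_event_nr) != seen);
 *
 * Any dm_issue_global_event() issued after the snapshot wakes such a
 * waiter, so no event is lost between the read and the sleep.
 */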
61 
62 /*
63  * One of these is allocated (on-stack) per original bio.
64  */
65 struct clone_info {
66 	struct dm_table *map;
67 	struct bio *bio;
68 	struct dm_io *io;
69 	sector_t sector;
70 	unsigned sector_count;
71 };
72 
73 /*
74  * One of these is allocated per clone bio.
75  */
76 #define DM_TIO_MAGIC 7282014
77 struct dm_target_io {
78 	unsigned magic;
79 	struct dm_io *io;
80 	struct dm_target *ti;
81 	unsigned target_bio_nr;
82 	unsigned *len_ptr;
83 	bool inside_dm_io;
84 	struct bio clone;
85 };
86 
87 /*
88  * One of these is allocated per original bio.
89  * It contains the first clone used for that original.
90  */
91 #define DM_IO_MAGIC 5191977
92 struct dm_io {
93 	unsigned magic;
94 	struct mapped_device *md;
95 	blk_status_t status;
96 	atomic_t io_count;
97 	struct bio *orig_bio;
98 	unsigned long start_time;
99 	spinlock_t endio_lock;
100 	struct dm_stats_aux stats_aux;
101 	/* last member of dm_target_io is 'struct bio' */
102 	struct dm_target_io tio;
103 };
104 
105 void *dm_per_bio_data(struct bio *bio, size_t data_size)
106 {
107 	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
108 	if (!tio->inside_dm_io)
109 		return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
110 	return (char *)bio - offsetof(struct dm_target_io, clone) - offsetof(struct dm_io, tio) - data_size;
111 }
112 EXPORT_SYMBOL_GPL(dm_per_bio_data);
113 
114 struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
115 {
116 	struct dm_io *io = (struct dm_io *)((char *)data + data_size);
117 	if (io->magic == DM_IO_MAGIC)
118 		return (struct bio *)((char *)io + offsetof(struct dm_io, tio) + offsetof(struct dm_target_io, clone));
119 	BUG_ON(io->magic != DM_TIO_MAGIC);
120 	return (struct bio *)((char *)io + offsetof(struct dm_target_io, clone));
121 }
122 EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data);
123 
124 unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
125 {
126 	return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
127 }
128 EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);
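
/*
 * Layout sketch behind the three helpers above: the target's per-bio data
 * is the bioset front_pad placed immediately before either a dm_io (for
 * the first clone) or a bare dm_target_io (for additional clones):
 *
 *	[per-bio data][dm_io { ... dm_target_io tio { ... bio clone } }]
 *	[per-bio data][dm_target_io { ... bio clone }]
 *
 * dm_per_bio_data() and dm_bio_from_per_bio_data() walk these fixed
 * offsets in opposite directions; DM_IO_MAGIC/DM_TIO_MAGIC tell the two
 * layouts apart when only the data pointer is known.
 */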
129 
130 #define MINOR_ALLOCED ((void *)-1)
131 
132 /*
133  * Bits for the md->flags field.
134  */
135 #define DMF_BLOCK_IO_FOR_SUSPEND 0
136 #define DMF_SUSPENDED 1
137 #define DMF_FROZEN 2
138 #define DMF_FREEING 3
139 #define DMF_DELETING 4
140 #define DMF_NOFLUSH_SUSPENDING 5
141 #define DMF_DEFERRED_REMOVE 6
142 #define DMF_SUSPENDED_INTERNALLY 7
143 
144 #define DM_NUMA_NODE NUMA_NO_NODE
145 static int dm_numa_node = DM_NUMA_NODE;
146 
147 /*
148  * For mempools pre-allocation at the table loading time.
149  */
150 struct dm_md_mempools {
151 	struct bio_set bs;
152 	struct bio_set io_bs;
153 };
154 
155 struct table_device {
156 	struct list_head list;
157 	refcount_t count;
158 	struct dm_dev dm_dev;
159 };
160 
161 /*
162  * Bio-based DM's mempools' reserved IOs set by the user.
163  */
164 #define RESERVED_BIO_BASED_IOS		16
165 static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
166 
167 static int __dm_get_module_param_int(int *module_param, int min, int max)
168 {
169 	int param = READ_ONCE(*module_param);
170 	int modified_param = 0;
171 	bool modified = true;
172 
173 	if (param < min)
174 		modified_param = min;
175 	else if (param > max)
176 		modified_param = max;
177 	else
178 		modified = false;
179 
180 	if (modified) {
181 		(void)cmpxchg(module_param, param, modified_param);
182 		param = modified_param;
183 	}
184 
185 	return param;
186 }
187 
188 unsigned __dm_get_module_param(unsigned *module_param,
189 			       unsigned def, unsigned max)
190 {
191 	unsigned param = READ_ONCE(*module_param);
192 	unsigned modified_param = 0;
193 
194 	if (!param)
195 		modified_param = def;
196 	else if (param > max)
197 		modified_param = max;
198 
199 	if (modified_param) {
200 		(void)cmpxchg(module_param, param, modified_param);
201 		param = modified_param;
202 	}
203 
204 	return param;
205 }
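
/*
 * Note on the lock-free clamping above: readers always return an in-range
 * value, and cmpxchg() opportunistically writes the clamped value back.
 * Losing the cmpxchg() race against a concurrent parameter write is
 * harmless; the next reader simply clamps again.
 */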
206 
207 unsigned dm_get_reserved_bio_based_ios(void)
208 {
209 	return __dm_get_module_param(&reserved_bio_based_ios,
210 				     RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
211 }
212 EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);
213 
214 static unsigned dm_get_numa_node(void)
215 {
216 	return __dm_get_module_param_int(&dm_numa_node,
217 					 DM_NUMA_NODE, num_online_nodes() - 1);
218 }
219 
220 static int __init local_init(void)
221 {
222 	int r;
223 
224 	r = dm_uevent_init();
225 	if (r)
226 		return r;
227 
228 	deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
229 	if (!deferred_remove_workqueue) {
230 		r = -ENOMEM;
231 		goto out_uevent_exit;
232 	}
233 
234 	_major = major;
235 	r = register_blkdev(_major, _name);
236 	if (r < 0)
237 		goto out_free_workqueue;
238 
239 	if (!_major)
240 		_major = r;
241 
242 	return 0;
243 
244 out_free_workqueue:
245 	destroy_workqueue(deferred_remove_workqueue);
246 out_uevent_exit:
247 	dm_uevent_exit();
248 
249 	return r;
250 }
251 
252 static void local_exit(void)
253 {
254 	flush_scheduled_work();
255 	destroy_workqueue(deferred_remove_workqueue);
256 
257 	unregister_blkdev(_major, _name);
258 	dm_uevent_exit();
259 
260 	_major = 0;
261 
262 	DMINFO("cleaned up");
263 }
264 
265 static int (*_inits[])(void) __initdata = {
266 	local_init,
267 	dm_target_init,
268 	dm_linear_init,
269 	dm_stripe_init,
270 	dm_io_init,
271 	dm_kcopyd_init,
272 	dm_interface_init,
273 	dm_statistics_init,
274 };
275 
276 static void (*_exits[])(void) = {
277 	local_exit,
278 	dm_target_exit,
279 	dm_linear_exit,
280 	dm_stripe_exit,
281 	dm_io_exit,
282 	dm_kcopyd_exit,
283 	dm_interface_exit,
284 	dm_statistics_exit,
285 };
286 
287 static int __init dm_init(void)
288 {
289 	const int count = ARRAY_SIZE(_inits);
290 
291 	int r, i;
292 
293 	for (i = 0; i < count; i++) {
294 		r = _inits[i]();
295 		if (r)
296 			goto bad;
297 	}
298 
299 	return 0;
300 
301 bad:
302 	while (i--)
303 		_exits[i]();
304 
305 	return r;
306 }
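
/*
 * The paired _inits[]/_exits[] arrays give dm_init() transactional
 * behaviour: if step i fails, "while (i--)" unwinds only the steps that
 * already succeeded, in reverse order. This relies on the two arrays
 * being kept in the same order.
 */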
307 
308 static void __exit dm_exit(void)
309 {
310 	int i = ARRAY_SIZE(_exits);
311 
312 	while (i--)
313 		_exits[i]();
314 
315 	/*
316 	 * Should be empty by this point.
317 	 */
318 	idr_destroy(&_minor_idr);
319 }
320 
321 /*
322  * Block device functions
323  */
324 int dm_deleting_md(struct mapped_device *md)
325 {
326 	return test_bit(DMF_DELETING, &md->flags);
327 }
328 
329 static int dm_blk_open(struct block_device *bdev, fmode_t mode)
330 {
331 	struct mapped_device *md;
332 
333 	spin_lock(&_minor_lock);
334 
335 	md = bdev->bd_disk->private_data;
336 	if (!md)
337 		goto out;
338 
339 	if (test_bit(DMF_FREEING, &md->flags) ||
340 	    dm_deleting_md(md)) {
341 		md = NULL;
342 		goto out;
343 	}
344 
345 	dm_get(md);
346 	atomic_inc(&md->open_count);
347 out:
348 	spin_unlock(&_minor_lock);
349 
350 	return md ? 0 : -ENXIO;
351 }
352 
353 static void dm_blk_close(struct gendisk *disk, fmode_t mode)
354 {
355 	struct mapped_device *md;
356 
357 	spin_lock(&_minor_lock);
358 
359 	md = disk->private_data;
360 	if (WARN_ON(!md))
361 		goto out;
362 
363 	if (atomic_dec_and_test(&md->open_count) &&
364 	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
365 		queue_work(deferred_remove_workqueue, &deferred_remove_work);
366 
367 	dm_put(md);
368 out:
369 	spin_unlock(&_minor_lock);
370 }
371 
372 int dm_open_count(struct mapped_device *md)
373 {
374 	return atomic_read(&md->open_count);
375 }
376 
377 /*
378  * Guarantees nothing is using the device before it's deleted.
379  */
380 int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
381 {
382 	int r = 0;
383 
384 	spin_lock(&_minor_lock);
385 
386 	if (dm_open_count(md)) {
387 		r = -EBUSY;
388 		if (mark_deferred)
389 			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
390 	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
391 		r = -EEXIST;
392 	else
393 		set_bit(DMF_DELETING, &md->flags);
394 
395 	spin_unlock(&_minor_lock);
396 
397 	return r;
398 }
399 
400 int dm_cancel_deferred_remove(struct mapped_device *md)
401 {
402 	int r = 0;
403 
404 	spin_lock(&_minor_lock);
405 
406 	if (test_bit(DMF_DELETING, &md->flags))
407 		r = -EBUSY;
408 	else
409 		clear_bit(DMF_DEFERRED_REMOVE, &md->flags);
410 
411 	spin_unlock(&_minor_lock);
412 
413 	return r;
414 }
415 
416 static void do_deferred_remove(struct work_struct *w)
417 {
418 	dm_deferred_remove();
419 }
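
/*
 * Deferred-removal flow, restated: if the device is open when removal is
 * requested, dm_lock_for_deletion() sets DMF_DEFERRED_REMOVE and returns
 * -EBUSY; the final dm_blk_close() then queues deferred_remove_work,
 * whose worker (above) finishes the removal. dm_cancel_deferred_remove()
 * can back out of this at any point before DMF_DELETING is set.
 */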
420 
421 sector_t dm_get_size(struct mapped_device *md)
422 {
423 	return get_capacity(md->disk);
424 }
425 
426 struct request_queue *dm_get_md_queue(struct mapped_device *md)
427 {
428 	return md->queue;
429 }
430 
431 struct dm_stats *dm_get_stats(struct mapped_device *md)
432 {
433 	return &md->stats;
434 }
435 
436 static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
437 {
438 	struct mapped_device *md = bdev->bd_disk->private_data;
439 
440 	return dm_get_geometry(md, geo);
441 }
442 
443 static int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
444 			       struct blk_zone *zones, unsigned int *nr_zones,
445 			       gfp_t gfp_mask)
446 {
447 #ifdef CONFIG_BLK_DEV_ZONED
448 	struct mapped_device *md = disk->private_data;
449 	struct dm_target *tgt;
450 	struct dm_table *map;
451 	int srcu_idx, ret;
452 
453 	if (dm_suspended_md(md))
454 		return -EAGAIN;
455 
456 	map = dm_get_live_table(md, &srcu_idx);
457 	if (!map)
458 		return -EIO;
459 
460 	tgt = dm_table_find_target(map, sector);
461 	if (!dm_target_is_valid(tgt)) {
462 		ret = -EIO;
463 		goto out;
464 	}
465 
466 	/*
467 	 * If we are executing this, we already know that the block device
468 	 * is a zoned device and so each target should have support for that
469 	 * type of drive. A missing report_zones method means that the target
470 	 * driver has a problem.
471 	 */
472 	if (WARN_ON(!tgt->type->report_zones)) {
473 		ret = -EIO;
474 		goto out;
475 	}
476 
477 	/*
478 	 * blkdev_report_zones() will loop and call this again to cover all the
479 	 * zones of the target, eventually moving on to the next target.
480 	 * So there is no need to loop here trying to fill the entire array
481 	 * of zones.
482 	 */
483 	ret = tgt->type->report_zones(tgt, sector, zones,
484 				      nr_zones, gfp_mask);
485 
486 out:
487 	dm_put_live_table(md, srcu_idx);
488 	return ret;
489 #else
490 	return -ENOTSUPP;
491 #endif
492 }
493 
494 static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
495 			    struct block_device **bdev)
496 	__acquires(md->io_barrier)
497 {
498 	struct dm_target *tgt;
499 	struct dm_table *map;
500 	int r;
501 
502 retry:
503 	r = -ENOTTY;
504 	map = dm_get_live_table(md, srcu_idx);
505 	if (!map || !dm_table_get_size(map))
506 		return r;
507 
508 	/* We only support devices that have a single target */
509 	if (dm_table_get_num_targets(map) != 1)
510 		return r;
511 
512 	tgt = dm_table_get_target(map, 0);
513 	if (!tgt->type->prepare_ioctl)
514 		return r;
515 
516 	if (dm_suspended_md(md))
517 		return -EAGAIN;
518 
519 	r = tgt->type->prepare_ioctl(tgt, bdev);
520 	if (r == -ENOTCONN && !fatal_signal_pending(current)) {
521 		dm_put_live_table(md, *srcu_idx);
522 		msleep(10);
523 		goto retry;
524 	}
525 
526 	return r;
527 }
528 
529 static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
530 	__releases(md->io_barrier)
531 {
532 	dm_put_live_table(md, srcu_idx);
533 }
534 
535 static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
536 			unsigned int cmd, unsigned long arg)
537 {
538 	struct mapped_device *md = bdev->bd_disk->private_data;
539 	int r, srcu_idx;
540 
541 	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
542 	if (r < 0)
543 		goto out;
544 
545 	if (r > 0) {
546 		/*
547 		 * Target determined this ioctl is being issued against a
548 		 * subset of the parent bdev; require extra privileges.
549 		 */
550 		if (!capable(CAP_SYS_RAWIO)) {
551 			DMWARN_LIMIT(
552 	"%s: sending ioctl %x to DM device without required privilege.",
553 				current->comm, cmd);
554 			r = -ENOIOCTLCMD;
555 			goto out;
556 		}
557 	}
558 
559 	r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
560 out:
561 	dm_unprepare_ioctl(md, srcu_idx);
562 	return r;
563 }
564 
565 static void start_io_acct(struct dm_io *io);
566 
567 static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
568 {
569 	struct dm_io *io;
570 	struct dm_target_io *tio;
571 	struct bio *clone;
572 
573 	clone = bio_alloc_bioset(GFP_NOIO, 0, &md->io_bs);
574 	if (!clone)
575 		return NULL;
576 
577 	tio = container_of(clone, struct dm_target_io, clone);
578 	tio->inside_dm_io = true;
579 	tio->io = NULL;
580 
581 	io = container_of(tio, struct dm_io, tio);
582 	io->magic = DM_IO_MAGIC;
583 	io->status = 0;
584 	atomic_set(&io->io_count, 1);
585 	io->orig_bio = bio;
586 	io->md = md;
587 	spin_lock_init(&io->endio_lock);
588 
589 	start_io_acct(io);
590 
591 	return io;
592 }
593 
594 static void free_io(struct mapped_device *md, struct dm_io *io)
595 {
596 	bio_put(&io->tio.clone);
597 }
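
/*
 * Lifetime note for free_io(): the dm_io is not a separate allocation; it
 * lives in the front_pad of the clone bio that alloc_io() took from
 * md->io_bs, so the bio_put() above releases the dm_io as well.
 */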
598 
599 static struct dm_target_io *alloc_tio(struct clone_info *ci, struct dm_target *ti,
600 				      unsigned target_bio_nr, gfp_t gfp_mask)
601 {
602 	struct dm_target_io *tio;
603 
604 	if (!ci->io->tio.io) {
605 		/* the dm_target_io embedded in ci->io is available */
606 		tio = &ci->io->tio;
607 	} else {
608 		struct bio *clone = bio_alloc_bioset(gfp_mask, 0, &ci->io->md->bs);
609 		if (!clone)
610 			return NULL;
611 
612 		tio = container_of(clone, struct dm_target_io, clone);
613 		tio->inside_dm_io = false;
614 	}
615 
616 	tio->magic = DM_TIO_MAGIC;
617 	tio->io = ci->io;
618 	tio->ti = ti;
619 	tio->target_bio_nr = target_bio_nr;
620 
621 	return tio;
622 }
623 
624 static void free_tio(struct dm_target_io *tio)
625 {
626 	if (tio->inside_dm_io)
627 		return;
628 	bio_put(&tio->clone);
629 }
630 
631 static bool md_in_flight_bios(struct mapped_device *md)
632 {
633 	int cpu;
634 	struct hd_struct *part = &dm_disk(md)->part0;
635 	long sum = 0;
636 
637 	for_each_possible_cpu(cpu) {
638 		sum += part_stat_local_read_cpu(part, in_flight[0], cpu);
639 		sum += part_stat_local_read_cpu(part, in_flight[1], cpu);
640 	}
641 
642 	return sum != 0;
643 }
644 
645 static bool md_in_flight(struct mapped_device *md)
646 {
647 	if (queue_is_mq(md->queue))
648 		return blk_mq_queue_inflight(md->queue);
649 	else
650 		return md_in_flight_bios(md);
651 }
652 
653 static void start_io_acct(struct dm_io *io)
654 {
655 	struct mapped_device *md = io->md;
656 	struct bio *bio = io->orig_bio;
657 
658 	io->start_time = jiffies;
659 
660 	generic_start_io_acct(md->queue, bio_op(bio), bio_sectors(bio),
661 			      &dm_disk(md)->part0);
662 
663 	if (unlikely(dm_stats_used(&md->stats)))
664 		dm_stats_account_io(&md->stats, bio_data_dir(bio),
665 				    bio->bi_iter.bi_sector, bio_sectors(bio),
666 				    false, 0, &io->stats_aux);
667 }
668 
669 static void end_io_acct(struct dm_io *io)
670 {
671 	struct mapped_device *md = io->md;
672 	struct bio *bio = io->orig_bio;
673 	unsigned long duration = jiffies - io->start_time;
674 
675 	generic_end_io_acct(md->queue, bio_op(bio), &dm_disk(md)->part0,
676 			    io->start_time);
677 
678 	if (unlikely(dm_stats_used(&md->stats)))
679 		dm_stats_account_io(&md->stats, bio_data_dir(bio),
680 				    bio->bi_iter.bi_sector, bio_sectors(bio),
681 				    true, duration, &io->stats_aux);
682 
683 	/* nudge anyone waiting on suspend queue */
684 	if (unlikely(wq_has_sleeper(&md->wait)))
685 		wake_up(&md->wait);
686 }
687 
688 /*
689  * Add the bio to the list of deferred io.
690  */
691 static void queue_io(struct mapped_device *md, struct bio *bio)
692 {
693 	unsigned long flags;
694 
695 	spin_lock_irqsave(&md->deferred_lock, flags);
696 	bio_list_add(&md->deferred, bio);
697 	spin_unlock_irqrestore(&md->deferred_lock, flags);
698 	queue_work(md->wq, &md->work);
699 }
700 
701 /*
702  * Everyone (including functions in this file) should use this
703  * function to access the md->map field and make sure they call
704  * dm_put_live_table() when finished.
705  */
706 struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
707 {
708 	*srcu_idx = srcu_read_lock(&md->io_barrier);
709 
710 	return srcu_dereference(md->map, &md->io_barrier);
711 }
712 
713 void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
714 {
715 	srcu_read_unlock(&md->io_barrier, srcu_idx);
716 }
717 
718 void dm_sync_table(struct mapped_device *md)
719 {
720 	synchronize_srcu(&md->io_barrier);
721 	synchronize_rcu_expedited();
722 }
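
/*
 * Live-table protocol in brief: readers pin the current table with
 * dm_get_live_table()/dm_put_live_table() under SRCU, while a table swap
 * publishes the new md->map and then uses dm_sync_table() to wait for
 * all readers of the old table to drain before it is freed.
 */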
723 
724 /*
725  * A fast alternative to dm_get_live_table/dm_put_live_table.
726  * The caller must not block between these two functions.
727  */
728 static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
729 {
730 	rcu_read_lock();
731 	return rcu_dereference(md->map);
732 }
733 
734 static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
735 {
736 	rcu_read_unlock();
737 }
738 
739 static char *_dm_claim_ptr = "I belong to device-mapper";
740 
741 /*
742  * Open a table device so we can use it as a map destination.
743  */
744 static int open_table_device(struct table_device *td, dev_t dev,
745 			     struct mapped_device *md)
746 {
747 	struct block_device *bdev;
748 
749 	int r;
750 
751 	BUG_ON(td->dm_dev.bdev);
752 
753 	bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _dm_claim_ptr);
754 	if (IS_ERR(bdev))
755 		return PTR_ERR(bdev);
756 
757 	r = bd_link_disk_holder(bdev, dm_disk(md));
758 	if (r) {
759 		blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
760 		return r;
761 	}
762 
763 	td->dm_dev.bdev = bdev;
764 	td->dm_dev.dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
765 	return 0;
766 }
767 
768 /*
769  * Close a table device that we've been using.
770  */
771 static void close_table_device(struct table_device *td, struct mapped_device *md)
772 {
773 	if (!td->dm_dev.bdev)
774 		return;
775 
776 	bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
777 	blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
778 	put_dax(td->dm_dev.dax_dev);
779 	td->dm_dev.bdev = NULL;
780 	td->dm_dev.dax_dev = NULL;
781 }
782 
783 static struct table_device *find_table_device(struct list_head *l, dev_t dev,
784 					      fmode_t mode)
785 {
786 	struct table_device *td;
787 
788 	list_for_each_entry(td, l, list)
789 		if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
790 			return td;
791 
792 	return NULL;
793 }
794 
795 int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
796 			struct dm_dev **result)
797 {
798 	int r;
799 	struct table_device *td;
800 
801 	mutex_lock(&md->table_devices_lock);
802 	td = find_table_device(&md->table_devices, dev, mode);
803 	if (!td) {
804 		td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
805 		if (!td) {
806 			mutex_unlock(&md->table_devices_lock);
807 			return -ENOMEM;
808 		}
809 
810 		td->dm_dev.mode = mode;
811 		td->dm_dev.bdev = NULL;
812 
813 		if ((r = open_table_device(td, dev, md))) {
814 			mutex_unlock(&md->table_devices_lock);
815 			kfree(td);
816 			return r;
817 		}
818 
819 		format_dev_t(td->dm_dev.name, dev);
820 
821 		refcount_set(&td->count, 1);
822 		list_add(&td->list, &md->table_devices);
823 	} else {
824 		refcount_inc(&td->count);
825 	}
826 	mutex_unlock(&md->table_devices_lock);
827 
828 	*result = &td->dm_dev;
829 	return 0;
830 }
831 EXPORT_SYMBOL_GPL(dm_get_table_device);
832 
833 void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
834 {
835 	struct table_device *td = container_of(d, struct table_device, dm_dev);
836 
837 	mutex_lock(&md->table_devices_lock);
838 	if (refcount_dec_and_test(&td->count)) {
839 		close_table_device(td, md);
840 		list_del(&td->list);
841 		kfree(td);
842 	}
843 	mutex_unlock(&md->table_devices_lock);
844 }
845 EXPORT_SYMBOL(dm_put_table_device);
846 
847 static void free_table_devices(struct list_head *devices)
848 {
849 	struct list_head *tmp, *next;
850 
851 	list_for_each_safe(tmp, next, devices) {
852 		struct table_device *td = list_entry(tmp, struct table_device, list);
853 
854 		DMWARN("dm_destroy: %s still exists with %d references",
855 		       td->dm_dev.name, refcount_read(&td->count));
856 		kfree(td);
857 	}
858 }
859 
860 /*
861  * Get the geometry associated with a dm device
862  */
863 int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
864 {
865 	*geo = md->geometry;
866 
867 	return 0;
868 }
869 
870 /*
871  * Set the geometry of a device.
872  */
873 int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
874 {
875 	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;
876 
877 	if (geo->start > sz) {
878 		DMWARN("Start sector is beyond the geometry limits.");
879 		return -EINVAL;
880 	}
881 
882 	md->geometry = *geo;
883 
884 	return 0;
885 }
886 
887 static int __noflush_suspending(struct mapped_device *md)
888 {
889 	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
890 }
891 
892 /*
893  * Decrements the number of outstanding ios that a bio has been
894  * cloned into, completing the original io if necessary.
895  */
896 static void dec_pending(struct dm_io *io, blk_status_t error)
897 {
898 	unsigned long flags;
899 	blk_status_t io_error;
900 	struct bio *bio;
901 	struct mapped_device *md = io->md;
902 
903 	/* Push-back supersedes any I/O errors */
904 	if (unlikely(error)) {
905 		spin_lock_irqsave(&io->endio_lock, flags);
906 		if (!(io->status == BLK_STS_DM_REQUEUE && __noflush_suspending(md)))
907 			io->status = error;
908 		spin_unlock_irqrestore(&io->endio_lock, flags);
909 	}
910 
911 	if (atomic_dec_and_test(&io->io_count)) {
912 		if (io->status == BLK_STS_DM_REQUEUE) {
913 			/*
914 			 * Target requested pushing back the I/O.
915 			 */
916 			spin_lock_irqsave(&md->deferred_lock, flags);
917 			if (__noflush_suspending(md))
918 				/* NOTE early return due to BLK_STS_DM_REQUEUE below */
919 				bio_list_add_head(&md->deferred, io->orig_bio);
920 			else
921 				/* noflush suspend was interrupted. */
922 				io->status = BLK_STS_IOERR;
923 			spin_unlock_irqrestore(&md->deferred_lock, flags);
924 		}
925 
926 		io_error = io->status;
927 		bio = io->orig_bio;
928 		end_io_acct(io);
929 		free_io(md, io);
930 
931 		if (io_error == BLK_STS_DM_REQUEUE)
932 			return;
933 
934 		if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
935 			/*
936 			 * Preflush done for flush with data, reissue
937 			 * without REQ_PREFLUSH.
938 			 */
939 			bio->bi_opf &= ~REQ_PREFLUSH;
940 			queue_io(md, bio);
941 		} else {
942 			/* done with normal IO or empty flush */
943 			if (io_error)
944 				bio->bi_status = io_error;
945 			bio_endio(bio);
946 		}
947 	}
948 }
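
/*
 * Flush sequencing implied by the REQ_PREFLUSH branch above: a bio that
 * carries both a preflush and data completes in two rounds. Round one
 * issues only the empty flush; when it finishes here, the bio is requeued
 * without REQ_PREFLUSH so that round two writes the data, and only then
 * does bio_endio() run on the original bio.
 */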
949 
950 void disable_discard(struct mapped_device *md)
951 {
952 	struct queue_limits *limits = dm_get_queue_limits(md);
953 
954 	/* device doesn't really support DISCARD, disable it */
955 	limits->max_discard_sectors = 0;
956 	blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue);
957 }
958 
959 void disable_write_same(struct mapped_device *md)
960 {
961 	struct queue_limits *limits = dm_get_queue_limits(md);
962 
963 	/* device doesn't really support WRITE SAME, disable it */
964 	limits->max_write_same_sectors = 0;
965 }
966 
967 void disable_write_zeroes(struct mapped_device *md)
968 {
969 	struct queue_limits *limits = dm_get_queue_limits(md);
970 
971 	/* device doesn't really support WRITE ZEROES, disable it */
972 	limits->max_write_zeroes_sectors = 0;
973 }
974 
975 static void clone_endio(struct bio *bio)
976 {
977 	blk_status_t error = bio->bi_status;
978 	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
979 	struct dm_io *io = tio->io;
980 	struct mapped_device *md = tio->io->md;
981 	dm_endio_fn endio = tio->ti->type->end_io;
982 
983 	if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
984 		if (bio_op(bio) == REQ_OP_DISCARD &&
985 		    !bio->bi_disk->queue->limits.max_discard_sectors)
986 			disable_discard(md);
987 		else if (bio_op(bio) == REQ_OP_WRITE_SAME &&
988 			 !bio->bi_disk->queue->limits.max_write_same_sectors)
989 			disable_write_same(md);
990 		else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
991 			 !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
992 			disable_write_zeroes(md);
993 	}
994 
995 	if (endio) {
996 		int r = endio(tio->ti, bio, &error);
997 		switch (r) {
998 		case DM_ENDIO_REQUEUE:
999 			error = BLK_STS_DM_REQUEUE;
1000 			/*FALLTHRU*/
1001 		case DM_ENDIO_DONE:
1002 			break;
1003 		case DM_ENDIO_INCOMPLETE:
1004 			/* The target will handle the io */
1005 			return;
1006 		default:
1007 			DMWARN("unimplemented target endio return value: %d", r);
1008 			BUG();
1009 		}
1010 	}
1011 
1012 	free_tio(tio);
1013 	dec_pending(io, error);
1014 }
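
/*
 * The BLK_STS_TARGET handling in clone_endio() amounts to runtime feature
 * degradation: if a device that advertised discard, WRITE SAME or WRITE
 * ZEROES rejects such a request, the corresponding queue limit is zeroed
 * so upper layers stop issuing that operation instead of failing I/O
 * repeatedly.
 */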
1015 
1016 /*
1017  * Return maximum size of I/O possible at the supplied sector up to the current
1018  * target boundary.
1019  */
1020 static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
1021 {
1022 	sector_t target_offset = dm_target_offset(ti, sector);
1023 
1024 	return ti->len - target_offset;
1025 }
1026 
1027 static sector_t max_io_len(sector_t sector, struct dm_target *ti)
1028 {
1029 	sector_t len = max_io_len_target_boundary(sector, ti);
1030 	sector_t offset, max_len;
1031 
1032 	/*
1033 	 * Does the target need to split even further?
1034 	 */
1035 	if (ti->max_io_len) {
1036 		offset = dm_target_offset(ti, sector);
1037 		if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
1038 			max_len = sector_div(offset, ti->max_io_len);
1039 		else
1040 			max_len = offset & (ti->max_io_len - 1);
1041 		max_len = ti->max_io_len - max_len;
1042 
1043 		if (len > max_len)
1044 			len = max_len;
1045 	}
1046 
1047 	return len;
1048 }
1049 
1050 int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
1051 {
1052 	if (len > UINT_MAX) {
1053 		DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
1054 		      (unsigned long long)len, UINT_MAX);
1055 		ti->error = "Maximum size of target IO is too large";
1056 		return -EINVAL;
1057 	}
1058 
1059 	ti->max_io_len = (uint32_t) len;
1060 
1061 	return 0;
1062 }
1063 EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
1064 
1065 static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
1066 						sector_t sector, int *srcu_idx)
1067 	__acquires(md->io_barrier)
1068 {
1069 	struct dm_table *map;
1070 	struct dm_target *ti;
1071 
1072 	map = dm_get_live_table(md, srcu_idx);
1073 	if (!map)
1074 		return NULL;
1075 
1076 	ti = dm_table_find_target(map, sector);
1077 	if (!dm_target_is_valid(ti))
1078 		return NULL;
1079 
1080 	return ti;
1081 }
1082 
1083 static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
1084 				 long nr_pages, void **kaddr, pfn_t *pfn)
1085 {
1086 	struct mapped_device *md = dax_get_private(dax_dev);
1087 	sector_t sector = pgoff * PAGE_SECTORS;
1088 	struct dm_target *ti;
1089 	long len, ret = -EIO;
1090 	int srcu_idx;
1091 
1092 	ti = dm_dax_get_live_target(md, sector, &srcu_idx);
1093 
1094 	if (!ti)
1095 		goto out;
1096 	if (!ti->type->direct_access)
1097 		goto out;
1098 	len = max_io_len(sector, ti) / PAGE_SECTORS;
1099 	if (len < 1)
1100 		goto out;
1101 	nr_pages = min(len, nr_pages);
1102 	ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);
1103 
1104  out:
1105 	dm_put_live_table(md, srcu_idx);
1106 
1107 	return ret;
1108 }
1109 
1110 static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
1111 				    void *addr, size_t bytes, struct iov_iter *i)
1112 {
1113 	struct mapped_device *md = dax_get_private(dax_dev);
1114 	sector_t sector = pgoff * PAGE_SECTORS;
1115 	struct dm_target *ti;
1116 	long ret = 0;
1117 	int srcu_idx;
1118 
1119 	ti = dm_dax_get_live_target(md, sector, &srcu_idx);
1120 
1121 	if (!ti)
1122 		goto out;
1123 	if (!ti->type->dax_copy_from_iter) {
1124 		ret = copy_from_iter(addr, bytes, i);
1125 		goto out;
1126 	}
1127 	ret = ti->type->dax_copy_from_iter(ti, pgoff, addr, bytes, i);
1128  out:
1129 	dm_put_live_table(md, srcu_idx);
1130 
1131 	return ret;
1132 }
1133 
1134 static size_t dm_dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
1135 		void *addr, size_t bytes, struct iov_iter *i)
1136 {
1137 	struct mapped_device *md = dax_get_private(dax_dev);
1138 	sector_t sector = pgoff * PAGE_SECTORS;
1139 	struct dm_target *ti;
1140 	long ret = 0;
1141 	int srcu_idx;
1142 
1143 	ti = dm_dax_get_live_target(md, sector, &srcu_idx);
1144 
1145 	if (!ti)
1146 		goto out;
1147 	if (!ti->type->dax_copy_to_iter) {
1148 		ret = copy_to_iter(addr, bytes, i);
1149 		goto out;
1150 	}
1151 	ret = ti->type->dax_copy_to_iter(ti, pgoff, addr, bytes, i);
1152  out:
1153 	dm_put_live_table(md, srcu_idx);
1154 
1155 	return ret;
1156 }
1157 
1158 /*
1159  * A target may call dm_accept_partial_bio only from the map routine.  It is
1160  * allowed for all bio types except REQ_PREFLUSH and REQ_OP_ZONE_RESET.
1161  *
1162  * dm_accept_partial_bio informs DM that the target only wants to process
1163  * additional n_sectors sectors of the bio, and that the rest of the data
1164  * should be sent in a subsequent bio.
1165  *
1166  * A diagram that explains the arithmetic:
1167  * +--------------------+---------------+-------+
1168  * |         1          |       2       |   3   |
1169  * +--------------------+---------------+-------+
1170  *
1171  * <-------------- *tio->len_ptr --------------->
1172  *                      <------- bi_size ------->
1173  *                      <-- n_sectors -->
1174  *
1175  * Region 1 was already iterated over with bio_advance or similar function.
1176  *	(it may be empty if the target doesn't use bio_advance)
1177  * Region 2 is the remaining bio size that the target wants to process.
1178  *	(it may be empty if region 1 is non-empty, although there is no reason
1179  *	 to make it empty)
1180  * The target requires that region 3 is to be sent in the next bio.
1181  *
1182  * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
1183  * the partially processed part (the sum of regions 1+2) must be the same for all
1184  * copies of the bio.
1185  */
1186 void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
1187 {
1188 	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
1189 	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
1190 	BUG_ON(bio->bi_opf & REQ_PREFLUSH);
1191 	BUG_ON(bi_size > *tio->len_ptr);
1192 	BUG_ON(n_sectors > bi_size);
1193 	*tio->len_ptr -= bi_size - n_sectors;
1194 	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
1195 }
1196 EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
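
/*
 * Usage sketch for a hypothetical target's ->map method (example_map and
 * EXAMPLE_MAX_SECTORS are illustrative names, not part of this file):
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		if (bio_sectors(bio) > EXAMPLE_MAX_SECTORS)
 *			dm_accept_partial_bio(bio, EXAMPLE_MAX_SECTORS);
 *		... remap bio->bi_iter.bi_sector here ...
 *		return DM_MAPIO_REMAPPED;
 *	}
 *
 * DM core then resubmits the trimmed-off remainder (region 3 in the
 * diagram above) as a new bio.
 */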
1197 
1198 /*
1199  * The zone descriptors obtained with a zone report indicate
1200  * zone positions within the underlying device of the target. The zone
1201  * descriptors must be remapped to match their position within the dm device.
1202  * The calling target should obtain the zone information using
1203  * blkdev_report_zones() to ensure that remapping for partition offset is
1204  * already handled.
1205  */
1206 void dm_remap_zone_report(struct dm_target *ti, sector_t start,
1207 			  struct blk_zone *zones, unsigned int *nr_zones)
1208 {
1209 #ifdef CONFIG_BLK_DEV_ZONED
1210 	struct blk_zone *zone;
1211 	unsigned int nrz = *nr_zones;
1212 	int i;
1213 
1214 	/*
1215 	 * Remap the start sector and write pointer position of the zones in
1216 	 * the array. Since we may have obtained more zones from the target's
1217 	 * underlying device than the target's size covers, also adjust the
1218 	 * number of zones accordingly.
1219 	 */
1220 	for (i = 0; i < nrz; i++) {
1221 		zone = zones + i;
1222 		if (zone->start >= start + ti->len) {
1223 			memset(zone, 0, sizeof(struct blk_zone) * (nrz - i));
1224 			break;
1225 		}
1226 
1227 		zone->start = zone->start + ti->begin - start;
1228 		if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
1229 			continue;
1230 
1231 		if (zone->cond == BLK_ZONE_COND_FULL)
1232 			zone->wp = zone->start + zone->len;
1233 		else if (zone->cond == BLK_ZONE_COND_EMPTY)
1234 			zone->wp = zone->start;
1235 		else
1236 			zone->wp = zone->wp + ti->begin - start;
1237 	}
1238 
1239 	*nr_zones = i;
1240 #else /* !CONFIG_BLK_DEV_ZONED */
1241 	*nr_zones = 0;
1242 #endif
1243 }
1244 EXPORT_SYMBOL_GPL(dm_remap_zone_report);
1245 
1246 static blk_qc_t __map_bio(struct dm_target_io *tio)
1247 {
1248 	int r;
1249 	sector_t sector;
1250 	struct bio *clone = &tio->clone;
1251 	struct dm_io *io = tio->io;
1252 	struct mapped_device *md = io->md;
1253 	struct dm_target *ti = tio->ti;
1254 	blk_qc_t ret = BLK_QC_T_NONE;
1255 
1256 	clone->bi_end_io = clone_endio;
1257 
1258 	/*
1259 	 * Map the clone.  If r == 0 we don't need to do
1260 	 * anything, the target has assumed ownership of
1261 	 * this io.
1262 	 */
1263 	atomic_inc(&io->io_count);
1264 	sector = clone->bi_iter.bi_sector;
1265 
1266 	r = ti->type->map(ti, clone);
1267 	switch (r) {
1268 	case DM_MAPIO_SUBMITTED:
1269 		break;
1270 	case DM_MAPIO_REMAPPED:
1271 		/* the bio has been remapped so dispatch it */
1272 		trace_block_bio_remap(clone->bi_disk->queue, clone,
1273 				      bio_dev(io->orig_bio), sector);
1274 		if (md->type == DM_TYPE_NVME_BIO_BASED)
1275 			ret = direct_make_request(clone);
1276 		else
1277 			ret = generic_make_request(clone);
1278 		break;
1279 	case DM_MAPIO_KILL:
1280 		free_tio(tio);
1281 		dec_pending(io, BLK_STS_IOERR);
1282 		break;
1283 	case DM_MAPIO_REQUEUE:
1284 		free_tio(tio);
1285 		dec_pending(io, BLK_STS_DM_REQUEUE);
1286 		break;
1287 	default:
1288 		DMWARN("unimplemented target map return value: %d", r);
1289 		BUG();
1290 	}
1291 
1292 	return ret;
1293 }
1294 
1295 static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
1296 {
1297 	bio->bi_iter.bi_sector = sector;
1298 	bio->bi_iter.bi_size = to_bytes(len);
1299 }
1300 
1301 /*
1302  * Creates a bio that consists of range of complete bvecs.
1303  */
1304 static int clone_bio(struct dm_target_io *tio, struct bio *bio,
1305 		     sector_t sector, unsigned len)
1306 {
1307 	struct bio *clone = &tio->clone;
1308 
1309 	__bio_clone_fast(clone, bio);
1310 
1311 	if (bio_integrity(bio)) {
1312 		int r;
1313 
1314 		if (unlikely(!dm_target_has_integrity(tio->ti->type) &&
1315 			     !dm_target_passes_integrity(tio->ti->type))) {
1316 			DMWARN("%s: the target %s doesn't support integrity data.",
1317 				dm_device_name(tio->io->md),
1318 				tio->ti->type->name);
1319 			return -EIO;
1320 		}
1321 
1322 		r = bio_integrity_clone(clone, bio, GFP_NOIO);
1323 		if (r < 0)
1324 			return r;
1325 	}
1326 
1327 	bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
1328 	clone->bi_iter.bi_size = to_bytes(len);
1329 
1330 	if (bio_integrity(bio))
1331 		bio_integrity_trim(clone);
1332 
1333 	return 0;
1334 }
1335 
1336 static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
1337 				struct dm_target *ti, unsigned num_bios)
1338 {
1339 	struct dm_target_io *tio;
1340 	int try;
1341 
1342 	if (!num_bios)
1343 		return;
1344 
1345 	if (num_bios == 1) {
1346 		tio = alloc_tio(ci, ti, 0, GFP_NOIO);
1347 		bio_list_add(blist, &tio->clone);
1348 		return;
1349 	}
1350 
1351 	for (try = 0; try < 2; try++) {
1352 		int bio_nr;
1353 		struct bio *bio;
1354 
1355 		if (try)
1356 			mutex_lock(&ci->io->md->table_devices_lock);
1357 		for (bio_nr = 0; bio_nr < num_bios; bio_nr++) {
1358 			tio = alloc_tio(ci, ti, bio_nr, try ? GFP_NOIO : GFP_NOWAIT);
1359 			if (!tio)
1360 				break;
1361 
1362 			bio_list_add(blist, &tio->clone);
1363 		}
1364 		if (try)
1365 			mutex_unlock(&ci->io->md->table_devices_lock);
1366 		if (bio_nr == num_bios)
1367 			return;
1368 
1369 		while ((bio = bio_list_pop(blist))) {
1370 			tio = container_of(bio, struct dm_target_io, clone);
1371 			free_tio(tio);
1372 		}
1373 	}
1374 }
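
/*
 * The two-pass loop above makes the multi-bio allocation all-or-nothing:
 * the first pass uses GFP_NOWAIT so a partial allocation can be rolled
 * back without blocking, and only the retry, serialized by
 * table_devices_lock, uses GFP_NOIO. The likely intent of serializing the
 * sleeping pass is that at most one thread at a time waits on the
 * mempool-backed bioset while already holding bios from it, avoiding
 * pool-exhaustion deadlocks.
 */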
1375 
1376 static blk_qc_t __clone_and_map_simple_bio(struct clone_info *ci,
1377 					   struct dm_target_io *tio, unsigned *len)
1378 {
1379 	struct bio *clone = &tio->clone;
1380 
1381 	tio->len_ptr = len;
1382 
1383 	__bio_clone_fast(clone, ci->bio);
1384 	if (len)
1385 		bio_setup_sector(clone, ci->sector, *len);
1386 
1387 	return __map_bio(tio);
1388 }
1389 
1390 static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
1391 				  unsigned num_bios, unsigned *len)
1392 {
1393 	struct bio_list blist = BIO_EMPTY_LIST;
1394 	struct bio *bio;
1395 	struct dm_target_io *tio;
1396 
1397 	alloc_multiple_bios(&blist, ci, ti, num_bios);
1398 
1399 	while ((bio = bio_list_pop(&blist))) {
1400 		tio = container_of(bio, struct dm_target_io, clone);
1401 		(void) __clone_and_map_simple_bio(ci, tio, len);
1402 	}
1403 }
1404 
1405 static int __send_empty_flush(struct clone_info *ci)
1406 {
1407 	unsigned target_nr = 0;
1408 	struct dm_target *ti;
1409 
1410 	/*
1411 	 * Empty flush uses a statically initialized bio, as the base for
1412 	 * cloning.  However, blkg association requires that a bdev is
1413 	 * associated with a gendisk, which doesn't happen until the bdev is
1414 	 * opened.  So, blkg association is done at issue time of the flush
1415 	 * rather than when the device is created in alloc_dev().
1416 	 */
1417 	bio_set_dev(ci->bio, ci->io->md->bdev);
1418 
1419 	BUG_ON(bio_has_data(ci->bio));
1420 	while ((ti = dm_table_get_target(ci->map, target_nr++)))
1421 		__send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
1422 
1423 	bio_disassociate_blkg(ci->bio);
1424 
1425 	return 0;
1426 }
1427 
1428 static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
1429 				    sector_t sector, unsigned *len)
1430 {
1431 	struct bio *bio = ci->bio;
1432 	struct dm_target_io *tio;
1433 	int r;
1434 
1435 	tio = alloc_tio(ci, ti, 0, GFP_NOIO);
1436 	tio->len_ptr = len;
1437 	r = clone_bio(tio, bio, sector, *len);
1438 	if (r < 0) {
1439 		free_tio(tio);
1440 		return r;
1441 	}
1442 	(void) __map_bio(tio);
1443 
1444 	return 0;
1445 }
1446 
1447 typedef unsigned (*get_num_bios_fn)(struct dm_target *ti);
1448 
1449 static unsigned get_num_discard_bios(struct dm_target *ti)
1450 {
1451 	return ti->num_discard_bios;
1452 }
1453 
1454 static unsigned get_num_secure_erase_bios(struct dm_target *ti)
1455 {
1456 	return ti->num_secure_erase_bios;
1457 }
1458 
1459 static unsigned get_num_write_same_bios(struct dm_target *ti)
1460 {
1461 	return ti->num_write_same_bios;
1462 }
1463 
1464 static unsigned get_num_write_zeroes_bios(struct dm_target *ti)
1465 {
1466 	return ti->num_write_zeroes_bios;
1467 }
1468 
1469 static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
1470 				       unsigned num_bios)
1471 {
1472 	unsigned len = ci->sector_count;
1473 
1474 	/*
1475 	 * Even though the device advertised support for this type of
1476 	 * request, that does not mean every target supports it, and
1477 	 * reconfiguration might also have changed that since the
1478 	 * check was performed.
1479 	 */
1480 	if (!num_bios)
1481 		return -EOPNOTSUPP;
1482 
1483 	__send_duplicate_bios(ci, ti, num_bios, &len);
1484 
1485 	ci->sector += len;
1486 	ci->sector_count -= len;
1487 
1488 	return 0;
1489 }
1490 
1491 static int __send_discard(struct clone_info *ci, struct dm_target *ti)
1492 {
1493 	return __send_changing_extent_only(ci, ti, get_num_discard_bios(ti));
1494 }
1495 
1496 static int __send_secure_erase(struct clone_info *ci, struct dm_target *ti)
1497 {
1498 	return __send_changing_extent_only(ci, ti, get_num_secure_erase_bios(ti));
1499 }
1500 
1501 static int __send_write_same(struct clone_info *ci, struct dm_target *ti)
1502 {
1503 	return __send_changing_extent_only(ci, ti, get_num_write_same_bios(ti));
1504 }
1505 
1506 static int __send_write_zeroes(struct clone_info *ci, struct dm_target *ti)
1507 {
1508 	return __send_changing_extent_only(ci, ti, get_num_write_zeroes_bios(ti));
1509 }
1510 
1511 static bool is_abnormal_io(struct bio *bio)
1512 {
1513 	bool r = false;
1514 
1515 	switch (bio_op(bio)) {
1516 	case REQ_OP_DISCARD:
1517 	case REQ_OP_SECURE_ERASE:
1518 	case REQ_OP_WRITE_SAME:
1519 	case REQ_OP_WRITE_ZEROES:
1520 		r = true;
1521 		break;
1522 	}
1523 
1524 	return r;
1525 }
1526 
1527 static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
1528 				  int *result)
1529 {
1530 	struct bio *bio = ci->bio;
1531 
1532 	if (bio_op(bio) == REQ_OP_DISCARD)
1533 		*result = __send_discard(ci, ti);
1534 	else if (bio_op(bio) == REQ_OP_SECURE_ERASE)
1535 		*result = __send_secure_erase(ci, ti);
1536 	else if (bio_op(bio) == REQ_OP_WRITE_SAME)
1537 		*result = __send_write_same(ci, ti);
1538 	else if (bio_op(bio) == REQ_OP_WRITE_ZEROES)
1539 		*result = __send_write_zeroes(ci, ti);
1540 	else
1541 		return false;
1542 
1543 	return true;
1544 }
1545 
1546 /*
1547  * Select the correct strategy for processing a non-flush bio.
1548  */
1549 static int __split_and_process_non_flush(struct clone_info *ci)
1550 {
1551 	struct dm_target *ti;
1552 	unsigned len;
1553 	int r;
1554 
1555 	ti = dm_table_find_target(ci->map, ci->sector);
1556 	if (!dm_target_is_valid(ti))
1557 		return -EIO;
1558 
1559 	if (__process_abnormal_io(ci, ti, &r))
1560 		return r;
1561 
1562 	len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);
1563 
1564 	r = __clone_and_map_data_bio(ci, ti, ci->sector, &len);
1565 	if (r < 0)
1566 		return r;
1567 
1568 	ci->sector += len;
1569 	ci->sector_count -= len;
1570 
1571 	return 0;
1572 }
1573 
1574 static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
1575 			    struct dm_table *map, struct bio *bio)
1576 {
1577 	ci->map = map;
1578 	ci->io = alloc_io(md, bio);
1579 	ci->sector = bio->bi_iter.bi_sector;
1580 }
1581 
1582 #define __dm_part_stat_sub(part, field, subnd)	\
1583 	(part_stat_get(part, field) -= (subnd))
1584 
1585 /*
1586  * Entry point to split a bio into clones and submit them to the targets.
1587  */
1588 static blk_qc_t __split_and_process_bio(struct mapped_device *md,
1589 					struct dm_table *map, struct bio *bio)
1590 {
1591 	struct clone_info ci;
1592 	blk_qc_t ret = BLK_QC_T_NONE;
1593 	int error = 0;
1594 
1595 	init_clone_info(&ci, md, map, bio);
1596 
1597 	if (bio->bi_opf & REQ_PREFLUSH) {
1598 		struct bio flush_bio;
1599 
1600 		/*
1601 		 * Use an on-stack bio for this; it's safe since we don't
1602 		 * need to reference it after submit. It's just used as
1603 		 * the basis for the clone(s).
1604 		 */
1605 		bio_init(&flush_bio, NULL, 0);
1606 		flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
1607 		ci.bio = &flush_bio;
1608 		ci.sector_count = 0;
1609 		error = __send_empty_flush(&ci);
1610 		/* dec_pending submits any data associated with flush */
1611 	} else if (bio_op(bio) == REQ_OP_ZONE_RESET) {
1612 		ci.bio = bio;
1613 		ci.sector_count = 0;
1614 		error = __split_and_process_non_flush(&ci);
1615 	} else {
1616 		ci.bio = bio;
1617 		ci.sector_count = bio_sectors(bio);
1618 		while (ci.sector_count && !error) {
1619 			error = __split_and_process_non_flush(&ci);
1620 			if (current->bio_list && ci.sector_count && !error) {
1621 				/*
1622 				 * Remainder must be passed to generic_make_request()
1623 				 * so that it gets handled *after* bios already submitted
1624 				 * have been completely processed.
1625 				 * We take a clone of the original to store in
1626 				 * ci.io->orig_bio, to be used by end_io_acct() and
1627 				 * by dec_pending() for completion handling.
1628 				 */
1629 				struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count,
1630 							  GFP_NOIO, &md->queue->bio_split);
1631 				ci.io->orig_bio = b;
1632 
1633 				/*
1634 				 * Adjust IO stats for each split, otherwise upon queue
1635 				 * reentry there will be redundant IO accounting.
1636 				 * NOTE: this is a stop-gap fix, a proper fix involves
1637 				 * significant refactoring of DM core's bio splitting
1638 				 * (by eliminating DM's splitting and just using bio_split)
1639 				 */
1640 				part_stat_lock();
1641 				__dm_part_stat_sub(&dm_disk(md)->part0,
1642 						   sectors[op_stat_group(bio_op(bio))], ci.sector_count);
1643 				part_stat_unlock();
1644 
1645 				bio_chain(b, bio);
1646 				trace_block_split(md->queue, b, bio->bi_iter.bi_sector);
1647 				ret = generic_make_request(bio);
1648 				break;
1649 			}
1650 		}
1651 	}
1652 
1653 	/* drop the extra reference count */
1654 	dec_pending(ci.io, errno_to_blk_status(error));
1655 	return ret;
1656 }
1657 
1658 /*
1659  * Optimized variant of __split_and_process_bio that leverages the
1660  * fact that targets that use it do _not_ have a need to split bios.
1661  */
1662 static blk_qc_t __process_bio(struct mapped_device *md, struct dm_table *map,
1663 			      struct bio *bio, struct dm_target *ti)
1664 {
1665 	struct clone_info ci;
1666 	blk_qc_t ret = BLK_QC_T_NONE;
1667 	int error = 0;
1668 
1669 	init_clone_info(&ci, md, map, bio);
1670 
1671 	if (bio->bi_opf & REQ_PREFLUSH) {
1672 		struct bio flush_bio;
1673 
1674 		/*
1675 		 * Use an on-stack bio for this; it's safe since we don't
1676 		 * need to reference it after submit. It's just used as
1677 		 * the basis for the clone(s).
1678 		 */
1679 		bio_init(&flush_bio, NULL, 0);
1680 		flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
1681 		ci.bio = &flush_bio;
1682 		ci.sector_count = 0;
1683 		error = __send_empty_flush(&ci);
1684 		/* dec_pending submits any data associated with flush */
1685 	} else {
1686 		struct dm_target_io *tio;
1687 
1688 		ci.bio = bio;
1689 		ci.sector_count = bio_sectors(bio);
1690 		if (__process_abnormal_io(&ci, ti, &error))
1691 			goto out;
1692 
1693 		tio = alloc_tio(&ci, ti, 0, GFP_NOIO);
1694 		ret = __clone_and_map_simple_bio(&ci, tio, NULL);
1695 	}
1696 out:
1697 	/* drop the extra reference count */
1698 	dec_pending(ci.io, errno_to_blk_status(error));
1699 	return ret;
1700 }
1701 
1702 static void dm_queue_split(struct mapped_device *md, struct dm_target *ti, struct bio **bio)
1703 {
1704 	unsigned len, sector_count;
1705 
1706 	sector_count = bio_sectors(*bio);
1707 	len = min_t(sector_t, max_io_len((*bio)->bi_iter.bi_sector, ti), sector_count);
1708 
1709 	if (sector_count > len) {
1710 		struct bio *split = bio_split(*bio, len, GFP_NOIO, &md->queue->bio_split);
1711 
1712 		bio_chain(split, *bio);
1713 		trace_block_split(md->queue, split, (*bio)->bi_iter.bi_sector);
1714 		generic_make_request(*bio);
1715 		*bio = split;
1716 	}
1717 }
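
/*
 * Illustrative flow for dm_queue_split(): given a 1024-sector bio and a
 * target that allows at most 256 sectors at this offset, bio_split()
 * carves off the leading 256 sectors, bio_chain() ties the remainder's
 * completion to the new head, the remainder is resubmitted via
 * generic_make_request(), and the caller continues with only the
 * 256-sector head.
 */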
1718 
1719 static blk_qc_t dm_process_bio(struct mapped_device *md,
1720 			       struct dm_table *map, struct bio *bio)
1721 {
1722 	blk_qc_t ret = BLK_QC_T_NONE;
1723 	struct dm_target *ti = md->immutable_target;
1724 
1725 	if (unlikely(!map)) {
1726 		bio_io_error(bio);
1727 		return ret;
1728 	}
1729 
1730 	if (!ti) {
1731 		ti = dm_table_find_target(map, bio->bi_iter.bi_sector);
1732 		if (unlikely(!ti || !dm_target_is_valid(ti))) {
1733 			bio_io_error(bio);
1734 			return ret;
1735 		}
1736 	}
1737 
1738 	/*
1739 	 * If called from ->make_request_fn, we need to use blk_queue_split();
1740 	 * otherwise the queue_limits for abnormal requests (e.g. discard,
1741 	 * writesame, etc) won't be imposed.
1742 	 */
1743 	if (current->bio_list) {
1744 		blk_queue_split(md->queue, &bio);
1745 		if (!is_abnormal_io(bio))
1746 			dm_queue_split(md, ti, &bio);
1747 	}
1748 
1749 	if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
1750 		return __process_bio(md, map, bio, ti);
1751 	else
1752 		return __split_and_process_bio(md, map, bio);
1753 }
1754 
1755 static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
1756 {
1757 	struct mapped_device *md = q->queuedata;
1758 	blk_qc_t ret = BLK_QC_T_NONE;
1759 	int srcu_idx;
1760 	struct dm_table *map;
1761 
1762 	map = dm_get_live_table(md, &srcu_idx);
1763 
1764 	/* if we're suspended, we have to queue this io for later */
1765 	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
1766 		dm_put_live_table(md, srcu_idx);
1767 
1768 		if (!(bio->bi_opf & REQ_RAHEAD))
1769 			queue_io(md, bio);
1770 		else
1771 			bio_io_error(bio);
1772 		return ret;
1773 	}
1774 
1775 	ret = dm_process_bio(md, map, bio);
1776 
1777 	dm_put_live_table(md, srcu_idx);
1778 	return ret;
1779 }
1780 
1781 static int dm_any_congested(void *congested_data, int bdi_bits)
1782 {
1783 	int r = bdi_bits;
1784 	struct mapped_device *md = congested_data;
1785 	struct dm_table *map;
1786 
1787 	if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
1788 		if (dm_request_based(md)) {
1789 			/*
1790 			 * With request-based DM we only need to check the
1791 			 * top-level queue for congestion.
1792 			 */
1793 			r = md->queue->backing_dev_info->wb.state & bdi_bits;
1794 		} else {
1795 			map = dm_get_live_table_fast(md);
1796 			if (map)
1797 				r = dm_table_any_congested(map, bdi_bits);
1798 			dm_put_live_table_fast(md);
1799 		}
1800 	}
1801 
1802 	return r;
1803 }
1804 
1805 /*-----------------------------------------------------------------
1806  * An IDR is used to keep track of allocated minor numbers.
1807  *---------------------------------------------------------------*/
1808 static void free_minor(int minor)
1809 {
1810 	spin_lock(&_minor_lock);
1811 	idr_remove(&_minor_idr, minor);
1812 	spin_unlock(&_minor_lock);
1813 }
1814 
1815 /*
1816  * See if the device with a specific minor # is free.
1817  */
1818 static int specific_minor(int minor)
1819 {
1820 	int r;
1821 
1822 	if (minor >= (1 << MINORBITS))
1823 		return -EINVAL;
1824 
1825 	idr_preload(GFP_KERNEL);
1826 	spin_lock(&_minor_lock);
1827 
1828 	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);
1829 
1830 	spin_unlock(&_minor_lock);
1831 	idr_preload_end();
1832 	if (r < 0)
1833 		return r == -ENOSPC ? -EBUSY : r;
1834 	return 0;
1835 }
1836 
1837 static int next_free_minor(int *minor)
1838 {
1839 	int r;
1840 
1841 	idr_preload(GFP_KERNEL);
1842 	spin_lock(&_minor_lock);
1843 
1844 	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);
1845 
1846 	spin_unlock(&_minor_lock);
1847 	idr_preload_end();
1848 	if (r < 0)
1849 		return r;
1850 	*minor = r;
1851 	return 0;
1852 }
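
/*
 * Pattern note for the two minor allocators above: idr_preload(GFP_KERNEL)
 * fills the per-cpu IDR node cache while sleeping is still allowed, so the
 * subsequent idr_alloc() can run under the _minor_lock spinlock with
 * GFP_NOWAIT and still reliably find preallocated nodes.
 */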
1853 
1854 static const struct block_device_operations dm_blk_dops;
1855 static const struct dax_operations dm_dax_ops;
1856 
1857 static void dm_wq_work(struct work_struct *work);
1858 
1859 static void dm_init_normal_md_queue(struct mapped_device *md)
1860 {
1861 	/*
1862 	 * Initialize aspects of queue that aren't relevant for blk-mq
1863 	 */
1864 	md->queue->backing_dev_info->congested_fn = dm_any_congested;
1865 }
1866 
1867 static void cleanup_mapped_device(struct mapped_device *md)
1868 {
1869 	if (md->wq)
1870 		destroy_workqueue(md->wq);
1871 	bioset_exit(&md->bs);
1872 	bioset_exit(&md->io_bs);
1873 
1874 	if (md->dax_dev) {
1875 		kill_dax(md->dax_dev);
1876 		put_dax(md->dax_dev);
1877 		md->dax_dev = NULL;
1878 	}
1879 
1880 	if (md->disk) {
1881 		spin_lock(&_minor_lock);
1882 		md->disk->private_data = NULL;
1883 		spin_unlock(&_minor_lock);
1884 		del_gendisk(md->disk);
1885 		put_disk(md->disk);
1886 	}
1887 
1888 	if (md->queue)
1889 		blk_cleanup_queue(md->queue);
1890 
1891 	cleanup_srcu_struct(&md->io_barrier);
1892 
1893 	if (md->bdev) {
1894 		bdput(md->bdev);
1895 		md->bdev = NULL;
1896 	}
1897 
1898 	mutex_destroy(&md->suspend_lock);
1899 	mutex_destroy(&md->type_lock);
1900 	mutex_destroy(&md->table_devices_lock);
1901 
1902 	dm_mq_cleanup_mapped_device(md);
1903 }
1904 
1905 /*
1906  * Allocate and initialise a blank device with a given minor.
1907  */
1908 static struct mapped_device *alloc_dev(int minor)
1909 {
1910 	int r, numa_node_id = dm_get_numa_node();
1911 	struct mapped_device *md;
1912 	void *old_md;
1913 
1914 	md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
1915 	if (!md) {
1916 		DMWARN("unable to allocate device, out of memory.");
1917 		return NULL;
1918 	}
1919 
1920 	if (!try_module_get(THIS_MODULE))
1921 		goto bad_module_get;
1922 
1923 	/* get a minor number for the dev */
1924 	if (minor == DM_ANY_MINOR)
1925 		r = next_free_minor(&minor);
1926 	else
1927 		r = specific_minor(minor);
1928 	if (r < 0)
1929 		goto bad_minor;
1930 
1931 	r = init_srcu_struct(&md->io_barrier);
1932 	if (r < 0)
1933 		goto bad_io_barrier;
1934 
1935 	md->numa_node_id = numa_node_id;
1936 	md->init_tio_pdu = false;
1937 	md->type = DM_TYPE_NONE;
1938 	mutex_init(&md->suspend_lock);
1939 	mutex_init(&md->type_lock);
1940 	mutex_init(&md->table_devices_lock);
1941 	spin_lock_init(&md->deferred_lock);
1942 	atomic_set(&md->holders, 1);
1943 	atomic_set(&md->open_count, 0);
1944 	atomic_set(&md->event_nr, 0);
1945 	atomic_set(&md->uevent_seq, 0);
1946 	INIT_LIST_HEAD(&md->uevent_list);
1947 	INIT_LIST_HEAD(&md->table_devices);
1948 	spin_lock_init(&md->uevent_lock);
1949 
1950 	md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id);
1951 	if (!md->queue)
1952 		goto bad;
1953 	md->queue->queuedata = md;
1954 	md->queue->backing_dev_info->congested_data = md;
1955 
1956 	md->disk = alloc_disk_node(1, md->numa_node_id);
1957 	if (!md->disk)
1958 		goto bad;
1959 
1960 	init_waitqueue_head(&md->wait);
1961 	INIT_WORK(&md->work, dm_wq_work);
1962 	init_waitqueue_head(&md->eventq);
1963 	init_completion(&md->kobj_holder.completion);
1964 
1965 	md->disk->major = _major;
1966 	md->disk->first_minor = minor;
1967 	md->disk->fops = &dm_blk_dops;
1968 	md->disk->queue = md->queue;
1969 	md->disk->private_data = md;
1970 	sprintf(md->disk->disk_name, "dm-%d", minor);
1971 
1972 	if (IS_ENABLED(CONFIG_DAX_DRIVER)) {
1973 		md->dax_dev = alloc_dax(md, md->disk->disk_name, &dm_dax_ops);
1974 		if (!md->dax_dev)
1975 			goto bad;
1976 	}
1977 
1978 	add_disk_no_queue_reg(md->disk);
1979 	format_dev_t(md->name, MKDEV(_major, minor));
1980 
1981 	md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
1982 	if (!md->wq)
1983 		goto bad;
1984 
1985 	md->bdev = bdget_disk(md->disk, 0);
1986 	if (!md->bdev)
1987 		goto bad;
1988 
1989 	dm_stats_init(&md->stats);
1990 
1991 	/* Populate the mapping, nobody knows we exist yet */
1992 	spin_lock(&_minor_lock);
1993 	old_md = idr_replace(&_minor_idr, md, minor);
1994 	spin_unlock(&_minor_lock);
1995 
1996 	BUG_ON(old_md != MINOR_ALLOCED);
1997 
1998 	return md;
1999 
2000 bad:
2001 	cleanup_mapped_device(md);
2002 bad_io_barrier:
2003 	free_minor(minor);
2004 bad_minor:
2005 	module_put(THIS_MODULE);
2006 bad_module_get:
2007 	kvfree(md);
2008 	return NULL;
2009 }
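
/*
 * Publication order in alloc_dev(), restated: the minor is first reserved
 * in the IDR with the MINOR_ALLOCED sentinel, and only once the
 * mapped_device is fully constructed is the sentinel swapped for the real
 * pointer under _minor_lock, so concurrent minor lookups never observe a
 * half-initialized md.
 */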
2010 
2011 static void unlock_fs(struct mapped_device *md);
2012 
2013 static void free_dev(struct mapped_device *md)
2014 {
2015 	int minor = MINOR(disk_devt(md->disk));
2016 
2017 	unlock_fs(md);
2018 
2019 	cleanup_mapped_device(md);
2020 
2021 	free_table_devices(&md->table_devices);
2022 	dm_stats_cleanup(&md->stats);
2023 	free_minor(minor);
2024 
2025 	module_put(THIS_MODULE);
2026 	kvfree(md);
2027 }
2028 
2029 static int __bind_mempools(struct mapped_device *md, struct dm_table *t)
2030 {
2031 	struct dm_md_mempools *p = dm_table_get_md_mempools(t);
2032 	int ret = 0;
2033 
2034 	if (dm_table_bio_based(t)) {
2035 		/*
2036 		 * The md may already have mempools that need changing.
2037 		 * If so, reload the bioset, since front_pad may have
2038 		 * changed when a different table was loaded.
2039 		 */
2040 		bioset_exit(&md->bs);
2041 		bioset_exit(&md->io_bs);
2042 
2043 	} else if (bioset_initialized(&md->bs)) {
2044 		/*
2045 		 * There's no need to reload with request-based dm
2046 		 * because the size of front_pad doesn't change.
2047 		 * Note for the future: if you do reload the bioset,
2048 		 * prepped requests in the queue may still refer to
2049 		 * bios from the old bioset, so you must walk through
2050 		 * the queue and unprep them.
2051 		 */
2052 		goto out;
2053 	}
2054 
2055 	BUG_ON(!p ||
2056 	       bioset_initialized(&md->bs) ||
2057 	       bioset_initialized(&md->io_bs));
2058 
2059 	ret = bioset_init_from_src(&md->bs, &p->bs);
2060 	if (ret)
2061 		goto out;
2062 	ret = bioset_init_from_src(&md->io_bs, &p->io_bs);
2063 	if (ret)
2064 		bioset_exit(&md->bs);
2065 out:
2066 	/* mempool bind completed, no longer need any mempools in the table */
2067 	dm_table_free_md_mempools(t);
2068 	return ret;
2069 }
2070 
2071 /*
2072  * Bind a table to the device.
2073  */
2074 static void event_callback(void *context)
2075 {
2076 	unsigned long flags;
2077 	LIST_HEAD(uevents);
2078 	struct mapped_device *md = (struct mapped_device *) context;
2079 
2080 	spin_lock_irqsave(&md->uevent_lock, flags);
2081 	list_splice_init(&md->uevent_list, &uevents);
2082 	spin_unlock_irqrestore(&md->uevent_lock, flags);
2083 
2084 	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
2085 
2086 	atomic_inc(&md->event_nr);
2087 	wake_up(&md->eventq);
2088 	dm_issue_global_event();
2089 }
2090 
2091 /*
2092  * Protected by md->suspend_lock obtained by dm_swap_table().
2093  */
2094 static void __set_size(struct mapped_device *md, sector_t size)
2095 {
2096 	lockdep_assert_held(&md->suspend_lock);
2097 
2098 	set_capacity(md->disk, size);
2099 
2100 	i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
2101 }
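
/*
 * Worked example (editor's note): size is in 512-byte sectors, so for
 * a 2 TiB device size == 4294967296 and the backing inode size becomes
 * 4294967296 << SECTOR_SHIFT == 2199023255552 bytes.
 */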
2102 
2103 /*
2104  * Returns old map, which caller must destroy.
2105  */
2106 static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
2107 			       struct queue_limits *limits)
2108 {
2109 	struct dm_table *old_map;
2110 	struct request_queue *q = md->queue;
2111 	bool request_based = dm_table_request_based(t);
2112 	sector_t size;
2113 	int ret;
2114 
2115 	lockdep_assert_held(&md->suspend_lock);
2116 
2117 	size = dm_table_get_size(t);
2118 
2119 	/*
2120 	 * Wipe any geometry if the size of the table changed.
2121 	 */
2122 	if (size != dm_get_size(md))
2123 		memset(&md->geometry, 0, sizeof(md->geometry));
2124 
2125 	__set_size(md, size);
2126 
2127 	dm_table_event_callback(t, event_callback, md);
2128 
2129 	/*
2130 	 * The queue hasn't been stopped yet if the old table type wasn't
2131 	 * request-based during suspension, so stop it now to prevent
2132 	 * I/O from being mapped before resume.
2133 	 * This must be done before setting the queue restrictions,
2134 	 * because request-based dm may run as soon as they are set.
2135 	 */
2136 	if (request_based)
2137 		dm_stop_queue(q);
2138 
2139 	if (request_based || md->type == DM_TYPE_NVME_BIO_BASED) {
2140 		/*
2141 		 * Leverage the fact that request-based DM targets and
2142 		 * NVMe bio-based targets are immutable singletons;
2143 		 * this is used to optimize both dm_mq_queue_rq and
2144 		 * __process_bio.
2145 		 */
2146 		md->immutable_target = dm_table_get_immutable_target(t);
2147 	}
2148 
2149 	ret = __bind_mempools(md, t);
2150 	if (ret) {
2151 		old_map = ERR_PTR(ret);
2152 		goto out;
2153 	}
2154 
2155 	old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2156 	rcu_assign_pointer(md->map, (void *)t);
2157 	md->immutable_target_type = dm_table_get_immutable_target_type(t);
2158 
2159 	dm_table_set_restrictions(t, q, limits);
2160 	if (old_map)
2161 		dm_sync_table(md);
2162 
2163 out:
2164 	return old_map;
2165 }
2166 
2167 /*
2168  * Returns unbound table for the caller to free.
2169  */
2170 static struct dm_table *__unbind(struct mapped_device *md)
2171 {
2172 	struct dm_table *map = rcu_dereference_protected(md->map, 1);
2173 
2174 	if (!map)
2175 		return NULL;
2176 
2177 	dm_table_event_callback(map, NULL, NULL);
2178 	RCU_INIT_POINTER(md->map, NULL);
2179 	dm_sync_table(md);
2180 
2181 	return map;
2182 }
2183 
2184 /*
2185  * Constructor for a new device.
2186  */
2187 int dm_create(int minor, struct mapped_device **result)
2188 {
2189 	int r;
2190 	struct mapped_device *md;
2191 
2192 	md = alloc_dev(minor);
2193 	if (!md)
2194 		return -ENXIO;
2195 
2196 	r = dm_sysfs_init(md);
2197 	if (r) {
2198 		free_dev(md);
2199 		return r;
2200 	}
2201 
2202 	*result = md;
2203 	return 0;
2204 }
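
/*
 * Example (editor's sketch): the minimal create/teardown pairing a
 * hypothetical in-kernel caller would use; DM_ANY_MINOR asks for the
 * next free minor:
 *
 *	struct mapped_device *md;
 *
 *	if (!dm_create(DM_ANY_MINOR, &md)) {
 *		... load a table, set the type, etc. ...
 *		dm_destroy(md);
 *	}
 */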
2205 
2206 /*
2207  * Functions to manage md->type.
2208  * All are required to hold md->type_lock.
2209  */
2210 void dm_lock_md_type(struct mapped_device *md)
2211 {
2212 	mutex_lock(&md->type_lock);
2213 }
2214 
2215 void dm_unlock_md_type(struct mapped_device *md)
2216 {
2217 	mutex_unlock(&md->type_lock);
2218 }
2219 
2220 void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type)
2221 {
2222 	BUG_ON(!mutex_is_locked(&md->type_lock));
2223 	md->type = type;
2224 }
2225 
2226 enum dm_queue_mode dm_get_md_type(struct mapped_device *md)
2227 {
2228 	return md->type;
2229 }
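
/*
 * Example (editor's sketch): the expected locking pattern when fixing
 * the type on a first table load, as the ioctl path does; t is a
 * loaded dm_table:
 *
 *	dm_lock_md_type(md);
 *	if (dm_get_md_type(md) == DM_TYPE_NONE)
 *		dm_set_md_type(md, dm_table_get_type(t));
 *	dm_unlock_md_type(md);
 */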
2230 
2231 struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
2232 {
2233 	return md->immutable_target_type;
2234 }
2235 
2236 /*
2237  * The queue_limits are only valid as long as you have a reference
2238  * count on 'md'.
2239  */
2240 struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
2241 {
2242 	BUG_ON(!atomic_read(&md->holders));
2243 	return &md->queue->limits;
2244 }
2245 EXPORT_SYMBOL_GPL(dm_get_queue_limits);
2246 
2247 /*
2248  * Setup the DM device's queue based on md's type
2249  */
2250 int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
2251 {
2252 	int r;
2253 	struct queue_limits limits;
2254 	enum dm_queue_mode type = dm_get_md_type(md);
2255 
2256 	switch (type) {
2257 	case DM_TYPE_REQUEST_BASED:
2258 		r = dm_mq_init_request_queue(md, t);
2259 		if (r) {
2260 			DMERR("Cannot initialize queue for request-based dm-mq mapped device");
2261 			return r;
2262 		}
2263 		break;
2264 	case DM_TYPE_BIO_BASED:
2265 	case DM_TYPE_DAX_BIO_BASED:
2266 	case DM_TYPE_NVME_BIO_BASED:
2267 		dm_init_normal_md_queue(md);
2268 		blk_queue_make_request(md->queue, dm_make_request);
2269 		break;
2270 	case DM_TYPE_NONE:
2271 		WARN_ON_ONCE(true);
2272 		break;
2273 	}
2274 
2275 	r = dm_calculate_queue_limits(t, &limits);
2276 	if (r) {
2277 		DMERR("Cannot calculate initial queue limits");
2278 		return r;
2279 	}
2280 	dm_table_set_restrictions(t, md->queue, &limits);
2281 	blk_register_queue(md->disk);
2282 
2283 	return 0;
2284 }
2285 
2286 struct mapped_device *dm_get_md(dev_t dev)
2287 {
2288 	struct mapped_device *md;
2289 	unsigned minor = MINOR(dev);
2290 
2291 	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
2292 		return NULL;
2293 
2294 	spin_lock(&_minor_lock);
2295 
2296 	md = idr_find(&_minor_idr, minor);
2297 	if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) ||
2298 	    test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
2299 		md = NULL;
2300 		goto out;
2301 	}
2302 	dm_get(md);
2303 out:
2304 	spin_unlock(&_minor_lock);
2305 
2306 	return md;
2307 }
2308 EXPORT_SYMBOL_GPL(dm_get_md);
2309 
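/*
 * Example (editor's sketch): a hypothetical in-kernel lookup by dev_t;
 * every successful dm_get_md() must be balanced by dm_put():
 *
 *	struct mapped_device *md = dm_get_md(dev);
 *
 *	if (md) {
 *		DMINFO("found %s", dm_device_name(md));
 *		dm_put(md);
 *	}
 */
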
2310 void *dm_get_mdptr(struct mapped_device *md)
2311 {
2312 	return md->interface_ptr;
2313 }
2314 
2315 void dm_set_mdptr(struct mapped_device *md, void *ptr)
2316 {
2317 	md->interface_ptr = ptr;
2318 }
2319 
2320 void dm_get(struct mapped_device *md)
2321 {
2322 	atomic_inc(&md->holders);
2323 	BUG_ON(test_bit(DMF_FREEING, &md->flags));
2324 }
2325 
2326 int dm_hold(struct mapped_device *md)
2327 {
2328 	spin_lock(&_minor_lock);
2329 	if (test_bit(DMF_FREEING, &md->flags)) {
2330 		spin_unlock(&_minor_lock);
2331 		return -EBUSY;
2332 	}
2333 	dm_get(md);
2334 	spin_unlock(&_minor_lock);
2335 	return 0;
2336 }
2337 EXPORT_SYMBOL_GPL(dm_hold);
2338 
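/*
 * Example (editor's sketch): dm_hold() is the safe variant to use when
 * md may be racing with deletion; it fails with -EBUSY instead of
 * tripping the BUG_ON() in dm_get():
 *
 *	if (!dm_hold(md)) {
 *		... use md ...
 *		dm_put(md);
 *	}
 */
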
2339 const char *dm_device_name(struct mapped_device *md)
2340 {
2341 	return md->name;
2342 }
2343 EXPORT_SYMBOL_GPL(dm_device_name);
2344 
2345 static void __dm_destroy(struct mapped_device *md, bool wait)
2346 {
2347 	struct dm_table *map;
2348 	int srcu_idx;
2349 
2350 	might_sleep();
2351 
2352 	spin_lock(&_minor_lock);
2353 	idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
2354 	set_bit(DMF_FREEING, &md->flags);
2355 	spin_unlock(&_minor_lock);
2356 
2357 	blk_set_queue_dying(md->queue);
2358 
2359 	/*
2360 	 * Take suspend_lock so that presuspend and postsuspend methods
2361 	 * do not race with internal suspend.
2362 	 */
2363 	mutex_lock(&md->suspend_lock);
2364 	map = dm_get_live_table(md, &srcu_idx);
2365 	if (!dm_suspended_md(md)) {
2366 		dm_table_presuspend_targets(map);
2367 		dm_table_postsuspend_targets(map);
2368 	}
2369 	/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
2370 	dm_put_live_table(md, srcu_idx);
2371 	mutex_unlock(&md->suspend_lock);
2372 
2373 	/*
2374 	 * Rare, but there may still be I/O requests in flight that have
2375 	 * yet to complete.  Wait for all references to disappear.
2376 	 * No one should increment the reference count of the mapped_device
2377 	 * after its state becomes DMF_FREEING.
2378 	 */
2379 	if (wait)
2380 		while (atomic_read(&md->holders))
2381 			msleep(1);
2382 	else if (atomic_read(&md->holders))
2383 		DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
2384 		       dm_device_name(md), atomic_read(&md->holders));
2385 
2386 	dm_sysfs_exit(md);
2387 	dm_table_destroy(__unbind(md));
2388 	free_dev(md);
2389 }
2390 
2391 void dm_destroy(struct mapped_device *md)
2392 {
2393 	__dm_destroy(md, true);
2394 }
2395 
2396 void dm_destroy_immediate(struct mapped_device *md)
2397 {
2398 	__dm_destroy(md, false);
2399 }
2400 
2401 void dm_put(struct mapped_device *md)
2402 {
2403 	atomic_dec(&md->holders);
2404 }
2405 EXPORT_SYMBOL_GPL(dm_put);
2406 
2407 static int dm_wait_for_completion(struct mapped_device *md, long task_state)
2408 {
2409 	int r = 0;
2410 	DEFINE_WAIT(wait);
2411 
2412 	while (1) {
2413 		prepare_to_wait(&md->wait, &wait, task_state);
2414 
2415 		if (!md_in_flight(md))
2416 			break;
2417 
2418 		if (signal_pending_state(task_state, current)) {
2419 			r = -EINTR;
2420 			break;
2421 		}
2422 
2423 		io_schedule();
2424 	}
2425 	finish_wait(&md->wait, &wait);
2426 
2427 	return r;
2428 }
2429 
2430 /*
2431  * Process the deferred bios
2432  */
2433 static void dm_wq_work(struct work_struct *work)
2434 {
2435 	struct mapped_device *md = container_of(work, struct mapped_device,
2436 						work);
2437 	struct bio *c;
2438 	int srcu_idx;
2439 	struct dm_table *map;
2440 
2441 	map = dm_get_live_table(md, &srcu_idx);
2442 
2443 	while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
2444 		spin_lock_irq(&md->deferred_lock);
2445 		c = bio_list_pop(&md->deferred);
2446 		spin_unlock_irq(&md->deferred_lock);
2447 
2448 		if (!c)
2449 			break;
2450 
2451 		if (dm_request_based(md))
2452 			(void) generic_make_request(c);
2453 		else
2454 			(void) dm_process_bio(md, map, c);
2455 	}
2456 
2457 	dm_put_live_table(md, srcu_idx);
2458 }
2459 
2460 static void dm_queue_flush(struct mapped_device *md)
2461 {
2462 	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2463 	smp_mb__after_atomic();
2464 	queue_work(md->wq, &md->work);
2465 }
2466 
2467 /*
2468  * Swap in a new table, returning the old one for the caller to destroy.
2469  */
2470 struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
2471 {
2472 	struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
2473 	struct queue_limits limits;
2474 	int r;
2475 
2476 	mutex_lock(&md->suspend_lock);
2477 
2478 	/* device must be suspended */
2479 	if (!dm_suspended_md(md))
2480 		goto out;
2481 
2482 	/*
2483 	 * If the new table has no data devices, retain the existing limits.
2484 	 * This helps multipath with queue_if_no_path: if all paths
2485 	 * disappear, new I/O is queued based on these limits, and is
2486 	 * serviced once some paths reappear.
2487 	 */
2488 	if (dm_table_has_no_data_devices(table)) {
2489 		live_map = dm_get_live_table_fast(md);
2490 		if (live_map)
2491 			limits = md->queue->limits;
2492 		dm_put_live_table_fast(md);
2493 	}
2494 
2495 	if (!live_map) {
2496 		r = dm_calculate_queue_limits(table, &limits);
2497 		if (r) {
2498 			map = ERR_PTR(r);
2499 			goto out;
2500 		}
2501 	}
2502 
2503 	map = __bind(md, table, &limits);
2504 	dm_issue_global_event();
2505 
2506 out:
2507 	mutex_unlock(&md->suspend_lock);
2508 	return map;
2509 }
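
/*
 * Example (editor's sketch): the canonical swap sequence as driven
 * from the ioctl path, with error handling elided:
 *
 *	dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
 *	old_map = dm_swap_table(md, new_table);
 *	if (!IS_ERR(old_map) && old_map)
 *		dm_table_destroy(old_map);
 *	dm_resume(md);
 */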
2510 
2511 /*
2512  * Functions to lock and unlock any filesystem running on the
2513  * device.
2514  */
2515 static int lock_fs(struct mapped_device *md)
2516 {
2517 	int r;
2518 
2519 	WARN_ON(md->frozen_sb);
2520 
2521 	md->frozen_sb = freeze_bdev(md->bdev);
2522 	if (IS_ERR(md->frozen_sb)) {
2523 		r = PTR_ERR(md->frozen_sb);
2524 		md->frozen_sb = NULL;
2525 		return r;
2526 	}
2527 
2528 	set_bit(DMF_FROZEN, &md->flags);
2529 
2530 	return 0;
2531 }
2532 
2533 static void unlock_fs(struct mapped_device *md)
2534 {
2535 	if (!test_bit(DMF_FROZEN, &md->flags))
2536 		return;
2537 
2538 	thaw_bdev(md->bdev, md->frozen_sb);
2539 	md->frozen_sb = NULL;
2540 	clear_bit(DMF_FROZEN, &md->flags);
2541 }
2542 
2543 /*
2544  * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG
2545  * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE
2546  * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY
2547  *
2548  * If __dm_suspend returns 0, the device is completely quiescent
2549  * now. There is no request-processing activity. All new requests
2550  * are being added to md->deferred list.
2551  */
2552 static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
2553 			unsigned suspend_flags, long task_state,
2554 			int dmf_suspended_flag)
2555 {
2556 	bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
2557 	bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
2558 	int r;
2559 
2560 	lockdep_assert_held(&md->suspend_lock);
2561 
2562 	/*
2563 	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
2564 	 * This flag is cleared before dm_suspend returns.
2565 	 */
2566 	if (noflush)
2567 		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2568 	else
2569 		pr_debug("%s: suspending with flush\n", dm_device_name(md));
2570 
2571 	/*
2572 	 * This gets reverted if there's an error later and the targets
2573 	 * provide the .presuspend_undo hook.
2574 	 */
2575 	dm_table_presuspend_targets(map);
2576 
2577 	/*
2578 	 * Flush I/O to the device.
2579 	 * Any I/O submitted after lock_fs() may not be flushed.
2580 	 * noflush takes precedence over do_lockfs.
2581 	 * (lock_fs() flushes I/Os and waits for them to complete.)
2582 	 */
2583 	if (!noflush && do_lockfs) {
2584 		r = lock_fs(md);
2585 		if (r) {
2586 			dm_table_presuspend_undo_targets(map);
2587 			return r;
2588 		}
2589 	}
2590 
2591 	/*
2592 	 * Here we must make sure that no processes are submitting requests
2593 	 * to target drivers i.e. no one may be executing
2594 	 * __split_and_process_bio. This is called from dm_request and
2595 	 * dm_wq_work.
2596 	 *
2597 	 * To get all processes out of __split_and_process_bio in dm_request,
2598 	 * we take the write lock. To prevent any process from reentering
2599 	 * __split_and_process_bio from dm_request and quiesce the thread
2600 	 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
2601 	 * flush_workqueue(md->wq).
2602 	 */
2603 	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2604 	if (map)
2605 		synchronize_srcu(&md->io_barrier);
2606 
2607 	/*
2608 	 * Stop md->queue before flushing md->wq in case request-based
2609 	 * dm defers requests to md->wq from md->queue.
2610 	 */
2611 	if (dm_request_based(md))
2612 		dm_stop_queue(md->queue);
2613 
2614 	flush_workqueue(md->wq);
2615 
2616 	/*
2617 	 * At this point no more requests are entering target request routines.
2618 	 * We call dm_wait_for_completion to wait for all existing requests
2619 	 * to finish.
2620 	 */
2621 	r = dm_wait_for_completion(md, task_state);
2622 	if (!r)
2623 		set_bit(dmf_suspended_flag, &md->flags);
2624 
2625 	if (noflush)
2626 		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2627 	if (map)
2628 		synchronize_srcu(&md->io_barrier);
2629 
2630 	/* were we interrupted ? */
2631 	if (r < 0) {
2632 		dm_queue_flush(md);
2633 
2634 		if (dm_request_based(md))
2635 			dm_start_queue(md->queue);
2636 
2637 		unlock_fs(md);
2638 		dm_table_presuspend_undo_targets(map);
2639 		/* pushback list is already flushed, so skip flush */
2640 	}
2641 
2642 	return r;
2643 }
2644 
2645 /*
2646  * We need to be able to change a mapping table under a mounted
2647  * filesystem.  For example we might want to move some data in
2648  * the background.  Before the table can be swapped with
2649  * dm_bind_table, dm_suspend must be called to flush any in
2650  * flight bios and ensure that any further io gets deferred.
2651  */
2652 /*
2653  * Suspend mechanism in request-based dm.
2654  *
2655  * 1. Flush all I/Os by lock_fs() if needed.
2656  * 2. Stop dispatching any I/O by stopping the request_queue.
2657  * 3. Wait for all in-flight I/Os to be completed or requeued.
2658  *
2659  * To abort suspend, start the request_queue.
2660  */
2661 int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
2662 {
2663 	struct dm_table *map = NULL;
2664 	int r = 0;
2665 
2666 retry:
2667 	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
2668 
2669 	if (dm_suspended_md(md)) {
2670 		r = -EINVAL;
2671 		goto out_unlock;
2672 	}
2673 
2674 	if (dm_suspended_internally_md(md)) {
2675 		/* already internally suspended, wait for internal resume */
2676 		mutex_unlock(&md->suspend_lock);
2677 		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
2678 		if (r)
2679 			return r;
2680 		goto retry;
2681 	}
2682 
2683 	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2684 
2685 	r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);
2686 	if (r)
2687 		goto out_unlock;
2688 
2689 	dm_table_postsuspend_targets(map);
2690 
2691 out_unlock:
2692 	mutex_unlock(&md->suspend_lock);
2693 	return r;
2694 }
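
/*
 * Example (editor's sketch): a flush suspend that also freezes any
 * mounted filesystem first, paired with its resume:
 *
 *	r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
 *	if (!r) {
 *		... the device is quiescent here ...
 *		dm_resume(md);
 *	}
 */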
2695 
2696 static int __dm_resume(struct mapped_device *md, struct dm_table *map)
2697 {
2698 	if (map) {
2699 		int r = dm_table_resume_targets(map);
2700 		if (r)
2701 			return r;
2702 	}
2703 
2704 	dm_queue_flush(md);
2705 
2706 	/*
2707 	 * Flushing deferred I/Os must be done after targets are resumed
2708 	 * so that mapping of targets can work correctly.
2709 	 * Request-based dm is queueing the deferred I/Os in its request_queue.
2710 	 */
2711 	if (dm_request_based(md))
2712 		dm_start_queue(md->queue);
2713 
2714 	unlock_fs(md);
2715 
2716 	return 0;
2717 }
2718 
2719 int dm_resume(struct mapped_device *md)
2720 {
2721 	int r;
2722 	struct dm_table *map = NULL;
2723 
2724 retry:
2725 	r = -EINVAL;
2726 	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
2727 
2728 	if (!dm_suspended_md(md))
2729 		goto out;
2730 
2731 	if (dm_suspended_internally_md(md)) {
2732 		/* already internally suspended, wait for internal resume */
2733 		mutex_unlock(&md->suspend_lock);
2734 		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
2735 		if (r)
2736 			return r;
2737 		goto retry;
2738 	}
2739 
2740 	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2741 	if (!map || !dm_table_get_size(map))
2742 		goto out;
2743 
2744 	r = __dm_resume(md, map);
2745 	if (r)
2746 		goto out;
2747 
2748 	clear_bit(DMF_SUSPENDED, &md->flags);
2749 out:
2750 	mutex_unlock(&md->suspend_lock);
2751 
2752 	return r;
2753 }
2754 
2755 /*
2756  * Internal suspend/resume works like userspace-driven suspend. It waits
2757  * until all bios finish and prevents issuing new bios to the target drivers.
2758  * It may be used only from the kernel.
2759  */
2760 
2761 static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags)
2762 {
2763 	struct dm_table *map = NULL;
2764 
2765 	lockdep_assert_held(&md->suspend_lock);
2766 
2767 	if (md->internal_suspend_count++)
2768 		return; /* nested internal suspend */
2769 
2770 	if (dm_suspended_md(md)) {
2771 		set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2772 		return; /* nest suspend */
2773 		return; /* nested suspend */
2774 
2775 	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2776 
2777 	/*
2778 	 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is
2779 	 * supported.  Properly supporting a TASK_INTERRUPTIBLE internal suspend
2780 	 * would require changing .presuspend to return an error -- avoid this
2781 	 * until there is a need for more elaborate variants of internal suspend.
2782 	 */
2783 	(void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
2784 			    DMF_SUSPENDED_INTERNALLY);
2785 
2786 	dm_table_postsuspend_targets(map);
2787 }
2788 
2789 static void __dm_internal_resume(struct mapped_device *md)
2790 {
2791 	BUG_ON(!md->internal_suspend_count);
2792 
2793 	if (--md->internal_suspend_count)
2794 		return; /* resume from nested internal suspend */
2795 
2796 	if (dm_suspended_md(md))
2797 		goto done; /* resume from nested suspend */
2798 
2799 	/*
2800 	 * NOTE: existing callers don't need to call dm_table_resume_targets
2801 	 * (which may fail -- so best to avoid it for now by passing NULL map)
2802 	 */
2803 	(void) __dm_resume(md, NULL);
2804 
2805 done:
2806 	clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2807 	smp_mb__after_atomic();
2808 	wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY);
2809 }
2810 
2811 void dm_internal_suspend_noflush(struct mapped_device *md)
2812 {
2813 	mutex_lock(&md->suspend_lock);
2814 	__dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
2815 	mutex_unlock(&md->suspend_lock);
2816 }
2817 EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush);
2818 
2819 void dm_internal_resume(struct mapped_device *md)
2820 {
2821 	mutex_lock(&md->suspend_lock);
2822 	__dm_internal_resume(md);
2823 	mutex_unlock(&md->suspend_lock);
2824 }
2825 EXPORT_SYMBOL_GPL(dm_internal_resume);
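
/*
 * Example (editor's sketch): internal suspend nests via
 * internal_suspend_count, so a hypothetical kernel user must balance
 * every suspend with a resume:
 *
 *	dm_internal_suspend_noflush(md);	(count 0 -> 1, suspends)
 *	dm_internal_suspend_noflush(md);	(count 1 -> 2, no-op)
 *	dm_internal_resume(md);			(count 2 -> 1, no-op)
 *	dm_internal_resume(md);			(count 1 -> 0, resumes)
 */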
2826 
2827 /*
2828  * Fast variants of internal suspend/resume hold md->suspend_lock,
2829  * which prevents interaction with userspace-driven suspend.
2830  */
2831 
2832 void dm_internal_suspend_fast(struct mapped_device *md)
2833 {
2834 	mutex_lock(&md->suspend_lock);
2835 	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
2836 		return;
2837 
2838 	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2839 	synchronize_srcu(&md->io_barrier);
2840 	flush_workqueue(md->wq);
2841 	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
2842 }
2843 EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);
2844 
2845 void dm_internal_resume_fast(struct mapped_device *md)
2846 {
2847 	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
2848 		goto done;
2849 
2850 	dm_queue_flush(md);
2851 
2852 done:
2853 	mutex_unlock(&md->suspend_lock);
2854 }
2855 EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
2856 
2857 /*-----------------------------------------------------------------
2858  * Event notification.
2859  *---------------------------------------------------------------*/
2860 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
2861 		       unsigned cookie)
2862 {
2863 	char udev_cookie[DM_COOKIE_LENGTH];
2864 	char *envp[] = { udev_cookie, NULL };
2865 
2866 	if (!cookie)
2867 		return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
2868 
2869 	snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
2870 		 DM_COOKIE_ENV_VAR_NAME, cookie);
2871 	return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
2872 				  action, envp);
2874 }
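
/*
 * Worked example (editor's note): with cookie 0xdeadbeef the uevent
 * environment carries "DM_COOKIE=3735928559".  DM_COOKIE_LENGTH (24)
 * leaves room for the 10-character prefix, up to 10 decimal digits of
 * a u32 and the terminating NUL.
 */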
2875 
2876 uint32_t dm_next_uevent_seq(struct mapped_device *md)
2877 {
2878 	return atomic_add_return(1, &md->uevent_seq);
2879 }
2880 
2881 uint32_t dm_get_event_nr(struct mapped_device *md)
2882 {
2883 	return atomic_read(&md->event_nr);
2884 }
2885 
2886 int dm_wait_event(struct mapped_device *md, int event_nr)
2887 {
2888 	return wait_event_interruptible(md->eventq,
2889 			(event_nr != atomic_read(&md->event_nr)));
2890 }
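
/*
 * Example (editor's sketch): the DM_DEV_WAIT-style pattern is to
 * sample the counter, report state, then block until it moves:
 *
 *	uint32_t nr = dm_get_event_nr(md);
 *
 *	... report current state to the caller ...
 *	if (dm_wait_event(md, nr))
 *		return -ERESTARTSYS;
 */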
2891 
2892 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
2893 {
2894 	unsigned long flags;
2895 
2896 	spin_lock_irqsave(&md->uevent_lock, flags);
2897 	list_add(elist, &md->uevent_list);
2898 	spin_unlock_irqrestore(&md->uevent_lock, flags);
2899 }
2900 
2901 /*
2902  * The gendisk is only valid as long as you have a reference
2903  * count on 'md'.
2904  */
2905 struct gendisk *dm_disk(struct mapped_device *md)
2906 {
2907 	return md->disk;
2908 }
2909 EXPORT_SYMBOL_GPL(dm_disk);
2910 
2911 struct kobject *dm_kobject(struct mapped_device *md)
2912 {
2913 	return &md->kobj_holder.kobj;
2914 }
2915 
2916 struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
2917 {
2918 	struct mapped_device *md;
2919 
2920 	md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
2921 
2922 	spin_lock(&_minor_lock);
2923 	if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
2924 		md = NULL;
2925 		goto out;
2926 	}
2927 	dm_get(md);
2928 out:
2929 	spin_unlock(&_minor_lock);
2930 
2931 	return md;
2932 }
2933 
2934 int dm_suspended_md(struct mapped_device *md)
2935 {
2936 	return test_bit(DMF_SUSPENDED, &md->flags);
2937 }
2938 
2939 int dm_suspended_internally_md(struct mapped_device *md)
2940 {
2941 	return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2942 }
2943 
2944 int dm_test_deferred_remove_flag(struct mapped_device *md)
2945 {
2946 	return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
2947 }
2948 
2949 int dm_suspended(struct dm_target *ti)
2950 {
2951 	return dm_suspended_md(dm_table_get_md(ti->table));
2952 }
2953 EXPORT_SYMBOL_GPL(dm_suspended);
2954 
2955 int dm_noflush_suspending(struct dm_target *ti)
2956 {
2957 	return __noflush_suspending(dm_table_get_md(ti->table));
2958 }
2959 EXPORT_SYMBOL_GPL(dm_noflush_suspending);
2960 
2961 struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
2962 					    unsigned integrity, unsigned per_io_data_size,
2963 					    unsigned min_pool_size)
2964 {
2965 	struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
2966 	unsigned int pool_size = 0;
2967 	unsigned int front_pad, io_front_pad;
2968 	int ret;
2969 
2970 	if (!pools)
2971 		return NULL;
2972 
2973 	switch (type) {
2974 	case DM_TYPE_BIO_BASED:
2975 	case DM_TYPE_DAX_BIO_BASED:
2976 	case DM_TYPE_NVME_BIO_BASED:
2977 		pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
2978 		front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
2979 		io_front_pad = roundup(front_pad,  __alignof__(struct dm_io)) + offsetof(struct dm_io, tio);
2980 		ret = bioset_init(&pools->io_bs, pool_size, io_front_pad, 0);
2981 		if (ret)
2982 			goto out;
2983 		if (integrity && bioset_integrity_create(&pools->io_bs, pool_size))
2984 			goto out;
2985 		break;
2986 	case DM_TYPE_REQUEST_BASED:
2987 		pool_size = max(dm_get_reserved_rq_based_ios(), min_pool_size);
2988 		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
2989 		/* per_io_data_size is used for blk-mq pdu at queue allocation */
2990 		break;
2991 	default:
2992 		BUG();
2993 	}
2994 
2995 	ret = bioset_init(&pools->bs, pool_size, front_pad, 0);
2996 	if (ret)
2997 		goto out;
2998 
2999 	if (integrity && bioset_integrity_create(&pools->bs, pool_size))
3000 		goto out;
3001 
3002 	return pools;
3003 
3004 out:
3005 	dm_free_md_mempools(pools);
3006 
3007 	return NULL;
3008 }
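
/*
 * Worked example (editor's note, sizes illustrative): for a bio-based
 * table with per_io_data_size == 192 and 8-byte alignment,
 * front_pad = roundup(192, 8) + offsetof(struct dm_target_io, clone),
 * so every bio allocated from these biosets is preceded by the
 * target's 192 bytes of per-io data and then the dm_target_io header;
 * io_front_pad additionally prepends the dm_io header.  This is the
 * layout dm_per_bio_data() walks backwards through.
 */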
3009 
3010 void dm_free_md_mempools(struct dm_md_mempools *pools)
3011 {
3012 	if (!pools)
3013 		return;
3014 
3015 	bioset_exit(&pools->bs);
3016 	bioset_exit(&pools->io_bs);
3017 
3018 	kfree(pools);
3019 }
3020 
3021 struct dm_pr {
3022 	u64	old_key;
3023 	u64	new_key;
3024 	u32	flags;
3025 	bool	fail_early;
3026 };
3027 
3028 static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn,
3029 		      void *data)
3030 {
3031 	struct mapped_device *md = bdev->bd_disk->private_data;
3032 	struct dm_table *table;
3033 	struct dm_target *ti;
3034 	int ret = -ENOTTY, srcu_idx;
3035 
3036 	table = dm_get_live_table(md, &srcu_idx);
3037 	if (!table || !dm_table_get_size(table))
3038 		goto out;
3039 
3040 	/* We only support devices that have a single target */
3041 	if (dm_table_get_num_targets(table) != 1)
3042 		goto out;
3043 	ti = dm_table_get_target(table, 0);
3044 
3045 	ret = -EINVAL;
3046 	if (!ti->type->iterate_devices)
3047 		goto out;
3048 
3049 	ret = ti->type->iterate_devices(ti, fn, data);
3050 out:
3051 	dm_put_live_table(md, srcu_idx);
3052 	return ret;
3053 }
3054 
3055 /*
3056  * For register / unregister we need to manually call out to every path.
3057  */
3058 static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev,
3059 			    sector_t start, sector_t len, void *data)
3060 {
3061 	struct dm_pr *pr = data;
3062 	const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
3063 
3064 	if (!ops || !ops->pr_register)
3065 		return -EOPNOTSUPP;
3066 	return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags);
3067 }
3068 
3069 static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
3070 			  u32 flags)
3071 {
3072 	struct dm_pr pr = {
3073 		.old_key	= old_key,
3074 		.new_key	= new_key,
3075 		.flags		= flags,
3076 		.fail_early	= true,
3077 	};
3078 	int ret;
3079 
3080 	ret = dm_call_pr(bdev, __dm_pr_register, &pr);
3081 	if (ret && new_key) {
3082 		/* unregister all paths if we failed to register any path */
3083 		pr.old_key = new_key;
3084 		pr.new_key = 0;
3085 		pr.flags = 0;
3086 		pr.fail_early = false;
3087 		dm_call_pr(bdev, __dm_pr_register, &pr);
3088 	}
3089 
3090 	return ret;
3091 }
3092 
3093 static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
3094 			 u32 flags)
3095 {
3096 	struct mapped_device *md = bdev->bd_disk->private_data;
3097 	const struct pr_ops *ops;
3098 	int r, srcu_idx;
3099 
3100 	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
3101 	if (r < 0)
3102 		goto out;
3103 
3104 	ops = bdev->bd_disk->fops->pr_ops;
3105 	if (ops && ops->pr_reserve)
3106 		r = ops->pr_reserve(bdev, key, type, flags);
3107 	else
3108 		r = -EOPNOTSUPP;
3109 out:
3110 	dm_unprepare_ioctl(md, srcu_idx);
3111 	return r;
3112 }
3113 
3114 static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
3115 {
3116 	struct mapped_device *md = bdev->bd_disk->private_data;
3117 	const struct pr_ops *ops;
3118 	int r, srcu_idx;
3119 
3120 	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
3121 	if (r < 0)
3122 		goto out;
3123 
3124 	ops = bdev->bd_disk->fops->pr_ops;
3125 	if (ops && ops->pr_release)
3126 		r = ops->pr_release(bdev, key, type);
3127 	else
3128 		r = -EOPNOTSUPP;
3129 out:
3130 	dm_unprepare_ioctl(md, srcu_idx);
3131 	return r;
3132 }
3133 
3134 static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
3135 			 enum pr_type type, bool abort)
3136 {
3137 	struct mapped_device *md = bdev->bd_disk->private_data;
3138 	const struct pr_ops *ops;
3139 	int r, srcu_idx;
3140 
3141 	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
3142 	if (r < 0)
3143 		goto out;
3144 
3145 	ops = bdev->bd_disk->fops->pr_ops;
3146 	if (ops && ops->pr_preempt)
3147 		r = ops->pr_preempt(bdev, old_key, new_key, type, abort);
3148 	else
3149 		r = -EOPNOTSUPP;
3150 out:
3151 	dm_unprepare_ioctl(md, srcu_idx);
3152 	return r;
3153 }
3154 
3155 static int dm_pr_clear(struct block_device *bdev, u64 key)
3156 {
3157 	struct mapped_device *md = bdev->bd_disk->private_data;
3158 	const struct pr_ops *ops;
3159 	int r, srcu_idx;
3160 
3161 	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
3162 	if (r < 0)
3163 		goto out;
3164 
3165 	ops = bdev->bd_disk->fops->pr_ops;
3166 	if (ops && ops->pr_clear)
3167 		r = ops->pr_clear(bdev, key);
3168 	else
3169 		r = -EOPNOTSUPP;
3170 out:
3171 	dm_unprepare_ioctl(md, srcu_idx);
3172 	return r;
3173 }
3174 
3175 static const struct pr_ops dm_pr_ops = {
3176 	.pr_register	= dm_pr_register,
3177 	.pr_reserve	= dm_pr_reserve,
3178 	.pr_release	= dm_pr_release,
3179 	.pr_preempt	= dm_pr_preempt,
3180 	.pr_clear	= dm_pr_clear,
3181 };
3182 
3183 static const struct block_device_operations dm_blk_dops = {
3184 	.open = dm_blk_open,
3185 	.release = dm_blk_close,
3186 	.ioctl = dm_blk_ioctl,
3187 	.getgeo = dm_blk_getgeo,
3188 	.report_zones = dm_blk_report_zones,
3189 	.pr_ops = &dm_pr_ops,
3190 	.owner = THIS_MODULE
3191 };
3192 
3193 static const struct dax_operations dm_dax_ops = {
3194 	.direct_access = dm_dax_direct_access,
3195 	.copy_from_iter = dm_dax_copy_from_iter,
3196 	.copy_to_iter = dm_dax_copy_to_iter,
3197 };
3198 
3199 /*
3200  * module hooks
3201  */
3202 module_init(dm_init);
3203 module_exit(dm_exit);
3204 
3205 module_param(major, uint, 0);
3206 MODULE_PARM_DESC(major, "The major number of the device mapper");
3207 
3208 module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
3209 MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
3210 
3211 module_param(dm_numa_node, int, S_IRUGO | S_IWUSR);
3212 MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");
3213 
3214 MODULE_DESCRIPTION(DM_NAME " driver");
3215 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
3216 MODULE_LICENSE("GPL");
3217