xref: /openbmc/linux/drivers/md/dm-table.c (revision bfad37c5)
1 /*
2  * Copyright (C) 2001 Sistina Software (UK) Limited.
3  * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
4  *
5  * This file is released under the GPL.
6  */
7 
8 #include "dm-core.h"
9 
10 #include <linux/module.h>
11 #include <linux/vmalloc.h>
12 #include <linux/blkdev.h>
13 #include <linux/namei.h>
14 #include <linux/ctype.h>
15 #include <linux/string.h>
16 #include <linux/slab.h>
17 #include <linux/interrupt.h>
18 #include <linux/mutex.h>
19 #include <linux/delay.h>
20 #include <linux/atomic.h>
21 #include <linux/blk-mq.h>
22 #include <linux/mount.h>
23 #include <linux/dax.h>
24 
25 #define DM_MSG_PREFIX "table"
26 
27 #define NODE_SIZE L1_CACHE_BYTES
28 #define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
29 #define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)
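
/*
 * For example, with a 64-byte L1 cache line and an 8-byte sector_t (typical
 * of 64-bit builds), NODE_SIZE is 64, KEYS_PER_NODE is 8 and
 * CHILDREN_PER_NODE is 9, so each btree node fills exactly one cache line.
 */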
30 
31 /*
32  * Similar to ceiling(log_size(n))
33  */
34 static unsigned int int_log(unsigned int n, unsigned int base)
35 {
36 	int result = 0;
37 
38 	while (n > 1) {
39 		n = dm_div_up(n, base);
40 		result++;
41 	}
42 
43 	return result;
44 }
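
/*
 * For example, int_log(9, 9) == 1 and int_log(10, 9) == 2.
 * dm_table_build_index() uses this to size the btree depth above the
 * leaf level.
 */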
45 
46 /*
47  * Calculate the index of the child node of the n'th node's k'th key.
48  */
49 static inline unsigned int get_child(unsigned int n, unsigned int k)
50 {
51 	return (n * CHILDREN_PER_NODE) + k;
52 }
53 
54 /*
55  * Return the n'th node of level l from table t.
56  */
57 static inline sector_t *get_node(struct dm_table *t,
58 				 unsigned int l, unsigned int n)
59 {
60 	return t->index[l] + (n * KEYS_PER_NODE);
61 }
62 
63 /*
64  * Return the highest key that you could lookup from the n'th
65  * node on level l of the btree.
66  */
67 static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
68 {
69 	for (; l < t->depth - 1; l++)
70 		n = get_child(n, CHILDREN_PER_NODE - 1);
71 
72 	if (n >= t->counts[l])
73 		return (sector_t) - 1;
74 
75 	return get_node(t, l, n)[KEYS_PER_NODE - 1];
76 }
77 
78 /*
79  * Fills in a level of the btree based on the highs of the level
80  * below it.
81  */
82 static int setup_btree_index(unsigned int l, struct dm_table *t)
83 {
84 	unsigned int n, k;
85 	sector_t *node;
86 
87 	for (n = 0U; n < t->counts[l]; n++) {
88 		node = get_node(t, l, n);
89 
90 		for (k = 0U; k < KEYS_PER_NODE; k++)
91 			node[k] = high(t, l + 1, get_child(n, k));
92 	}
93 
94 	return 0;
95 }
96 
97 /*
98  * highs and targets are managed as dynamic arrays during a
99  * table load.
100  */
101 static int alloc_targets(struct dm_table *t, unsigned int num)
102 {
103 	sector_t *n_highs;
104 	struct dm_target *n_targets;
105 
106 	/*
107 	 * Allocate both the target array and offset array at once.
108 	 */
109 	n_highs = kvcalloc(num, sizeof(struct dm_target) + sizeof(sector_t),
110 			   GFP_KERNEL);
111 	if (!n_highs)
112 		return -ENOMEM;
113 
114 	n_targets = (struct dm_target *) (n_highs + num);
115 
116 	memset(n_highs, -1, sizeof(*n_highs) * num);
117 	kvfree(t->highs);
118 
119 	t->num_allocated = num;
120 	t->highs = n_highs;
121 	t->targets = n_targets;
122 
123 	return 0;
124 }
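
/*
 * The single allocation above is laid out as num sector_t "highs" followed
 * immediately by num struct dm_target entries.  The highs are initialised
 * to (sector_t)-1, which compares greater than any valid sector, so unused
 * keys never match during btree lookup.
 */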
125 
126 int dm_table_create(struct dm_table **result, fmode_t mode,
127 		    unsigned num_targets, struct mapped_device *md)
128 {
129 	struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);
130 
131 	if (!t)
132 		return -ENOMEM;
133 
134 	INIT_LIST_HEAD(&t->devices);
135 
136 	if (!num_targets)
137 		num_targets = KEYS_PER_NODE;
138 
139 	num_targets = dm_round_up(num_targets, KEYS_PER_NODE);
140 
141 	if (!num_targets) {
142 		kfree(t);
143 		return -ENOMEM;
144 	}
145 
146 	if (alloc_targets(t, num_targets)) {
147 		kfree(t);
148 		return -ENOMEM;
149 	}
150 
151 	t->type = DM_TYPE_NONE;
152 	t->mode = mode;
153 	t->md = md;
154 	*result = t;
155 	return 0;
156 }
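
/*
 * Typical lifecycle (roughly, as driven by the table-load ioctl path): the
 * table is created here, populated with one dm_table_add_target() call per
 * table line, finalised with dm_table_complete() and eventually freed with
 * dm_table_destroy().
 */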
157 
158 static void free_devices(struct list_head *devices, struct mapped_device *md)
159 {
160 	struct list_head *tmp, *next;
161 
162 	list_for_each_safe(tmp, next, devices) {
163 		struct dm_dev_internal *dd =
164 		    list_entry(tmp, struct dm_dev_internal, list);
165 		DMWARN("%s: dm_table_destroy: dm_put_device call missing for %s",
166 		       dm_device_name(md), dd->dm_dev->name);
167 		dm_put_table_device(md, dd->dm_dev);
168 		kfree(dd);
169 	}
170 }
171 
172 static void dm_table_destroy_keyslot_manager(struct dm_table *t);
173 
174 void dm_table_destroy(struct dm_table *t)
175 {
176 	unsigned int i;
177 
178 	if (!t)
179 		return;
180 
181 	/* free the indexes */
182 	if (t->depth >= 2)
183 		kvfree(t->index[t->depth - 2]);
184 
185 	/* free the targets */
186 	for (i = 0; i < t->num_targets; i++) {
187 		struct dm_target *tgt = t->targets + i;
188 
189 		if (tgt->type->dtr)
190 			tgt->type->dtr(tgt);
191 
192 		dm_put_target_type(tgt->type);
193 	}
194 
195 	kvfree(t->highs);
196 
197 	/* free the device list */
198 	free_devices(&t->devices, t->md);
199 
200 	dm_free_md_mempools(t->mempools);
201 
202 	dm_table_destroy_keyslot_manager(t);
203 
204 	kfree(t);
205 }
206 
207 /*
208  * See if we've already got a device in the list.
209  */
210 static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
211 {
212 	struct dm_dev_internal *dd;
213 
214 	list_for_each_entry (dd, l, list)
215 		if (dd->dm_dev->bdev->bd_dev == dev)
216 			return dd;
217 
218 	return NULL;
219 }
220 
221 /*
222  * If possible, this checks whether an area of a destination device is invalid.
223  */
224 static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
225 				  sector_t start, sector_t len, void *data)
226 {
227 	struct queue_limits *limits = data;
228 	struct block_device *bdev = dev->bdev;
229 	sector_t dev_size =
230 		i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
231 	unsigned short logical_block_size_sectors =
232 		limits->logical_block_size >> SECTOR_SHIFT;
233 	char b[BDEVNAME_SIZE];
234 
235 	if (!dev_size)
236 		return 0;
237 
238 	if ((start >= dev_size) || (start + len > dev_size)) {
239 		DMWARN("%s: %s too small for target: "
240 		       "start=%llu, len=%llu, dev_size=%llu",
241 		       dm_device_name(ti->table->md), bdevname(bdev, b),
242 		       (unsigned long long)start,
243 		       (unsigned long long)len,
244 		       (unsigned long long)dev_size);
245 		return 1;
246 	}
247 
248 	/*
249 	 * If the target is mapped to zoned block device(s), check
250 	 * that the zones are not partially mapped.
251 	 */
252 	if (bdev_is_zoned(bdev)) {
253 		unsigned int zone_sectors = bdev_zone_sectors(bdev);
254 
255 		if (start & (zone_sectors - 1)) {
256 			DMWARN("%s: start=%llu not aligned to h/w zone size %u of %s",
257 			       dm_device_name(ti->table->md),
258 			       (unsigned long long)start,
259 			       zone_sectors, bdevname(bdev, b));
260 			return 1;
261 		}
262 
263 		/*
264 		 * Note: The last zone of a zoned block device may be smaller
265 		 * than other zones. So for a target mapping the end of a
266 		 * zoned block device with such a zone, len would not be zone
267  * aligned. We do not allow such a smaller last zone to be part
268 		 * of the mapping here to ensure that mappings with multiple
269 		 * devices do not end up with a smaller zone in the middle of
270 		 * the sector range.
271 		 */
272 		if (len & (zone_sectors - 1)) {
273 			DMWARN("%s: len=%llu not aligned to h/w zone size %u of %s",
274 			       dm_device_name(ti->table->md),
275 			       (unsigned long long)len,
276 			       zone_sectors, bdevname(bdev, b));
277 			return 1;
278 		}
279 	}
280 
281 	if (logical_block_size_sectors <= 1)
282 		return 0;
283 
284 	if (start & (logical_block_size_sectors - 1)) {
285 		DMWARN("%s: start=%llu not aligned to h/w "
286 		       "logical block size %u of %s",
287 		       dm_device_name(ti->table->md),
288 		       (unsigned long long)start,
289 		       limits->logical_block_size, bdevname(bdev, b));
290 		return 1;
291 	}
292 
293 	if (len & (logical_block_size_sectors - 1)) {
294 		DMWARN("%s: len=%llu not aligned to h/w "
295 		       "logical block size %u of %s",
296 		       dm_device_name(ti->table->md),
297 		       (unsigned long long)len,
298 		       limits->logical_block_size, bdevname(bdev, b));
299 		return 1;
300 	}
301 
302 	return 0;
303 }
304 
305 /*
306  * This upgrades the mode on an already open dm_dev, being
307  * careful to leave things as they were if we fail to reopen the
308  * device and not to touch the existing bdev field in case
309  * it is accessed concurrently.
310  */
311 static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
312 			struct mapped_device *md)
313 {
314 	int r;
315 	struct dm_dev *old_dev, *new_dev;
316 
317 	old_dev = dd->dm_dev;
318 
319 	r = dm_get_table_device(md, dd->dm_dev->bdev->bd_dev,
320 				dd->dm_dev->mode | new_mode, &new_dev);
321 	if (r)
322 		return r;
323 
324 	dd->dm_dev = new_dev;
325 	dm_put_table_device(md, old_dev);
326 
327 	return 0;
328 }
329 
330 /*
331  * Convert the path to a device
332  */
333 dev_t dm_get_dev_t(const char *path)
334 {
335 	dev_t dev;
336 
337 	if (lookup_bdev(path, &dev))
338 		dev = name_to_dev_t(path);
339 	return dev;
340 }
341 EXPORT_SYMBOL_GPL(dm_get_dev_t);
342 
343 /*
344  * Add a device to the list, or just increment the usage count if
345  * it's already present.
346  */
347 int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
348 		  struct dm_dev **result)
349 {
350 	int r;
351 	dev_t dev;
352 	unsigned int major, minor;
353 	char dummy;
354 	struct dm_dev_internal *dd;
355 	struct dm_table *t = ti->table;
356 
357 	BUG_ON(!t);
358 
359 	if (sscanf(path, "%u:%u%c", &major, &minor, &dummy) == 2) {
360 		/* Extract the major/minor numbers */
361 		dev = MKDEV(major, minor);
362 		if (MAJOR(dev) != major || MINOR(dev) != minor)
363 			return -EOVERFLOW;
364 	} else {
365 		dev = dm_get_dev_t(path);
366 		if (!dev)
367 			return -ENODEV;
368 	}
369 
370 	dd = find_device(&t->devices, dev);
371 	if (!dd) {
372 		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
373 		if (!dd)
374 			return -ENOMEM;
375 
376 		if ((r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev))) {
377 			kfree(dd);
378 			return r;
379 		}
380 
381 		refcount_set(&dd->count, 1);
382 		list_add(&dd->list, &t->devices);
383 		goto out;
384 
385 	} else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) {
386 		r = upgrade_mode(dd, mode, t->md);
387 		if (r)
388 			return r;
389 	}
390 	refcount_inc(&dd->count);
391 out:
392 	*result = dd->dm_dev;
393 	return 0;
394 }
395 EXPORT_SYMBOL(dm_get_device);
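
/*
 * Illustrative use from a target's ctr (a sketch, not a call site in this
 * file; the argv index is an example):
 *
 *	struct dm_dev *dev;
 *
 *	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dev)) {
 *		ti->error = "Device lookup failed";
 *		return -EINVAL;
 *	}
 *
 * The matching dm_put_device(ti, dev) belongs in the target's dtr.
 */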
396 
397 static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
398 				sector_t start, sector_t len, void *data)
399 {
400 	struct queue_limits *limits = data;
401 	struct block_device *bdev = dev->bdev;
402 	struct request_queue *q = bdev_get_queue(bdev);
403 	char b[BDEVNAME_SIZE];
404 
405 	if (unlikely(!q)) {
406 		DMWARN("%s: Cannot set limits for nonexistent device %s",
407 		       dm_device_name(ti->table->md), bdevname(bdev, b));
408 		return 0;
409 	}
410 
411 	if (blk_stack_limits(limits, &q->limits,
412 			get_start_sect(bdev) + start) < 0)
413 		DMWARN("%s: adding target device %s caused an alignment inconsistency: "
414 		       "physical_block_size=%u, logical_block_size=%u, "
415 		       "alignment_offset=%u, start=%llu",
416 		       dm_device_name(ti->table->md), bdevname(bdev, b),
417 		       q->limits.physical_block_size,
418 		       q->limits.logical_block_size,
419 		       q->limits.alignment_offset,
420 		       (unsigned long long) start << SECTOR_SHIFT);
421 	return 0;
422 }
423 
424 /*
425  * Decrement a device's use count and remove it if necessary.
426  */
427 void dm_put_device(struct dm_target *ti, struct dm_dev *d)
428 {
429 	int found = 0;
430 	struct list_head *devices = &ti->table->devices;
431 	struct dm_dev_internal *dd;
432 
433 	list_for_each_entry(dd, devices, list) {
434 		if (dd->dm_dev == d) {
435 			found = 1;
436 			break;
437 		}
438 	}
439 	if (!found) {
440 		DMWARN("%s: device %s not in table devices list",
441 		       dm_device_name(ti->table->md), d->name);
442 		return;
443 	}
444 	if (refcount_dec_and_test(&dd->count)) {
445 		dm_put_table_device(ti->table->md, d);
446 		list_del(&dd->list);
447 		kfree(dd);
448 	}
449 }
450 EXPORT_SYMBOL(dm_put_device);
451 
452 /*
453  * Checks to see if the target joins onto the end of the table.
454  */
455 static int adjoin(struct dm_table *table, struct dm_target *ti)
456 {
457 	struct dm_target *prev;
458 
459 	if (!table->num_targets)
460 		return !ti->begin;
461 
462 	prev = &table->targets[table->num_targets - 1];
463 	return (ti->begin == (prev->begin + prev->len));
464 }
465 
466 /*
467  * Used to dynamically allocate the arg array.
468  *
469  * We do first allocation with GFP_NOIO because dm-mpath and dm-thin must
470  * process messages even if some device is suspended. These messages have a
471  * small fixed number of arguments.
472  *
473  * On the other hand, dm-switch needs to process bulk data using messages and
474  * excessive use of GFP_NOIO could cause trouble.
475  */
476 static char **realloc_argv(unsigned *size, char **old_argv)
477 {
478 	char **argv;
479 	unsigned new_size;
480 	gfp_t gfp;
481 
482 	if (*size) {
483 		new_size = *size * 2;
484 		gfp = GFP_KERNEL;
485 	} else {
486 		new_size = 8;
487 		gfp = GFP_NOIO;
488 	}
489 	argv = kmalloc_array(new_size, sizeof(*argv), gfp);
490 	if (argv && old_argv) {
491 		memcpy(argv, old_argv, *size * sizeof(*argv));
492 		*size = new_size;
493 	}
494 
495 	kfree(old_argv);
496 	return argv;
497 }
498 
499 /*
500  * Destructively splits up the argument list to pass to ctr.
501  */
502 int dm_split_args(int *argc, char ***argvp, char *input)
503 {
504 	char *start, *end = input, *out, **argv = NULL;
505 	unsigned array_size = 0;
506 
507 	*argc = 0;
508 
509 	if (!input) {
510 		*argvp = NULL;
511 		return 0;
512 	}
513 
514 	argv = realloc_argv(&array_size, argv);
515 	if (!argv)
516 		return -ENOMEM;
517 
518 	while (1) {
519 		/* Skip whitespace */
520 		start = skip_spaces(end);
521 
522 		if (!*start)
523 			break;	/* success, we hit the end */
524 
525 		/* 'out' is used to remove any back-quotes */
526 		end = out = start;
527 		while (*end) {
528 			/* Everything apart from '\0' can be quoted */
529 			if (*end == '\\' && *(end + 1)) {
530 				*out++ = *(end + 1);
531 				end += 2;
532 				continue;
533 			}
534 
535 			if (isspace(*end))
536 				break;	/* end of token */
537 
538 			*out++ = *end++;
539 		}
540 
541 		/* have we already filled the array ? */
542 		if ((*argc + 1) > array_size) {
543 			argv = realloc_argv(&array_size, argv);
544 			if (!argv)
545 				return -ENOMEM;
546 		}
547 
548 		/* we know this is whitespace */
549 		if (*end)
550 			end++;
551 
552 		/* terminate the string and put it in the array */
553 		*out = '\0';
554 		argv[*argc] = start;
555 		(*argc)++;
556 	}
557 
558 	*argvp = argv;
559 	return 0;
560 }
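
/*
 * For example, the params string "/dev/sdb 8192" splits into argc == 2 with
 * argv[0] == "/dev/sdb" and argv[1] == "8192".  A backslash quotes the next
 * character, so "a\ b" is returned as the single argument "a b".
 */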
561 
562 /*
563  * Impose necessary and sufficient conditions on a device's table such
564  * that any incoming bio which respects its logical_block_size can be
565  * processed successfully.  If it falls across the boundary between
566  * two or more targets, the size of each piece it gets split into must
567  * be compatible with the logical_block_size of the target processing it.
568  */
569 static int validate_hardware_logical_block_alignment(struct dm_table *table,
570 						 struct queue_limits *limits)
571 {
572 	/*
573 	 * This function uses arithmetic modulo the logical_block_size
574 	 * (in units of 512-byte sectors).
575 	 */
576 	unsigned short device_logical_block_size_sects =
577 		limits->logical_block_size >> SECTOR_SHIFT;
578 
579 	/*
580 	 * Offset of the start of the next table entry, mod logical_block_size.
581 	 */
582 	unsigned short next_target_start = 0;
583 
584 	/*
585 	 * Given an aligned bio that extends beyond the end of a
586 	 * target, how many sectors must the next target handle?
587 	 */
588 	unsigned short remaining = 0;
589 
590 	struct dm_target *ti;
591 	struct queue_limits ti_limits;
592 	unsigned i;
593 
594 	/*
595 	 * Check each entry in the table in turn.
596 	 */
597 	for (i = 0; i < dm_table_get_num_targets(table); i++) {
598 		ti = dm_table_get_target(table, i);
599 
600 		blk_set_stacking_limits(&ti_limits);
601 
602 		/* combine all target devices' limits */
603 		if (ti->type->iterate_devices)
604 			ti->type->iterate_devices(ti, dm_set_device_limits,
605 						  &ti_limits);
606 
607 		/*
608 		 * If the remaining sectors fall entirely within this
609 		 * table entry, are they compatible with its logical_block_size?
610 		 */
611 		if (remaining < ti->len &&
612 		    remaining & ((ti_limits.logical_block_size >>
613 				  SECTOR_SHIFT) - 1))
614 			break;	/* Error */
615 
616 		next_target_start =
617 		    (unsigned short) ((next_target_start + ti->len) &
618 				      (device_logical_block_size_sects - 1));
619 		remaining = next_target_start ?
620 		    device_logical_block_size_sects - next_target_start : 0;
621 	}
622 
623 	if (remaining) {
624 		DMWARN("%s: table line %u (start sect %llu len %llu) "
625 		       "not aligned to h/w logical block size %u",
626 		       dm_device_name(table->md), i,
627 		       (unsigned long long) ti->begin,
628 		       (unsigned long long) ti->len,
629 		       limits->logical_block_size);
630 		return -EINVAL;
631 	}
632 
633 	return 0;
634 }
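
/*
 * Worked example: with a 4096-byte (8-sector) logical block size, a first
 * target of 1001 sectors leaves remaining == 7 for the next target.  Since
 * 7 is not a multiple of 8, a logical-block-sized bio straddling the
 * boundary could not be split into pieces both targets can handle, so the
 * table is rejected.
 */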
635 
636 int dm_table_add_target(struct dm_table *t, const char *type,
637 			sector_t start, sector_t len, char *params)
638 {
639 	int r = -EINVAL, argc;
640 	char **argv;
641 	struct dm_target *tgt;
642 
643 	if (t->singleton) {
644 		DMERR("%s: target type %s must appear alone in table",
645 		      dm_device_name(t->md), t->targets->type->name);
646 		return -EINVAL;
647 	}
648 
649 	BUG_ON(t->num_targets >= t->num_allocated);
650 
651 	tgt = t->targets + t->num_targets;
652 	memset(tgt, 0, sizeof(*tgt));
653 
654 	if (!len) {
655 		DMERR("%s: zero-length target", dm_device_name(t->md));
656 		return -EINVAL;
657 	}
658 
659 	tgt->type = dm_get_target_type(type);
660 	if (!tgt->type) {
661 		DMERR("%s: %s: unknown target type", dm_device_name(t->md), type);
662 		return -EINVAL;
663 	}
664 
665 	if (dm_target_needs_singleton(tgt->type)) {
666 		if (t->num_targets) {
667 			tgt->error = "singleton target type must appear alone in table";
668 			goto bad;
669 		}
670 		t->singleton = true;
671 	}
672 
673 	if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) {
674 		tgt->error = "target type may not be included in a read-only table";
675 		goto bad;
676 	}
677 
678 	if (t->immutable_target_type) {
679 		if (t->immutable_target_type != tgt->type) {
680 			tgt->error = "immutable target type cannot be mixed with other target types";
681 			goto bad;
682 		}
683 	} else if (dm_target_is_immutable(tgt->type)) {
684 		if (t->num_targets) {
685 			tgt->error = "immutable target type cannot be mixed with other target types";
686 			goto bad;
687 		}
688 		t->immutable_target_type = tgt->type;
689 	}
690 
691 	if (dm_target_has_integrity(tgt->type))
692 		t->integrity_added = 1;
693 
694 	tgt->table = t;
695 	tgt->begin = start;
696 	tgt->len = len;
697 	tgt->error = "Unknown error";
698 
699 	/*
700 	 * Does this target adjoin the previous one ?
701 	 */
702 	if (!adjoin(t, tgt)) {
703 		tgt->error = "Gap in table";
704 		goto bad;
705 	}
706 
707 	r = dm_split_args(&argc, &argv, params);
708 	if (r) {
709 		tgt->error = "couldn't split parameters (insufficient memory)";
710 		goto bad;
711 	}
712 
713 	r = tgt->type->ctr(tgt, argc, argv);
714 	kfree(argv);
715 	if (r)
716 		goto bad;
717 
718 	t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;
719 
720 	if (!tgt->num_discard_bios && tgt->discards_supported)
721 		DMWARN("%s: %s: ignoring discards_supported because num_discard_bios is zero.",
722 		       dm_device_name(t->md), type);
723 
724 	return 0;
725 
726  bad:
727 	DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
728 	dm_put_target_type(tgt->type);
729 	return r;
730 }
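
/*
 * For example, a table line such as "0 409600 linear 8:16 2048" arrives
 * here as start == 0, len == 409600, type == "linear" and
 * params == "8:16 2048"; the ioctl layer parses the leading fields before
 * calling this function.
 */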
731 
732 /*
733  * Target argument parsing helpers.
734  */
735 static int validate_next_arg(const struct dm_arg *arg,
736 			     struct dm_arg_set *arg_set,
737 			     unsigned *value, char **error, unsigned grouped)
738 {
739 	const char *arg_str = dm_shift_arg(arg_set);
740 	char dummy;
741 
742 	if (!arg_str ||
743 	    (sscanf(arg_str, "%u%c", value, &dummy) != 1) ||
744 	    (*value < arg->min) ||
745 	    (*value > arg->max) ||
746 	    (grouped && arg_set->argc < *value)) {
747 		*error = arg->error;
748 		return -EINVAL;
749 	}
750 
751 	return 0;
752 }
753 
754 int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
755 		unsigned *value, char **error)
756 {
757 	return validate_next_arg(arg, arg_set, value, error, 0);
758 }
759 EXPORT_SYMBOL(dm_read_arg);
760 
761 int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
762 		      unsigned *value, char **error)
763 {
764 	return validate_next_arg(arg, arg_set, value, error, 1);
765 }
766 EXPORT_SYMBOL(dm_read_arg_group);
767 
768 const char *dm_shift_arg(struct dm_arg_set *as)
769 {
770 	char *r;
771 
772 	if (as->argc) {
773 		as->argc--;
774 		r = *as->argv;
775 		as->argv++;
776 		return r;
777 	}
778 
779 	return NULL;
780 }
781 EXPORT_SYMBOL(dm_shift_arg);
782 
783 void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
784 {
785 	BUG_ON(as->argc < num_args);
786 	as->argc -= num_args;
787 	as->argv += num_args;
788 }
789 EXPORT_SYMBOL(dm_consume_args);
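
/*
 * Illustrative use of the argument helpers (a sketch only; _args and the
 * feature count are hypothetical):
 *
 *	static const struct dm_arg _args[] = {
 *		{ 0, 4, "Invalid number of feature arguments" },
 *	};
 *	struct dm_arg_set as = { .argc = argc, .argv = argv };
 *	unsigned int num_features;
 *	int r;
 *
 *	r = dm_read_arg_group(_args, &as, &num_features, &ti->error);
 *	if (r)
 *		return r;
 *	while (num_features--) {
 *		const char *arg_name = dm_shift_arg(&as);
 *		...
 *	}
 */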
790 
791 static bool __table_type_bio_based(enum dm_queue_mode table_type)
792 {
793 	return (table_type == DM_TYPE_BIO_BASED ||
794 		table_type == DM_TYPE_DAX_BIO_BASED);
795 }
796 
797 static bool __table_type_request_based(enum dm_queue_mode table_type)
798 {
799 	return table_type == DM_TYPE_REQUEST_BASED;
800 }
801 
802 void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type)
803 {
804 	t->type = type;
805 }
806 EXPORT_SYMBOL_GPL(dm_table_set_type);
807 
808 /* validate the dax capability of the target device span */
809 int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev,
810 			sector_t start, sector_t len, void *data)
811 {
812 	int blocksize = *(int *) data;
813 
814 	return !dax_supported(dev->dax_dev, dev->bdev, blocksize, start, len);
815 }
816 
817 /* Check whether devices support synchronous DAX */
818 static int device_not_dax_synchronous_capable(struct dm_target *ti, struct dm_dev *dev,
819 					      sector_t start, sector_t len, void *data)
820 {
821 	return !dev->dax_dev || !dax_synchronous(dev->dax_dev);
822 }
823 
824 bool dm_table_supports_dax(struct dm_table *t,
825 			   iterate_devices_callout_fn iterate_fn, int *blocksize)
826 {
827 	struct dm_target *ti;
828 	unsigned i;
829 
830 	/* Ensure that all targets support DAX. */
831 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
832 		ti = dm_table_get_target(t, i);
833 
834 		if (!ti->type->direct_access)
835 			return false;
836 
837 		if (!ti->type->iterate_devices ||
838 		    ti->type->iterate_devices(ti, iterate_fn, blocksize))
839 			return false;
840 	}
841 
842 	return true;
843 }
844 
845 static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
846 				  sector_t start, sector_t len, void *data)
847 {
848 	struct block_device *bdev = dev->bdev;
849 	struct request_queue *q = bdev_get_queue(bdev);
850 
851 	/* request-based cannot stack on partitions! */
852 	if (bdev_is_partition(bdev))
853 		return false;
854 
855 	return queue_is_mq(q);
856 }
857 
858 static int dm_table_determine_type(struct dm_table *t)
859 {
860 	unsigned i;
861 	unsigned bio_based = 0, request_based = 0, hybrid = 0;
862 	struct dm_target *tgt;
863 	struct list_head *devices = dm_table_get_devices(t);
864 	enum dm_queue_mode live_md_type = dm_get_md_type(t->md);
865 	int page_size = PAGE_SIZE;
866 
867 	if (t->type != DM_TYPE_NONE) {
868 		/* target already set the table's type */
869 		if (t->type == DM_TYPE_BIO_BASED) {
870 			/* possibly upgrade to a variant of bio-based */
871 			goto verify_bio_based;
872 		}
873 		BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED);
874 		goto verify_rq_based;
875 	}
876 
877 	for (i = 0; i < t->num_targets; i++) {
878 		tgt = t->targets + i;
879 		if (dm_target_hybrid(tgt))
880 			hybrid = 1;
881 		else if (dm_target_request_based(tgt))
882 			request_based = 1;
883 		else
884 			bio_based = 1;
885 
886 		if (bio_based && request_based) {
887 			DMERR("Inconsistent table: different target types"
888 			      " can't be mixed up");
889 			return -EINVAL;
890 		}
891 	}
892 
893 	if (hybrid && !bio_based && !request_based) {
894 		/*
895 		 * The targets can work either way.
896 		 * Determine the type from the live device.
897 		 * Default to bio-based if device is new.
898 		 */
899 		if (__table_type_request_based(live_md_type))
900 			request_based = 1;
901 		else
902 			bio_based = 1;
903 	}
904 
905 	if (bio_based) {
906 verify_bio_based:
907 		/* We must use this table as bio-based */
908 		t->type = DM_TYPE_BIO_BASED;
909 		if (dm_table_supports_dax(t, device_not_dax_capable, &page_size) ||
910 		    (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) {
911 			t->type = DM_TYPE_DAX_BIO_BASED;
912 		}
913 		return 0;
914 	}
915 
916 	BUG_ON(!request_based); /* No targets in this table */
917 
918 	t->type = DM_TYPE_REQUEST_BASED;
919 
920 verify_rq_based:
921 	/*
922 	 * Request-based dm currently supports only tables that have a single target.
923 	 * To support multiple targets, request splitting support is needed,
924 	 * and that needs lots of changes in the block-layer.
925 	 * (e.g. request completion process for partial completion.)
926 	 */
927 	if (t->num_targets > 1) {
928 		DMERR("request-based DM doesn't support multiple targets");
929 		return -EINVAL;
930 	}
931 
932 	if (list_empty(devices)) {
933 		int srcu_idx;
934 		struct dm_table *live_table = dm_get_live_table(t->md, &srcu_idx);
935 
936 		/* inherit live table's type */
937 		if (live_table)
938 			t->type = live_table->type;
939 		dm_put_live_table(t->md, srcu_idx);
940 		return 0;
941 	}
942 
943 	tgt = dm_table_get_immutable_target(t);
944 	if (!tgt) {
945 		DMERR("table load rejected: immutable target is required");
946 		return -EINVAL;
947 	} else if (tgt->max_io_len) {
948 		DMERR("table load rejected: immutable target that splits IO is not supported");
949 		return -EINVAL;
950 	}
951 
952 	/* Non-request-stackable devices can't be used for request-based dm */
953 	if (!tgt->type->iterate_devices ||
954 	    !tgt->type->iterate_devices(tgt, device_is_rq_stackable, NULL)) {
955 		DMERR("table load rejected: including non-request-stackable devices");
956 		return -EINVAL;
957 	}
958 
959 	return 0;
960 }
961 
962 enum dm_queue_mode dm_table_get_type(struct dm_table *t)
963 {
964 	return t->type;
965 }
966 
967 struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
968 {
969 	return t->immutable_target_type;
970 }
971 
972 struct dm_target *dm_table_get_immutable_target(struct dm_table *t)
973 {
974 	/* Immutable target is implicitly a singleton */
975 	if (t->num_targets > 1 ||
976 	    !dm_target_is_immutable(t->targets[0].type))
977 		return NULL;
978 
979 	return t->targets;
980 }
981 
982 struct dm_target *dm_table_get_wildcard_target(struct dm_table *t)
983 {
984 	struct dm_target *ti;
985 	unsigned i;
986 
987 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
988 		ti = dm_table_get_target(t, i);
989 		if (dm_target_is_wildcard(ti->type))
990 			return ti;
991 	}
992 
993 	return NULL;
994 }
995 
996 bool dm_table_bio_based(struct dm_table *t)
997 {
998 	return __table_type_bio_based(dm_table_get_type(t));
999 }
1000 
1001 bool dm_table_request_based(struct dm_table *t)
1002 {
1003 	return __table_type_request_based(dm_table_get_type(t));
1004 }
1005 
1006 static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
1007 {
1008 	enum dm_queue_mode type = dm_table_get_type(t);
1009 	unsigned per_io_data_size = 0;
1010 	unsigned min_pool_size = 0;
1011 	struct dm_target *ti;
1012 	unsigned i;
1013 
1014 	if (unlikely(type == DM_TYPE_NONE)) {
1015 		DMWARN("no table type is set, can't allocate mempools");
1016 		return -EINVAL;
1017 	}
1018 
1019 	if (__table_type_bio_based(type))
1020 		for (i = 0; i < t->num_targets; i++) {
1021 			ti = t->targets + i;
1022 			per_io_data_size = max(per_io_data_size, ti->per_io_data_size);
1023 			min_pool_size = max(min_pool_size, ti->num_flush_bios);
1024 		}
1025 
1026 	t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported,
1027 					   per_io_data_size, min_pool_size);
1028 	if (!t->mempools)
1029 		return -ENOMEM;
1030 
1031 	return 0;
1032 }
1033 
1034 void dm_table_free_md_mempools(struct dm_table *t)
1035 {
1036 	dm_free_md_mempools(t->mempools);
1037 	t->mempools = NULL;
1038 }
1039 
1040 struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t)
1041 {
1042 	return t->mempools;
1043 }
1044 
1045 static int setup_indexes(struct dm_table *t)
1046 {
1047 	int i;
1048 	unsigned int total = 0;
1049 	sector_t *indexes;
1050 
1051 	/* allocate the space for *all* the indexes */
1052 	for (i = t->depth - 2; i >= 0; i--) {
1053 		t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
1054 		total += t->counts[i];
1055 	}
1056 
1057 	indexes = kvcalloc(total, NODE_SIZE, GFP_KERNEL);
1058 	if (!indexes)
1059 		return -ENOMEM;
1060 
1061 	/* set up internal nodes, bottom-up */
1062 	for (i = t->depth - 2; i >= 0; i--) {
1063 		t->index[i] = indexes;
1064 		indexes += (KEYS_PER_NODE * t->counts[i]);
1065 		setup_btree_index(i, t);
1066 	}
1067 
1068 	return 0;
1069 }
1070 
1071 /*
1072  * Builds the btree to index the map.
1073  */
1074 static int dm_table_build_index(struct dm_table *t)
1075 {
1076 	int r = 0;
1077 	unsigned int leaf_nodes;
1078 
1079 	/* how many indexes will the btree have ? */
1080 	leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
1081 	t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);
1082 
1083 	/* leaf layer has already been set up */
1084 	t->counts[t->depth - 1] = leaf_nodes;
1085 	t->index[t->depth - 1] = t->highs;
1086 
1087 	if (t->depth >= 2)
1088 		r = setup_indexes(t);
1089 
1090 	return r;
1091 }
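
/*
 * Sizing example: with KEYS_PER_NODE == 8 and 100 targets, leaf_nodes == 13
 * and depth == 3; setup_indexes() then allocates two internal levels of 2
 * and 1 nodes above the 13 leaf nodes, which alias t->highs.
 */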
1092 
1093 static bool integrity_profile_exists(struct gendisk *disk)
1094 {
1095 	return !!blk_get_integrity(disk);
1096 }
1097 
1098 /*
1099  * Get a disk whose integrity profile reflects the table's profile.
1100  * Returns NULL if integrity support was inconsistent or unavailable.
1101  */
1102 static struct gendisk *dm_table_get_integrity_disk(struct dm_table *t)
1103 {
1104 	struct list_head *devices = dm_table_get_devices(t);
1105 	struct dm_dev_internal *dd = NULL;
1106 	struct gendisk *prev_disk = NULL, *template_disk = NULL;
1107 	unsigned i;
1108 
1109 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
1110 		struct dm_target *ti = dm_table_get_target(t, i);
1111 		if (!dm_target_passes_integrity(ti->type))
1112 			goto no_integrity;
1113 	}
1114 
1115 	list_for_each_entry(dd, devices, list) {
1116 		template_disk = dd->dm_dev->bdev->bd_disk;
1117 		if (!integrity_profile_exists(template_disk))
1118 			goto no_integrity;
1119 		else if (prev_disk &&
1120 			 blk_integrity_compare(prev_disk, template_disk) < 0)
1121 			goto no_integrity;
1122 		prev_disk = template_disk;
1123 	}
1124 
1125 	return template_disk;
1126 
1127 no_integrity:
1128 	if (prev_disk)
1129 		DMWARN("%s: integrity not set: %s and %s profile mismatch",
1130 		       dm_device_name(t->md),
1131 		       prev_disk->disk_name,
1132 		       template_disk->disk_name);
1133 	return NULL;
1134 }
1135 
1136 /*
1137  * Register the mapped device for blk_integrity support if the
1138  * underlying devices have an integrity profile.  But the devices may
1139  * not all have matching profiles (checking all devices isn't reliable
1140  * during table load because this table may use other DM device(s) which
1141  * must be resumed before they will have an initialized integrity
1142  * profile).  Consequently, stacked DM devices force a 2-stage integrity
1143  * profile validation: First pass during table load, final pass during
1144  * resume.
1145  */
1146 static int dm_table_register_integrity(struct dm_table *t)
1147 {
1148 	struct mapped_device *md = t->md;
1149 	struct gendisk *template_disk = NULL;
1150 
1151 	/* If target handles integrity itself do not register it here. */
1152 	if (t->integrity_added)
1153 		return 0;
1154 
1155 	template_disk = dm_table_get_integrity_disk(t);
1156 	if (!template_disk)
1157 		return 0;
1158 
1159 	if (!integrity_profile_exists(dm_disk(md))) {
1160 		t->integrity_supported = true;
1161 		/*
1162 		 * Register integrity profile during table load; we can do
1163 		 * this because the final profile must match during resume.
1164 		 */
1165 		blk_integrity_register(dm_disk(md),
1166 				       blk_get_integrity(template_disk));
1167 		return 0;
1168 	}
1169 
1170 	/*
1171 	 * If DM device already has an initialized integrity
1172 	 * profile the new profile should not conflict.
1173 	 */
1174 	if (blk_integrity_compare(dm_disk(md), template_disk) < 0) {
1175 		DMWARN("%s: conflict with existing integrity profile: "
1176 		       "%s profile mismatch",
1177 		       dm_device_name(t->md),
1178 		       template_disk->disk_name);
1179 		return 1;
1180 	}
1181 
1182 	/* Preserve existing integrity profile */
1183 	t->integrity_supported = true;
1184 	return 0;
1185 }
1186 
1187 #ifdef CONFIG_BLK_INLINE_ENCRYPTION
1188 
1189 struct dm_keyslot_manager {
1190 	struct blk_keyslot_manager ksm;
1191 	struct mapped_device *md;
1192 };
1193 
1194 struct dm_keyslot_evict_args {
1195 	const struct blk_crypto_key *key;
1196 	int err;
1197 };
1198 
1199 static int dm_keyslot_evict_callback(struct dm_target *ti, struct dm_dev *dev,
1200 				     sector_t start, sector_t len, void *data)
1201 {
1202 	struct dm_keyslot_evict_args *args = data;
1203 	int err;
1204 
1205 	err = blk_crypto_evict_key(bdev_get_queue(dev->bdev), args->key);
1206 	if (!args->err)
1207 		args->err = err;
1208 	/* Always try to evict the key from all devices. */
1209 	return 0;
1210 }
1211 
1212 /*
1213  * When an inline encryption key is evicted from a device-mapper device, evict
1214  * it from all the underlying devices.
1215  */
1216 static int dm_keyslot_evict(struct blk_keyslot_manager *ksm,
1217 			    const struct blk_crypto_key *key, unsigned int slot)
1218 {
1219 	struct dm_keyslot_manager *dksm = container_of(ksm,
1220 						       struct dm_keyslot_manager,
1221 						       ksm);
1222 	struct mapped_device *md = dksm->md;
1223 	struct dm_keyslot_evict_args args = { key };
1224 	struct dm_table *t;
1225 	int srcu_idx;
1226 	int i;
1227 	struct dm_target *ti;
1228 
1229 	t = dm_get_live_table(md, &srcu_idx);
1230 	if (!t)
1231 		return 0;
1232 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
1233 		ti = dm_table_get_target(t, i);
1234 		if (!ti->type->iterate_devices)
1235 			continue;
1236 		ti->type->iterate_devices(ti, dm_keyslot_evict_callback, &args);
1237 	}
1238 	dm_put_live_table(md, srcu_idx);
1239 	return args.err;
1240 }
1241 
1242 static const struct blk_ksm_ll_ops dm_ksm_ll_ops = {
1243 	.keyslot_evict = dm_keyslot_evict,
1244 };
1245 
1246 static int device_intersect_crypto_modes(struct dm_target *ti,
1247 					 struct dm_dev *dev, sector_t start,
1248 					 sector_t len, void *data)
1249 {
1250 	struct blk_keyslot_manager *parent = data;
1251 	struct blk_keyslot_manager *child = bdev_get_queue(dev->bdev)->ksm;
1252 
1253 	blk_ksm_intersect_modes(parent, child);
1254 	return 0;
1255 }
1256 
1257 void dm_destroy_keyslot_manager(struct blk_keyslot_manager *ksm)
1258 {
1259 	struct dm_keyslot_manager *dksm = container_of(ksm,
1260 						       struct dm_keyslot_manager,
1261 						       ksm);
1262 
1263 	if (!ksm)
1264 		return;
1265 
1266 	blk_ksm_destroy(ksm);
1267 	kfree(dksm);
1268 }
1269 
1270 static void dm_table_destroy_keyslot_manager(struct dm_table *t)
1271 {
1272 	dm_destroy_keyslot_manager(t->ksm);
1273 	t->ksm = NULL;
1274 }
1275 
1276 /*
1277  * Constructs and initializes t->ksm with a keyslot manager that
1278  * represents the common set of crypto capabilities of the devices
1279  * described by the dm_table. However, if the constructed keyslot
1280  * manager does not support a superset of the crypto capabilities
1281  * supported by the current keyslot manager of the mapped_device,
1282  * it returns an error instead, since we don't support restricting
1283  * crypto capabilities on table changes. Finally, if the constructed
1284  * keyslot manager doesn't actually support any crypto modes at all,
1285  * t->ksm is simply set to NULL.
1286  */
1287 static int dm_table_construct_keyslot_manager(struct dm_table *t)
1288 {
1289 	struct dm_keyslot_manager *dksm;
1290 	struct blk_keyslot_manager *ksm;
1291 	struct dm_target *ti;
1292 	unsigned int i;
1293 	bool ksm_is_empty = true;
1294 
1295 	dksm = kmalloc(sizeof(*dksm), GFP_KERNEL);
1296 	if (!dksm)
1297 		return -ENOMEM;
1298 	dksm->md = t->md;
1299 
1300 	ksm = &dksm->ksm;
1301 	blk_ksm_init_passthrough(ksm);
1302 	ksm->ksm_ll_ops = dm_ksm_ll_ops;
1303 	ksm->max_dun_bytes_supported = UINT_MAX;
1304 	memset(ksm->crypto_modes_supported, 0xFF,
1305 	       sizeof(ksm->crypto_modes_supported));
1306 
1307 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
1308 		ti = dm_table_get_target(t, i);
1309 
1310 		if (!dm_target_passes_crypto(ti->type)) {
1311 			blk_ksm_intersect_modes(ksm, NULL);
1312 			break;
1313 		}
1314 		if (!ti->type->iterate_devices)
1315 			continue;
1316 		ti->type->iterate_devices(ti, device_intersect_crypto_modes,
1317 					  ksm);
1318 	}
1319 
1320 	if (t->md->queue && !blk_ksm_is_superset(ksm, t->md->queue->ksm)) {
1321 		DMWARN("Inline encryption capabilities of new DM table were more restrictive than the old table's. This is not supported!");
1322 		dm_destroy_keyslot_manager(ksm);
1323 		return -EINVAL;
1324 	}
1325 
1326 	/*
1327 	 * If the new KSM doesn't actually support any crypto modes, we may as
1328 	 * well represent it with a NULL ksm.
1329 	 */
1330 	ksm_is_empty = true;
1331 	for (i = 0; i < ARRAY_SIZE(ksm->crypto_modes_supported); i++) {
1332 		if (ksm->crypto_modes_supported[i]) {
1333 			ksm_is_empty = false;
1334 			break;
1335 		}
1336 	}
1337 
1338 	if (ksm_is_empty) {
1339 		dm_destroy_keyslot_manager(ksm);
1340 		ksm = NULL;
1341 	}
1342 
1343 	/*
1344 	 * t->ksm is only set temporarily while the table is being set
1345 	 * up, and it gets set to NULL after the capabilities have
1346 	 * been transferred to the request_queue.
1347 	 */
1348 	t->ksm = ksm;
1349 
1350 	return 0;
1351 }
1352 
1353 static void dm_update_keyslot_manager(struct request_queue *q,
1354 				      struct dm_table *t)
1355 {
1356 	if (!t->ksm)
1357 		return;
1358 
1359 	/* Make the ksm less restrictive */
1360 	if (!q->ksm) {
1361 		blk_ksm_register(t->ksm, q);
1362 	} else {
1363 		blk_ksm_update_capabilities(q->ksm, t->ksm);
1364 		dm_destroy_keyslot_manager(t->ksm);
1365 	}
1366 	t->ksm = NULL;
1367 }
1368 
1369 #else /* CONFIG_BLK_INLINE_ENCRYPTION */
1370 
1371 static int dm_table_construct_keyslot_manager(struct dm_table *t)
1372 {
1373 	return 0;
1374 }
1375 
1376 void dm_destroy_keyslot_manager(struct blk_keyslot_manager *ksm)
1377 {
1378 }
1379 
1380 static void dm_table_destroy_keyslot_manager(struct dm_table *t)
1381 {
1382 }
1383 
1384 static void dm_update_keyslot_manager(struct request_queue *q,
1385 				      struct dm_table *t)
1386 {
1387 }
1388 
1389 #endif /* !CONFIG_BLK_INLINE_ENCRYPTION */
1390 
1391 /*
1392  * Prepares the table for use by building the indices,
1393  * setting the type, and allocating mempools.
1394  */
1395 int dm_table_complete(struct dm_table *t)
1396 {
1397 	int r;
1398 
1399 	r = dm_table_determine_type(t);
1400 	if (r) {
1401 		DMERR("unable to determine table type");
1402 		return r;
1403 	}
1404 
1405 	r = dm_table_build_index(t);
1406 	if (r) {
1407 		DMERR("unable to build btrees");
1408 		return r;
1409 	}
1410 
1411 	r = dm_table_register_integrity(t);
1412 	if (r) {
1413 		DMERR("could not register integrity profile.");
1414 		return r;
1415 	}
1416 
1417 	r = dm_table_construct_keyslot_manager(t);
1418 	if (r) {
1419 		DMERR("could not construct keyslot manager.");
1420 		return r;
1421 	}
1422 
1423 	r = dm_table_alloc_md_mempools(t, t->md);
1424 	if (r)
1425 		DMERR("unable to allocate mempools");
1426 
1427 	return r;
1428 }
1429 
1430 static DEFINE_MUTEX(_event_lock);
1431 void dm_table_event_callback(struct dm_table *t,
1432 			     void (*fn)(void *), void *context)
1433 {
1434 	mutex_lock(&_event_lock);
1435 	t->event_fn = fn;
1436 	t->event_context = context;
1437 	mutex_unlock(&_event_lock);
1438 }
1439 
1440 void dm_table_event(struct dm_table *t)
1441 {
1442 	mutex_lock(&_event_lock);
1443 	if (t->event_fn)
1444 		t->event_fn(t->event_context);
1445 	mutex_unlock(&_event_lock);
1446 }
1447 EXPORT_SYMBOL(dm_table_event);
1448 
1449 inline sector_t dm_table_get_size(struct dm_table *t)
1450 {
1451 	return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
1452 }
1453 EXPORT_SYMBOL(dm_table_get_size);
1454 
1455 struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
1456 {
1457 	if (index >= t->num_targets)
1458 		return NULL;
1459 
1460 	return t->targets + index;
1461 }
1462 
1463 /*
1464  * Search the btree for the correct target.
1465  *
1466  * Caller should check returned pointer for NULL
1467  * to trap I/O beyond end of device.
1468  */
1469 struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
1470 {
1471 	unsigned int l, n = 0, k = 0;
1472 	sector_t *node;
1473 
1474 	if (unlikely(sector >= dm_table_get_size(t)))
1475 		return NULL;
1476 
1477 	for (l = 0; l < t->depth; l++) {
1478 		n = get_child(n, k);
1479 		node = get_node(t, l, n);
1480 
1481 		for (k = 0; k < KEYS_PER_NODE; k++)
1482 			if (node[k] >= sector)
1483 				break;
1484 	}
1485 
1486 	return &t->targets[(KEYS_PER_NODE * n) + k];
1487 }
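
/*
 * The walk starts at the root (node 0 of level 0), finds the first key that
 * is >= sector, and descends into the corresponding child; at the leaf
 * level the resulting (n, k) pair indexes t->targets directly.  Callers
 * must check for a NULL return before dereferencing.
 */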
1488 
1489 /*
1490  * type->iterate_devices() should be called when the sanity check needs to
1491  * iterate and check all underlying data devices. iterate_devices() will
1492  * iterate all underlying data devices until it encounters a non-zero return
1493  * code, returned either by the supplied iterate_devices_callout_fn or by
1494  * iterate_devices() itself.
1495  *
1496  * For some target type (e.g. dm-stripe), one call of iterate_devices() may
1497  * iterate multiple underlying devices internally, in which case a non-zero
1498  * return code returned by iterate_devices_callout_fn will stop the iteration
1499  * in advance.
1500  *
1501  * Cases requiring _any_ underlying device to support some kind of attribute
1502  * should use an iteration structure like dm_table_any_dev_attr(), or call
1503  * it directly. @func should handle the semantics of positive examples, e.g.
1504  * being capable of something.
1505  *
1506  * Cases requiring _all_ underlying devices to support some kind of attribute
1507  * should use an iteration structure like dm_table_supports_nowait() or
1508  * dm_table_supports_discards(). Or introduce dm_table_all_devs_attr(), which
1509  * uses an @anti_func that handles the semantics of counterexamples, e.g. not
1510  * being capable of something. So: return !dm_table_any_dev_attr(t, anti_func, data);
1511  */
1512 static bool dm_table_any_dev_attr(struct dm_table *t,
1513 				  iterate_devices_callout_fn func, void *data)
1514 {
1515 	struct dm_target *ti;
1516 	unsigned int i;
1517 
1518 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
1519 		ti = dm_table_get_target(t, i);
1520 
1521 		if (ti->type->iterate_devices &&
1522 		    ti->type->iterate_devices(ti, func, data))
1523 			return true;
1524 	}
1525 
1526 	return false;
1527 }
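
/*
 * A hypothetical "all devices support foo" check built from the pattern
 * described above would be:
 *
 *	return !dm_table_any_dev_attr(t, device_not_foo_capable, NULL);
 *
 * where device_not_foo_capable() is an iterate_devices_callout_fn that
 * returns non-zero for a device lacking the capability.
 */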
1528 
1529 static int count_device(struct dm_target *ti, struct dm_dev *dev,
1530 			sector_t start, sector_t len, void *data)
1531 {
1532 	unsigned *num_devices = data;
1533 
1534 	(*num_devices)++;
1535 
1536 	return 0;
1537 }
1538 
1539 /*
1540  * Check whether a table has no data devices attached using each
1541  * target's iterate_devices method.
1542  * Returns false if the result is unknown because a target doesn't
1543  * support iterate_devices.
1544  */
1545 bool dm_table_has_no_data_devices(struct dm_table *table)
1546 {
1547 	struct dm_target *ti;
1548 	unsigned i, num_devices;
1549 
1550 	for (i = 0; i < dm_table_get_num_targets(table); i++) {
1551 		ti = dm_table_get_target(table, i);
1552 
1553 		if (!ti->type->iterate_devices)
1554 			return false;
1555 
1556 		num_devices = 0;
1557 		ti->type->iterate_devices(ti, count_device, &num_devices);
1558 		if (num_devices)
1559 			return false;
1560 	}
1561 
1562 	return true;
1563 }
1564 
1565 static int device_not_zoned_model(struct dm_target *ti, struct dm_dev *dev,
1566 				  sector_t start, sector_t len, void *data)
1567 {
1568 	struct request_queue *q = bdev_get_queue(dev->bdev);
1569 	enum blk_zoned_model *zoned_model = data;
1570 
1571 	return blk_queue_zoned_model(q) != *zoned_model;
1572 }
1573 
1574 /*
1575  * Check the device zoned model based on the target feature flag. If the target
1576  * has the DM_TARGET_ZONED_HM feature flag set, host-managed zoned devices are
1577  * also accepted but all devices must have the same zoned model. If the target
1578  * has the DM_TARGET_MIXED_ZONED_MODEL feature set, the devices can have any
1579  * zoned model with all zoned devices having the same zone size.
1580  */
1581 static bool dm_table_supports_zoned_model(struct dm_table *t,
1582 					  enum blk_zoned_model zoned_model)
1583 {
1584 	struct dm_target *ti;
1585 	unsigned i;
1586 
1587 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
1588 		ti = dm_table_get_target(t, i);
1589 
1590 		if (dm_target_supports_zoned_hm(ti->type)) {
1591 			if (!ti->type->iterate_devices ||
1592 			    ti->type->iterate_devices(ti, device_not_zoned_model,
1593 						      &zoned_model))
1594 				return false;
1595 		} else if (!dm_target_supports_mixed_zoned_model(ti->type)) {
1596 			if (zoned_model == BLK_ZONED_HM)
1597 				return false;
1598 		}
1599 	}
1600 
1601 	return true;
1602 }
1603 
1604 static int device_not_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev,
1605 					   sector_t start, sector_t len, void *data)
1606 {
1607 	struct request_queue *q = bdev_get_queue(dev->bdev);
1608 	unsigned int *zone_sectors = data;
1609 
1610 	if (!blk_queue_is_zoned(q))
1611 		return 0;
1612 
1613 	return blk_queue_zone_sectors(q) != *zone_sectors;
1614 }
1615 
1616 /*
1617  * Check consistency of zoned model and zone sectors across all targets. For
1618  * zone sectors, if the destination device is a zoned block device, it shall
1619  * have the specified zone_sectors.
1620  */
1621 static int validate_hardware_zoned_model(struct dm_table *table,
1622 					 enum blk_zoned_model zoned_model,
1623 					 unsigned int zone_sectors)
1624 {
1625 	if (zoned_model == BLK_ZONED_NONE)
1626 		return 0;
1627 
1628 	if (!dm_table_supports_zoned_model(table, zoned_model)) {
1629 		DMERR("%s: zoned model is not consistent across all devices",
1630 		      dm_device_name(table->md));
1631 		return -EINVAL;
1632 	}
1633 
1634 	/* Check zone size validity and compatibility */
1635 	if (!zone_sectors || !is_power_of_2(zone_sectors))
1636 		return -EINVAL;
1637 
1638 	if (dm_table_any_dev_attr(table, device_not_matches_zone_sectors, &zone_sectors)) {
1639 		DMERR("%s: zone sectors is not consistent across all zoned devices",
1640 		      dm_device_name(table->md));
1641 		return -EINVAL;
1642 	}
1643 
1644 	return 0;
1645 }
1646 
1647 /*
1648  * Establish the new table's queue_limits and validate them.
1649  */
1650 int dm_calculate_queue_limits(struct dm_table *table,
1651 			      struct queue_limits *limits)
1652 {
1653 	struct dm_target *ti;
1654 	struct queue_limits ti_limits;
1655 	unsigned i;
1656 	enum blk_zoned_model zoned_model = BLK_ZONED_NONE;
1657 	unsigned int zone_sectors = 0;
1658 
1659 	blk_set_stacking_limits(limits);
1660 
1661 	for (i = 0; i < dm_table_get_num_targets(table); i++) {
1662 		blk_set_stacking_limits(&ti_limits);
1663 
1664 		ti = dm_table_get_target(table, i);
1665 
1666 		if (!ti->type->iterate_devices)
1667 			goto combine_limits;
1668 
1669 		/*
1670 		 * Combine queue limits of all the devices this target uses.
1671 		 */
1672 		ti->type->iterate_devices(ti, dm_set_device_limits,
1673 					  &ti_limits);
1674 
1675 		if (zoned_model == BLK_ZONED_NONE && ti_limits.zoned != BLK_ZONED_NONE) {
1676 			/*
1677 			 * After stacking all limits, validate all devices
1678 			 * in table support this zoned model and zone sectors.
1679 			 */
1680 			zoned_model = ti_limits.zoned;
1681 			zone_sectors = ti_limits.chunk_sectors;
1682 		}
1683 
1684 		/* Set I/O hints portion of queue limits */
1685 		if (ti->type->io_hints)
1686 			ti->type->io_hints(ti, &ti_limits);
1687 
1688 		/*
1689 		 * Check each device area is consistent with the target's
1690 		 * overall queue limits.
1691 		 */
1692 		if (ti->type->iterate_devices(ti, device_area_is_invalid,
1693 					      &ti_limits))
1694 			return -EINVAL;
1695 
1696 combine_limits:
1697 		/*
1698 		 * Merge this target's queue limits into the overall limits
1699 		 * for the table.
1700 		 */
1701 		if (blk_stack_limits(limits, &ti_limits, 0) < 0)
1702 			DMWARN("%s: adding target device "
1703 			       "(start sect %llu len %llu) "
1704 			       "caused an alignment inconsistency",
1705 			       dm_device_name(table->md),
1706 			       (unsigned long long) ti->begin,
1707 			       (unsigned long long) ti->len);
1708 	}
1709 
1710 	/*
1711 	 * Verify that the zoned model and zone sectors, as determined before
1712 	 * any .io_hints override, are the same across all devices in the table.
1713 	 * - this is especially relevant if .io_hints is emulating a disk-managed
1714 	 *   zoned model (aka BLK_ZONED_NONE) on host-managed zoned block devices.
1715 	 * BUT...
1716 	 */
1717 	if (limits->zoned != BLK_ZONED_NONE) {
1718 		/*
1719 		 * ...IF the above limits stacking determined a zoned model
1720 		 * validate that all of the table's devices conform to it.
1721 		 */
1722 		zoned_model = limits->zoned;
1723 		zone_sectors = limits->chunk_sectors;
1724 	}
1725 	if (validate_hardware_zoned_model(table, zoned_model, zone_sectors))
1726 		return -EINVAL;
1727 
1728 	return validate_hardware_logical_block_alignment(table, limits);
1729 }
1730 
1731 /*
1732  * Verify that all devices have an integrity profile that matches the
1733  * DM device's registered integrity profile.  If the profiles don't
1734  * match then unregister the DM device's integrity profile.
1735  */
1736 static void dm_table_verify_integrity(struct dm_table *t)
1737 {
1738 	struct gendisk *template_disk = NULL;
1739 
1740 	if (t->integrity_added)
1741 		return;
1742 
1743 	if (t->integrity_supported) {
1744 		/*
1745 		 * Verify that the original integrity profile
1746 		 * matches all the devices in this table.
1747 		 */
1748 		template_disk = dm_table_get_integrity_disk(t);
1749 		if (template_disk &&
1750 		    blk_integrity_compare(dm_disk(t->md), template_disk) >= 0)
1751 			return;
1752 	}
1753 
1754 	if (integrity_profile_exists(dm_disk(t->md))) {
1755 		DMWARN("%s: unable to establish an integrity profile",
1756 		       dm_device_name(t->md));
1757 		blk_integrity_unregister(dm_disk(t->md));
1758 	}
1759 }
1760 
1761 static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
1762 				sector_t start, sector_t len, void *data)
1763 {
1764 	unsigned long flush = (unsigned long) data;
1765 	struct request_queue *q = bdev_get_queue(dev->bdev);
1766 
1767 	return (q->queue_flags & flush);
1768 }
1769 
1770 static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
1771 {
1772 	struct dm_target *ti;
1773 	unsigned i;
1774 
1775 	/*
1776 	 * Require at least one underlying device to support flushes.
1777 	 * t->devices includes internal dm devices such as mirror logs
1778 	 * so we need to use iterate_devices here, which targets
1779 	 * supporting flushes must provide.
1780 	 */
1781 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
1782 		ti = dm_table_get_target(t, i);
1783 
1784 		if (!ti->num_flush_bios)
1785 			continue;
1786 
1787 		if (ti->flush_supported)
1788 			return true;
1789 
1790 		if (ti->type->iterate_devices &&
1791 		    ti->type->iterate_devices(ti, device_flush_capable, (void *) flush))
1792 			return true;
1793 	}
1794 
1795 	return false;
1796 }
1797 
1798 static int device_dax_write_cache_enabled(struct dm_target *ti,
1799 					  struct dm_dev *dev, sector_t start,
1800 					  sector_t len, void *data)
1801 {
1802 	struct dax_device *dax_dev = dev->dax_dev;
1803 
1804 	if (!dax_dev)
1805 		return false;
1806 
1807 	if (dax_write_cache_enabled(dax_dev))
1808 		return true;
1809 	return false;
1810 }
1811 
1812 static int device_is_rotational(struct dm_target *ti, struct dm_dev *dev,
1813 				sector_t start, sector_t len, void *data)
1814 {
1815 	struct request_queue *q = bdev_get_queue(dev->bdev);
1816 
1817 	return !blk_queue_nonrot(q);
1818 }
1819 
1820 static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
1821 			     sector_t start, sector_t len, void *data)
1822 {
1823 	struct request_queue *q = bdev_get_queue(dev->bdev);
1824 
1825 	return !blk_queue_add_random(q);
1826 }
1827 
1828 static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
1829 					 sector_t start, sector_t len, void *data)
1830 {
1831 	struct request_queue *q = bdev_get_queue(dev->bdev);
1832 
1833 	return !q->limits.max_write_same_sectors;
1834 }
1835 
1836 static bool dm_table_supports_write_same(struct dm_table *t)
1837 {
1838 	struct dm_target *ti;
1839 	unsigned i;
1840 
1841 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
1842 		ti = dm_table_get_target(t, i);
1843 
1844 		if (!ti->num_write_same_bios)
1845 			return false;
1846 
1847 		if (!ti->type->iterate_devices ||
1848 		    ti->type->iterate_devices(ti, device_not_write_same_capable, NULL))
1849 			return false;
1850 	}
1851 
1852 	return true;
1853 }
1854 
1855 static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev *dev,
1856 					   sector_t start, sector_t len, void *data)
1857 {
1858 	struct request_queue *q = bdev_get_queue(dev->bdev);
1859 
1860 	return !q->limits.max_write_zeroes_sectors;
1861 }
1862 
1863 static bool dm_table_supports_write_zeroes(struct dm_table *t)
1864 {
1865 	struct dm_target *ti;
1866 	unsigned i = 0;
1867 
1868 	while (i < dm_table_get_num_targets(t)) {
1869 		ti = dm_table_get_target(t, i++);
1870 
1871 		if (!ti->num_write_zeroes_bios)
1872 			return false;
1873 
1874 		if (!ti->type->iterate_devices ||
1875 		    ti->type->iterate_devices(ti, device_not_write_zeroes_capable, NULL))
1876 			return false;
1877 	}
1878 
1879 	return true;
1880 }
1881 
1882 static int device_not_nowait_capable(struct dm_target *ti, struct dm_dev *dev,
1883 				     sector_t start, sector_t len, void *data)
1884 {
1885 	struct request_queue *q = bdev_get_queue(dev->bdev);
1886 
1887 	return !blk_queue_nowait(q);
1888 }
1889 
1890 static bool dm_table_supports_nowait(struct dm_table *t)
1891 {
1892 	struct dm_target *ti;
1893 	unsigned i = 0;
1894 
1895 	while (i < dm_table_get_num_targets(t)) {
1896 		ti = dm_table_get_target(t, i++);
1897 
1898 		if (!dm_target_supports_nowait(ti->type))
1899 			return false;
1900 
1901 		if (!ti->type->iterate_devices ||
1902 		    ti->type->iterate_devices(ti, device_not_nowait_capable, NULL))
1903 			return false;
1904 	}
1905 
1906 	return true;
1907 }
1908 
1909 static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev,
1910 				      sector_t start, sector_t len, void *data)
1911 {
1912 	struct request_queue *q = bdev_get_queue(dev->bdev);
1913 
1914 	return !blk_queue_discard(q);
1915 }
1916 
1917 static bool dm_table_supports_discards(struct dm_table *t)
1918 {
1919 	struct dm_target *ti;
1920 	unsigned i;
1921 
1922 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
1923 		ti = dm_table_get_target(t, i);
1924 
1925 		if (!ti->num_discard_bios)
1926 			return false;
1927 
1928 		/*
1929 		 * Either the target provides discard support (as implied by setting
1930 		 * 'discards_supported') or it relies on _all_ data devices having
1931 		 * discard support.
1932 		 */
1933 		if (!ti->discards_supported &&
1934 		    (!ti->type->iterate_devices ||
1935 		     ti->type->iterate_devices(ti, device_not_discard_capable, NULL)))
1936 			return false;
1937 	}
1938 
1939 	return true;
1940 }
1941 
1942 static int device_not_secure_erase_capable(struct dm_target *ti,
1943 					   struct dm_dev *dev, sector_t start,
1944 					   sector_t len, void *data)
1945 {
1946 	struct request_queue *q = bdev_get_queue(dev->bdev);
1947 
1948 	return !blk_queue_secure_erase(q);
1949 }
1950 
1951 static bool dm_table_supports_secure_erase(struct dm_table *t)
1952 {
1953 	struct dm_target *ti;
1954 	unsigned int i;
1955 
1956 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
1957 		ti = dm_table_get_target(t, i);
1958 
1959 		if (!ti->num_secure_erase_bios)
1960 			return false;
1961 
1962 		if (!ti->type->iterate_devices ||
1963 		    ti->type->iterate_devices(ti, device_not_secure_erase_capable, NULL))
1964 			return false;
1965 	}
1966 
1967 	return true;
1968 }
1969 
1970 static int device_requires_stable_pages(struct dm_target *ti,
1971 					struct dm_dev *dev, sector_t start,
1972 					sector_t len, void *data)
1973 {
1974 	struct request_queue *q = bdev_get_queue(dev->bdev);
1975 
1976 	return blk_queue_stable_writes(q);
1977 }
1978 
1979 int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
1980 			      struct queue_limits *limits)
1981 {
1982 	bool wc = false, fua = false;
1983 	int page_size = PAGE_SIZE;
1984 	int r;
1985 
1986 	/*
1987 	 * Copy table's limits to the DM device's request_queue
1988 	 */
1989 	q->limits = *limits;
1990 
1991 	if (dm_table_supports_nowait(t))
1992 		blk_queue_flag_set(QUEUE_FLAG_NOWAIT, q);
1993 	else
1994 		blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, q);
1995 
1996 	if (!dm_table_supports_discards(t)) {
1997 		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
1998 		/* Must also clear discard limits... */
1999 		q->limits.max_discard_sectors = 0;
2000 		q->limits.max_hw_discard_sectors = 0;
2001 		q->limits.discard_granularity = 0;
2002 		q->limits.discard_alignment = 0;
2003 		q->limits.discard_misaligned = 0;
2004 	} else
2005 		blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
2006 
2007 	if (dm_table_supports_secure_erase(t))
2008 		blk_queue_flag_set(QUEUE_FLAG_SECERASE, q);
2009 
2010 	if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
2011 		wc = true;
2012 		if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA)))
2013 			fua = true;
2014 	}
2015 	blk_queue_write_cache(q, wc, fua);
2016 
2017 	if (dm_table_supports_dax(t, device_not_dax_capable, &page_size)) {
2018 		blk_queue_flag_set(QUEUE_FLAG_DAX, q);
2019 		if (dm_table_supports_dax(t, device_not_dax_synchronous_capable, NULL))
2020 			set_dax_synchronous(t->md->dax_dev);
2021 	}
2022 	else
2023 		blk_queue_flag_clear(QUEUE_FLAG_DAX, q);
2024 
2025 	if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled, NULL))
2026 		dax_write_cache(t->md->dax_dev, true);
2027 
2028 	/* Ensure that all underlying devices are non-rotational. */
2029 	if (dm_table_any_dev_attr(t, device_is_rotational, NULL))
2030 		blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
2031 	else
2032 		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
2033 
2034 	if (!dm_table_supports_write_same(t))
2035 		q->limits.max_write_same_sectors = 0;
2036 	if (!dm_table_supports_write_zeroes(t))
2037 		q->limits.max_write_zeroes_sectors = 0;
2038 
2039 	dm_table_verify_integrity(t);
2040 
2041 	/*
2042 	 * Some devices don't use blk_integrity but still want stable pages
2043 	 * because they do their own checksumming.
2044 	 * If any underlying device requires stable pages, a table must require
2045 	 * them as well.  Only targets that support iterate_devices are considered:
2046 	 * we don't want error, zero, etc. to require stable pages.
2047 	 */
2048 	if (dm_table_any_dev_attr(t, device_requires_stable_pages, NULL))
2049 		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
2050 	else
2051 		blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, q);
2052 
2053 	/*
2054 	 * Determine whether or not this queue's I/O timings contribute
2055 	 * to the entropy pool. Only request-based targets use this.
2056 	 * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
2057 	 * have it set.
2058 	 */
2059 	if (blk_queue_add_random(q) &&
2060 	    dm_table_any_dev_attr(t, device_is_not_random, NULL))
2061 		blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
2062 
2063 	/*
2064 	 * For a zoned target, set up the zone-related queue attributes and,
2065 	 * if necessary, the resources needed for zone append emulation.
2066 	 */
2067 	if (blk_queue_is_zoned(q)) {
2068 		r = dm_set_zones_restrictions(t, q);
2069 		if (r)
2070 			return r;
2071 	}
2072 
2073 	dm_update_keyslot_manager(q, t);
2074 	disk_update_readahead(t->md->disk);
2075 
2076 	return 0;
2077 }
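/*
 * Illustrative usage sketch (an assumption about the caller, not code from
 * this file): DM core first composes the table's queue_limits with
 * dm_calculate_queue_limits() and then applies them to the mapped device's
 * request_queue via dm_table_set_restrictions(), roughly as follows:
 */
#if 0	/* example only */
	struct queue_limits limits;
	int r;

	r = dm_calculate_queue_limits(t, &limits);
	if (!r)
		r = dm_table_set_restrictions(t, md->queue, &limits);
#endif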
2078 
2079 unsigned int dm_table_get_num_targets(struct dm_table *t)
2080 {
2081 	return t->num_targets;
2082 }
2083 
2084 struct list_head *dm_table_get_devices(struct dm_table *t)
2085 {
2086 	return &t->devices;
2087 }
2088 
2089 fmode_t dm_table_get_mode(struct dm_table *t)
2090 {
2091 	return t->mode;
2092 }
2093 EXPORT_SYMBOL(dm_table_get_mode);
2094 
2095 enum suspend_mode {
2096 	PRESUSPEND,
2097 	PRESUSPEND_UNDO,
2098 	POSTSUSPEND,
2099 };
2100 
2101 static void suspend_targets(struct dm_table *t, enum suspend_mode mode)
2102 {
2103 	int i = t->num_targets;
2104 	struct dm_target *ti = t->targets;
2105 
2106 	lockdep_assert_held(&t->md->suspend_lock);
2107 
2108 	while (i--) {
2109 		switch (mode) {
2110 		case PRESUSPEND:
2111 			if (ti->type->presuspend)
2112 				ti->type->presuspend(ti);
2113 			break;
2114 		case PRESUSPEND_UNDO:
2115 			if (ti->type->presuspend_undo)
2116 				ti->type->presuspend_undo(ti);
2117 			break;
2118 		case POSTSUSPEND:
2119 			if (ti->type->postsuspend)
2120 				ti->type->postsuspend(ti);
2121 			break;
2122 		}
2123 		ti++;
2124 	}
2125 }
2126 
2127 void dm_table_presuspend_targets(struct dm_table *t)
2128 {
2129 	if (!t)
2130 		return;
2131 
2132 	suspend_targets(t, PRESUSPEND);
2133 }
2134 
2135 void dm_table_presuspend_undo_targets(struct dm_table *t)
2136 {
2137 	if (!t)
2138 		return;
2139 
2140 	suspend_targets(t, PRESUSPEND_UNDO);
2141 }
2142 
2143 void dm_table_postsuspend_targets(struct dm_table *t)
2144 {
2145 	if (!t)
2146 		return;
2147 
2148 	suspend_targets(t, POSTSUSPEND);
2149 }
2150 
2151 int dm_table_resume_targets(struct dm_table *t)
2152 {
2153 	int i, r = 0;
2154 
2155 	lockdep_assert_held(&t->md->suspend_lock);
2156 
2157 	for (i = 0; i < t->num_targets; i++) {
2158 		struct dm_target *ti = t->targets + i;
2159 
2160 		if (!ti->type->preresume)
2161 			continue;
2162 
2163 		r = ti->type->preresume(ti);
2164 		if (r) {
2165 			DMERR("%s: %s: preresume failed, error = %d",
2166 			      dm_device_name(t->md), ti->type->name, r);
2167 			return r;
2168 		}
2169 	}
2170 
2171 	for (i = 0; i < t->num_targets; i++) {
2172 		struct dm_target *ti = t->targets + i;
2173 
2174 		if (ti->type->resume)
2175 			ti->type->resume(ti);
2176 	}
2177 
2178 	return 0;
2179 }
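/*
 * Illustrative sketch (assumption, not from this file): the presuspend,
 * postsuspend, preresume and resume hooks called above are optional
 * members of struct target_type that a target driver fills in before
 * registering with dm_register_target().  All of the example_*() names
 * below are hypothetical:
 */
#if 0	/* example only */
static struct target_type example_target = {
	.name            = "example",
	.version         = {1, 0, 0},
	.module          = THIS_MODULE,
	.ctr             = example_ctr,
	.dtr             = example_dtr,
	.map             = example_map,
	.presuspend      = example_presuspend,
	.presuspend_undo = example_presuspend_undo,
	.postsuspend     = example_postsuspend,
	.preresume       = example_preresume,
	.resume          = example_resume,
};
#endif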
2180 
2181 struct mapped_device *dm_table_get_md(struct dm_table *t)
2182 {
2183 	return t->md;
2184 }
2185 EXPORT_SYMBOL(dm_table_get_md);
2186 
2187 const char *dm_table_device_name(struct dm_table *t)
2188 {
2189 	return dm_device_name(t->md);
2190 }
2191 EXPORT_SYMBOL_GPL(dm_table_device_name);
2192 
2193 void dm_table_run_md_queue_async(struct dm_table *t)
2194 {
2195 	if (!dm_table_request_based(t))
2196 		return;
2197 
2198 	if (t->md->queue)
2199 		blk_mq_run_hw_queues(t->md->queue, true);
2200 }
2201 EXPORT_SYMBOL(dm_table_run_md_queue_async);
2202 
2203