xref: /openbmc/linux/drivers/md/dm-table.c (revision b627b4ed)
1 /*
2  * Copyright (C) 2001 Sistina Software (UK) Limited.
3  * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
4  *
5  * This file is released under the GPL.
6  */
7 
8 #include "dm.h"
9 
10 #include <linux/module.h>
11 #include <linux/vmalloc.h>
12 #include <linux/blkdev.h>
13 #include <linux/namei.h>
14 #include <linux/ctype.h>
15 #include <linux/slab.h>
16 #include <linux/interrupt.h>
17 #include <linux/mutex.h>
18 #include <linux/delay.h>
19 #include <asm/atomic.h>
20 
21 #define DM_MSG_PREFIX "table"
22 
23 #define MAX_DEPTH 16
24 #define NODE_SIZE L1_CACHE_BYTES
25 #define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
26 #define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)
27 
28 /*
29  * The table always has exactly one reference from either mapped_device->map
30  * or hash_cell->new_map. This reference is not counted in table->holders.
31  * The dm_table_create()/dm_table_destroy() pair is used for table
32  * creation/destruction.
33  *
34  * Temporary references from other code increase table->holders. The
35  * dm_table_get()/dm_table_put() pair is used to manipulate it.
36  *
37  * When the table is about to be destroyed, we wait for table->holders to
38  * drop to zero.
39  */
40 
41 struct dm_table {
42 	struct mapped_device *md;
43 	atomic_t holders;
44 
45 	/* btree table */
46 	unsigned int depth;
47 	unsigned int counts[MAX_DEPTH];	/* in nodes */
48 	sector_t *index[MAX_DEPTH];
49 
50 	unsigned int num_targets;
51 	unsigned int num_allocated;
52 	sector_t *highs;
53 	struct dm_target *targets;
54 
55 	/*
56 	 * Indicates the rw permissions for the new logical
57 	 * device.  This should be a combination of FMODE_READ
58 	 * and FMODE_WRITE.
59 	 */
60 	fmode_t mode;
61 
62 	/* a list of devices used by this table */
63 	struct list_head devices;
64 
65 	/*
66 	 * These are optimistic limits taken from all the
67 	 * targets, some targets will need smaller limits.
68 	 */
69 	struct io_restrictions limits;
70 
71 	/* events get handed up using this callback */
72 	void (*event_fn)(void *);
73 	void *event_context;
74 };
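The holders discipline described in the comment above pairs dm_table_get() with dm_table_put() for every temporary reference. A minimal sketch of a hypothetical caller (illustrative only, not part of this file):

static void example_with_table(struct dm_table *t)
{
	dm_table_get(t);		/* take a temporary reference: t->holders++ */

	/* ... read-only use of the table ... */

	dm_table_put(t);		/* drop it; dm_table_destroy() waits for holders == 0 */
}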
75 
76 /*
77  * Similar to ceiling(log_base(n)): how many times n must be divided by 'base' to reach 1.
78  */
79 static unsigned int int_log(unsigned int n, unsigned int base)
80 {
81 	int result = 0;
82 
83 	while (n > 1) {
84 		n = dm_div_up(n, base);
85 		result++;
86 	}
87 
88 	return result;
89 }
90 
91 /*
92  * Returns the minimum that is _not_ zero, unless both are zero.
93  */
94 #define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r)))
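For example, min_not_zero(0, 8) evaluates to 8 and min_not_zero(4, 8) to 4, so an unset (zero) limit on one side never masks a real limit on the other.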
95 
96 /*
97  * Combine two io_restrictions, always taking the lower value.
98  */
99 static void combine_restrictions_low(struct io_restrictions *lhs,
100 				     struct io_restrictions *rhs)
101 {
102 	lhs->max_sectors =
103 		min_not_zero(lhs->max_sectors, rhs->max_sectors);
104 
105 	lhs->max_phys_segments =
106 		min_not_zero(lhs->max_phys_segments, rhs->max_phys_segments);
107 
108 	lhs->max_hw_segments =
109 		min_not_zero(lhs->max_hw_segments, rhs->max_hw_segments);
110 
111 	lhs->hardsect_size = max(lhs->hardsect_size, rhs->hardsect_size);
112 
113 	lhs->max_segment_size =
114 		min_not_zero(lhs->max_segment_size, rhs->max_segment_size);
115 
116 	lhs->max_hw_sectors =
117 		min_not_zero(lhs->max_hw_sectors, rhs->max_hw_sectors);
118 
119 	lhs->seg_boundary_mask =
120 		min_not_zero(lhs->seg_boundary_mask, rhs->seg_boundary_mask);
121 
122 	lhs->bounce_pfn = min_not_zero(lhs->bounce_pfn, rhs->bounce_pfn);
123 
124 	lhs->no_cluster |= rhs->no_cluster;
125 }
126 
127 /*
128  * Calculate the index of the k'th child of the n'th node.
129  */
130 static inline unsigned int get_child(unsigned int n, unsigned int k)
131 {
132 	return (n * CHILDREN_PER_NODE) + k;
133 }
134 
135 /*
136  * Return the n'th node of level l from table t.
137  */
138 static inline sector_t *get_node(struct dm_table *t,
139 				 unsigned int l, unsigned int n)
140 {
141 	return t->index[l] + (n * KEYS_PER_NODE);
142 }
143 
144 /*
145  * Return the highest key that you could look up from the n'th
146  * node on level l of the btree.
147  */
148 static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
149 {
150 	for (; l < t->depth - 1; l++)
151 		n = get_child(n, CHILDREN_PER_NODE - 1);
152 
153 	if (n >= t->counts[l])
154 		return (sector_t) -1;
155 
156 	return get_node(t, l, n)[KEYS_PER_NODE - 1];
157 }
158 
159 /*
160  * Fills in a level of the btree based on the highs of the level
161  * below it.
162  */
163 static int setup_btree_index(unsigned int l, struct dm_table *t)
164 {
165 	unsigned int n, k;
166 	sector_t *node;
167 
168 	for (n = 0U; n < t->counts[l]; n++) {
169 		node = get_node(t, l, n);
170 
171 		for (k = 0U; k < KEYS_PER_NODE; k++)
172 			node[k] = high(t, l + 1, get_child(n, k));
173 	}
174 
175 	return 0;
176 }
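A concrete illustration of the node arithmetic above, assuming a 64-byte L1 cache line and an 8-byte sector_t (both values are architecture-dependent):

/*
 * Illustration only (assumes NODE_SIZE == 64, sizeof(sector_t) == 8):
 *
 *   KEYS_PER_NODE     = 64 / 8 = 8 keys per node
 *   CHILDREN_PER_NODE = 8 + 1  = 9 children per node
 *
 * get_child(n, k) = n * 9 + k, so the k'th child of node n on level l is
 * node n * 9 + k on level l + 1, and get_node(t, l, n) points at the 8
 * keys t->index[l][n * 8] .. t->index[l][n * 8 + 7].  high(t, l, n)
 * follows right-most children down to the leaf level and returns that
 * leaf node's last key.
 */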
177 
178 void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
179 {
180 	unsigned long size;
181 	void *addr;
182 
183 	/*
184 	 * Check that we're not going to divide by zero or overflow.
185 	 */
186 	if (!elem_size || nmemb > (ULONG_MAX / elem_size))
187 		return NULL;
188 
189 	size = nmemb * elem_size;
190 	addr = vmalloc(size);
191 	if (addr)
192 		memset(addr, 0, size);
193 
194 	return addr;
195 }
196 
197 /*
198  * The highs and targets arrays are managed as dynamic arrays during a
199  * table load.
200  */
201 static int alloc_targets(struct dm_table *t, unsigned int num)
202 {
203 	sector_t *n_highs;
204 	struct dm_target *n_targets;
205 	int n = t->num_targets;
206 
207 	/*
208 	 * Allocate both the target array and offset array at once.
209 	 * Append an empty entry to catch sectors beyond the end of
210 	 * the device.
211 	 */
212 	n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) +
213 					  sizeof(sector_t));
214 	if (!n_highs)
215 		return -ENOMEM;
216 
217 	n_targets = (struct dm_target *) (n_highs + num);
218 
219 	if (n) {
220 		memcpy(n_highs, t->highs, sizeof(*n_highs) * n);
221 		memcpy(n_targets, t->targets, sizeof(*n_targets) * n);
222 	}
223 
224 	memset(n_highs + n, -1, sizeof(*n_highs) * (num - n));
225 	vfree(t->highs);
226 
227 	t->num_allocated = num;
228 	t->highs = n_highs;
229 	t->targets = n_targets;
230 
231 	return 0;
232 }
233 
234 int dm_table_create(struct dm_table **result, fmode_t mode,
235 		    unsigned num_targets, struct mapped_device *md)
236 {
237 	struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);
238 
239 	if (!t)
240 		return -ENOMEM;
241 
242 	INIT_LIST_HEAD(&t->devices);
243 	atomic_set(&t->holders, 0);
244 
245 	if (!num_targets)
246 		num_targets = KEYS_PER_NODE;
247 
248 	num_targets = dm_round_up(num_targets, KEYS_PER_NODE);
249 
250 	if (alloc_targets(t, num_targets)) {
251 		kfree(t);
252 		t = NULL;
253 		return -ENOMEM;
254 	}
255 
256 	t->mode = mode;
257 	t->md = md;
258 	*result = t;
259 	return 0;
260 }
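A hedged sketch of the table lifecycle built from these primitives; the "linear" target and its parameters are only an example, and error handling is abbreviated:

static int example_build_table(struct mapped_device *md, struct dm_table **result)
{
	char params[] = "/dev/sdb 0";	/* dm_split_args() modifies this in place */
	struct dm_table *t;
	int r;

	r = dm_table_create(&t, FMODE_READ | FMODE_WRITE, 1, md);
	if (r)
		return r;

	/* One target mapping sectors 0..1023 of the new logical device. */
	r = dm_table_add_target(t, "linear", 0, 1024, params);
	if (!r)
		r = dm_table_complete(t);	/* builds the lookup btree */
	if (r) {
		dm_table_destroy(t);
		return r;
	}

	*result = t;
	return 0;
}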
261 
262 static void free_devices(struct list_head *devices)
263 {
264 	struct list_head *tmp, *next;
265 
266 	list_for_each_safe(tmp, next, devices) {
267 		struct dm_dev_internal *dd =
268 		    list_entry(tmp, struct dm_dev_internal, list);
269 		kfree(dd);
270 	}
271 }
272 
273 void dm_table_destroy(struct dm_table *t)
274 {
275 	unsigned int i;
276 
277 	while (atomic_read(&t->holders))
278 		msleep(1);
279 	smp_mb();
280 
281 	/* free the indexes (see dm_table_complete) */
282 	if (t->depth >= 2)
283 		vfree(t->index[t->depth - 2]);
284 
285 	/* free the targets */
286 	for (i = 0; i < t->num_targets; i++) {
287 		struct dm_target *tgt = t->targets + i;
288 
289 		if (tgt->type->dtr)
290 			tgt->type->dtr(tgt);
291 
292 		dm_put_target_type(tgt->type);
293 	}
294 
295 	vfree(t->highs);
296 
297 	/* free the device list */
298 	if (t->devices.next != &t->devices) {
299 		DMWARN("devices still present during destroy: "
300 		       "dm_table_remove_device calls missing");
301 
302 		free_devices(&t->devices);
303 	}
304 
305 	kfree(t);
306 }
307 
308 void dm_table_get(struct dm_table *t)
309 {
310 	atomic_inc(&t->holders);
311 }
312 
313 void dm_table_put(struct dm_table *t)
314 {
315 	if (!t)
316 		return;
317 
318 	smp_mb__before_atomic_dec();
319 	atomic_dec(&t->holders);
320 }
321 
322 /*
323  * Checks to see if we need to extend highs or targets.
324  */
325 static inline int check_space(struct dm_table *t)
326 {
327 	if (t->num_targets >= t->num_allocated)
328 		return alloc_targets(t, t->num_allocated * 2);
329 
330 	return 0;
331 }
332 
333 /*
334  * See if we've already got a device in the list.
335  */
336 static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
337 {
338 	struct dm_dev_internal *dd;
339 
340 	list_for_each_entry (dd, l, list)
341 		if (dd->dm_dev.bdev->bd_dev == dev)
342 			return dd;
343 
344 	return NULL;
345 }
346 
347 /*
348  * Open a device so we can use it as a map destination.
349  */
350 static int open_dev(struct dm_dev_internal *d, dev_t dev,
351 		    struct mapped_device *md)
352 {
353 	static char *_claim_ptr = "I belong to device-mapper";
354 	struct block_device *bdev;
355 
356 	int r;
357 
358 	BUG_ON(d->dm_dev.bdev);
359 
360 	bdev = open_by_devnum(dev, d->dm_dev.mode);
361 	if (IS_ERR(bdev))
362 		return PTR_ERR(bdev);
363 	r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md));
364 	if (r)
365 		blkdev_put(bdev, d->dm_dev.mode);
366 	else
367 		d->dm_dev.bdev = bdev;
368 	return r;
369 }
370 
371 /*
372  * Close a device that we've been using.
373  */
374 static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
375 {
376 	if (!d->dm_dev.bdev)
377 		return;
378 
379 	bd_release_from_disk(d->dm_dev.bdev, dm_disk(md));
380 	blkdev_put(d->dm_dev.bdev, d->dm_dev.mode);
381 	d->dm_dev.bdev = NULL;
382 }
383 
384 /*
385  * If possible, this checks that an area of a destination device is valid.
386  */
387 static int check_device_area(struct dm_dev_internal *dd, sector_t start,
388 			     sector_t len)
389 {
390 	sector_t dev_size = dd->dm_dev.bdev->bd_inode->i_size >> SECTOR_SHIFT;
391 
392 	if (!dev_size)
393 		return 1;
394 
395 	return ((start < dev_size) && (len <= (dev_size - start)));
396 }
397 
398 /*
399  * This upgrades the mode on an already open dm_dev, being
400  * careful to leave things as they were if we fail to reopen the
401  * device and not to touch the existing bdev field in case
402  * it is accessed concurrently inside dm_table_any_congested().
403  */
404 static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
405 			struct mapped_device *md)
406 {
407 	int r;
408 	struct dm_dev_internal dd_new, dd_old;
409 
410 	dd_new = dd_old = *dd;
411 
412 	dd_new.dm_dev.mode |= new_mode;
413 	dd_new.dm_dev.bdev = NULL;
414 
415 	r = open_dev(&dd_new, dd->dm_dev.bdev->bd_dev, md);
416 	if (r)
417 		return r;
418 
419 	dd->dm_dev.mode |= new_mode;
420 	close_dev(&dd_old, md);
421 
422 	return 0;
423 }
424 
425 /*
426  * Add a device to the list, or just increment the usage count if
427  * it's already present.
428  */
429 static int __table_get_device(struct dm_table *t, struct dm_target *ti,
430 			      const char *path, sector_t start, sector_t len,
431 			      fmode_t mode, struct dm_dev **result)
432 {
433 	int r;
434 	dev_t uninitialized_var(dev);
435 	struct dm_dev_internal *dd;
436 	unsigned int major, minor;
437 
438 	BUG_ON(!t);
439 
440 	if (sscanf(path, "%u:%u", &major, &minor) == 2) {
441 		/* Extract the major/minor numbers */
442 		dev = MKDEV(major, minor);
443 		if (MAJOR(dev) != major || MINOR(dev) != minor)
444 			return -EOVERFLOW;
445 	} else {
446 		/* convert the path to a device */
447 		struct block_device *bdev = lookup_bdev(path);
448 
449 		if (IS_ERR(bdev))
450 			return PTR_ERR(bdev);
451 		dev = bdev->bd_dev;
452 		bdput(bdev);
453 	}
454 
455 	dd = find_device(&t->devices, dev);
456 	if (!dd) {
457 		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
458 		if (!dd)
459 			return -ENOMEM;
460 
461 		dd->dm_dev.mode = mode;
462 		dd->dm_dev.bdev = NULL;
463 
464 		if ((r = open_dev(dd, dev, t->md))) {
465 			kfree(dd);
466 			return r;
467 		}
468 
469 		format_dev_t(dd->dm_dev.name, dev);
470 
471 		atomic_set(&dd->count, 0);
472 		list_add(&dd->list, &t->devices);
473 
474 	} else if (dd->dm_dev.mode != (mode | dd->dm_dev.mode)) {
475 		r = upgrade_mode(dd, mode, t->md);
476 		if (r)
477 			return r;
478 	}
479 	atomic_inc(&dd->count);
480 
481 	if (!check_device_area(dd, start, len)) {
482 		DMWARN("device %s too small for target", path);
483 		dm_put_device(ti, &dd->dm_dev);
484 		return -EINVAL;
485 	}
486 
487 	*result = &dd->dm_dev;
488 
489 	return 0;
490 }
491 
492 void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
493 {
494 	struct request_queue *q = bdev_get_queue(bdev);
495 	struct io_restrictions *rs = &ti->limits;
496 	char b[BDEVNAME_SIZE];
497 
498 	if (unlikely(!q)) {
499 		DMWARN("%s: Cannot set limits for nonexistent device %s",
500 		       dm_device_name(ti->table->md), bdevname(bdev, b));
501 		return;
502 	}
503 
504 	/*
505 	 * Combine the device limits low.
506 	 *
507 	 * FIXME: if we move an io_restriction struct
508 	 *        into q this would just be a call to
509 	 *        combine_restrictions_low()
510 	 */
511 	rs->max_sectors =
512 		min_not_zero(rs->max_sectors, q->max_sectors);
513 
514 	/*
515 	 * Check if merge fn is supported.
516 	 * If not, we'll force DM to use PAGE_SIZE or
517 	 * smaller I/O, just to be safe.
518 	 */
519 
520 	if (q->merge_bvec_fn && !ti->type->merge)
521 		rs->max_sectors =
522 			min_not_zero(rs->max_sectors,
523 				     (unsigned int) (PAGE_SIZE >> 9));
524 
525 	rs->max_phys_segments =
526 		min_not_zero(rs->max_phys_segments,
527 			     q->max_phys_segments);
528 
529 	rs->max_hw_segments =
530 		min_not_zero(rs->max_hw_segments, q->max_hw_segments);
531 
532 	rs->hardsect_size = max(rs->hardsect_size, q->hardsect_size);
533 
534 	rs->max_segment_size =
535 		min_not_zero(rs->max_segment_size, q->max_segment_size);
536 
537 	rs->max_hw_sectors =
538 		min_not_zero(rs->max_hw_sectors, q->max_hw_sectors);
539 
540 	rs->seg_boundary_mask =
541 		min_not_zero(rs->seg_boundary_mask,
542 			     q->seg_boundary_mask);
543 
544 	rs->bounce_pfn = min_not_zero(rs->bounce_pfn, q->bounce_pfn);
545 
546 	rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
547 }
548 EXPORT_SYMBOL_GPL(dm_set_device_limits);
549 
550 int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
551 		  sector_t len, fmode_t mode, struct dm_dev **result)
552 {
553 	int r = __table_get_device(ti->table, ti, path,
554 				   start, len, mode, result);
555 
556 	if (!r)
557 		dm_set_device_limits(ti, (*result)->bdev);
558 
559 	return r;
560 }
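Roughly how a target constructor consumes this helper; the ctr below is hypothetical, its argv layout is the target's own convention, and the zero offset into the underlying device is just an example:

static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_dev *dev;
	int r;

	if (argc != 1) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	/* argv[0] is a "major:minor" pair or a path such as /dev/sdb. */
	r = dm_get_device(ti, argv[0], 0, ti->len,
			  dm_table_get_mode(ti->table), &dev);
	if (r) {
		ti->error = "Device lookup failed";
		return r;
	}

	ti->private = dev;	/* released with dm_put_device() in the dtr */
	return 0;
}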
561 
562 /*
563  * Decrement a device's use count and remove it if necessary.
564  */
565 void dm_put_device(struct dm_target *ti, struct dm_dev *d)
566 {
567 	struct dm_dev_internal *dd = container_of(d, struct dm_dev_internal,
568 						  dm_dev);
569 
570 	if (atomic_dec_and_test(&dd->count)) {
571 		close_dev(dd, ti->table->md);
572 		list_del(&dd->list);
573 		kfree(dd);
574 	}
575 }
576 
577 /*
578  * Checks to see if the target joins onto the end of the table.
579  */
580 static int adjoin(struct dm_table *table, struct dm_target *ti)
581 {
582 	struct dm_target *prev;
583 
584 	if (!table->num_targets)
585 		return !ti->begin;
586 
587 	prev = &table->targets[table->num_targets - 1];
588 	return (ti->begin == (prev->begin + prev->len));
589 }
590 
591 /*
592  * Used to dynamically allocate the arg array.
593  */
594 static char **realloc_argv(unsigned *array_size, char **old_argv)
595 {
596 	char **argv;
597 	unsigned new_size;
598 
599 	new_size = *array_size ? *array_size * 2 : 64;
600 	argv = kmalloc(new_size * sizeof(*argv), GFP_KERNEL);
601 	if (argv) {
602 		memcpy(argv, old_argv, *array_size * sizeof(*argv));
603 		*array_size = new_size;
604 	}
605 
606 	kfree(old_argv);
607 	return argv;
608 }
609 
610 /*
611  * Destructively splits up the argument list to pass to ctr.
612  */
613 int dm_split_args(int *argc, char ***argvp, char *input)
614 {
615 	char *start, *end = input, *out, **argv = NULL;
616 	unsigned array_size = 0;
617 
618 	*argc = 0;
619 
620 	if (!input) {
621 		*argvp = NULL;
622 		return 0;
623 	}
624 
625 	argv = realloc_argv(&array_size, argv);
626 	if (!argv)
627 		return -ENOMEM;
628 
629 	while (1) {
630 		start = end;
631 
632 		/* Skip whitespace */
633 		while (*start && isspace(*start))
634 			start++;
635 
636 		if (!*start)
637 			break;	/* success, we hit the end */
638 
639 		/* 'out' is used to strip the backslash escapes */
640 		end = out = start;
641 		while (*end) {
642 			/* Everything apart from '\0' can be quoted */
643 			if (*end == '\\' && *(end + 1)) {
644 				*out++ = *(end + 1);
645 				end += 2;
646 				continue;
647 			}
648 
649 			if (isspace(*end))
650 				break;	/* end of token */
651 
652 			*out++ = *end++;
653 		}
654 
655 		/* have we already filled the array ? */
656 		if ((*argc + 1) > array_size) {
657 			argv = realloc_argv(&array_size, argv);
658 			if (!argv)
659 				return -ENOMEM;
660 		}
661 
662 		/* we know this is whitespace */
663 		if (*end)
664 			end++;
665 
666 		/* terminate the string and put it in the array */
667 		*out = '\0';
668 		argv[*argc] = start;
669 		(*argc)++;
670 	}
671 
672 	*argvp = argv;
673 	return 0;
674 }
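An illustrative trace of the splitting and escaping rules implemented above:

/*
 * Example: given the writable string "zero\ pad  2048", dm_split_args()
 * returns argc == 2 with argv[0] == "zero pad" (the backslash escapes the
 * space) and argv[1] == "2048".  The tokens point into the input buffer,
 * which is modified in place, so the caller kfree()s argv but must keep
 * the buffer alive for as long as the arguments are used.
 */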
675 
676 static void check_for_valid_limits(struct io_restrictions *rs)
677 {
678 	if (!rs->max_sectors)
679 		rs->max_sectors = SAFE_MAX_SECTORS;
680 	if (!rs->max_hw_sectors)
681 		rs->max_hw_sectors = SAFE_MAX_SECTORS;
682 	if (!rs->max_phys_segments)
683 		rs->max_phys_segments = MAX_PHYS_SEGMENTS;
684 	if (!rs->max_hw_segments)
685 		rs->max_hw_segments = MAX_HW_SEGMENTS;
686 	if (!rs->hardsect_size)
687 		rs->hardsect_size = 1 << SECTOR_SHIFT;
688 	if (!rs->max_segment_size)
689 		rs->max_segment_size = MAX_SEGMENT_SIZE;
690 	if (!rs->seg_boundary_mask)
691 		rs->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
692 	if (!rs->bounce_pfn)
693 		rs->bounce_pfn = -1;
694 }
695 
696 int dm_table_add_target(struct dm_table *t, const char *type,
697 			sector_t start, sector_t len, char *params)
698 {
699 	int r = -EINVAL, argc;
700 	char **argv;
701 	struct dm_target *tgt;
702 
703 	if ((r = check_space(t)))
704 		return r;
705 
706 	tgt = t->targets + t->num_targets;
707 	memset(tgt, 0, sizeof(*tgt));
708 
709 	if (!len) {
710 		DMERR("%s: zero-length target", dm_device_name(t->md));
711 		return -EINVAL;
712 	}
713 
714 	tgt->type = dm_get_target_type(type);
715 	if (!tgt->type) {
716 		DMERR("%s: %s: unknown target type", dm_device_name(t->md),
717 		      type);
718 		return -EINVAL;
719 	}
720 
721 	tgt->table = t;
722 	tgt->begin = start;
723 	tgt->len = len;
724 	tgt->error = "Unknown error";
725 
726 	/*
727 	 * Does this target adjoin the previous one ?
728 	 */
729 	if (!adjoin(t, tgt)) {
730 		tgt->error = "Gap in table";
731 		r = -EINVAL;
732 		goto bad;
733 	}
734 
735 	r = dm_split_args(&argc, &argv, params);
736 	if (r) {
737 		tgt->error = "couldn't split parameters (insufficient memory)";
738 		goto bad;
739 	}
740 
741 	r = tgt->type->ctr(tgt, argc, argv);
742 	kfree(argv);
743 	if (r)
744 		goto bad;
745 
746 	t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;
747 
748 	/* FIXME: the plan is to combine high here and then have
749 	 * the merge fn apply the target level restrictions. */
750 	combine_restrictions_low(&t->limits, &tgt->limits);
751 	return 0;
752 
753  bad:
754 	DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
755 	dm_put_target_type(tgt->type);
756 	return r;
757 }
758 
759 static int setup_indexes(struct dm_table *t)
760 {
761 	int i;
762 	unsigned int total = 0;
763 	sector_t *indexes;
764 
765 	/* allocate the space for *all* the indexes */
766 	for (i = t->depth - 2; i >= 0; i--) {
767 		t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
768 		total += t->counts[i];
769 	}
770 
771 	indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE);
772 	if (!indexes)
773 		return -ENOMEM;
774 
775 	/* set up internal nodes, bottom-up */
776 	for (i = t->depth - 2; i >= 0; i--) {
777 		t->index[i] = indexes;
778 		indexes += (KEYS_PER_NODE * t->counts[i]);
779 		setup_btree_index(i, t);
780 	}
781 
782 	return 0;
783 }
784 
785 /*
786  * Builds the btree to index the map.
787  */
788 int dm_table_complete(struct dm_table *t)
789 {
790 	int r = 0;
791 	unsigned int leaf_nodes;
792 
793 	check_for_valid_limits(&t->limits);
794 
795 	/* how many indexes will the btree have ? */
796 	leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
797 	t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);
798 
799 	/* leaf layer has already been set up */
800 	t->counts[t->depth - 1] = leaf_nodes;
801 	t->index[t->depth - 1] = t->highs;
802 
803 	if (t->depth >= 2)
804 		r = setup_indexes(t);
805 
806 	return r;
807 }
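A worked example of the sizing above, assuming KEYS_PER_NODE == 8 and therefore CHILDREN_PER_NODE == 9 (both depend on the architecture's cache line and sector_t size):

/*
 * Worked example (assumes KEYS_PER_NODE == 8, CHILDREN_PER_NODE == 9):
 * a table with 100 targets gives
 *
 *   leaf_nodes = dm_div_up(100, 8)  = 13
 *   depth      = 1 + int_log(13, 9) = 1 + 2 = 3
 *
 * The leaf level reuses t->highs; setup_indexes() then allocates
 * dm_div_up(13, 9) + dm_div_up(2, 9) = 2 + 1 = 3 internal nodes
 * (3 * NODE_SIZE bytes) and fills them bottom-up with the highest key
 * reachable through each child.
 */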
808 
809 static DEFINE_MUTEX(_event_lock);
810 void dm_table_event_callback(struct dm_table *t,
811 			     void (*fn)(void *), void *context)
812 {
813 	mutex_lock(&_event_lock);
814 	t->event_fn = fn;
815 	t->event_context = context;
816 	mutex_unlock(&_event_lock);
817 }
818 
819 void dm_table_event(struct dm_table *t)
820 {
821 	/*
822 	 * You can no longer call dm_table_event() from interrupt
823 	 * context; use a bottom half instead.
824 	 */
825 	BUG_ON(in_interrupt());
826 
827 	mutex_lock(&_event_lock);
828 	if (t->event_fn)
829 		t->event_fn(t->event_context);
830 	mutex_unlock(&_event_lock);
831 }
832 
833 sector_t dm_table_get_size(struct dm_table *t)
834 {
835 	return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
836 }
837 
838 struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
839 {
840 	if (index >= t->num_targets)
841 		return NULL;
842 
843 	return t->targets + index;
844 }
845 
846 /*
847  * Search the btree for the correct target.
848  *
849  * Caller should check returned pointer with dm_target_is_valid()
850  * to trap I/O beyond end of device.
851  */
852 struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
853 {
854 	unsigned int l, n = 0, k = 0;
855 	sector_t *node;
856 
857 	for (l = 0; l < t->depth; l++) {
858 		n = get_child(n, k);
859 		node = get_node(t, l, n);
860 
861 		for (k = 0; k < KEYS_PER_NODE; k++)
862 			if (node[k] >= sector)
863 				break;
864 	}
865 
866 	return &t->targets[(KEYS_PER_NODE * n) + k];
867 }
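In the common shallow case this lookup reduces to a linear scan of t->highs; a brief illustration:

/*
 * Illustration: with depth == 1 (at most KEYS_PER_NODE targets) the only
 * level is t->highs itself, so a sector maps to the first target whose
 * last sector (highs[i]) is >= the requested sector.  The trailing
 * (sector_t)-1 entries written by alloc_targets() match any sector past
 * the end of the device, which is why callers check the result with
 * dm_target_is_valid().
 */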
868 
869 /*
870  * Set the integrity profile for this device if all devices used have
871  * matching profiles.
872  */
873 static void dm_table_set_integrity(struct dm_table *t)
874 {
875 	struct list_head *devices = dm_table_get_devices(t);
876 	struct dm_dev_internal *prev = NULL, *dd = NULL;
877 
878 	if (!blk_get_integrity(dm_disk(t->md)))
879 		return;
880 
881 	list_for_each_entry(dd, devices, list) {
882 		if (prev &&
883 		    blk_integrity_compare(prev->dm_dev.bdev->bd_disk,
884 					  dd->dm_dev.bdev->bd_disk) < 0) {
885 			DMWARN("%s: integrity not set: %s and %s mismatch",
886 			       dm_device_name(t->md),
887 			       prev->dm_dev.bdev->bd_disk->disk_name,
888 			       dd->dm_dev.bdev->bd_disk->disk_name);
889 			goto no_integrity;
890 		}
891 		prev = dd;
892 	}
893 
894 	if (!prev || !bdev_get_integrity(prev->dm_dev.bdev))
895 		goto no_integrity;
896 
897 	blk_integrity_register(dm_disk(t->md),
898 			       bdev_get_integrity(prev->dm_dev.bdev));
899 
900 	return;
901 
902 no_integrity:
903 	blk_integrity_register(dm_disk(t->md), NULL);
904 
905 	return;
906 }
907 
908 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
909 {
910 	/*
911 	 * Make sure we obey the optimistic sub-device
912 	 * restrictions.
913 	 */
914 	blk_queue_max_sectors(q, t->limits.max_sectors);
915 	q->max_phys_segments = t->limits.max_phys_segments;
916 	q->max_hw_segments = t->limits.max_hw_segments;
917 	q->hardsect_size = t->limits.hardsect_size;
918 	q->max_segment_size = t->limits.max_segment_size;
919 	q->max_hw_sectors = t->limits.max_hw_sectors;
920 	q->seg_boundary_mask = t->limits.seg_boundary_mask;
921 	q->bounce_pfn = t->limits.bounce_pfn;
922 
923 	if (t->limits.no_cluster)
924 		queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
925 	else
926 		queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q);
927 
928 	dm_table_set_integrity(t);
929 }
930 
931 unsigned int dm_table_get_num_targets(struct dm_table *t)
932 {
933 	return t->num_targets;
934 }
935 
936 struct list_head *dm_table_get_devices(struct dm_table *t)
937 {
938 	return &t->devices;
939 }
940 
941 fmode_t dm_table_get_mode(struct dm_table *t)
942 {
943 	return t->mode;
944 }
945 
946 static void suspend_targets(struct dm_table *t, unsigned postsuspend)
947 {
948 	int i = t->num_targets;
949 	struct dm_target *ti = t->targets;
950 
951 	while (i--) {
952 		if (postsuspend) {
953 			if (ti->type->postsuspend)
954 				ti->type->postsuspend(ti);
955 		} else if (ti->type->presuspend)
956 			ti->type->presuspend(ti);
957 
958 		ti++;
959 	}
960 }
961 
962 void dm_table_presuspend_targets(struct dm_table *t)
963 {
964 	if (!t)
965 		return;
966 
967 	suspend_targets(t, 0);
968 }
969 
970 void dm_table_postsuspend_targets(struct dm_table *t)
971 {
972 	if (!t)
973 		return;
974 
975 	suspend_targets(t, 1);
976 }
977 
978 int dm_table_resume_targets(struct dm_table *t)
979 {
980 	int i, r = 0;
981 
982 	for (i = 0; i < t->num_targets; i++) {
983 		struct dm_target *ti = t->targets + i;
984 
985 		if (!ti->type->preresume)
986 			continue;
987 
988 		r = ti->type->preresume(ti);
989 		if (r)
990 			return r;
991 	}
992 
993 	for (i = 0; i < t->num_targets; i++) {
994 		struct dm_target *ti = t->targets + i;
995 
996 		if (ti->type->resume)
997 			ti->type->resume(ti);
998 	}
999 
1000 	return 0;
1001 }
1002 
1003 int dm_table_any_congested(struct dm_table *t, int bdi_bits)
1004 {
1005 	struct dm_dev_internal *dd;
1006 	struct list_head *devices = dm_table_get_devices(t);
1007 	int r = 0;
1008 
1009 	list_for_each_entry(dd, devices, list) {
1010 		struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
1011 		char b[BDEVNAME_SIZE];
1012 
1013 		if (likely(q))
1014 			r |= bdi_congested(&q->backing_dev_info, bdi_bits);
1015 		else
1016 			DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
1017 				     dm_device_name(t->md),
1018 				     bdevname(dd->dm_dev.bdev, b));
1019 	}
1020 
1021 	return r;
1022 }
1023 
1024 void dm_table_unplug_all(struct dm_table *t)
1025 {
1026 	struct dm_dev_internal *dd;
1027 	struct list_head *devices = dm_table_get_devices(t);
1028 
1029 	list_for_each_entry(dd, devices, list) {
1030 		struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
1031 		char b[BDEVNAME_SIZE];
1032 
1033 		if (likely(q))
1034 			blk_unplug(q);
1035 		else
1036 			DMWARN_LIMIT("%s: Cannot unplug nonexistent device %s",
1037 				     dm_device_name(t->md),
1038 				     bdevname(dd->dm_dev.bdev, b));
1039 	}
1040 }
1041 
1042 struct mapped_device *dm_table_get_md(struct dm_table *t)
1043 {
1044 	dm_get(t->md);
1045 
1046 	return t->md;
1047 }
1048 
1049 EXPORT_SYMBOL(dm_vcalloc);
1050 EXPORT_SYMBOL(dm_get_device);
1051 EXPORT_SYMBOL(dm_put_device);
1052 EXPORT_SYMBOL(dm_table_event);
1053 EXPORT_SYMBOL(dm_table_get_size);
1054 EXPORT_SYMBOL(dm_table_get_mode);
1055 EXPORT_SYMBOL(dm_table_get_md);
1056 EXPORT_SYMBOL(dm_table_put);
1057 EXPORT_SYMBOL(dm_table_get);
1058 EXPORT_SYMBOL(dm_table_unplug_all);
1059