xref: /openbmc/linux/drivers/md/dm-table.c (revision 8fa5723aa7e053d498336b48448b292fc2e0458b)
1 /*
2  * Copyright (C) 2001 Sistina Software (UK) Limited.
3  * Copyright (C) 2004 Red Hat, Inc. All rights reserved.
4  *
5  * This file is released under the GPL.
6  */
7 
8 #include "dm.h"
9 
10 #include <linux/module.h>
11 #include <linux/vmalloc.h>
12 #include <linux/blkdev.h>
13 #include <linux/namei.h>
14 #include <linux/ctype.h>
15 #include <linux/slab.h>
16 #include <linux/interrupt.h>
17 #include <linux/mutex.h>
18 #include <asm/atomic.h>
19 
20 #define DM_MSG_PREFIX "table"
21 
22 #define MAX_DEPTH 16
23 #define NODE_SIZE L1_CACHE_BYTES
24 #define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
25 #define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)
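/*
 * Worked example of the node geometry (a sketch, assuming a 64-byte L1
 * cache line and an 8-byte sector_t): NODE_SIZE == 64, KEYS_PER_NODE == 8
 * and CHILDREN_PER_NODE == 9, so each btree node packs eight boundary
 * sectors into a single cache line.
 */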
26 
27 struct dm_table {
28 	struct mapped_device *md;
29 	atomic_t holders;
30 
31 	/* btree table */
32 	unsigned int depth;
33 	unsigned int counts[MAX_DEPTH];	/* in nodes */
34 	sector_t *index[MAX_DEPTH];
35 
36 	unsigned int num_targets;
37 	unsigned int num_allocated;
38 	sector_t *highs;
39 	struct dm_target *targets;
40 
41 	/*
42 	 * Indicates the rw permissions for the new logical
43 	 * device.  This should be a combination of FMODE_READ
44 	 * and FMODE_WRITE.
45 	 */
46 	fmode_t mode;
47 
48 	/* a list of devices used by this table */
49 	struct list_head devices;
50 
51 	/*
52 	 * These are optimistic limits taken from all the
53 	 * targets; some targets will need smaller limits.
54 	 */
55 	struct io_restrictions limits;
56 
57 	/* events get handed up using this callback */
58 	void (*event_fn)(void *);
59 	void *event_context;
60 };
61 
62 /*
63  * Similar to ceiling(log_base(n))
64  */
65 static unsigned int int_log(unsigned int n, unsigned int base)
66 {
67 	int result = 0;
68 
69 	while (n > 1) {
70 		n = dm_div_up(n, base);
71 		result++;
72 	}
73 
74 	return result;
75 }
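/*
 * For example int_log(9, 9) == 1 and int_log(10, 9) == 2, matching
 * ceil(log_9(n)).  dm_table_complete() below uses this to derive the
 * btree depth from the number of leaf nodes.
 */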
76 
77 /*
78  * Returns the minimum that is _not_ zero, unless both are zero.
79  */
80 #define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r)))
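/*
 * So min_not_zero(0, 8) == 8, min_not_zero(4, 8) == 4 and
 * min_not_zero(0, 0) == 0, treating zero as "no limit set".
 */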
81 
82 /*
83  * Combine two io_restrictions, always taking the lower value.
84  */
85 static void combine_restrictions_low(struct io_restrictions *lhs,
86 				     struct io_restrictions *rhs)
87 {
88 	lhs->max_sectors =
89 		min_not_zero(lhs->max_sectors, rhs->max_sectors);
90 
91 	lhs->max_phys_segments =
92 		min_not_zero(lhs->max_phys_segments, rhs->max_phys_segments);
93 
94 	lhs->max_hw_segments =
95 		min_not_zero(lhs->max_hw_segments, rhs->max_hw_segments);
96 
97 	lhs->hardsect_size = max(lhs->hardsect_size, rhs->hardsect_size);
98 
99 	lhs->max_segment_size =
100 		min_not_zero(lhs->max_segment_size, rhs->max_segment_size);
101 
102 	lhs->max_hw_sectors =
103 		min_not_zero(lhs->max_hw_sectors, rhs->max_hw_sectors);
104 
105 	lhs->seg_boundary_mask =
106 		min_not_zero(lhs->seg_boundary_mask, rhs->seg_boundary_mask);
107 
108 	lhs->bounce_pfn = min_not_zero(lhs->bounce_pfn, rhs->bounce_pfn);
109 
110 	lhs->no_cluster |= rhs->no_cluster;
111 }
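/*
 * Note that hardsect_size is combined with max() rather than
 * min_not_zero(): the stacked device must advertise the largest hardware
 * sector size of any component, whereas every other limit shrinks to the
 * most restrictive non-zero value.
 */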
112 
113 /*
114  * Calculate the index of the child node for the k'th key of the n'th node.
115  */
116 static inline unsigned int get_child(unsigned int n, unsigned int k)
117 {
118 	return (n * CHILDREN_PER_NODE) + k;
119 }
120 
121 /*
122  * Return the n'th node of level l from table t.
123  */
124 static inline sector_t *get_node(struct dm_table *t,
125 				 unsigned int l, unsigned int n)
126 {
127 	return t->index[l] + (n * KEYS_PER_NODE);
128 }
129 
130 /*
131  * Return the highest key that you could look up from the n'th
132  * node on level l of the btree.
133  */
134 static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
135 {
136 	for (; l < t->depth - 1; l++)
137 		n = get_child(n, CHILDREN_PER_NODE - 1);
138 
139 	if (n >= t->counts[l])
140 		return (sector_t) - 1;
141 
142 	return get_node(t, l, n)[KEYS_PER_NODE - 1];
143 }
144 
145 /*
146  * Fills in a level of the btree based on the highs of the level
147  * below it.
148  */
149 static int setup_btree_index(unsigned int l, struct dm_table *t)
150 {
151 	unsigned int n, k;
152 	sector_t *node;
153 
154 	for (n = 0U; n < t->counts[l]; n++) {
155 		node = get_node(t, l, n);
156 
157 		for (k = 0U; k < KEYS_PER_NODE; k++)
158 			node[k] = high(t, l + 1, get_child(n, k));
159 	}
160 
161 	return 0;
162 }
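/*
 * Illustrative layout (assuming KEYS_PER_NODE == 8): a table with 64
 * targets has 8 leaf nodes, depth == 2 and a single root node whose k'th
 * key is high(t, 1, get_child(0, k)), i.e. the highest sector served by
 * leaf k.
 */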
163 
164 void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
165 {
166 	unsigned long size;
167 	void *addr;
168 
169 	/*
170 	 * Check that we're not going to overflow.
171 	 */
172 	if (nmemb > (ULONG_MAX / elem_size))
173 		return NULL;
174 
175 	size = nmemb * elem_size;
176 	addr = vmalloc(size);
177 	if (addr)
178 		memset(addr, 0, size);
179 
180 	return addr;
181 }
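/*
 * Note: the overflow check above divides by elem_size, so callers must
 * not pass elem_size == 0; both callers in this file pass a non-zero
 * compile-time constant.
 */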
182 
183 /*
184  * highs and targets are managed as dynamic arrays during a
185  * table load.
186  */
187 static int alloc_targets(struct dm_table *t, unsigned int num)
188 {
189 	sector_t *n_highs;
190 	struct dm_target *n_targets;
191 	int n = t->num_targets;
192 
193 	/*
194 	 * Allocate both the target array and offset array at once.
195 	 * Append an empty entry to catch sectors beyond the end of
196 	 * the device.
197 	 */
198 	n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) +
199 					  sizeof(sector_t));
200 	if (!n_highs)
201 		return -ENOMEM;
202 
203 	n_targets = (struct dm_target *) (n_highs + num);
204 
205 	if (n) {
206 		memcpy(n_highs, t->highs, sizeof(*n_highs) * n);
207 		memcpy(n_targets, t->targets, sizeof(*n_targets) * n);
208 	}
209 
210 	memset(n_highs + n, -1, sizeof(*n_highs) * (num - n));
211 	vfree(t->highs);
212 
213 	t->num_allocated = num;
214 	t->highs = n_highs;
215 	t->targets = n_targets;
216 
217 	return 0;
218 }
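/*
 * Layout note: one vmalloc'd buffer holds the highs array immediately
 * followed by the targets array.  n_targets starts num sector_t slots
 * into the buffer, and allocating num + 1 combined-size elements leaves
 * more than enough room for both arrays.
 */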
219 
220 int dm_table_create(struct dm_table **result, fmode_t mode,
221 		    unsigned num_targets, struct mapped_device *md)
222 {
223 	struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);
224 
225 	if (!t)
226 		return -ENOMEM;
227 
228 	INIT_LIST_HEAD(&t->devices);
229 	atomic_set(&t->holders, 1);
230 
231 	if (!num_targets)
232 		num_targets = KEYS_PER_NODE;
233 
234 	num_targets = dm_round_up(num_targets, KEYS_PER_NODE);
235 
236 	if (alloc_targets(t, num_targets)) {
237 		kfree(t);
238 		t = NULL;
239 		return -ENOMEM;
240 	}
241 
242 	t->mode = mode;
243 	t->md = md;
244 	*result = t;
245 	return 0;
246 }
247 
248 static void free_devices(struct list_head *devices)
249 {
250 	struct list_head *tmp, *next;
251 
252 	list_for_each_safe(tmp, next, devices) {
253 		struct dm_dev_internal *dd =
254 		    list_entry(tmp, struct dm_dev_internal, list);
255 		kfree(dd);
256 	}
257 }
258 
259 static void table_destroy(struct dm_table *t)
260 {
261 	unsigned int i;
262 
263 	/* free the indexes (see dm_table_complete) */
264 	if (t->depth >= 2)
265 		vfree(t->index[t->depth - 2]);
266 
267 	/* free the targets */
268 	for (i = 0; i < t->num_targets; i++) {
269 		struct dm_target *tgt = t->targets + i;
270 
271 		if (tgt->type->dtr)
272 			tgt->type->dtr(tgt);
273 
274 		dm_put_target_type(tgt->type);
275 	}
276 
277 	vfree(t->highs);
278 
279 	/* free the device list */
280 	if (t->devices.next != &t->devices) {
281 		DMWARN("devices still present during destroy: "
282 		       "dm_table_remove_device calls missing");
283 
284 		free_devices(&t->devices);
285 	}
286 
287 	kfree(t);
288 }
289 
290 void dm_table_get(struct dm_table *t)
291 {
292 	atomic_inc(&t->holders);
293 }
294 
295 void dm_table_put(struct dm_table *t)
296 {
297 	if (!t)
298 		return;
299 
300 	if (atomic_dec_and_test(&t->holders))
301 		table_destroy(t);
302 }
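/*
 * Typical table life cycle (an illustrative sketch, error handling
 * omitted -- the real caller is the dm ioctl code, not this file):
 *
 *	struct dm_table *t;
 *
 *	dm_table_create(&t, FMODE_READ | FMODE_WRITE, 0, md);
 *	dm_table_add_target(t, "linear", 0, len, "8:16 0");
 *	dm_table_complete(t);
 *	...
 *	dm_table_put(t);	(drops the initial holder taken by create)
 */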
303 
304 /*
305  * Checks to see if we need to extend highs or targets.
306  */
307 static inline int check_space(struct dm_table *t)
308 {
309 	if (t->num_targets >= t->num_allocated)
310 		return alloc_targets(t, t->num_allocated * 2);
311 
312 	return 0;
313 }
314 
315 /*
316  * See if we've already got a device in the list.
317  */
318 static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
319 {
320 	struct dm_dev_internal *dd;
321 
322 	list_for_each_entry (dd, l, list)
323 		if (dd->dm_dev.bdev->bd_dev == dev)
324 			return dd;
325 
326 	return NULL;
327 }
328 
329 /*
330  * Open a device so we can use it as a map destination.
331  */
332 static int open_dev(struct dm_dev_internal *d, dev_t dev,
333 		    struct mapped_device *md)
334 {
335 	static char *_claim_ptr = "I belong to device-mapper";
336 	struct block_device *bdev;
337 
338 	int r;
339 
340 	BUG_ON(d->dm_dev.bdev);
341 
342 	bdev = open_by_devnum(dev, d->dm_dev.mode);
343 	if (IS_ERR(bdev))
344 		return PTR_ERR(bdev);
345 	r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md));
346 	if (r)
347 		blkdev_put(bdev, d->dm_dev.mode);
348 	else
349 		d->dm_dev.bdev = bdev;
350 	return r;
351 }
352 
353 /*
354  * Close a device that we've been using.
355  */
356 static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
357 {
358 	if (!d->dm_dev.bdev)
359 		return;
360 
361 	bd_release_from_disk(d->dm_dev.bdev, dm_disk(md));
362 	blkdev_put(d->dm_dev.bdev, d->dm_dev.mode);
363 	d->dm_dev.bdev = NULL;
364 }
365 
366 /*
367  * If possible, this checks that an area of a destination device is valid.
368  */
369 static int check_device_area(struct dm_dev_internal *dd, sector_t start,
370 			     sector_t len)
371 {
372 	sector_t dev_size = dd->dm_dev.bdev->bd_inode->i_size >> SECTOR_SHIFT;
373 
374 	if (!dev_size)
375 		return 1;
376 
377 	return ((start < dev_size) && (len <= (dev_size - start)));
378 }
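/*
 * E.g. a 1 GiB device has dev_size == 2097152 sectors, so start 2097151
 * with len 1 is accepted while len 2 is rejected; a device whose size is
 * unknown (zero) is given the benefit of the doubt.
 */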
379 
380 /*
381  * This upgrades the mode on an already open dm_dev, being careful
382  * to leave things as they were if we fail to reopen the
383  * device.
384  */
385 static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
386 			struct mapped_device *md)
387 {
388 	int r;
389 	struct dm_dev_internal dd_copy;
390 	dev_t dev = dd->dm_dev.bdev->bd_dev;
391 
392 	dd_copy = *dd;
393 
394 	dd->dm_dev.mode |= new_mode;
395 	dd->dm_dev.bdev = NULL;
396 	r = open_dev(dd, dev, md);
397 	if (!r)
398 		close_dev(&dd_copy, md);
399 	else
400 		*dd = dd_copy;
401 
402 	return r;
403 }
404 
405 /*
406  * Add a device to the list, or just increment the usage count if
407  * it's already present.
408  */
409 static int __table_get_device(struct dm_table *t, struct dm_target *ti,
410 			      const char *path, sector_t start, sector_t len,
411 			      fmode_t mode, struct dm_dev **result)
412 {
413 	int r;
414 	dev_t uninitialized_var(dev);
415 	struct dm_dev_internal *dd;
416 	unsigned int major, minor;
417 
418 	BUG_ON(!t);
419 
420 	if (sscanf(path, "%u:%u", &major, &minor) == 2) {
421 		/* Extract the major/minor numbers */
422 		dev = MKDEV(major, minor);
423 		if (MAJOR(dev) != major || MINOR(dev) != minor)
424 			return -EOVERFLOW;
425 	} else {
426 		/* convert the path to a device */
427 		struct block_device *bdev = lookup_bdev(path);
428 
429 		if (IS_ERR(bdev))
430 			return PTR_ERR(bdev);
431 		dev = bdev->bd_dev;
432 		bdput(bdev);
433 	}
434 
435 	dd = find_device(&t->devices, dev);
436 	if (!dd) {
437 		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
438 		if (!dd)
439 			return -ENOMEM;
440 
441 		dd->dm_dev.mode = mode;
442 		dd->dm_dev.bdev = NULL;
443 
444 		if ((r = open_dev(dd, dev, t->md))) {
445 			kfree(dd);
446 			return r;
447 		}
448 
449 		format_dev_t(dd->dm_dev.name, dev);
450 
451 		atomic_set(&dd->count, 0);
452 		list_add(&dd->list, &t->devices);
453 
454 	} else if (dd->dm_dev.mode != (mode | dd->dm_dev.mode)) {
455 		r = upgrade_mode(dd, mode, t->md);
456 		if (r)
457 			return r;
458 	}
459 	atomic_inc(&dd->count);
460 
461 	if (!check_device_area(dd, start, len)) {
462 		DMWARN("device %s too small for target", path);
463 		dm_put_device(ti, &dd->dm_dev);
464 		return -EINVAL;
465 	}
466 
467 	*result = &dd->dm_dev;
468 
469 	return 0;
470 }
471 
472 void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
473 {
474 	struct request_queue *q = bdev_get_queue(bdev);
475 	struct io_restrictions *rs = &ti->limits;
476 	char b[BDEVNAME_SIZE];
477 
478 	if (unlikely(!q)) {
479 		DMWARN("%s: Cannot set limits for nonexistent device %s",
480 		       dm_device_name(ti->table->md), bdevname(bdev, b));
481 		return;
482 	}
483 
484 	/*
485 	 * Combine the device limits low.
486 	 *
487 	 * FIXME: if we move an io_restriction struct
488 	 *        into q this would just be a call to
489 	 *        combine_restrictions_low()
490 	 */
491 	rs->max_sectors =
492 		min_not_zero(rs->max_sectors, q->max_sectors);
493 
494 	/*
495 	 * Check if merge fn is supported.
496 	 * If not we'll force DM to use PAGE_SIZE or
497 	 * smaller I/O, just to be safe.
498 	 */
499 
500 	if (q->merge_bvec_fn && !ti->type->merge)
501 		rs->max_sectors =
502 			min_not_zero(rs->max_sectors,
503 				     (unsigned int) (PAGE_SIZE >> 9));
504 
505 	rs->max_phys_segments =
506 		min_not_zero(rs->max_phys_segments,
507 			     q->max_phys_segments);
508 
509 	rs->max_hw_segments =
510 		min_not_zero(rs->max_hw_segments, q->max_hw_segments);
511 
512 	rs->hardsect_size = max(rs->hardsect_size, q->hardsect_size);
513 
514 	rs->max_segment_size =
515 		min_not_zero(rs->max_segment_size, q->max_segment_size);
516 
517 	rs->max_hw_sectors =
518 		min_not_zero(rs->max_hw_sectors, q->max_hw_sectors);
519 
520 	rs->seg_boundary_mask =
521 		min_not_zero(rs->seg_boundary_mask,
522 			     q->seg_boundary_mask);
523 
524 	rs->bounce_pfn = min_not_zero(rs->bounce_pfn, q->bounce_pfn);
525 
526 	rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
527 }
528 EXPORT_SYMBOL_GPL(dm_set_device_limits);
529 
530 int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
531 		  sector_t len, fmode_t mode, struct dm_dev **result)
532 {
533 	int r = __table_get_device(ti->table, ti, path,
534 				   start, len, mode, result);
535 
536 	if (!r)
537 		dm_set_device_limits(ti, (*result)->bdev);
538 
539 	return r;
540 }
541 
542 /*
543  * Decrement a device's use count and remove it if necessary.
544  */
545 void dm_put_device(struct dm_target *ti, struct dm_dev *d)
546 {
547 	struct dm_dev_internal *dd = container_of(d, struct dm_dev_internal,
548 						  dm_dev);
549 
550 	if (atomic_dec_and_test(&dd->count)) {
551 		close_dev(dd, ti->table->md);
552 		list_del(&dd->list);
553 		kfree(dd);
554 	}
555 }
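/*
 * Illustrative use from a target (a sketch, not code in this file): a
 * constructor typically does
 *
 *	dm_get_device(ti, argv[0], 0, ti->len,
 *		      dm_table_get_mode(ti->table), &lc->dev);
 *
 * and the matching destructor calls dm_put_device(ti, lc->dev), where
 * 'lc' stands for a hypothetical per-target private structure.
 */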
556 
557 /*
558  * Checks to see if the target joins onto the end of the table.
559  */
560 static int adjoin(struct dm_table *table, struct dm_target *ti)
561 {
562 	struct dm_target *prev;
563 
564 	if (!table->num_targets)
565 		return !ti->begin;
566 
567 	prev = &table->targets[table->num_targets - 1];
568 	return (ti->begin == (prev->begin + prev->len));
569 }
570 
571 /*
572  * Used to dynamically allocate the arg array.
573  */
574 static char **realloc_argv(unsigned *array_size, char **old_argv)
575 {
576 	char **argv;
577 	unsigned new_size;
578 
579 	new_size = *array_size ? *array_size * 2 : 64;
580 	argv = kmalloc(new_size * sizeof(*argv), GFP_KERNEL);
581 	if (argv) {
582 		memcpy(argv, old_argv, *array_size * sizeof(*argv));
583 		*array_size = new_size;
584 	}
585 
586 	kfree(old_argv);
587 	return argv;
588 }
589 
590 /*
591  * Destructively splits up the argument list to pass to ctr.
592  */
593 int dm_split_args(int *argc, char ***argvp, char *input)
594 {
595 	char *start, *end = input, *out, **argv = NULL;
596 	unsigned array_size = 0;
597 
598 	*argc = 0;
599 
600 	if (!input) {
601 		*argvp = NULL;
602 		return 0;
603 	}
604 
605 	argv = realloc_argv(&array_size, argv);
606 	if (!argv)
607 		return -ENOMEM;
608 
609 	while (1) {
610 		start = end;
611 
612 		/* Skip whitespace */
613 		while (*start && isspace(*start))
614 			start++;
615 
616 		if (!*start)
617 			break;	/* success, we hit the end */
618 
619 		/* 'out' is used to remove any back-quotes */
620 		end = out = start;
621 		while (*end) {
622 			/* Everything apart from '\0' can be quoted */
623 			if (*end == '\\' && *(end + 1)) {
624 				*out++ = *(end + 1);
625 				end += 2;
626 				continue;
627 			}
628 
629 			if (isspace(*end))
630 				break;	/* end of token */
631 
632 			*out++ = *end++;
633 		}
634 
635 		/* have we already filled the array ? */
636 		if ((*argc + 1) > array_size) {
637 			argv = realloc_argv(&array_size, argv);
638 			if (!argv)
639 				return -ENOMEM;
640 		}
641 
642 		/* we know this is whitespace */
643 		if (*end)
644 			end++;
645 
646 		/* terminate the string and put it in the array */
647 		*out = '\0';
648 		argv[*argc] = start;
649 		(*argc)++;
650 	}
651 
652 	*argvp = argv;
653 	return 0;
654 }
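/*
 * Example: dm_split_args() applied to the (modified in place) string
 * "foo bar\ baz  qux" yields argc == 3 and argv == { "foo", "bar baz",
 * "qux" }; a backslash quotes the following character and runs of
 * whitespace separate arguments.
 */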
655 
656 static void check_for_valid_limits(struct io_restrictions *rs)
657 {
658 	if (!rs->max_sectors)
659 		rs->max_sectors = SAFE_MAX_SECTORS;
660 	if (!rs->max_hw_sectors)
661 		rs->max_hw_sectors = SAFE_MAX_SECTORS;
662 	if (!rs->max_phys_segments)
663 		rs->max_phys_segments = MAX_PHYS_SEGMENTS;
664 	if (!rs->max_hw_segments)
665 		rs->max_hw_segments = MAX_HW_SEGMENTS;
666 	if (!rs->hardsect_size)
667 		rs->hardsect_size = 1 << SECTOR_SHIFT;
668 	if (!rs->max_segment_size)
669 		rs->max_segment_size = MAX_SEGMENT_SIZE;
670 	if (!rs->seg_boundary_mask)
671 		rs->seg_boundary_mask = -1;
672 	if (!rs->bounce_pfn)
673 		rs->bounce_pfn = -1;
674 }
675 
676 int dm_table_add_target(struct dm_table *t, const char *type,
677 			sector_t start, sector_t len, char *params)
678 {
679 	int r = -EINVAL, argc;
680 	char **argv;
681 	struct dm_target *tgt;
682 
683 	if ((r = check_space(t)))
684 		return r;
685 
686 	tgt = t->targets + t->num_targets;
687 	memset(tgt, 0, sizeof(*tgt));
688 
689 	if (!len) {
690 		DMERR("%s: zero-length target", dm_device_name(t->md));
691 		return -EINVAL;
692 	}
693 
694 	tgt->type = dm_get_target_type(type);
695 	if (!tgt->type) {
696 		DMERR("%s: %s: unknown target type", dm_device_name(t->md),
697 		      type);
698 		return -EINVAL;
699 	}
700 
701 	tgt->table = t;
702 	tgt->begin = start;
703 	tgt->len = len;
704 	tgt->error = "Unknown error";
705 
706 	/*
707 	 * Does this target adjoin the previous one ?
708 	 */
709 	if (!adjoin(t, tgt)) {
710 		tgt->error = "Gap in table";
711 		r = -EINVAL;
712 		goto bad;
713 	}
714 
715 	r = dm_split_args(&argc, &argv, params);
716 	if (r) {
717 		tgt->error = "couldn't split parameters (insufficient memory)";
718 		goto bad;
719 	}
720 
721 	r = tgt->type->ctr(tgt, argc, argv);
722 	kfree(argv);
723 	if (r)
724 		goto bad;
725 
726 	t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;
727 
728 	/* FIXME: the plan is to combine high here and then have
729 	 * the merge fn apply the target level restrictions. */
730 	combine_restrictions_low(&t->limits, &tgt->limits);
731 	return 0;
732 
733  bad:
734 	DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
735 	dm_put_target_type(tgt->type);
736 	return r;
737 }
738 
739 static int setup_indexes(struct dm_table *t)
740 {
741 	int i;
742 	unsigned int total = 0;
743 	sector_t *indexes;
744 
745 	/* allocate the space for *all* the indexes */
746 	for (i = t->depth - 2; i >= 0; i--) {
747 		t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
748 		total += t->counts[i];
749 	}
750 
751 	indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE);
752 	if (!indexes)
753 		return -ENOMEM;
754 
755 	/* set up internal nodes, bottom-up */
756 	for (i = t->depth - 2; i >= 0; i--) {
757 		t->index[i] = indexes;
758 		indexes += (KEYS_PER_NODE * t->counts[i]);
759 		setup_btree_index(i, t);
760 	}
761 
762 	return 0;
763 }
764 
765 /*
766  * Builds the btree to index the map.
767  */
768 int dm_table_complete(struct dm_table *t)
769 {
770 	int r = 0;
771 	unsigned int leaf_nodes;
772 
773 	check_for_valid_limits(&t->limits);
774 
775 	/* how many indexes will the btree have ? */
776 	leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
777 	t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);
778 
779 	/* leaf layer has already been set up */
780 	t->counts[t->depth - 1] = leaf_nodes;
781 	t->index[t->depth - 1] = t->highs;
782 
783 	if (t->depth >= 2)
784 		r = setup_indexes(t);
785 
786 	return r;
787 }
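/*
 * Worked example (assuming KEYS_PER_NODE == 8, CHILDREN_PER_NODE == 9):
 * 100 targets give leaf_nodes == 13 and depth == 3, so setup_indexes()
 * allocates two middle nodes plus one root node.
 */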
788 
789 static DEFINE_MUTEX(_event_lock);
790 void dm_table_event_callback(struct dm_table *t,
791 			     void (*fn)(void *), void *context)
792 {
793 	mutex_lock(&_event_lock);
794 	t->event_fn = fn;
795 	t->event_context = context;
796 	mutex_unlock(&_event_lock);
797 }
798 
799 void dm_table_event(struct dm_table *t)
800 {
801 	/*
802 	 * You can no longer call dm_table_event() from interrupt
803 	 * context, use a bottom half instead.
804 	 */
805 	BUG_ON(in_interrupt());
806 
807 	mutex_lock(&_event_lock);
808 	if (t->event_fn)
809 		t->event_fn(t->event_context);
810 	mutex_unlock(&_event_lock);
811 }
812 
813 sector_t dm_table_get_size(struct dm_table *t)
814 {
815 	return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
816 }
817 
818 struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
819 {
820 	if (index >= t->num_targets)
821 		return NULL;
822 
823 	return t->targets + index;
824 }
825 
826 /*
827  * Search the btree for the correct target.
828  *
829  * Caller should check returned pointer with dm_target_is_valid()
830  * to trap I/O beyond end of device.
831  */
832 struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
833 {
834 	unsigned int l, n = 0, k = 0;
835 	sector_t *node;
836 
837 	for (l = 0; l < t->depth; l++) {
838 		n = get_child(n, k);
839 		node = get_node(t, l, n);
840 
841 		for (k = 0; k < KEYS_PER_NODE; k++)
842 			if (node[k] >= sector)
843 				break;
844 	}
845 
846 	return &t->targets[(KEYS_PER_NODE * n) + k];
847 }
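/*
 * E.g. with two targets covering sectors [0, 999] and [1000, 1999],
 * highs[] begins { 999, 1999, -1, ... }; looking up sector 1000 stops at
 * the key 1999 and returns targets + 1, while a sector beyond 1999 lands
 * on the -1 sentinel and yields an empty target that the caller catches
 * with dm_target_is_valid().
 */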
848 
849 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
850 {
851 	/*
852 	 * Make sure we obey the optimistic sub-device
853 	 * restrictions.
854 	 */
855 	blk_queue_max_sectors(q, t->limits.max_sectors);
856 	q->max_phys_segments = t->limits.max_phys_segments;
857 	q->max_hw_segments = t->limits.max_hw_segments;
858 	q->hardsect_size = t->limits.hardsect_size;
859 	q->max_segment_size = t->limits.max_segment_size;
860 	q->max_hw_sectors = t->limits.max_hw_sectors;
861 	q->seg_boundary_mask = t->limits.seg_boundary_mask;
862 	q->bounce_pfn = t->limits.bounce_pfn;
863 
864 	if (t->limits.no_cluster)
865 		queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
866 	else
867 		queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q);
868 
869 }
870 
871 unsigned int dm_table_get_num_targets(struct dm_table *t)
872 {
873 	return t->num_targets;
874 }
875 
876 struct list_head *dm_table_get_devices(struct dm_table *t)
877 {
878 	return &t->devices;
879 }
880 
881 fmode_t dm_table_get_mode(struct dm_table *t)
882 {
883 	return t->mode;
884 }
885 
886 static void suspend_targets(struct dm_table *t, unsigned postsuspend)
887 {
888 	int i = t->num_targets;
889 	struct dm_target *ti = t->targets;
890 
891 	while (i--) {
892 		if (postsuspend) {
893 			if (ti->type->postsuspend)
894 				ti->type->postsuspend(ti);
895 		} else if (ti->type->presuspend)
896 			ti->type->presuspend(ti);
897 
898 		ti++;
899 	}
900 }
901 
902 void dm_table_presuspend_targets(struct dm_table *t)
903 {
904 	if (!t)
905 		return;
906 
907 	suspend_targets(t, 0);
908 }
909 
910 void dm_table_postsuspend_targets(struct dm_table *t)
911 {
912 	if (!t)
913 		return;
914 
915 	suspend_targets(t, 1);
916 }
917 
918 int dm_table_resume_targets(struct dm_table *t)
919 {
920 	int i, r = 0;
921 
922 	for (i = 0; i < t->num_targets; i++) {
923 		struct dm_target *ti = t->targets + i;
924 
925 		if (!ti->type->preresume)
926 			continue;
927 
928 		r = ti->type->preresume(ti);
929 		if (r)
930 			return r;
931 	}
932 
933 	for (i = 0; i < t->num_targets; i++) {
934 		struct dm_target *ti = t->targets + i;
935 
936 		if (ti->type->resume)
937 			ti->type->resume(ti);
938 	}
939 
940 	return 0;
941 }
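/*
 * Note the ordering: every target's preresume is called (and may fail)
 * before any target's resume runs, so a preresume failure aborts the
 * resume with no target resumed.
 */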
942 
943 int dm_table_any_congested(struct dm_table *t, int bdi_bits)
944 {
945 	struct dm_dev_internal *dd;
946 	struct list_head *devices = dm_table_get_devices(t);
947 	int r = 0;
948 
949 	list_for_each_entry(dd, devices, list) {
950 		struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
951 		char b[BDEVNAME_SIZE];
952 
953 		if (likely(q))
954 			r |= bdi_congested(&q->backing_dev_info, bdi_bits);
955 		else
956 			DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
957 				     dm_device_name(t->md),
958 				     bdevname(dd->dm_dev.bdev, b));
959 	}
960 
961 	return r;
962 }
963 
964 void dm_table_unplug_all(struct dm_table *t)
965 {
966 	struct dm_dev_internal *dd;
967 	struct list_head *devices = dm_table_get_devices(t);
968 
969 	list_for_each_entry(dd, devices, list) {
970 		struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
971 		char b[BDEVNAME_SIZE];
972 
973 		if (likely(q))
974 			blk_unplug(q);
975 		else
976 			DMWARN_LIMIT("%s: Cannot unplug nonexistent device %s",
977 				     dm_device_name(t->md),
978 				     bdevname(dd->dm_dev.bdev, b));
979 	}
980 }
981 
982 struct mapped_device *dm_table_get_md(struct dm_table *t)
983 {
984 	dm_get(t->md);
985 
986 	return t->md;
987 }
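/*
 * Note: dm_table_get_md() takes an extra reference on the mapped_device
 * via dm_get(); callers are expected to drop it again with dm_put() once
 * they are done with it.
 */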
988 
989 EXPORT_SYMBOL(dm_vcalloc);
990 EXPORT_SYMBOL(dm_get_device);
991 EXPORT_SYMBOL(dm_put_device);
992 EXPORT_SYMBOL(dm_table_event);
993 EXPORT_SYMBOL(dm_table_get_size);
994 EXPORT_SYMBOL(dm_table_get_mode);
995 EXPORT_SYMBOL(dm_table_get_md);
996 EXPORT_SYMBOL(dm_table_put);
997 EXPORT_SYMBOL(dm_table_get);
998 EXPORT_SYMBOL(dm_table_unplug_all);
999