/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm-btree.h"
#include "dm-btree-internal.h"
#include "dm-transaction-manager.h"

#include <linux/export.h>

/*
 * Removing an entry from a btree
 * ==============================
 *
 * A very important constraint for our btree is that no node, except the
 * root, may have fewer than a certain number of entries.
 * (MIN_ENTRIES <= nr_entries <= MAX_ENTRIES).
 *
 * Ensuring this is complicated by the way we want to only ever hold the
 * locks on 2 nodes concurrently, and only change nodes in a top to bottom
 * fashion.
 *
 * Each node may have a left or right sibling.  When descending the spine,
 * if a node contains only MIN_ENTRIES then we try to increase this to at
 * least MIN_ENTRIES + 1.  We do this in the following ways:
 *
 * [A] No siblings => this can only happen if the node is the root, in which
 *     case we copy the child's contents over the root.
 *
 * [B] No left sibling
 *     ==> rebalance(node, right sibling)
 *
 * [C] No right sibling
 *     ==> rebalance(left sibling, node)
 *
 * [D] Both siblings, total_entries(left, node, right) <= DEL_THRESHOLD
 *     ==> delete node adding its contents to left and right
 *
 * [E] Both siblings, total_entries(left, node, right) > DEL_THRESHOLD
 *     ==> rebalance(left, node, right)
 *
 * After these operations it's possible that our original node no longer
 * contains the desired subtree.  For this reason the rebalancing is
 * performed on the children of the current node.  This also avoids having
 * a special case for the root.
 *
 * Once this rebalancing has occurred we can then step into the child node
 * for internal nodes, or delete the entry for leaf nodes.
 */
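
/*
 * The thresholds above are realised by merge_threshold() below, which
 * returns max_entries / 3.  As an illustrative example (the real
 * max_entries depends on the block and value sizes): with max_entries of
 * 126 the merge threshold is 42, so two siblings holding fewer than
 * 2 * 42 + 1 = 85 entries between them are merged into one node, and
 * three siblings holding fewer than 4 * 42 + 1 = 169 entries have their
 * centre node deleted.  Otherwise entries are redistributed evenly.
 */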

/*
 * Some little utilities for moving node data around.
 */
static void node_shift(struct btree_node *n, int shift)
{
	uint32_t nr_entries = le32_to_cpu(n->header.nr_entries);
	uint32_t value_size = le32_to_cpu(n->header.value_size);

	if (shift < 0) {
		shift = -shift;
		BUG_ON(shift > nr_entries);
		BUG_ON((void *) key_ptr(n, shift) >= value_ptr(n, shift));
		memmove(key_ptr(n, 0),
			key_ptr(n, shift),
			(nr_entries - shift) * sizeof(__le64));
		memmove(value_ptr(n, 0),
			value_ptr(n, shift),
			(nr_entries - shift) * value_size);
	} else {
		BUG_ON(nr_entries + shift > le32_to_cpu(n->header.max_entries));
		memmove(key_ptr(n, shift),
			key_ptr(n, 0),
			nr_entries * sizeof(__le64));
		memmove(value_ptr(n, shift),
			value_ptr(n, 0),
			nr_entries * value_size);
	}
}

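/*
 * Copy the first (or last) abs(shift) entries between siblings: a
 * negative shift appends the head of @right to @left, a positive shift
 * prepends the tail of @left to @right.
 */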
static void node_copy(struct btree_node *left, struct btree_node *right, int shift)
{
	uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
	uint32_t value_size = le32_to_cpu(left->header.value_size);

	BUG_ON(value_size != le32_to_cpu(right->header.value_size));

	if (shift < 0) {
		shift = -shift;
		BUG_ON(nr_left + shift > le32_to_cpu(left->header.max_entries));
		memcpy(key_ptr(left, nr_left),
		       key_ptr(right, 0),
		       shift * sizeof(__le64));
		memcpy(value_ptr(left, nr_left),
		       value_ptr(right, 0),
		       shift * value_size);
	} else {
		BUG_ON(shift > le32_to_cpu(right->header.max_entries));
		memcpy(key_ptr(right, 0),
		       key_ptr(left, nr_left - shift),
		       shift * sizeof(__le64));
		memcpy(value_ptr(right, 0),
		       value_ptr(left, nr_left - shift),
		       shift * value_size);
	}
}

/*
 * Delete a specific entry from a leaf node.
 */
static void delete_at(struct btree_node *n, unsigned index)
{
	unsigned nr_entries = le32_to_cpu(n->header.nr_entries);
	unsigned nr_to_copy = nr_entries - (index + 1);
	uint32_t value_size = le32_to_cpu(n->header.value_size);

	BUG_ON(index >= nr_entries);

	if (nr_to_copy) {
		memmove(key_ptr(n, index),
			key_ptr(n, index + 1),
			nr_to_copy * sizeof(__le64));

		memmove(value_ptr(n, index),
			value_ptr(n, index + 1),
			nr_to_copy * value_size);
	}

	n->header.nr_entries = cpu_to_le32(nr_entries - 1);
}

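/*
 * Nodes whose combined entry count falls below a multiple of this
 * threshold get merged (see __rebalance2() and __rebalance3()).
 */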
static unsigned merge_threshold(struct btree_node *n)
{
	return le32_to_cpu(n->header.max_entries) / 3;
}

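/*
 * A child node that has been shadowed ready for modification, together
 * with its index within the parent.
 */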
struct child {
	unsigned index;
	struct dm_block *block;
	struct btree_node *n;
};

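/*
 * Shadow the child at @index of @parent, updating the parent to point at
 * the (possibly new) shadow block.  The caller must release the child
 * with exit_child().
 */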
static int init_child(struct dm_btree_info *info, struct dm_btree_value_type *vt,
		      struct btree_node *parent,
		      unsigned index, struct child *result)
{
	int r, inc;
	dm_block_t root;

	result->index = index;
	root = value64(parent, index);

	r = dm_tm_shadow_block(info->tm, root, &btree_node_validator,
			       &result->block, &inc);
	if (r)
		return r;

	result->n = dm_block_data(result->block);

	if (inc)
		inc_children(info->tm, result->n, vt);

	*((__le64 *) value_ptr(parent, index)) =
		cpu_to_le64(dm_block_location(result->block));

	return 0;
}

static int exit_child(struct dm_btree_info *info, struct child *c)
{
	return dm_tm_unlock(info->tm, c->block);
}

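/*
 * Move @count entries from the end of @left to the front of @right (a
 * negative @count moves them the other way), keeping both key arrays
 * sorted.
 */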
static void shift(struct btree_node *left, struct btree_node *right, int count)
{
	uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
	uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
	uint32_t max_entries = le32_to_cpu(left->header.max_entries);
	uint32_t r_max_entries = le32_to_cpu(right->header.max_entries);

	BUG_ON(max_entries != r_max_entries);
	BUG_ON(nr_left - count > max_entries);
	BUG_ON(nr_right + count > max_entries);

	if (!count)
		return;

	if (count > 0) {
		node_shift(right, count);
		node_copy(left, right, count);
	} else {
		node_copy(left, right, count);
		node_shift(right, count);
	}

	left->header.nr_entries = cpu_to_le32(nr_left - count);
	right->header.nr_entries = cpu_to_le32(nr_right + count);
}

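/*
 * Either merge two siblings into the left node, or share their entries
 * evenly between them, fixing up the separating key in the parent.
 */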
static void __rebalance2(struct dm_btree_info *info, struct btree_node *parent,
			 struct child *l, struct child *r)
{
	struct btree_node *left = l->n;
	struct btree_node *right = r->n;
	uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
	uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
	unsigned threshold = 2 * merge_threshold(left) + 1;

	if (nr_left + nr_right < threshold) {
		/*
		 * Merge
		 */
		node_copy(left, right, -nr_right);
		left->header.nr_entries = cpu_to_le32(nr_left + nr_right);
		delete_at(parent, r->index);

		/*
		 * We need to decrement the right block, but not its
		 * children, since they're still referenced by left.
		 */
		dm_tm_dec(info->tm, dm_block_location(r->block));
	} else {
		/*
		 * Rebalance.
		 */
		unsigned target_left = (nr_left + nr_right) / 2;

		shift(left, right, nr_left - target_left);
		*key_ptr(parent, r->index) = right->keys[0];
	}
}

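/*
 * Shadow two adjacent children of the current node and rebalance them.
 */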
static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
		      struct dm_btree_value_type *vt, unsigned left_index)
{
	int r;
	struct btree_node *parent;
	struct child left, right;

	parent = dm_block_data(shadow_current(s));

	r = init_child(info, vt, parent, left_index, &left);
	if (r)
		return r;

	r = init_child(info, vt, parent, left_index + 1, &right);
	if (r) {
		exit_child(info, &left);
		return r;
	}

	__rebalance2(info, parent, &left, &right);

	r = exit_child(info, &left);
	if (r) {
		exit_child(info, &right);
		return r;
	}

	return exit_child(info, &right);
}

/*
 * We dump as many entries from the center node as possible into the left,
 * then the rest into the right, then rebalance2().  This wastes some cpu,
 * but we want something simple for now.
 */
static void delete_center_node(struct dm_btree_info *info, struct btree_node *parent,
			       struct child *l, struct child *c, struct child *r,
			       struct btree_node *left, struct btree_node *center, struct btree_node *right,
			       uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
{
	uint32_t max_entries = le32_to_cpu(left->header.max_entries);
	unsigned shift = min(max_entries - nr_left, nr_center);

	BUG_ON(nr_left + shift > max_entries);
	node_copy(left, center, -shift);
	left->header.nr_entries = cpu_to_le32(nr_left + shift);

	if (shift != nr_center) {
		shift = nr_center - shift;
		BUG_ON((nr_right + shift) > max_entries);
		node_shift(right, shift);
		node_copy(center, right, shift);
		right->header.nr_entries = cpu_to_le32(nr_right + shift);
	}
	*key_ptr(parent, r->index) = right->keys[0];

	delete_at(parent, c->index);
	r->index--;

	dm_tm_dec(info->tm, dm_block_location(c->block));
	__rebalance2(info, parent, l, r);
}

/*
 * Redistributes entries among 3 sibling nodes.
 */
static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
			  struct child *l, struct child *c, struct child *r,
			  struct btree_node *left, struct btree_node *center, struct btree_node *right,
			  uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
{
	int s;
	uint32_t max_entries = le32_to_cpu(left->header.max_entries);
	unsigned target = (nr_left + nr_center + nr_right) / 3;

	BUG_ON(target > max_entries);

	if (nr_left < nr_right) {
		s = nr_left - target;

		if (s < 0 && nr_center < -s) {
			/* not enough in central node */
			shift(left, center, -nr_center);
			s += nr_center;
			shift(left, right, s);
			nr_right += s;
		} else
			shift(left, center, s);

		shift(center, right, target - nr_right);

	} else {
		s = target - nr_right;
		if (s > 0 && nr_center < s) {
			/* not enough in central node */
			shift(center, right, nr_center);
			s -= nr_center;
			shift(left, right, s);
			nr_left -= s;
		} else
			shift(center, right, s);

		shift(left, center, nr_left - target);
	}

	*key_ptr(parent, c->index) = center->keys[0];
	*key_ptr(parent, r->index) = right->keys[0];
}

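/*
 * If the three children hold few enough entries between them, delete the
 * centre node and share its entries out; otherwise redistribute entries
 * evenly across all three.
 */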
static void __rebalance3(struct dm_btree_info *info, struct btree_node *parent,
			 struct child *l, struct child *c, struct child *r)
{
	struct btree_node *left = l->n;
	struct btree_node *center = c->n;
	struct btree_node *right = r->n;

	uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
	uint32_t nr_center = le32_to_cpu(center->header.nr_entries);
	uint32_t nr_right = le32_to_cpu(right->header.nr_entries);

	unsigned threshold = merge_threshold(left) * 4 + 1;

	BUG_ON(left->header.max_entries != center->header.max_entries);
	BUG_ON(center->header.max_entries != right->header.max_entries);

	if ((nr_left + nr_center + nr_right) < threshold)
		delete_center_node(info, parent, l, c, r, left, center, right,
				   nr_left, nr_center, nr_right);
	else
		redistribute3(info, parent, l, c, r, left, center, right,
			      nr_left, nr_center, nr_right);
}

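/*
 * Shadow three adjacent children of the current node and rebalance them.
 */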
static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
		      struct dm_btree_value_type *vt, unsigned left_index)
{
	int r;
	struct btree_node *parent = dm_block_data(shadow_current(s));
	struct child left, center, right;

	/*
	 * FIXME: fill out an array?
	 */
	r = init_child(info, vt, parent, left_index, &left);
	if (r)
		return r;

	r = init_child(info, vt, parent, left_index + 1, &center);
	if (r) {
		exit_child(info, &left);
		return r;
	}

	r = init_child(info, vt, parent, left_index + 2, &right);
	if (r) {
		exit_child(info, &left);
		exit_child(info, &center);
		return r;
	}

	__rebalance3(info, parent, &left, &center, &right);

	r = exit_child(info, &left);
	if (r) {
		exit_child(info, &center);
		exit_child(info, &right);
		return r;
	}

	r = exit_child(info, &center);
	if (r) {
		exit_child(info, &right);
		return r;
	}

	return exit_child(info, &right);
}

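/*
 * Ensure the child we're about to step into has more than the minimum
 * number of entries, rebalancing with its siblings if necessary.  If the
 * current node has a single child, the child's contents are copied over
 * it, shrinking the height of the tree by one.
 */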
static int rebalance_children(struct shadow_spine *s,
			      struct dm_btree_info *info,
			      struct dm_btree_value_type *vt, uint64_t key)
{
	int i, r, has_left_sibling, has_right_sibling;
	struct btree_node *n;

	n = dm_block_data(shadow_current(s));

	if (le32_to_cpu(n->header.nr_entries) == 1) {
		struct dm_block *child;
		dm_block_t b = value64(n, 0);

		r = dm_tm_read_lock(info->tm, b, &btree_node_validator, &child);
		if (r)
			return r;

		memcpy(n, dm_block_data(child),
		       dm_bm_block_size(dm_tm_get_bm(info->tm)));

		/*
		 * Drop our reference to the child block before unlocking,
		 * since the unlock may release the underlying buffer.
		 */
		dm_tm_dec(info->tm, dm_block_location(child));

		return dm_tm_unlock(info->tm, child);
	}

	i = lower_bound(n, key);
	if (i < 0)
		return -ENODATA;

	has_left_sibling = i > 0;
	has_right_sibling = i < (le32_to_cpu(n->header.nr_entries) - 1);

	if (!has_left_sibling)
		r = rebalance2(s, info, vt, i);

	else if (!has_right_sibling)
		r = rebalance2(s, info, vt, i - 1);

	else
		r = rebalance3(s, info, vt, i - 1);

	return r;
}

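/*
 * Locate @key within a leaf, failing if it isn't present.
 */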
static int do_leaf(struct btree_node *n, uint64_t key, unsigned *index)
{
	int i = lower_bound(n, key);

	if ((i < 0) ||
	    (i >= le32_to_cpu(n->header.nr_entries)) ||
	    (le64_to_cpu(n->keys[i]) != key))
		return -ENODATA;

	*index = i;

	return 0;
}

/*
 * Prepares for removal from one level of the hierarchy.  The caller must
 * call delete_at() to remove the entry at index.
 */
static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
		      struct dm_btree_value_type *vt, dm_block_t root,
		      uint64_t key, unsigned *index)
{
	int i = *index, r;
	struct btree_node *n;

	for (;;) {
		r = shadow_step(s, root, vt);
		if (r < 0)
			break;

		/*
		 * We have to patch up the parent node, ugly, but I don't
		 * see a way to do this automatically as part of the spine
		 * op.
		 */
		if (shadow_has_parent(s)) {
			__le64 location = cpu_to_le64(dm_block_location(shadow_current(s)));

			memcpy(value_ptr(dm_block_data(shadow_parent(s)), i),
			       &location, sizeof(__le64));
		}

		n = dm_block_data(shadow_current(s));

		if (le32_to_cpu(n->header.flags) & LEAF_NODE)
			return do_leaf(n, key, index);

		r = rebalance_children(s, info, vt, key);
		if (r)
			break;

		n = dm_block_data(shadow_current(s));
		if (le32_to_cpu(n->header.flags) & LEAF_NODE)
			return do_leaf(n, key, index);

		i = lower_bound(n, key);

		/*
		 * We know the key is present, or else
		 * rebalance_children would have returned
		 * -ENODATA
		 */
		root = value64(n, i);
	}

	return r;
}

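/*
 * Removes the entry at *keys (one key per btree level), decrementing the
 * value's reference count via the value type's dec method if one is
 * provided.  Returns -ENODATA if the key isn't present.
 */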
int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
		    uint64_t *keys, dm_block_t *new_root)
{
	unsigned level, last_level = info->levels - 1;
	int index = 0, r = 0;
	struct shadow_spine spine;
	struct btree_node *n;
	struct dm_btree_value_type le64_vt;

	init_le64_type(info->tm, &le64_vt);
	init_shadow_spine(&spine, info);
	for (level = 0; level < info->levels; level++) {
		r = remove_raw(&spine, info,
			       (level == last_level ?
				&info->value_type : &le64_vt),
			       root, keys[level], (unsigned *)&index);
		if (r < 0)
			break;

		n = dm_block_data(shadow_current(&spine));
		if (level != last_level) {
			root = value64(n, index);
			continue;
		}

		BUG_ON(index < 0 || index >= le32_to_cpu(n->header.nr_entries));

		if (info->value_type.dec)
			info->value_type.dec(info->value_type.context,
					     value_ptr(n, index));

		delete_at(n, index);
	}

	*new_root = shadow_root(&spine);
	exit_shadow_spine(&spine);

	return r;
}
EXPORT_SYMBOL_GPL(dm_btree_remove);

/*----------------------------------------------------------------*/

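/*
 * Like remove_raw(), but instead of insisting on an exact key match it
 * leaves *index pointing at the result of lower_bound() on the leaf; the
 * caller must range check it.
 */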
static int remove_nearest(struct shadow_spine *s, struct dm_btree_info *info,
			  struct dm_btree_value_type *vt, dm_block_t root,
			  uint64_t key, int *index)
{
	int i = *index, r;
	struct btree_node *n;

	for (;;) {
		r = shadow_step(s, root, vt);
		if (r < 0)
			break;

		/*
		 * We have to patch up the parent node, ugly, but I don't
		 * see a way to do this automatically as part of the spine
		 * op.
		 */
		if (shadow_has_parent(s)) {
			__le64 location = cpu_to_le64(dm_block_location(shadow_current(s)));

			memcpy(value_ptr(dm_block_data(shadow_parent(s)), i),
			       &location, sizeof(__le64));
		}

		n = dm_block_data(shadow_current(s));

		if (le32_to_cpu(n->header.flags) & LEAF_NODE) {
			*index = lower_bound(n, key);
			return 0;
		}

		r = rebalance_children(s, info, vt, key);
		if (r)
			break;

		n = dm_block_data(shadow_current(s));
		if (le32_to_cpu(n->header.flags) & LEAF_NODE) {
			*index = lower_bound(n, key);
			return 0;
		}

		i = lower_bound(n, key);

		/*
		 * We know the key is present, or else
		 * rebalance_children would have returned
		 * -ENODATA
		 */
		root = value64(n, i);
	}

	return r;
}

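/*
 * Removes a single leaf entry whose key lies within [keys[last_level],
 * end_key), then advances keys[last_level] past the removed key so that
 * repeated calls walk the range.
 */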
static int remove_one(struct dm_btree_info *info, dm_block_t root,
		      uint64_t *keys, uint64_t end_key,
		      dm_block_t *new_root, unsigned *nr_removed)
{
	unsigned level, last_level = info->levels - 1;
	int index = 0, r = 0;
	struct shadow_spine spine;
	struct btree_node *n;
	struct dm_btree_value_type le64_vt;
	uint64_t k;

	init_le64_type(info->tm, &le64_vt);
	init_shadow_spine(&spine, info);
	for (level = 0; level < last_level; level++) {
		r = remove_raw(&spine, info, &le64_vt,
			       root, keys[level], (unsigned *) &index);
		if (r < 0)
			goto out;

		n = dm_block_data(shadow_current(&spine));
		root = value64(n, index);
	}

	r = remove_nearest(&spine, info, &info->value_type,
			   root, keys[last_level], &index);
	if (r < 0)
		goto out;

	n = dm_block_data(shadow_current(&spine));

	if (index < 0)
		index = 0;

	if (index >= le32_to_cpu(n->header.nr_entries)) {
		r = -ENODATA;
		goto out;
	}

	k = le64_to_cpu(n->keys[index]);
	if (k >= keys[last_level] && k < end_key) {
		if (info->value_type.dec)
			info->value_type.dec(info->value_type.context,
					     value_ptr(n, index));

		delete_at(n, index);
		keys[last_level] = k + 1ull;

	} else
		r = -ENODATA;

out:
	*new_root = shadow_root(&spine);
	exit_shadow_spine(&spine);

	return r;
}

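/*
 * Removes values from the bottom level of the btree for keys in the range
 * [*first_key, end_key), counting the removals in *nr_removed.  On return
 * *first_key has advanced past the last key removed.
 */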
int dm_btree_remove_leaves(struct dm_btree_info *info, dm_block_t root,
			   uint64_t *first_key, uint64_t end_key,
			   dm_block_t *new_root, unsigned *nr_removed)
{
	int r;

	*nr_removed = 0;
	do {
		r = remove_one(info, root, first_key, end_key, &root, nr_removed);
		if (!r)
			(*nr_removed)++;
	} while (!r);

	*new_root = root;
	return r == -ENODATA ? 0 : r;
}
EXPORT_SYMBOL_GPL(dm_btree_remove_leaves);
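
/*
 * Example usage (a sketch, not taken from an in-tree caller; 'info' and
 * 'root' are assumed to describe an existing single level btree):
 *
 *	uint64_t first = 100, end = 200;
 *	unsigned nr_removed;
 *	int r;
 *
 *	r = dm_btree_remove_leaves(info, root, &first, end, &root, &nr_removed);
 *	if (r)
 *		return r;
 */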