/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm-btree-internal.h"
#include "dm-space-map.h"
#include "dm-transaction-manager.h"

#include <linux/export.h>
#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "btree"

/*----------------------------------------------------------------
 * Array manipulation
 *--------------------------------------------------------------*/
static void memcpy_disk(void *dest, const void *src, size_t len)
        __dm_written_to_disk(src)
{
        memcpy(dest, src, len);
        __dm_unbless_for_disk(src);
}

static void array_insert(void *base, size_t elt_size, unsigned nr_elts,
                         unsigned index, void *elt)
        __dm_written_to_disk(elt)
{
        if (index < nr_elts)
                memmove(base + (elt_size * (index + 1)),
                        base + (elt_size * index),
                        (nr_elts - index) * elt_size);

        memcpy_disk(base + (elt_size * index), elt, elt_size);
}

/*----------------------------------------------------------------*/

/* makes the assumption that no two keys are the same. */
static int bsearch(struct btree_node *n, uint64_t key, int want_hi)
{
        int lo = -1, hi = le32_to_cpu(n->header.nr_entries);

        while (hi - lo > 1) {
                int mid = lo + ((hi - lo) / 2);
                uint64_t mid_key = le64_to_cpu(n->keys[mid]);

                if (mid_key == key)
                        return mid;

                if (mid_key < key)
                        lo = mid;
                else
                        hi = mid;
        }

        return want_hi ? hi : lo;
}

int lower_bound(struct btree_node *n, uint64_t key)
{
        return bsearch(n, key, 0);
}

static int upper_bound(struct btree_node *n, uint64_t key)
{
        return bsearch(n, key, 1);
}

void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
                  struct dm_btree_value_type *vt)
{
        unsigned i;
        uint32_t nr_entries = le32_to_cpu(n->header.nr_entries);

        if (le32_to_cpu(n->header.flags) & INTERNAL_NODE)
                for (i = 0; i < nr_entries; i++)
                        dm_tm_inc(tm, value64(n, i));
        else if (vt->inc)
                for (i = 0; i < nr_entries; i++)
                        vt->inc(vt->context, value_ptr(n, i));
}

static int insert_at(size_t value_size, struct btree_node *node, unsigned index,
                     uint64_t key, void *value)
        __dm_written_to_disk(value)
{
        uint32_t nr_entries = le32_to_cpu(node->header.nr_entries);
        __le64 key_le = cpu_to_le64(key);

        if (index > nr_entries ||
            index >= le32_to_cpu(node->header.max_entries)) {
                DMERR("too many entries in btree node for insert");
                __dm_unbless_for_disk(value);
                return -ENOMEM;
        }

        __dm_bless_for_disk(&key_le);

        array_insert(node->keys, sizeof(*node->keys), nr_entries, index, &key_le);
        array_insert(value_base(node), value_size, nr_entries, index, value);
        node->header.nr_entries = cpu_to_le32(nr_entries + 1);

        return 0;
}

/*----------------------------------------------------------------*/

/*
 * We want 3n entries (for some n).  This works more nicely for repeated
 * insert/remove loops than (2n + 1).
 */
static uint32_t calc_max_entries(size_t value_size, size_t block_size)
{
        uint32_t total, n;
        size_t elt_size = sizeof(uint64_t) + value_size; /* key + value */

        block_size -= sizeof(struct node_header);
        total = block_size / elt_size;
        n = total / 3;          /* rounds down */

        return 3 * n;
}
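
/*
 * A worked example (illustrative only, assuming a 4096 byte block,
 * 8 byte values and sizeof(struct node_header) == 32): elt_size is
 * 8 + 8 = 16, so total = (4096 - 32) / 16 = 254, n = 254 / 3 = 84, and
 * calc_max_entries() returns 3 * 84 = 252.
 */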

int dm_btree_empty(struct dm_btree_info *info, dm_block_t *root)
{
        int r;
        struct dm_block *b;
        struct btree_node *n;
        size_t block_size;
        uint32_t max_entries;

        r = new_block(info, &b);
        if (r < 0)
                return r;

        block_size = dm_bm_block_size(dm_tm_get_bm(info->tm));
        max_entries = calc_max_entries(info->value_type.size, block_size);

        n = dm_block_data(b);
        memset(n, 0, block_size);
        n->header.flags = cpu_to_le32(LEAF_NODE);
        n->header.nr_entries = cpu_to_le32(0);
        n->header.max_entries = cpu_to_le32(max_entries);
        n->header.value_size = cpu_to_le32(info->value_type.size);

        *root = dm_block_location(b);
        unlock_block(info, b);

        return 0;
}
EXPORT_SYMBOL_GPL(dm_btree_empty);
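
/*
 * Example (a sketch, not taken from a real caller): creating an empty
 * one-level tree that maps 64 bit keys to 64 bit little-endian values.
 * 'tm' is assumed to be an existing transaction manager.
 *
 *      struct dm_btree_info info;
 *      dm_block_t root;
 *      int r;
 *
 *      memset(&info, 0, sizeof(info));
 *      info.tm = tm;
 *      info.levels = 1;
 *      info.value_type.size = sizeof(__le64);
 *
 *      r = dm_btree_empty(&info, &root);
 */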

/*----------------------------------------------------------------*/

/*
 * Deletion uses a recursive algorithm; since we have limited stack space
 * we explicitly manage our own stack on the heap.
 */
#define MAX_SPINE_DEPTH 64
struct frame {
        struct dm_block *b;
        struct btree_node *n;
        unsigned level;
        unsigned nr_children;
        unsigned current_child;
};

struct del_stack {
        struct dm_btree_info *info;
        struct dm_transaction_manager *tm;
        int top;
        struct frame spine[MAX_SPINE_DEPTH];
};

static int top_frame(struct del_stack *s, struct frame **f)
{
        if (s->top < 0) {
                DMERR("btree deletion stack empty");
                return -EINVAL;
        }

        *f = s->spine + s->top;

        return 0;
}

static int unprocessed_frames(struct del_stack *s)
{
        return s->top >= 0;
}

static void prefetch_children(struct del_stack *s, struct frame *f)
{
        unsigned i;
        struct dm_block_manager *bm = dm_tm_get_bm(s->tm);

        for (i = 0; i < f->nr_children; i++)
                dm_bm_prefetch(bm, value64(f->n, i));
}

static bool is_internal_level(struct dm_btree_info *info, struct frame *f)
{
        return f->level < (info->levels - 1);
}

static int push_frame(struct del_stack *s, dm_block_t b, unsigned level)
{
        int r;
        uint32_t ref_count;

        if (s->top >= MAX_SPINE_DEPTH - 1) {
                DMERR("btree deletion stack out of memory");
                return -ENOMEM;
        }

        r = dm_tm_ref(s->tm, b, &ref_count);
        if (r)
                return r;

        if (ref_count > 1)
                /*
                 * This is a shared node, so we can just decrement its
                 * reference counter and leave the children.
                 */
                dm_tm_dec(s->tm, b);

        else {
                uint32_t flags;
                struct frame *f = s->spine + ++s->top;

                r = dm_tm_read_lock(s->tm, b, &btree_node_validator, &f->b);
                if (r) {
                        s->top--;
                        return r;
                }

                f->n = dm_block_data(f->b);
                f->level = level;
                f->nr_children = le32_to_cpu(f->n->header.nr_entries);
                f->current_child = 0;

                flags = le32_to_cpu(f->n->header.flags);
                if (flags & INTERNAL_NODE || is_internal_level(s->info, f))
                        prefetch_children(s, f);
        }

        return 0;
}

static void pop_frame(struct del_stack *s)
{
        struct frame *f = s->spine + s->top--;

        dm_tm_dec(s->tm, dm_block_location(f->b));
        dm_tm_unlock(s->tm, f->b);
}

static void unlock_all_frames(struct del_stack *s)
{
        struct frame *f;

        while (unprocessed_frames(s)) {
                f = s->spine + s->top--;
                dm_tm_unlock(s->tm, f->b);
        }
}

int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
{
        int r;
        struct del_stack *s;

        /*
         * dm_btree_del() is called via an ioctl, and as such should be
         * considered an FS op.  We can't recurse back into the FS, so we
         * allocate GFP_NOFS.
         */
        s = kmalloc(sizeof(*s), GFP_NOFS);
        if (!s)
                return -ENOMEM;
        s->info = info;
        s->tm = info->tm;
        s->top = -1;

        r = push_frame(s, root, 0);
        if (r)
                goto out;

        while (unprocessed_frames(s)) {
                uint32_t flags;
                struct frame *f;
                dm_block_t b;

                r = top_frame(s, &f);
                if (r)
                        goto out;

                if (f->current_child >= f->nr_children) {
                        pop_frame(s);
                        continue;
                }

                flags = le32_to_cpu(f->n->header.flags);
                if (flags & INTERNAL_NODE) {
                        b = value64(f->n, f->current_child);
                        f->current_child++;
                        r = push_frame(s, b, f->level);
                        if (r)
                                goto out;

                } else if (is_internal_level(info, f)) {
                        b = value64(f->n, f->current_child);
                        f->current_child++;
                        r = push_frame(s, b, f->level + 1);
                        if (r)
                                goto out;

                } else {
                        if (info->value_type.dec) {
                                unsigned i;

                                for (i = 0; i < f->nr_children; i++)
                                        info->value_type.dec(info->value_type.context,
                                                             value_ptr(f->n, i));
                        }
                        pop_frame(s);
                }
        }
out:
        if (r) {
                /* cleanup all frames of del_stack */
                unlock_all_frames(s);
        }
        kfree(s);

        return r;
}
EXPORT_SYMBOL_GPL(dm_btree_del);
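
/*
 * Example (sketch): dropping a whole tree once nothing else references
 * its root.
 *
 *      r = dm_btree_del(&info, root);
 *
 * Note that shared subtrees (reference count > 1) are not walked; only
 * their reference counts are decremented.
 */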

/*----------------------------------------------------------------*/

static int btree_lookup_raw(struct ro_spine *s, dm_block_t block, uint64_t key,
                            int (*search_fn)(struct btree_node *, uint64_t),
                            uint64_t *result_key, void *v, size_t value_size)
{
        int i, r;
        uint32_t flags, nr_entries;

        do {
                r = ro_step(s, block);
                if (r < 0)
                        return r;

                i = search_fn(ro_node(s), key);

                flags = le32_to_cpu(ro_node(s)->header.flags);
                nr_entries = le32_to_cpu(ro_node(s)->header.nr_entries);
                if (i < 0 || i >= nr_entries)
                        return -ENODATA;

                if (flags & INTERNAL_NODE)
                        block = value64(ro_node(s), i);

        } while (!(flags & LEAF_NODE));

        *result_key = le64_to_cpu(ro_node(s)->keys[i]);
        if (v)
                memcpy(v, value_ptr(ro_node(s), i), value_size);

        return 0;
}

int dm_btree_lookup(struct dm_btree_info *info, dm_block_t root,
                    uint64_t *keys, void *value_le)
{
        unsigned level, last_level = info->levels - 1;
        int r = -ENODATA;
        uint64_t rkey;
        __le64 internal_value_le;
        struct ro_spine spine;

        init_ro_spine(&spine, info);
        for (level = 0; level < info->levels; level++) {
                size_t size;
                void *value_p;

                if (level == last_level) {
                        value_p = value_le;
                        size = info->value_type.size;

                } else {
                        value_p = &internal_value_le;
                        size = sizeof(uint64_t);
                }

                r = btree_lookup_raw(&spine, root, keys[level],
                                     lower_bound, &rkey,
                                     value_p, size);

                if (!r) {
                        if (rkey != keys[level]) {
                                exit_ro_spine(&spine);
                                return -ENODATA;
                        }
                } else {
                        exit_ro_spine(&spine);
                        return r;
                }

                root = le64_to_cpu(internal_value_le);
        }
        exit_ro_spine(&spine);

        return r;
}
EXPORT_SYMBOL_GPL(dm_btree_lookup);

static int dm_btree_lookup_next_single(struct dm_btree_info *info, dm_block_t root,
                                       uint64_t key, uint64_t *rkey, void *value_le)
{
        int r, i;
        uint32_t flags, nr_entries;
        struct dm_block *node;
        struct btree_node *n;

        r = bn_read_lock(info, root, &node);
        if (r)
                return r;

        n = dm_block_data(node);
        flags = le32_to_cpu(n->header.flags);
        nr_entries = le32_to_cpu(n->header.nr_entries);

        if (flags & INTERNAL_NODE) {
                i = lower_bound(n, key);
                if (i < 0) {
                        /*
                         * avoid early -ENODATA return when all entries are
                         * higher than the search @key.
                         */
                        i = 0;
                }
                if (i >= nr_entries) {
                        r = -ENODATA;
                        goto out;
                }

                r = dm_btree_lookup_next_single(info, value64(n, i), key, rkey, value_le);
                if (r == -ENODATA && i < (nr_entries - 1)) {
                        i++;
                        r = dm_btree_lookup_next_single(info, value64(n, i), key, rkey, value_le);
                }

        } else {
                i = upper_bound(n, key);
                if (i < 0 || i >= nr_entries) {
                        r = -ENODATA;
                        goto out;
                }

                *rkey = le64_to_cpu(n->keys[i]);
                memcpy(value_le, value_ptr(n, i), info->value_type.size);
        }
out:
        dm_tm_unlock(info->tm, node);
        return r;
}

int dm_btree_lookup_next(struct dm_btree_info *info, dm_block_t root,
                         uint64_t *keys, uint64_t *rkey, void *value_le)
{
        unsigned level;
        int r = -ENODATA;
        __le64 internal_value_le;
        struct ro_spine spine;

        init_ro_spine(&spine, info);
        for (level = 0; level < info->levels - 1u; level++) {
                r = btree_lookup_raw(&spine, root, keys[level],
                                     lower_bound, rkey,
                                     &internal_value_le, sizeof(uint64_t));
                if (r)
                        goto out;

                if (*rkey != keys[level]) {
                        r = -ENODATA;
                        goto out;
                }

                root = le64_to_cpu(internal_value_le);
        }

        r = dm_btree_lookup_next_single(info, root, keys[level], rkey, value_le);
out:
        exit_ro_spine(&spine);
        return r;
}
EXPORT_SYMBOL_GPL(dm_btree_lookup_next);
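
/*
 * Example (a sketch, not from a real caller): find the first mapping at
 * or after key 100 in a one-level tree with __le64 values.
 *
 *      uint64_t key = 100, rkey;
 *      __le64 value_le;
 *
 *      r = dm_btree_lookup_next(&info, root, &key, &rkey, &value_le);
 *
 * On success rkey holds the lowest mapped key that is >= 100.
 */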

/*
 * Splits a node by creating a sibling node and shifting half the node's
 * contents across.  Assumes there is a parent node, and it has room for
 * another child.
 *
 * Before:
 *        +--------+
 *        | Parent |
 *        +--------+
 *           |
 *           v
 *      +----------+
 *      | A ++++++ |
 *      +----------+
 *
 *
 * After:
 *              +--------+
 *              | Parent |
 *              +--------+
 *                |    |
 *                v    +------+
 *          +---------+       |
 *          | A* +++  |       v
 *          +---------+  +-------+
 *                       | B +++ |
 *                       +-------+
 *
 * Where A* is a shadow of A.
 */
static int btree_split_sibling(struct shadow_spine *s, unsigned parent_index,
                               uint64_t key)
{
        int r;
        size_t size;
        unsigned nr_left, nr_right;
        struct dm_block *left, *right, *parent;
        struct btree_node *ln, *rn, *pn;
        __le64 location;

        left = shadow_current(s);

        r = new_block(s->info, &right);
        if (r < 0)
                return r;

        ln = dm_block_data(left);
        rn = dm_block_data(right);

        nr_left = le32_to_cpu(ln->header.nr_entries) / 2;
        nr_right = le32_to_cpu(ln->header.nr_entries) - nr_left;

        ln->header.nr_entries = cpu_to_le32(nr_left);

        rn->header.flags = ln->header.flags;
        rn->header.nr_entries = cpu_to_le32(nr_right);
        rn->header.max_entries = ln->header.max_entries;
        rn->header.value_size = ln->header.value_size;
        memcpy(rn->keys, ln->keys + nr_left, nr_right * sizeof(rn->keys[0]));

        size = le32_to_cpu(ln->header.flags) & INTERNAL_NODE ?
                sizeof(uint64_t) : s->info->value_type.size;
        memcpy(value_ptr(rn, 0), value_ptr(ln, nr_left),
               size * nr_right);

        /*
         * Patch up the parent
         */
        parent = shadow_parent(s);

        pn = dm_block_data(parent);
        location = cpu_to_le64(dm_block_location(left));
        __dm_bless_for_disk(&location);
        memcpy_disk(value_ptr(pn, parent_index),
                    &location, sizeof(__le64));

        location = cpu_to_le64(dm_block_location(right));
        __dm_bless_for_disk(&location);

        r = insert_at(sizeof(__le64), pn, parent_index + 1,
                      le64_to_cpu(rn->keys[0]), &location);
        if (r) {
                unlock_block(s->info, right);
                return r;
        }

        if (key < le64_to_cpu(rn->keys[0])) {
                unlock_block(s->info, right);
                s->nodes[1] = left;
        } else {
                unlock_block(s->info, left);
                s->nodes[1] = right;
        }

        return 0;
}
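
/*
 * A worked example (illustrative): splitting a leaf holding the keys
 * {10, 20, 30, 40} gives nr_left = 2, so A* keeps {10, 20}, B takes
 * {30, 40} and the parent gains the entry (30 -> B) at parent_index + 1.
 */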

/*
 * Splits a node by creating two new children beneath the given node.
 *
 * Before:
 *        +----------+
 *        | A ++++++ |
 *        +----------+
 *
 *
 * After:
 *      +------------+
 *      | A (shadow) |
 *      +------------+
 *          |    |
 *   +------+    +----+
 *   |                |
 *   v                v
 * +-------+      +-------+
 * | B +++ |      | C +++ |
 * +-------+      +-------+
 */
static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
{
        int r;
        size_t size;
        unsigned nr_left, nr_right;
        struct dm_block *left, *right, *new_parent;
        struct btree_node *pn, *ln, *rn;
        __le64 val;

        new_parent = shadow_current(s);

        pn = dm_block_data(new_parent);
        size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
                sizeof(__le64) : s->info->value_type.size;

        /* create & init the left block */
        r = new_block(s->info, &left);
        if (r < 0)
                return r;

        ln = dm_block_data(left);
        nr_left = le32_to_cpu(pn->header.nr_entries) / 2;

        ln->header.flags = pn->header.flags;
        ln->header.nr_entries = cpu_to_le32(nr_left);
        ln->header.max_entries = pn->header.max_entries;
        ln->header.value_size = pn->header.value_size;
        memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
        memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);

        /* create & init the right block */
        r = new_block(s->info, &right);
        if (r < 0) {
                unlock_block(s->info, left);
                return r;
        }

        rn = dm_block_data(right);
        nr_right = le32_to_cpu(pn->header.nr_entries) - nr_left;

        rn->header.flags = pn->header.flags;
        rn->header.nr_entries = cpu_to_le32(nr_right);
        rn->header.max_entries = pn->header.max_entries;
        rn->header.value_size = pn->header.value_size;
        memcpy(rn->keys, pn->keys + nr_left, nr_right * sizeof(pn->keys[0]));
        memcpy(value_ptr(rn, 0), value_ptr(pn, nr_left),
               nr_right * size);

        /* new_parent should just point to 'left' and 'right' now */
        pn->header.flags = cpu_to_le32(INTERNAL_NODE);
        pn->header.nr_entries = cpu_to_le32(2);
        pn->header.max_entries = cpu_to_le32(
                calc_max_entries(sizeof(__le64),
                                 dm_bm_block_size(
                                         dm_tm_get_bm(s->info->tm))));
        pn->header.value_size = cpu_to_le32(sizeof(__le64));

        val = cpu_to_le64(dm_block_location(left));
        __dm_bless_for_disk(&val);
        pn->keys[0] = ln->keys[0];
        memcpy_disk(value_ptr(pn, 0), &val, sizeof(__le64));

        val = cpu_to_le64(dm_block_location(right));
        __dm_bless_for_disk(&val);
        pn->keys[1] = rn->keys[0];
        memcpy_disk(value_ptr(pn, 1), &val, sizeof(__le64));

        unlock_block(s->info, left);
        unlock_block(s->info, right);
        return 0;
}
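
/*
 * A worked example (illustrative): splitting a full root {10, 20, 30, 40}
 * beneath itself turns the shadowed root into an internal node with the
 * two entries (10 -> B) and (30 -> C), where B holds {10, 20} and C holds
 * {30, 40}.  max_entries is recalculated because an internal node always
 * stores 8 byte block pointers as its values.
 */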

static int btree_insert_raw(struct shadow_spine *s, dm_block_t root,
                            struct dm_btree_value_type *vt,
                            uint64_t key, unsigned *index)
{
        int r, i = *index, top = 1;
        struct btree_node *node;

        for (;;) {
                r = shadow_step(s, root, vt);
                if (r < 0)
                        return r;

                node = dm_block_data(shadow_current(s));

                /*
                 * We have to patch up the parent node, ugly, but I don't
                 * see a way to do this automatically as part of the spine
                 * op.
                 */
                if (shadow_has_parent(s) && i >= 0) { /* FIXME: second clause unnecessary */
                        __le64 location = cpu_to_le64(dm_block_location(shadow_current(s)));

                        __dm_bless_for_disk(&location);
                        memcpy_disk(value_ptr(dm_block_data(shadow_parent(s)), i),
                                    &location, sizeof(__le64));
                }

                node = dm_block_data(shadow_current(s));

                if (node->header.nr_entries == node->header.max_entries) {
                        if (top)
                                r = btree_split_beneath(s, key);
                        else
                                r = btree_split_sibling(s, i, key);

                        if (r < 0)
                                return r;
                }

                node = dm_block_data(shadow_current(s));

                i = lower_bound(node, key);

                if (le32_to_cpu(node->header.flags) & LEAF_NODE)
                        break;

                if (i < 0) {
                        /* change the bounds on the lowest key */
                        node->keys[0] = cpu_to_le64(key);
                        i = 0;
                }

                root = value64(node, i);
                top = 0;
        }

        if (i < 0 || le64_to_cpu(node->keys[i]) != key)
                i++;

        *index = i;
        return 0;
}

static bool need_insert(struct btree_node *node, uint64_t *keys,
                        unsigned level, unsigned index)
{
        return ((index >= le32_to_cpu(node->header.nr_entries)) ||
                (le64_to_cpu(node->keys[index]) != keys[level]));
}

static int insert(struct dm_btree_info *info, dm_block_t root,
                  uint64_t *keys, void *value, dm_block_t *new_root,
                  int *inserted)
        __dm_written_to_disk(value)
{
        int r;
        unsigned level, index = -1, last_level = info->levels - 1;
        dm_block_t block = root;
        struct shadow_spine spine;
        struct btree_node *n;
        struct dm_btree_value_type le64_type;

        init_le64_type(info->tm, &le64_type);
        init_shadow_spine(&spine, info);

        for (level = 0; level < (info->levels - 1); level++) {
                r = btree_insert_raw(&spine, block, &le64_type, keys[level], &index);
                if (r < 0)
                        goto bad;

                n = dm_block_data(shadow_current(&spine));

                if (need_insert(n, keys, level, index)) {
                        dm_block_t new_tree;
                        __le64 new_le;

                        r = dm_btree_empty(info, &new_tree);
                        if (r < 0)
                                goto bad;

                        new_le = cpu_to_le64(new_tree);
                        __dm_bless_for_disk(&new_le);

                        r = insert_at(sizeof(uint64_t), n, index,
                                      keys[level], &new_le);
                        if (r)
                                goto bad;
                }

                if (level < last_level)
                        block = value64(n, index);
        }

        r = btree_insert_raw(&spine, block, &info->value_type,
                             keys[level], &index);
        if (r < 0)
                goto bad;

        n = dm_block_data(shadow_current(&spine));

        if (need_insert(n, keys, level, index)) {
                if (inserted)
                        *inserted = 1;

                r = insert_at(info->value_type.size, n, index,
                              keys[level], value);
                if (r)
                        goto bad_unblessed;
        } else {
                if (inserted)
                        *inserted = 0;

                if (info->value_type.dec &&
                    (!info->value_type.equal ||
                     !info->value_type.equal(
                             info->value_type.context,
                             value_ptr(n, index),
                             value))) {
                        info->value_type.dec(info->value_type.context,
                                             value_ptr(n, index));
                }
                memcpy_disk(value_ptr(n, index),
                            value, info->value_type.size);
        }

        *new_root = shadow_root(&spine);
        exit_shadow_spine(&spine);

        return 0;

bad:
        __dm_unbless_for_disk(value);
bad_unblessed:
        exit_shadow_spine(&spine);
        return r;
}

int dm_btree_insert(struct dm_btree_info *info, dm_block_t root,
                    uint64_t *keys, void *value, dm_block_t *new_root)
        __dm_written_to_disk(value)
{
        return insert(info, root, keys, value, new_root, NULL);
}
EXPORT_SYMBOL_GPL(dm_btree_insert);
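
/*
 * Example (a sketch, not from a real caller): inserting a little-endian
 * value under key 5 in a one-level tree.  The caller must switch to
 * new_root afterwards, since the spine shadows (and so may relocate)
 * every node it touches.
 *
 *      uint64_t key = 5;
 *      __le64 value_le = cpu_to_le64(12345);
 *      dm_block_t new_root;
 *
 *      r = dm_btree_insert(&info, root, &key, &value_le, &new_root);
 */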

int dm_btree_insert_notify(struct dm_btree_info *info, dm_block_t root,
                           uint64_t *keys, void *value, dm_block_t *new_root,
                           int *inserted)
        __dm_written_to_disk(value)
{
        return insert(info, root, keys, value, new_root, inserted);
}
EXPORT_SYMBOL_GPL(dm_btree_insert_notify);

/*----------------------------------------------------------------*/

static int find_key(struct ro_spine *s, dm_block_t block, bool find_highest,
                    uint64_t *result_key, dm_block_t *next_block)
{
        int i, r;
        uint32_t flags;

        do {
                r = ro_step(s, block);
                if (r < 0)
                        return r;

                flags = le32_to_cpu(ro_node(s)->header.flags);
                i = le32_to_cpu(ro_node(s)->header.nr_entries);
                if (!i)
                        return -ENODATA;
                else
                        i--;

                if (find_highest)
                        *result_key = le64_to_cpu(ro_node(s)->keys[i]);
                else
                        *result_key = le64_to_cpu(ro_node(s)->keys[0]);

                if (next_block || flags & INTERNAL_NODE) {
                        if (find_highest)
                                block = value64(ro_node(s), i);
                        else
                                block = value64(ro_node(s), 0);
                }

        } while (flags & INTERNAL_NODE);

        if (next_block)
                *next_block = block;
        return 0;
}

static int dm_btree_find_key(struct dm_btree_info *info, dm_block_t root,
                             bool find_highest, uint64_t *result_keys)
{
        int r = 0, count = 0, level;
        struct ro_spine spine;

        init_ro_spine(&spine, info);
        for (level = 0; level < info->levels; level++) {
                r = find_key(&spine, root, find_highest, result_keys + level,
                             level == info->levels - 1 ? NULL : &root);
                if (r == -ENODATA) {
                        r = 0;
                        break;

                } else if (r)
                        break;

                count++;
        }
        exit_ro_spine(&spine);

        return r ? r : count;
}

int dm_btree_find_highest_key(struct dm_btree_info *info, dm_block_t root,
                              uint64_t *result_keys)
{
        return dm_btree_find_key(info, root, true, result_keys);
}
EXPORT_SYMBOL_GPL(dm_btree_find_highest_key);

int dm_btree_find_lowest_key(struct dm_btree_info *info, dm_block_t root,
                             uint64_t *result_keys)
{
        return dm_btree_find_key(info, root, false, result_keys);
}
EXPORT_SYMBOL_GPL(dm_btree_find_lowest_key);
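
/*
 * Example (sketch): result_keys must have room for one key per level.
 * For a two-level tree,
 *
 *      uint64_t highest[2];
 *
 *      r = dm_btree_find_highest_key(&info, root, highest);
 *
 * fills highest[0] and highest[1] and returns the number of levels for
 * which a key was found (2 here), or a negative error.
 */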

/*----------------------------------------------------------------*/

/*
 * FIXME: We shouldn't use a recursive algorithm when we have limited stack
 * space.  Also, this only works for single-level trees.
 */
static int walk_node(struct dm_btree_info *info, dm_block_t block,
                     int (*fn)(void *context, uint64_t *keys, void *leaf),
                     void *context)
{
        int r;
        unsigned i, nr;
        struct dm_block *node;
        struct btree_node *n;
        uint64_t keys;

        r = bn_read_lock(info, block, &node);
        if (r)
                return r;

        n = dm_block_data(node);

        nr = le32_to_cpu(n->header.nr_entries);
        for (i = 0; i < nr; i++) {
                if (le32_to_cpu(n->header.flags) & INTERNAL_NODE) {
                        r = walk_node(info, value64(n, i), fn, context);
                        if (r)
                                goto out;
                } else {
                        keys = le64_to_cpu(*key_ptr(n, i));
                        r = fn(context, &keys, value_ptr(n, i));
                        if (r)
                                goto out;
                }
        }

out:
        dm_tm_unlock(info->tm, node);
        return r;
}

int dm_btree_walk(struct dm_btree_info *info, dm_block_t root,
                  int (*fn)(void *context, uint64_t *keys, void *leaf),
                  void *context)
{
        BUG_ON(info->levels > 1);
        return walk_node(info, root, fn, context);
}
EXPORT_SYMBOL_GPL(dm_btree_walk);
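
/*
 * Example (a sketch, not from a real caller): counting the entries of a
 * single-level tree with dm_btree_walk().
 *
 *      static int count_fn(void *context, uint64_t *keys, void *leaf)
 *      {
 *              (*(unsigned *) context)++;
 *              return 0;
 *      }
 *
 *      unsigned count = 0;
 *
 *      r = dm_btree_walk(&info, root, count_fn, &count);
 */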

/*----------------------------------------------------------------*/

static void prefetch_values(struct dm_btree_cursor *c)
{
        unsigned i, nr;
        __le64 value_le;
        struct cursor_node *n = c->nodes + c->depth - 1;
        struct btree_node *bn = dm_block_data(n->b);
        struct dm_block_manager *bm = dm_tm_get_bm(c->info->tm);

        BUG_ON(c->info->value_type.size != sizeof(value_le));

        nr = le32_to_cpu(bn->header.nr_entries);
        for (i = 0; i < nr; i++) {
                memcpy(&value_le, value_ptr(bn, i), sizeof(value_le));
                dm_bm_prefetch(bm, le64_to_cpu(value_le));
        }
}

static bool leaf_node(struct dm_btree_cursor *c)
{
        struct cursor_node *n = c->nodes + c->depth - 1;
        struct btree_node *bn = dm_block_data(n->b);

        return le32_to_cpu(bn->header.flags) & LEAF_NODE;
}

static int push_node(struct dm_btree_cursor *c, dm_block_t b)
{
        int r;
        struct cursor_node *n = c->nodes + c->depth;

        if (c->depth >= DM_BTREE_CURSOR_MAX_DEPTH - 1) {
                DMERR("couldn't push cursor node, stack depth too high");
                return -EINVAL;
        }

        r = bn_read_lock(c->info, b, &n->b);
        if (r)
                return r;

        n->index = 0;
        c->depth++;

        if (c->prefetch_leaves || !leaf_node(c))
                prefetch_values(c);

        return 0;
}

static void pop_node(struct dm_btree_cursor *c)
{
        c->depth--;
        unlock_block(c->info, c->nodes[c->depth].b);
}

static int inc_or_backtrack(struct dm_btree_cursor *c)
{
        struct cursor_node *n;
        struct btree_node *bn;

        for (;;) {
                if (!c->depth)
                        return -ENODATA;

                n = c->nodes + c->depth - 1;
                bn = dm_block_data(n->b);

                n->index++;
                if (n->index < le32_to_cpu(bn->header.nr_entries))
                        break;

                pop_node(c);
        }

        return 0;
}

static int find_leaf(struct dm_btree_cursor *c)
{
        int r = 0;
        struct cursor_node *n;
        struct btree_node *bn;
        __le64 value_le;

        for (;;) {
                n = c->nodes + c->depth - 1;
                bn = dm_block_data(n->b);

                if (le32_to_cpu(bn->header.flags) & LEAF_NODE)
                        break;

                memcpy(&value_le, value_ptr(bn, n->index), sizeof(value_le));
                r = push_node(c, le64_to_cpu(value_le));
                if (r) {
                        DMERR("push_node failed");
                        break;
                }
        }

        if (!r && (le32_to_cpu(bn->header.nr_entries) == 0))
                return -ENODATA;

        return r;
}

int dm_btree_cursor_begin(struct dm_btree_info *info, dm_block_t root,
                          bool prefetch_leaves, struct dm_btree_cursor *c)
{
        int r;

        c->info = info;
        c->root = root;
        c->depth = 0;
        c->prefetch_leaves = prefetch_leaves;

        r = push_node(c, root);
        if (r)
                return r;

        return find_leaf(c);
}
EXPORT_SYMBOL_GPL(dm_btree_cursor_begin);

void dm_btree_cursor_end(struct dm_btree_cursor *c)
{
        while (c->depth)
                pop_node(c);
}
EXPORT_SYMBOL_GPL(dm_btree_cursor_end);

int dm_btree_cursor_next(struct dm_btree_cursor *c)
{
        int r = inc_or_backtrack(c);
        if (!r) {
                r = find_leaf(c);
                if (r)
                        DMERR("find_leaf failed");
        }

        return r;
}
EXPORT_SYMBOL_GPL(dm_btree_cursor_next);

int dm_btree_cursor_skip(struct dm_btree_cursor *c, uint32_t count)
{
        int r = 0;

        while (count-- && !r)
                r = dm_btree_cursor_next(c);

        return r;
}
EXPORT_SYMBOL_GPL(dm_btree_cursor_skip);

int dm_btree_cursor_get_value(struct dm_btree_cursor *c, uint64_t *key, void *value_le)
{
        if (c->depth) {
                struct cursor_node *n = c->nodes + c->depth - 1;
                struct btree_node *bn = dm_block_data(n->b);

                if (le32_to_cpu(bn->header.flags) & INTERNAL_NODE)
                        return -EINVAL;

                *key = le64_to_cpu(*key_ptr(bn, n->index));
                memcpy(value_le, value_ptr(bn, n->index), c->info->value_type.size);
                return 0;

        } else
                return -ENODATA;
}
EXPORT_SYMBOL_GPL(dm_btree_cursor_get_value);
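
/*
 * Example (a sketch, not from a real caller): visiting every leaf entry
 * of a tree with __le64 values, in ascending key order.
 *
 *      struct dm_btree_cursor c;
 *      uint64_t key;
 *      __le64 value_le;
 *
 *      r = dm_btree_cursor_begin(&info, root, false, &c);
 *      while (!r) {
 *              r = dm_btree_cursor_get_value(&c, &key, &value_le);
 *              if (r)
 *                      break;
 *
 *              ... use key and le64_to_cpu(value_le) ...
 *
 *              r = dm_btree_cursor_next(&c);
 *      }
 *      dm_btree_cursor_end(&c);
 *      if (r == -ENODATA)
 *              r = 0;  (-ENODATA just means the walk finished)
 */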