// SPDX-License-Identifier: GPL-2.0
/*
 * Code for working with individual keys, and sorted sets of keys within a
 * btree node
 *
 * Copyright 2012 Google, Inc.
 */

#define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__

#include "util.h"
#include "bset.h"

#include <linux/console.h>
#include <linux/sched/clock.h>
#include <linux/random.h>
#include <linux/prefetch.h>

#ifdef CONFIG_BCACHE_DEBUG

void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned int set)
{
        struct bkey *k, *next;

        for (k = i->start; k < bset_bkey_last(i); k = next) {
                next = bkey_next(k);

                pr_err("block %u key %u/%u: ", set,
                       (unsigned int) ((u64 *) k - i->d), i->keys);

                if (b->ops->key_dump)
                        b->ops->key_dump(b, k);
                else
                        pr_err("%llu:%llu\n", KEY_INODE(k), KEY_OFFSET(k));

                if (next < bset_bkey_last(i) &&
                    bkey_cmp(k, b->ops->is_extents ?
                             &START_KEY(next) : next) > 0)
                        pr_err("Key skipped backwards\n");
        }
}

void bch_dump_bucket(struct btree_keys *b)
{
        unsigned int i;

        console_lock();
        for (i = 0; i <= b->nsets; i++)
                bch_dump_bset(b, b->set[i].data,
                              bset_sector_offset(b, b->set[i].data));
        console_unlock();
}

int __bch_count_data(struct btree_keys *b)
{
        unsigned int ret = 0;
        struct btree_iter iter;
        struct bkey *k;

        if (b->ops->is_extents)
                for_each_key(b, k, &iter)
                        ret += KEY_SIZE(k);
        return ret;
}

void __bch_check_keys(struct btree_keys *b, const char *fmt, ...)
{
        va_list args;
        struct bkey *k, *p = NULL;
        struct btree_iter iter;
        const char *err;

        for_each_key(b, k, &iter) {
                if (b->ops->is_extents) {
                        err = "Keys out of order";
                        if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0)
                                goto bug;

                        if (bch_ptr_invalid(b, k))
                                continue;

                        err = "Overlapping keys";
                        if (p && bkey_cmp(p, &START_KEY(k)) > 0)
                                goto bug;
                } else {
                        if (bch_ptr_bad(b, k))
                                continue;

                        err = "Duplicate keys";
                        if (p && !bkey_cmp(p, k))
                                goto bug;
                }
                p = k;
        }
#if 0
        err = "Key larger than btree node key";
        if (p && bkey_cmp(p, &b->key) > 0)
                goto bug;
#endif
        return;
bug:
        bch_dump_bucket(b);

        va_start(args, fmt);
        vprintk(fmt, args);
        va_end(args);

        panic("bch_check_keys error: %s:\n", err);
}

static void bch_btree_iter_next_check(struct btree_iter *iter)
{
        struct bkey *k = iter->data->k, *next = bkey_next(k);

        if (next < iter->data->end &&
            bkey_cmp(k, iter->b->ops->is_extents ?
                     &START_KEY(next) : next) > 0) {
                bch_dump_bucket(iter->b);
                panic("Key skipped backwards\n");
        }
}

#else

static inline void bch_btree_iter_next_check(struct btree_iter *iter) {}

#endif

/* Keylists */

int __bch_keylist_realloc(struct keylist *l, unsigned int u64s)
{
        size_t oldsize = bch_keylist_nkeys(l);
        size_t newsize = oldsize + u64s;
        uint64_t *old_keys = l->keys_p == l->inline_keys ? NULL : l->keys_p;
        uint64_t *new_keys;

        newsize = roundup_pow_of_two(newsize);

        if (newsize <= KEYLIST_INLINE ||
            roundup_pow_of_two(oldsize) == newsize)
                return 0;

        new_keys = krealloc(old_keys, sizeof(uint64_t) * newsize, GFP_NOIO);

        if (!new_keys)
                return -ENOMEM;

        if (!old_keys)
                memcpy(new_keys, l->inline_keys, sizeof(uint64_t) * oldsize);

        l->keys_p = new_keys;
        l->top_p = new_keys + oldsize;

        return 0;
}

struct bkey *bch_keylist_pop(struct keylist *l)
{
        struct bkey *k = l->keys;

        if (k == l->top)
                return NULL;

        while (bkey_next(k) != l->top)
                k = bkey_next(k);

        return l->top = k;
}

void bch_keylist_pop_front(struct keylist *l)
{
        l->top_p -= bkey_u64s(l->keys);

        memmove(l->keys,
                bkey_next(l->keys),
                bch_keylist_bytes(l));
}
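
/*
 * Example usage (a minimal sketch, not taken from a real caller): grow the
 * list if needed, then append a key. bch_keylist_add() is the bset.h helper
 * this assumes; error handling at the call site will differ.
 */
#if 0
static int example_keylist_append(struct keylist *l, struct bkey *k)
{
        if (__bch_keylist_realloc(l, bkey_u64s(k)))
                return -ENOMEM; /* inline buffer full and krealloc failed */

        bch_keylist_add(l, k); /* copies k to l->top and advances it */
        return 0;
}
#endif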

/* Key/pointer manipulation */

void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
                              unsigned int i)
{
        BUG_ON(i > KEY_PTRS(src));

        /* Only copy the header, key, and one pointer. */
        memcpy(dest, src, 2 * sizeof(uint64_t));
        dest->ptr[0] = src->ptr[i];
        SET_KEY_PTRS(dest, 1);
        /* We didn't copy the checksum so clear that bit. */
        SET_KEY_CSUM(dest, 0);
}

bool __bch_cut_front(const struct bkey *where, struct bkey *k)
{
        unsigned int i, len = 0;

        if (bkey_cmp(where, &START_KEY(k)) <= 0)
                return false;

        if (bkey_cmp(where, k) < 0)
                len = KEY_OFFSET(k) - KEY_OFFSET(where);
        else
                bkey_copy_key(k, where);

        for (i = 0; i < KEY_PTRS(k); i++)
                SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + KEY_SIZE(k) - len);

        BUG_ON(len > KEY_SIZE(k));
        SET_KEY_SIZE(k, len);
        return true;
}

bool __bch_cut_back(const struct bkey *where, struct bkey *k)
{
        unsigned int len = 0;

        if (bkey_cmp(where, k) >= 0)
                return false;

        BUG_ON(KEY_INODE(where) != KEY_INODE(k));

        if (bkey_cmp(where, &START_KEY(k)) > 0)
                len = KEY_OFFSET(where) - KEY_START(k);

        bkey_copy_key(k, where);

        BUG_ON(len > KEY_SIZE(k));
        SET_KEY_SIZE(k, len);
        return true;
}
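
/*
 * Worked example (numbers are illustrative): an extent key covers sectors
 * [KEY_START(k), KEY_OFFSET(k)). Take k with KEY_OFFSET == 20 and
 * KEY_SIZE == 10, i.e. sectors [10, 20):
 *
 *   __bch_cut_front(where at offset 14, k): len = 20 - 14 = 6, so k becomes
 *   [14, 20) and every PTR_OFFSET advances by KEY_SIZE - len = 4 sectors,
 *   so the pointers still point at the data for sector 14.
 *
 *   __bch_cut_back(where at offset 14, k): len = 14 - 10 = 4, so k becomes
 *   [10, 14); the pointers are unchanged since the start of the extent
 *   didn't move.
 */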

/* Auxiliary search trees */

/* 32 bits total: */
#define BKEY_MID_BITS           3
#define BKEY_EXPONENT_BITS      7
#define BKEY_MANTISSA_BITS      (32 - BKEY_MID_BITS - BKEY_EXPONENT_BITS)
#define BKEY_MANTISSA_MASK      ((1 << BKEY_MANTISSA_BITS) - 1)

struct bkey_float {
        unsigned int exponent:BKEY_EXPONENT_BITS;
        unsigned int m:BKEY_MID_BITS;
        unsigned int mantissa:BKEY_MANTISSA_BITS;
} __packed;

/*
 * BSET_CACHELINE was originally intended to match the hardware cacheline size -
 * it used to be 64, but I realized the lookup code would touch slightly less
 * memory if it was 128.
 *
 * It defines the number of bytes (in struct bset) per struct bkey_float in
 * the auxiliary search tree - when we're done searching the bset_float tree we
 * have this many bytes left that we do a linear search over.
 *
 * Since (after level 5) every level of the bset_tree is on a new cacheline,
 * we're touching one fewer cacheline in the bset tree in exchange for one more
 * cacheline in the linear search - but the linear search might stop before it
 * gets to the second cacheline.
 */

#define BSET_CACHELINE          128

/* Space required for the btree node keys */
static inline size_t btree_keys_bytes(struct btree_keys *b)
{
        return PAGE_SIZE << b->page_order;
}

static inline size_t btree_keys_cachelines(struct btree_keys *b)
{
        return btree_keys_bytes(b) / BSET_CACHELINE;
}

/* Space required for the auxiliary search trees */
static inline size_t bset_tree_bytes(struct btree_keys *b)
{
        return btree_keys_cachelines(b) * sizeof(struct bkey_float);
}

/* Space required for the prev pointers */
static inline size_t bset_prev_bytes(struct btree_keys *b)
{
        return btree_keys_cachelines(b) * sizeof(uint8_t);
}
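
/*
 * Worked sizes (a sketch assuming PAGE_SIZE == 4096 and page_order == 2,
 * i.e. a 16KiB btree node): btree_keys_bytes() == 16384, so
 * btree_keys_cachelines() == 16384 / 128 == 128. With the 32-bit packed
 * bkey_float, that is bset_tree_bytes() == 128 * 4 == 512 bytes for the
 * auxiliary tree and bset_prev_bytes() == 128 bytes for the prev table.
 */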

/* Memory allocation */

void bch_btree_keys_free(struct btree_keys *b)
{
        struct bset_tree *t = b->set;

        if (bset_prev_bytes(b) < PAGE_SIZE)
                kfree(t->prev);
        else
                free_pages((unsigned long) t->prev,
                           get_order(bset_prev_bytes(b)));

        if (bset_tree_bytes(b) < PAGE_SIZE)
                kfree(t->tree);
        else
                free_pages((unsigned long) t->tree,
                           get_order(bset_tree_bytes(b)));

        free_pages((unsigned long) t->data, b->page_order);

        t->prev = NULL;
        t->tree = NULL;
        t->data = NULL;
}
EXPORT_SYMBOL(bch_btree_keys_free);

int bch_btree_keys_alloc(struct btree_keys *b,
                         unsigned int page_order,
                         gfp_t gfp)
{
        struct bset_tree *t = b->set;

        BUG_ON(t->data);

        b->page_order = page_order;

        t->data = (void *) __get_free_pages(gfp, b->page_order);
        if (!t->data)
                goto err;

        t->tree = bset_tree_bytes(b) < PAGE_SIZE
                ? kmalloc(bset_tree_bytes(b), gfp)
                : (void *) __get_free_pages(gfp, get_order(bset_tree_bytes(b)));
        if (!t->tree)
                goto err;

        t->prev = bset_prev_bytes(b) < PAGE_SIZE
                ? kmalloc(bset_prev_bytes(b), gfp)
                : (void *) __get_free_pages(gfp, get_order(bset_prev_bytes(b)));
        if (!t->prev)
                goto err;

        return 0;
err:
        bch_btree_keys_free(b);
        return -ENOMEM;
}
EXPORT_SYMBOL(bch_btree_keys_alloc);

void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops,
                         bool *expensive_debug_checks)
{
        b->ops = ops;
        b->expensive_debug_checks = expensive_debug_checks;
        b->nsets = 0;
        b->last_set_unwritten = 0;

        /*
         * struct btree_keys is embedded in struct btree, and struct
         * bset_tree is embedded into struct btree_keys. They are all
         * initialized as 0 by kzalloc() in mca_bucket_alloc(), and
         * b->set[0].data is allocated in bch_btree_keys_alloc(), so we
         * don't have to initialize b->set[].size and b->set[].data here
         * any more.
         */
}
EXPORT_SYMBOL(bch_btree_keys_init);

/* Binary tree stuff for auxiliary search trees */

/*
 * Return the array index that follows j in an in-order traversal of a
 * binary tree stored in a linear array
 */
static unsigned int inorder_next(unsigned int j, unsigned int size)
{
        if (j * 2 + 1 < size) {
                j = j * 2 + 1;

                while (j * 2 < size)
                        j *= 2;
        } else
                j >>= ffz(j) + 1;

        return j;
}

/*
 * Return the array index that precedes j in an in-order traversal of a
 * binary tree stored in a linear array
 */
static unsigned int inorder_prev(unsigned int j, unsigned int size)
{
        if (j * 2 < size) {
                j = j * 2;

                while (j * 2 + 1 < size)
                        j = j * 2 + 1;
        } else
                j >>= ffs(j);

        return j;
}

/*
 * I have no idea why this code works... and I'm the one who wrote it
 *
 * However, I do know what it does:
 * Given a binary tree constructed in an array (i.e. how you normally implement
 * a heap), it converts a node in the tree - referenced by array index - to the
 * index it would have if you did an inorder traversal.
 *
 * Also tested for every j, size up to size somewhere around 6 million.
 *
 * The binary tree starts at array index 1, not 0
 * extra is a function of size:
 *   extra = (size - rounddown_pow_of_two(size - 1)) << 1;
 */
static unsigned int __to_inorder(unsigned int j,
                                 unsigned int size,
                                 unsigned int extra)
{
        unsigned int b = fls(j);
        unsigned int shift = fls(size - 1) - b;

        j ^= 1U << (b - 1);
        j <<= 1;
        j |= 1;
        j <<= shift;

        if (j > extra)
                j -= (j - extra) >> 1;

        return j;
}

/*
 * Return the cacheline index in bset_tree->data, where j is index
 * from a linear array which stores the auxiliary binary tree
 */
static unsigned int to_inorder(unsigned int j, struct bset_tree *t)
{
        return __to_inorder(j, t->size, t->extra);
}
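
/*
 * Worked example (computed by hand, matching the #if 0 inorder_test()
 * below): for size == 6 the tree occupies array indices 1..5 and
 * extra == (6 - rounddown_pow_of_two(5)) << 1 == 4. The heap-order tree
 *
 *            1
 *          /   \
 *         2     3
 *        / \
 *       4   5
 *
 * visits 4, 2, 5, 1, 3 in order, and __to_inorder() maps
 * j = 1, 2, 3, 4, 5 to 4, 2, 5, 1, 3 respectively; __inorder_to_tree()
 * below is its inverse.
 */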

static unsigned int __inorder_to_tree(unsigned int j,
                                      unsigned int size,
                                      unsigned int extra)
{
        unsigned int shift;

        if (j > extra)
                j += j - extra;

        shift = ffs(j);

        j >>= shift;
        j |= roundup_pow_of_two(size) >> shift;

        return j;
}

/*
 * Return an index from a linear array which stores the auxiliary binary
 * tree, j is the cacheline index of t->data.
 */
static unsigned int inorder_to_tree(unsigned int j, struct bset_tree *t)
{
        return __inorder_to_tree(j, t->size, t->extra);
}

#if 0
void inorder_test(void)
{
        unsigned long done = 0;
        ktime_t start = ktime_get();

        for (unsigned int size = 2;
             size < 65536000;
             size++) {
                unsigned int extra =
                        (size - rounddown_pow_of_two(size - 1)) << 1;
                unsigned int i = 1, j = rounddown_pow_of_two(size - 1);

                if (!(size % 4096))
                        pr_notice("loop %u, %llu per us\n", size,
                                  done / ktime_us_delta(ktime_get(), start));

                while (1) {
                        if (__inorder_to_tree(i, size, extra) != j)
                                panic("size %10u j %10u i %10u", size, j, i);

                        if (__to_inorder(j, size, extra) != i)
                                panic("size %10u j %10u i %10u", size, j, i);

                        if (j == rounddown_pow_of_two(size) - 1)
                                break;

                        BUG_ON(inorder_prev(inorder_next(j, size), size) != j);

                        j = inorder_next(j, size);
                        i++;
                }

                done += size - 1;
        }
}
#endif

/*
 * Cacheline/offset <-> bkey pointer arithmetic:
 *
 * t->tree is a binary search tree in an array; each node corresponds to a key
 * in one cacheline in t->set (BSET_CACHELINE bytes).
 *
 * This means we don't have to store the full index of the key that a node in
 * the binary tree points to; to_inorder() gives us the cacheline, and then
 * bkey_float->m gives us the offset within that cacheline, in units of 8 bytes.
 *
 * cacheline_to_bkey() and friends abstract out all the pointer arithmetic to
 * make this work.
 *
 * To construct the bfloat for an arbitrary key we need to know what the key
 * immediately preceding it is: we have to check if the two keys differ in the
 * bits we're going to store in bkey_float->mantissa. t->prev[j] stores the size
 * of the previous key so we can walk backwards to it from t->tree[j]'s key.
 */
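
/*
 * For example (illustrative numbers): a node with m == 2 pointing into
 * cacheline 3 describes the bkey at
 * (void *) t->data + 3 * BSET_CACHELINE + 2 * 8, which is exactly what
 * cacheline_to_bkey(t, 3, 2) below computes.
 */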

static struct bkey *cacheline_to_bkey(struct bset_tree *t,
                                      unsigned int cacheline,
                                      unsigned int offset)
{
        return ((void *) t->data) + cacheline * BSET_CACHELINE + offset * 8;
}

static unsigned int bkey_to_cacheline(struct bset_tree *t, struct bkey *k)
{
        return ((void *) k - (void *) t->data) / BSET_CACHELINE;
}

static unsigned int bkey_to_cacheline_offset(struct bset_tree *t,
                                             unsigned int cacheline,
                                             struct bkey *k)
{
        return (u64 *) k - (u64 *) cacheline_to_bkey(t, cacheline, 0);
}

static struct bkey *tree_to_bkey(struct bset_tree *t, unsigned int j)
{
        return cacheline_to_bkey(t, to_inorder(j, t), t->tree[j].m);
}

static struct bkey *tree_to_prev_bkey(struct bset_tree *t, unsigned int j)
{
        return (void *) (((uint64_t *) tree_to_bkey(t, j)) - t->prev[j]);
}

/*
 * For the write set - the one we're currently inserting keys into - we don't
 * maintain a full search tree, we just keep a simple lookup table in t->prev.
 */
static struct bkey *table_to_bkey(struct bset_tree *t, unsigned int cacheline)
{
        return cacheline_to_bkey(t, cacheline, t->prev[cacheline]);
}

static inline uint64_t shrd128(uint64_t high, uint64_t low, uint8_t shift)
{
        low >>= shift;
        low |= (high << 1) << (63U - shift);
        return low;
}

/*
 * Calculate mantissa value for struct bkey_float.
 * If the most significant bit of f->exponent is not set, then
 *  - f->exponent >> 6 is 0
 *  - p[0] points to bkey->low
 *  - p[-1] borrows bits from KEY_INODE() of bkey->high
 * If the most significant bit of f->exponent is set, then
 *  - f->exponent >> 6 is 1
 *  - p[0] points to bits from KEY_INODE() of bkey->high
 *  - p[-1] points to other bits from KEY_INODE() of
 *    bkey->high too.
 * See make_bfloat() to check when the most significant bit of f->exponent
 * is set or not.
 */
static inline unsigned int bfloat_mantissa(const struct bkey *k,
                                           struct bkey_float *f)
{
        const uint64_t *p = &k->low - (f->exponent >> 6);

        return shrd128(p[-1], p[0], f->exponent & 63) & BKEY_MANTISSA_MASK;
}
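
/*
 * Worked example (illustrative): suppose the keys bracketing t->tree[j]
 * share KEY_INODE() and their ->low values first differ at bit 39, so
 * make_bfloat() below computes f->exponent = fls64(r->low ^ l->low) -
 * BKEY_MANTISSA_BITS = 40 - 22 = 18. Then for any key k,
 * bfloat_mantissa(k, f) is simply (k->low >> 18) & BKEY_MANTISSA_MASK:
 * the 22 bits that actually distinguish keys in this subtree.
 */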

static void make_bfloat(struct bset_tree *t, unsigned int j)
{
        struct bkey_float *f = &t->tree[j];
        struct bkey *m = tree_to_bkey(t, j);
        struct bkey *p = tree_to_prev_bkey(t, j);

        struct bkey *l = is_power_of_2(j)
                ? t->data->start
                : tree_to_prev_bkey(t, j >> ffs(j));

        struct bkey *r = is_power_of_2(j + 1)
                ? bset_bkey_idx(t->data, t->data->keys - bkey_u64s(&t->end))
                : tree_to_bkey(t, j >> (ffz(j) + 1));

        BUG_ON(m < l || m > r);
        BUG_ON(bkey_next(p) != m);

        /*
         * If l and r have different KEY_INODE values (different backing
         * device), f->exponent records how many least significant bits
         * are different in KEY_INODE values and sets most significant
         * bits to 1 (by +64).
         * If l and r have the same KEY_INODE value, f->exponent records
         * how many bits differ in the least significant bits of bkey->low.
         * See bfloat_mantissa() for how the most significant bit of
         * f->exponent is used to calculate the bfloat mantissa value.
         */
        if (KEY_INODE(l) != KEY_INODE(r))
                f->exponent = fls64(KEY_INODE(r) ^ KEY_INODE(l)) + 64;
        else
                f->exponent = fls64(r->low ^ l->low);

        f->exponent = max_t(int, f->exponent - BKEY_MANTISSA_BITS, 0);

        /*
         * Setting f->exponent = 127 flags this node as failed, and causes the
         * lookup code to fall back to comparing against the original key.
         */
        if (bfloat_mantissa(m, f) != bfloat_mantissa(p, f))
                f->mantissa = bfloat_mantissa(m, f) - 1;
        else
                f->exponent = 127;
}

static void bset_alloc_tree(struct btree_keys *b, struct bset_tree *t)
{
        if (t != b->set) {
                unsigned int j = roundup(t[-1].size,
                                         64 / sizeof(struct bkey_float));

                t->tree = t[-1].tree + j;
                t->prev = t[-1].prev + j;
        }

        while (t < b->set + MAX_BSETS)
                t++->size = 0;
}

static void bch_bset_build_unwritten_tree(struct btree_keys *b)
{
        struct bset_tree *t = bset_tree_last(b);

        BUG_ON(b->last_set_unwritten);
        b->last_set_unwritten = 1;

        bset_alloc_tree(b, t);

        if (t->tree != b->set->tree + btree_keys_cachelines(b)) {
                t->prev[0] = bkey_to_cacheline_offset(t, 0, t->data->start);
                t->size = 1;
        }
}

void bch_bset_init_next(struct btree_keys *b, struct bset *i, uint64_t magic)
{
        if (i != b->set->data) {
                b->set[++b->nsets].data = i;
                i->seq = b->set->data->seq;
        } else
                get_random_bytes(&i->seq, sizeof(uint64_t));

        i->magic = magic;
        i->version = 0;
        i->keys = 0;

        bch_bset_build_unwritten_tree(b);
}
EXPORT_SYMBOL(bch_bset_init_next);

/*
 * Build the auxiliary binary tree 'struct bset_tree *t'; this tree is used to
 * accelerate bkey search in a btree node (pointed to by bset_tree->data in
 * memory). After searching the auxiliary tree by calling bset_search_tree(),
 * a struct bset_search_iter is returned which indicates the range [l, r] of
 * bset_tree->data where the bkey being searched for might be. A linear
 * comparison then does the exact search; see __bch_bset_search() for how
 * the auxiliary tree is used.
 */
void bch_bset_build_written_tree(struct btree_keys *b)
{
        struct bset_tree *t = bset_tree_last(b);
        struct bkey *prev = NULL, *k = t->data->start;
        unsigned int j, cacheline = 1;

        b->last_set_unwritten = 0;

        bset_alloc_tree(b, t);

        t->size = min_t(unsigned int,
                        bkey_to_cacheline(t, bset_bkey_last(t->data)),
                        b->set->tree + btree_keys_cachelines(b) - t->tree);

        if (t->size < 2) {
                t->size = 0;
                return;
        }

        t->extra = (t->size - rounddown_pow_of_two(t->size - 1)) << 1;

        /* First we figure out where the first key in each cacheline is */
        for (j = inorder_next(0, t->size);
             j;
             j = inorder_next(j, t->size)) {
                while (bkey_to_cacheline(t, k) < cacheline)
                        prev = k, k = bkey_next(k);

                t->prev[j] = bkey_u64s(prev);
                t->tree[j].m = bkey_to_cacheline_offset(t, cacheline++, k);
        }

        while (bkey_next(k) != bset_bkey_last(t->data))
                k = bkey_next(k);

        t->end = *k;

        /* Then we build the tree */
        for (j = inorder_next(0, t->size);
             j;
             j = inorder_next(j, t->size))
                make_bfloat(t, j);
}
EXPORT_SYMBOL(bch_bset_build_written_tree);
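
/*
 * An explanatory note on the invariant the build loop above establishes:
 * the tree node at index j describes the first key that begins at or after
 * the boundary of cacheline to_inorder(j, t); t->tree[j].m is that key's
 * offset within the cacheline (in u64s) and t->prev[j] is the size of the
 * key before it. Cacheline 0 needs no node, since its first key is always
 * t->data->start.
 */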

/* Insert */

void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bkey *k)
{
        struct bset_tree *t;
        unsigned int inorder, j = 1;

        for (t = b->set; t <= bset_tree_last(b); t++)
                if (k < bset_bkey_last(t->data))
                        goto found_set;

        BUG();
found_set:
        if (!t->size || !bset_written(b, t))
                return;

        inorder = bkey_to_cacheline(t, k);

        if (k == t->data->start)
                goto fix_left;

        if (bkey_next(k) == bset_bkey_last(t->data)) {
                t->end = *k;
                goto fix_right;
        }

        j = inorder_to_tree(inorder, t);

        if (j &&
            j < t->size &&
            k == tree_to_bkey(t, j))
fix_left:       do {
                        make_bfloat(t, j);
                        j = j * 2;
                } while (j < t->size);

        j = inorder_to_tree(inorder + 1, t);

        if (j &&
            j < t->size &&
            k == tree_to_prev_bkey(t, j))
fix_right:      do {
                        make_bfloat(t, j);
                        j = j * 2 + 1;
                } while (j < t->size);
}
EXPORT_SYMBOL(bch_bset_fix_invalidated_key);

static void bch_bset_fix_lookup_table(struct btree_keys *b,
                                      struct bset_tree *t,
                                      struct bkey *k)
{
        unsigned int shift = bkey_u64s(k);
        unsigned int j = bkey_to_cacheline(t, k);

        /* We're getting called from btree_split() or btree_gc, just bail out */
        if (!t->size)
                return;

        /*
         * k is the key we just inserted; we need to find the entry in the
         * lookup table for the first key that is strictly greater than k:
         * it's either k's cacheline or the next one
         */
        while (j < t->size &&
               table_to_bkey(t, j) <= k)
                j++;

        /*
         * Adjust all the lookup table entries, and find a new key for any that
         * have gotten too big
         */
        for (; j < t->size; j++) {
                t->prev[j] += shift;

                if (t->prev[j] > 7) {
                        k = table_to_bkey(t, j - 1);

                        while (k < cacheline_to_bkey(t, j, 0))
                                k = bkey_next(k);

                        t->prev[j] = bkey_to_cacheline_offset(t, j, k);
                }
        }

        if (t->size == b->set->tree + btree_keys_cachelines(b) - t->tree)
                return;

        /* Possibly add a new entry to the end of the lookup table */

        for (k = table_to_bkey(t, t->size - 1);
             k != bset_bkey_last(t->data);
             k = bkey_next(k))
                if (t->size == bkey_to_cacheline(t, k)) {
                        t->prev[t->size] =
                                bkey_to_cacheline_offset(t, t->size, k);
                        t->size++;
                }
}

/*
 * Tries to merge l and r: l should be lower than r
 * Returns true if we were able to merge. If we did merge, l will be the merged
 * key, r will be untouched.
 */
bool bch_bkey_try_merge(struct btree_keys *b, struct bkey *l, struct bkey *r)
{
        if (!b->ops->key_merge)
                return false;

        /*
         * Generic header checks
         * Assumes left and right are in order
         * Left and right must be exactly aligned
         */
        if (!bch_bkey_equal_header(l, r) ||
            bkey_cmp(l, &START_KEY(r)))
                return false;

        return b->ops->key_merge(b, l, r);
}
EXPORT_SYMBOL(bch_bkey_try_merge);

void bch_bset_insert(struct btree_keys *b, struct bkey *where,
                     struct bkey *insert)
{
        struct bset_tree *t = bset_tree_last(b);

        BUG_ON(!b->last_set_unwritten);
        BUG_ON(bset_byte_offset(b, t->data) +
               __set_bytes(t->data, t->data->keys + bkey_u64s(insert)) >
               PAGE_SIZE << b->page_order);

        memmove((uint64_t *) where + bkey_u64s(insert),
                where,
                (void *) bset_bkey_last(t->data) - (void *) where);

        t->data->keys += bkey_u64s(insert);
        bkey_copy(where, insert);
        bch_bset_fix_lookup_table(b, t, where);
}
EXPORT_SYMBOL(bch_bset_insert);

unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
                                  struct bkey *replace_key)
{
        unsigned int status = BTREE_INSERT_STATUS_NO_INSERT;
        struct bset *i = bset_tree_last(b)->data;
        struct bkey *m, *prev = NULL;
        struct btree_iter iter;
        struct bkey preceding_key_on_stack = ZERO_KEY;
        struct bkey *preceding_key_p = &preceding_key_on_stack;

        BUG_ON(b->ops->is_extents && !KEY_SIZE(k));

        /*
         * If k has a preceding key, preceding_key_p will be set to the
         * address of k's preceding key; otherwise preceding_key_p will be
         * set to NULL inside preceding_key().
         */
        if (b->ops->is_extents)
                preceding_key(&START_KEY(k), &preceding_key_p);
        else
                preceding_key(k, &preceding_key_p);

        m = bch_btree_iter_init(b, &iter, preceding_key_p);

        if (b->ops->insert_fixup(b, k, &iter, replace_key))
                return status;

        status = BTREE_INSERT_STATUS_INSERT;

        while (m != bset_bkey_last(i) &&
               bkey_cmp(k, b->ops->is_extents ? &START_KEY(m) : m) > 0)
                prev = m, m = bkey_next(m);

        /* prev is in the tree, if we merge we're done */
        status = BTREE_INSERT_STATUS_BACK_MERGE;
        if (prev &&
            bch_bkey_try_merge(b, prev, k))
                goto merged;
#if 0
        status = BTREE_INSERT_STATUS_OVERWROTE;
        if (m != bset_bkey_last(i) &&
            KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m))
                goto copy;
#endif
        status = BTREE_INSERT_STATUS_FRONT_MERGE;
        if (m != bset_bkey_last(i) &&
            bch_bkey_try_merge(b, k, m))
                goto copy;

        bch_bset_insert(b, m, k);
copy:   bkey_copy(m, k);
merged:
        return status;
}
EXPORT_SYMBOL(bch_btree_insert_key);
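
/*
 * Example usage (a minimal sketch under assumptions, not from a real
 * caller): insert a freshly built key into the current write set. KEY() is
 * the initializer from include/uapi/linux/bcache.h; locking and real error
 * handling are omitted.
 */
#if 0
static void example_insert(struct btree_keys *b)
{
        struct bkey k = KEY(0, 16, 8);  /* inode 0, extent [8, 16) */
        unsigned int status;

        /* NULL replace_key: plain insert rather than compare-and-swap */
        status = bch_btree_insert_key(b, &k, NULL);
        if (status != BTREE_INSERT_STATUS_NO_INSERT)
                pr_debug("inserted (status %u)", status);
}
#endif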

/* Lookup */

struct bset_search_iter {
        struct bkey *l, *r;
};

static struct bset_search_iter bset_search_write_set(struct bset_tree *t,
                                                     const struct bkey *search)
{
        unsigned int li = 0, ri = t->size;

        while (li + 1 != ri) {
                unsigned int m = (li + ri) >> 1;

                if (bkey_cmp(table_to_bkey(t, m), search) > 0)
                        ri = m;
                else
                        li = m;
        }

        return (struct bset_search_iter) {
                table_to_bkey(t, li),
                ri < t->size ? table_to_bkey(t, ri) : bset_bkey_last(t->data)
        };
}

static struct bset_search_iter bset_search_tree(struct bset_tree *t,
                                                const struct bkey *search)
{
        struct bkey *l, *r;
        struct bkey_float *f;
        unsigned int inorder, j, n = 1;

        do {
                unsigned int p = n << 4;

                if (p < t->size)
                        prefetch(&t->tree[p]);

                j = n;
                f = &t->tree[j];

                if (likely(f->exponent != 127)) {
                        if (f->mantissa >= bfloat_mantissa(search, f))
                                n = j * 2;
                        else
                                n = j * 2 + 1;
                } else {
                        if (bkey_cmp(tree_to_bkey(t, j), search) > 0)
                                n = j * 2;
                        else
                                n = j * 2 + 1;
                }
        } while (n < t->size);

        inorder = to_inorder(j, t);

        /*
         * n would have been the node we recursed to - the low bit tells us if
         * we recursed left or recursed right.
         */
        if (n & 1) {
                l = cacheline_to_bkey(t, inorder, f->m);

                if (++inorder != t->size) {
                        f = &t->tree[inorder_next(j, t->size)];
                        r = cacheline_to_bkey(t, inorder, f->m);
                } else
                        r = bset_bkey_last(t->data);
        } else {
                r = cacheline_to_bkey(t, inorder, f->m);

                if (--inorder) {
                        f = &t->tree[inorder_prev(j, t->size)];
                        l = cacheline_to_bkey(t, inorder, f->m);
                } else
                        l = t->data->start;
        }

        return (struct bset_search_iter) {l, r};
}

struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
                               const struct bkey *search)
{
        struct bset_search_iter i;

        /*
         * First, we search for a cacheline, then we do a linear search
         * within that cacheline.
         *
         * To search for the cacheline, there are three different possibilities:
         * * The set is too small to have a search tree, so we just do a linear
         *   search over the whole set.
         * * The set is the one we're currently inserting into; keeping a full
         *   auxiliary search tree up to date would be too expensive, so we
         *   use a much simpler lookup table to do a binary search -
         *   bset_search_write_set().
         * * Or we use the auxiliary search tree we constructed earlier -
         *   bset_search_tree()
         */

        if (unlikely(!t->size)) {
                i.l = t->data->start;
                i.r = bset_bkey_last(t->data);
        } else if (bset_written(b, t)) {
                /*
                 * Each node in the auxiliary search tree covers a certain range
                 * of bits, and keys above and below the set it covers might
                 * differ outside those bits - so we have to special case the
                 * start and end - handle that here:
                 */

                if (unlikely(bkey_cmp(search, &t->end) >= 0))
                        return bset_bkey_last(t->data);

                if (unlikely(bkey_cmp(search, t->data->start) < 0))
                        return t->data->start;

                i = bset_search_tree(t, search);
        } else {
                BUG_ON(!b->nsets &&
                       t->size < bkey_to_cacheline(t, bset_bkey_last(t->data)));

                i = bset_search_write_set(t, search);
        }

        if (btree_keys_expensive_checks(b)) {
                BUG_ON(bset_written(b, t) &&
                       i.l != t->data->start &&
                       bkey_cmp(tree_to_prev_bkey(t,
                          inorder_to_tree(bkey_to_cacheline(t, i.l), t)),
                                search) > 0);

                BUG_ON(i.r != bset_bkey_last(t->data) &&
                       bkey_cmp(i.r, search) <= 0);
        }

        while (likely(i.l != i.r) &&
               bkey_cmp(i.l, search) <= 0)
                i.l = bkey_next(i.l);

        return i.l;
}
EXPORT_SYMBOL(__bch_bset_search);

/* Btree iterator */

typedef bool (btree_iter_cmp_fn)(struct btree_iter_set,
                                 struct btree_iter_set);

static inline bool btree_iter_cmp(struct btree_iter_set l,
                                  struct btree_iter_set r)
{
        return bkey_cmp(l.k, r.k) > 0;
}

static inline bool btree_iter_end(struct btree_iter *iter)
{
        return !iter->used;
}

void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
                         struct bkey *end)
{
        if (k != end)
                BUG_ON(!heap_add(iter,
                                 ((struct btree_iter_set) { k, end }),
                                 btree_iter_cmp));
}

static struct bkey *__bch_btree_iter_init(struct btree_keys *b,
                                          struct btree_iter *iter,
                                          struct bkey *search,
                                          struct bset_tree *start)
{
        struct bkey *ret = NULL;

        iter->size = ARRAY_SIZE(iter->data);
        iter->used = 0;

#ifdef CONFIG_BCACHE_DEBUG
        iter->b = b;
#endif

        for (; start <= bset_tree_last(b); start++) {
                ret = bch_bset_search(b, start, search);
                bch_btree_iter_push(iter, ret, bset_bkey_last(start->data));
        }

        return ret;
}

struct bkey *bch_btree_iter_init(struct btree_keys *b,
                                 struct btree_iter *iter,
                                 struct bkey *search)
{
        return __bch_btree_iter_init(b, iter, search, b->set);
}
EXPORT_SYMBOL(bch_btree_iter_init);

static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter,
                                                 btree_iter_cmp_fn *cmp)
{
        struct btree_iter_set b __maybe_unused;
        struct bkey *ret = NULL;

        if (!btree_iter_end(iter)) {
                bch_btree_iter_next_check(iter);

                ret = iter->data->k;
                iter->data->k = bkey_next(iter->data->k);

                if (iter->data->k > iter->data->end) {
                        WARN_ONCE(1, "bset was corrupt!\n");
                        iter->data->k = iter->data->end;
                }

                if (iter->data->k == iter->data->end)
                        heap_pop(iter, b, cmp);
                else
                        heap_sift(iter, 0, cmp);
        }

        return ret;
}

struct bkey *bch_btree_iter_next(struct btree_iter *iter)
{
        return __bch_btree_iter_next(iter, btree_iter_cmp);
}
EXPORT_SYMBOL(bch_btree_iter_next);
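
/*
 * Example usage (a minimal sketch, mirroring the for_each_key() macro in
 * bset.h): walk every key in a node in sorted order. The iterator merges
 * all the node's bsets on the fly via the binary heap.
 */
#if 0
static void example_walk(struct btree_keys *b)
{
        struct btree_iter iter;
        struct bkey *k;

        bch_btree_iter_init(b, &iter, NULL);
        while ((k = bch_btree_iter_next(&iter)))
                pr_info("%llu:%llu\n", KEY_INODE(k), KEY_OFFSET(k));
}
#endif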

struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
                                        struct btree_keys *b, ptr_filter_fn fn)
{
        struct bkey *ret;

        do {
                ret = bch_btree_iter_next(iter);
        } while (ret && fn(b, ret));

        return ret;
}

/* Mergesort */

void bch_bset_sort_state_free(struct bset_sort_state *state)
{
        mempool_exit(&state->pool);
}

int bch_bset_sort_state_init(struct bset_sort_state *state,
                             unsigned int page_order)
{
        spin_lock_init(&state->time.lock);

        state->page_order = page_order;
        state->crit_factor = int_sqrt(1 << page_order);

        return mempool_init_page_pool(&state->pool, 1, page_order);
}
EXPORT_SYMBOL(bch_bset_sort_state_init);

static void btree_mergesort(struct btree_keys *b, struct bset *out,
                            struct btree_iter *iter,
                            bool fixup, bool remove_stale)
{
        int i;
        struct bkey *k, *last = NULL;
        BKEY_PADDED(k) tmp;
        bool (*bad)(struct btree_keys *, const struct bkey *) = remove_stale
                ? bch_ptr_bad
                : bch_ptr_invalid;

        /* Heapify the iterator, using our comparison function */
        for (i = iter->used / 2 - 1; i >= 0; --i)
                heap_sift(iter, i, b->ops->sort_cmp);

        while (!btree_iter_end(iter)) {
                if (b->ops->sort_fixup && fixup)
                        k = b->ops->sort_fixup(iter, &tmp.k);
                else
                        k = NULL;

                if (!k)
                        k = __bch_btree_iter_next(iter, b->ops->sort_cmp);

                if (bad(b, k))
                        continue;

                if (!last) {
                        last = out->start;
                        bkey_copy(last, k);
                } else if (!bch_bkey_try_merge(b, last, k)) {
                        last = bkey_next(last);
                        bkey_copy(last, k);
                }
        }

        out->keys = last ? (uint64_t *) bkey_next(last) - out->d : 0;

        pr_debug("sorted %i keys", out->keys);
}

static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
                         unsigned int start, unsigned int order, bool fixup,
                         struct bset_sort_state *state)
{
        uint64_t start_time;
        bool used_mempool = false;
        struct bset *out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOWAIT,
                                                     order);
        if (!out) {
                struct page *outp;

                BUG_ON(order > state->page_order);

                outp = mempool_alloc(&state->pool, GFP_NOIO);
                out = page_address(outp);
                used_mempool = true;
                order = state->page_order;
        }

        start_time = local_clock();

        btree_mergesort(b, out, iter, fixup, false);
        b->nsets = start;

        if (!start && order == b->page_order) {
                /*
                 * Our temporary buffer is the same size as the btree node's
                 * buffer, we can just swap buffers instead of doing a big
                 * memcpy()
                 */
                out->magic = b->set->data->magic;
                out->seq = b->set->data->seq;
                out->version = b->set->data->version;
                swap(out, b->set->data);
        } else {
                b->set[start].data->keys = out->keys;
                memcpy(b->set[start].data->start, out->start,
                       (void *) bset_bkey_last(out) - (void *) out->start);
        }

        if (used_mempool)
                mempool_free(virt_to_page(out), &state->pool);
        else
                free_pages((unsigned long) out, order);

        bch_bset_build_written_tree(b);

        if (!start)
                bch_time_stats_update(&state->time, start_time);
}

void bch_btree_sort_partial(struct btree_keys *b, unsigned int start,
                            struct bset_sort_state *state)
{
        size_t order = b->page_order, keys = 0;
        struct btree_iter iter;
        int oldsize = bch_count_data(b);

        __bch_btree_iter_init(b, &iter, NULL, &b->set[start]);

        if (start) {
                unsigned int i;

                for (i = start; i <= b->nsets; i++)
                        keys += b->set[i].data->keys;

                order = get_order(__set_bytes(b->set->data, keys));
        }

        __btree_sort(b, &iter, start, order, false, state);

        EBUG_ON(oldsize >= 0 && bch_count_data(b) != oldsize);
}
EXPORT_SYMBOL(bch_btree_sort_partial);
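
/*
 * Note: bch_btree_sort() (a wrapper in bset.h, used by bch_btree_sort_lazy()
 * below) is just bch_btree_sort_partial() with start == 0, i.e. it merges
 * every bset in the node down into one sorted set.
 */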

void bch_btree_sort_and_fix_extents(struct btree_keys *b,
                                    struct btree_iter *iter,
                                    struct bset_sort_state *state)
{
        __btree_sort(b, iter, 0, b->page_order, true, state);
}

void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,
                         struct bset_sort_state *state)
{
        uint64_t start_time = local_clock();
        struct btree_iter iter;

        bch_btree_iter_init(b, &iter, NULL);

        btree_mergesort(b, new->set->data, &iter, false, true);

        bch_time_stats_update(&state->time, start_time);

        new->set->size = 0; // XXX: why?
}

#define SORT_CRIT       (4096 / sizeof(uint64_t))

void bch_btree_sort_lazy(struct btree_keys *b, struct bset_sort_state *state)
{
        unsigned int crit = SORT_CRIT;
        int i;

        /* Don't sort if nothing to do */
        if (!b->nsets)
                goto out;

        for (i = b->nsets - 1; i >= 0; --i) {
                crit *= state->crit_factor;

                if (b->set[i].data->keys < crit) {
                        bch_btree_sort_partial(b, i, state);
                        return;
                }
        }

        /* Sort if we'd overflow */
        if (b->nsets + 1 == MAX_BSETS) {
                bch_btree_sort(b, state);
                return;
        }

out:
        bch_bset_build_written_tree(b);
}
EXPORT_SYMBOL(bch_btree_sort_lazy);

void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *stats)
{
        unsigned int i;

        for (i = 0; i <= b->nsets; i++) {
                struct bset_tree *t = &b->set[i];
                size_t bytes = t->data->keys * sizeof(uint64_t);
                size_t j;

                if (bset_written(b, t)) {
                        stats->sets_written++;
                        stats->bytes_written += bytes;

                        stats->floats += t->size - 1;

                        for (j = 1; j < t->size; j++)
                                if (t->tree[j].exponent == 127)
                                        stats->failed++;
                } else {
                        stats->sets_unwritten++;
                        stats->bytes_unwritten += bytes;
                }
        }
}