// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 *
 * Uses a block device as cache for other block devices; optimized for SSDs.
 * All allocation is done in buckets, which should match the erase block size
 * of the device.
 *
 * Buckets containing cached data are kept on a heap sorted by priority;
 * bucket priority is increased on cache hit, and periodically all the buckets
 * on the heap have their priority scaled down. This currently is just used as
 * an LRU but in the future should allow for more intelligent heuristics.
 *
 * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
 * counter. Garbage collection is used to remove stale pointers.
 *
 * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
 * as keys are inserted we only sort the pages that have not yet been written.
 * When garbage collection is run, we resort the entire node.
 *
 * All configuration is done via sysfs; see Documentation/admin-guide/bcache.rst.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"

#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/sched/clock.h>
#include <linux/rculist.h>
#include <linux/delay.h>
#include <trace/events/bcache.h>

/*
 * Todo:
 * register_bcache: Return errors out to userspace correctly
 *
 * Writeback: don't undirty key until after a cache flush
 *
 * Create an iterator for key pointers
 *
 * On btree write error, mark bucket such that it won't be freed from the cache
 *
 * Journalling:
 *   Check for bad keys in replay
 *   Propagate barriers
 *   Refcount journal entries in journal_replay
 *
 * Garbage collection:
 *   Finish incremental gc
 *   Gc should free old UUIDs, data for invalid UUIDs
 *
 * Provide a way to list backing device UUIDs we have data cached for, and
 * probably how long it's been since we've seen them, and a way to invalidate
 * dirty data for devices that will never be attached again
 *
 * Keep 1 min/5 min/15 min statistics of how busy a block device has been, so
 * that based on that and how much dirty data we have we can keep writeback
 * from being starved
 *
 * Add a tracepoint or somesuch to watch for writeback starvation
 *
 * When btree depth > 1 and splitting an interior node, we have to make sure
 * alloc_bucket() cannot fail. This should be true but is not completely
 * obvious.
 *
 * Plugging?
 *
 * If data write is less than hard sector size of ssd, round up offset in open
 * bucket to the next whole sector
 *
 * Superblock needs to be fleshed out for multiple cache devices
 *
 * Add a sysfs tunable for the number of writeback IOs in flight
 *
 * Add a sysfs tunable for the number of open data buckets
 *
 * IO tracking: Can we track when one process is doing io on behalf of another?
 * IO tracking: Don't use just an average, weigh more recent stuff higher
 *
 * Test module load/unload
 */

#define MAX_NEED_GC		64
#define MAX_SAVE_PRIO		72
#define MAX_GC_TIMES		100
#define MIN_GC_NODES		100
#define GC_SLEEP_MS		100

#define PTR_DIRTY_BIT		(((uint64_t) 1 << 36))

#define PTR_HASH(c, k)	\
	(((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))

static struct workqueue_struct *btree_io_wq;

#define insert_lock(s, b)	((b)->level <= (s)->lock)


static inline struct bset *write_block(struct btree *b)
{
	return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c->cache);
}

static void bch_btree_init_next(struct btree *b)
{
	/* If not a leaf node, always sort */
	if (b->level && b->keys.nsets)
		bch_btree_sort(&b->keys, &b->c->sort);
	else
		bch_btree_sort_lazy(&b->keys, &b->c->sort);

	if (b->written < btree_blocks(b))
		bch_bset_init_next(&b->keys, write_block(b),
				   bset_magic(&b->c->cache->sb));

}

/* Btree key manipulation */

void bkey_put(struct cache_set *c, struct bkey *k)
{
	unsigned int i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i))
			atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
}

/* Btree IO */

static uint64_t btree_csum_set(struct btree *b, struct bset *i)
{
	uint64_t crc = b->key.ptr[0];
	void *data = (void *) i + 8, *end = bset_bkey_last(i);

	crc = crc64_be(crc, data, end - data);
	return crc ^ 0xffffffffffffffffULL;
}

void bch_btree_node_read_done(struct btree *b)
{
	const char *err = "bad btree header";
	struct bset *i = btree_bset_first(b);
	struct btree_iter *iter;

	/*
	 * c->fill_iter can allocate an iterator with more memory space
	 * than static MAX_BSETS.
	 * See the comment around cache_set->fill_iter.
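	 * (The iterator allocated below is sized to bucket_size / block_size,
	 * i.e. one entry per block, the most bsets a btree node bucket can hold.)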
	 */
	iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
	iter->size = b->c->cache->sb.bucket_size / b->c->cache->sb.block_size;
	iter->used = 0;

#ifdef CONFIG_BCACHE_DEBUG
	iter->b = &b->keys;
#endif

	if (!i->seq)
		goto err;

	for (;
	     b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq;
	     i = write_block(b)) {
		err = "unsupported bset version";
		if (i->version > BCACHE_BSET_VERSION)
			goto err;

		err = "bad btree header";
		if (b->written + set_blocks(i, block_bytes(b->c->cache)) >
		    btree_blocks(b))
			goto err;

		err = "bad magic";
		if (i->magic != bset_magic(&b->c->cache->sb))
			goto err;

		err = "bad checksum";
		switch (i->version) {
		case 0:
			if (i->csum != csum_set(i))
				goto err;
			break;
		case BCACHE_BSET_VERSION:
			if (i->csum != btree_csum_set(b, i))
				goto err;
			break;
		}

		err = "empty set";
		if (i != b->keys.set[0].data && !i->keys)
			goto err;

		bch_btree_iter_push(iter, i->start, bset_bkey_last(i));

		b->written += set_blocks(i, block_bytes(b->c->cache));
	}

	err = "corrupted btree";
	for (i = write_block(b);
	     bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
	     i = ((void *) i) + block_bytes(b->c->cache))
		if (i->seq == b->keys.set[0].data->seq)
			goto err;

	bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);

	i = b->keys.set[0].data;
	err = "short btree key";
	if (b->keys.set[0].size &&
	    bkey_cmp(&b->key, &b->keys.set[0].end) < 0)
		goto err;

	if (b->written < btree_blocks(b))
		bch_bset_init_next(&b->keys, write_block(b),
				   bset_magic(&b->c->cache->sb));
out:
	mempool_free(iter, &b->c->fill_iter);
	return;
err:
	set_btree_node_io_error(b);
	bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys",
			    err, PTR_BUCKET_NR(b->c, &b->key, 0),
			    bset_block_offset(b, i), i->keys);
	goto out;
}

static void btree_node_read_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;

	closure_put(cl);
}

static void bch_btree_node_read(struct btree *b)
{
	uint64_t start_time = local_clock();
	struct closure cl;
	struct bio *bio;

	trace_bcache_btree_read(b);

	closure_init_stack(&cl);

	bio = bch_bbio_alloc(b->c);
	bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
	bio->bi_end_io = btree_node_read_endio;
	bio->bi_private = &cl;
	bio->bi_opf = REQ_OP_READ | REQ_META;

	bch_bio_map(bio, b->keys.set[0].data);

	bch_submit_bbio(bio, b->c, &b->key, 0);
	closure_sync(&cl);

	if (bio->bi_status)
		set_btree_node_io_error(b);

	bch_bbio_free(bio, b->c);

	if (btree_node_io_error(b))
		goto err;

	bch_btree_node_read_done(b);
	bch_time_stats_update(&b->c->btree_read_time, start_time);

	return;
err:
	bch_cache_set_error(b->c, "io error reading bucket %zu",
			    PTR_BUCKET_NR(b->c, &b->key, 0));
}

static void btree_complete_write(struct btree *b, struct btree_write *w)
{
	if (w->prio_blocked &&
	    !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
		wake_up_allocators(b->c);

	if (w->journal) {
		atomic_dec_bug(w->journal);
		__closure_wake_up(&b->c->journal.wait);
	}

	w->prio_blocked = 0;
	w->journal = NULL;
}

static void btree_node_write_unlock(struct closure *cl)
{
	struct btree *b = container_of(cl, struct btree, io);

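	/* pairs with down(&b->io_mutex) taken in __bch_btree_node_write() */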
up(&b->io_mutex); 301 } 302 303 static void __btree_node_write_done(struct closure *cl) 304 { 305 struct btree *b = container_of(cl, struct btree, io); 306 struct btree_write *w = btree_prev_write(b); 307 308 bch_bbio_free(b->bio, b->c); 309 b->bio = NULL; 310 btree_complete_write(b, w); 311 312 if (btree_node_dirty(b)) 313 queue_delayed_work(btree_io_wq, &b->work, 30 * HZ); 314 315 closure_return_with_destructor(cl, btree_node_write_unlock); 316 } 317 318 static void btree_node_write_done(struct closure *cl) 319 { 320 struct btree *b = container_of(cl, struct btree, io); 321 322 bio_free_pages(b->bio); 323 __btree_node_write_done(cl); 324 } 325 326 static void btree_node_write_endio(struct bio *bio) 327 { 328 struct closure *cl = bio->bi_private; 329 struct btree *b = container_of(cl, struct btree, io); 330 331 if (bio->bi_status) 332 set_btree_node_io_error(b); 333 334 bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree"); 335 closure_put(cl); 336 } 337 338 static void do_btree_node_write(struct btree *b) 339 { 340 struct closure *cl = &b->io; 341 struct bset *i = btree_bset_last(b); 342 BKEY_PADDED(key) k; 343 344 i->version = BCACHE_BSET_VERSION; 345 i->csum = btree_csum_set(b, i); 346 347 BUG_ON(b->bio); 348 b->bio = bch_bbio_alloc(b->c); 349 350 b->bio->bi_end_io = btree_node_write_endio; 351 b->bio->bi_private = cl; 352 b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c->cache)); 353 b->bio->bi_opf = REQ_OP_WRITE | REQ_META | REQ_FUA; 354 bch_bio_map(b->bio, i); 355 356 /* 357 * If we're appending to a leaf node, we don't technically need FUA - 358 * this write just needs to be persisted before the next journal write, 359 * which will be marked FLUSH|FUA. 360 * 361 * Similarly if we're writing a new btree root - the pointer is going to 362 * be in the next journal entry. 363 * 364 * But if we're writing a new btree node (that isn't a root) or 365 * appending to a non leaf btree node, we need either FUA or a flush 366 * when we write the parent with the new pointer. FUA is cheaper than a 367 * flush, and writes appending to leaf nodes aren't blocking anything so 368 * just make all btree node writes FUA to keep things sane. 
369 */ 370 371 bkey_copy(&k.key, &b->key); 372 SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) + 373 bset_sector_offset(&b->keys, i)); 374 375 if (!bch_bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) { 376 struct bio_vec *bv; 377 void *addr = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1)); 378 struct bvec_iter_all iter_all; 379 380 bio_for_each_segment_all(bv, b->bio, iter_all) { 381 memcpy(page_address(bv->bv_page), addr, PAGE_SIZE); 382 addr += PAGE_SIZE; 383 } 384 385 bch_submit_bbio(b->bio, b->c, &k.key, 0); 386 387 continue_at(cl, btree_node_write_done, NULL); 388 } else { 389 /* 390 * No problem for multipage bvec since the bio is 391 * just allocated 392 */ 393 b->bio->bi_vcnt = 0; 394 bch_bio_map(b->bio, i); 395 396 bch_submit_bbio(b->bio, b->c, &k.key, 0); 397 398 closure_sync(cl); 399 continue_at_nobarrier(cl, __btree_node_write_done, NULL); 400 } 401 } 402 403 void __bch_btree_node_write(struct btree *b, struct closure *parent) 404 { 405 struct bset *i = btree_bset_last(b); 406 407 lockdep_assert_held(&b->write_lock); 408 409 trace_bcache_btree_write(b); 410 411 BUG_ON(current->bio_list); 412 BUG_ON(b->written >= btree_blocks(b)); 413 BUG_ON(b->written && !i->keys); 414 BUG_ON(btree_bset_first(b)->seq != i->seq); 415 bch_check_keys(&b->keys, "writing"); 416 417 cancel_delayed_work(&b->work); 418 419 /* If caller isn't waiting for write, parent refcount is cache set */ 420 down(&b->io_mutex); 421 closure_init(&b->io, parent ?: &b->c->cl); 422 423 clear_bit(BTREE_NODE_dirty, &b->flags); 424 change_bit(BTREE_NODE_write_idx, &b->flags); 425 426 do_btree_node_write(b); 427 428 atomic_long_add(set_blocks(i, block_bytes(b->c->cache)) * b->c->cache->sb.block_size, 429 &b->c->cache->btree_sectors_written); 430 431 b->written += set_blocks(i, block_bytes(b->c->cache)); 432 } 433 434 void bch_btree_node_write(struct btree *b, struct closure *parent) 435 { 436 unsigned int nsets = b->keys.nsets; 437 438 lockdep_assert_held(&b->lock); 439 440 __bch_btree_node_write(b, parent); 441 442 /* 443 * do verify if there was more than one set initially (i.e. we did a 444 * sort) and we sorted down to a single set: 445 */ 446 if (nsets && !b->keys.nsets) 447 bch_btree_verify(b); 448 449 bch_btree_init_next(b); 450 } 451 452 static void bch_btree_node_write_sync(struct btree *b) 453 { 454 struct closure cl; 455 456 closure_init_stack(&cl); 457 458 mutex_lock(&b->write_lock); 459 bch_btree_node_write(b, &cl); 460 mutex_unlock(&b->write_lock); 461 462 closure_sync(&cl); 463 } 464 465 static void btree_node_write_work(struct work_struct *w) 466 { 467 struct btree *b = container_of(to_delayed_work(w), struct btree, work); 468 469 mutex_lock(&b->write_lock); 470 if (btree_node_dirty(b)) 471 __bch_btree_node_write(b, NULL); 472 mutex_unlock(&b->write_lock); 473 } 474 475 static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref) 476 { 477 struct bset *i = btree_bset_last(b); 478 struct btree_write *w = btree_current_write(b); 479 480 lockdep_assert_held(&b->write_lock); 481 482 BUG_ON(!b->written); 483 BUG_ON(!i->keys); 484 485 if (!btree_node_dirty(b)) 486 queue_delayed_work(btree_io_wq, &b->work, 30 * HZ); 487 488 set_btree_node_dirty(b); 489 490 /* 491 * w->journal is always the oldest journal pin of all bkeys 492 * in the leaf node, to make sure the oldest jset seq won't 493 * be increased before this btree node is flushed. 
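	 * (Hence the check below: if the node already holds a newer journal
	 * pin, it is dropped and the older journal_ref is taken instead, so
	 * w->journal stays the oldest pin.)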
494 */ 495 if (journal_ref) { 496 if (w->journal && 497 journal_pin_cmp(b->c, w->journal, journal_ref)) { 498 atomic_dec_bug(w->journal); 499 w->journal = NULL; 500 } 501 502 if (!w->journal) { 503 w->journal = journal_ref; 504 atomic_inc(w->journal); 505 } 506 } 507 508 /* Force write if set is too big */ 509 if (set_bytes(i) > PAGE_SIZE - 48 && 510 !current->bio_list) 511 bch_btree_node_write(b, NULL); 512 } 513 514 /* 515 * Btree in memory cache - allocation/freeing 516 * mca -> memory cache 517 */ 518 519 #define mca_reserve(c) (((!IS_ERR_OR_NULL(c->root) && c->root->level) \ 520 ? c->root->level : 1) * 8 + 16) 521 #define mca_can_free(c) \ 522 max_t(int, 0, c->btree_cache_used - mca_reserve(c)) 523 524 static void mca_data_free(struct btree *b) 525 { 526 BUG_ON(b->io_mutex.count != 1); 527 528 bch_btree_keys_free(&b->keys); 529 530 b->c->btree_cache_used--; 531 list_move(&b->list, &b->c->btree_cache_freed); 532 } 533 534 static void mca_bucket_free(struct btree *b) 535 { 536 BUG_ON(btree_node_dirty(b)); 537 538 b->key.ptr[0] = 0; 539 hlist_del_init_rcu(&b->hash); 540 list_move(&b->list, &b->c->btree_cache_freeable); 541 } 542 543 static unsigned int btree_order(struct bkey *k) 544 { 545 return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1); 546 } 547 548 static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp) 549 { 550 if (!bch_btree_keys_alloc(&b->keys, 551 max_t(unsigned int, 552 ilog2(b->c->btree_pages), 553 btree_order(k)), 554 gfp)) { 555 b->c->btree_cache_used++; 556 list_move(&b->list, &b->c->btree_cache); 557 } else { 558 list_move(&b->list, &b->c->btree_cache_freed); 559 } 560 } 561 562 #define cmp_int(l, r) ((l > r) - (l < r)) 563 564 #ifdef CONFIG_PROVE_LOCKING 565 static int btree_lock_cmp_fn(const struct lockdep_map *_a, 566 const struct lockdep_map *_b) 567 { 568 const struct btree *a = container_of(_a, struct btree, lock.dep_map); 569 const struct btree *b = container_of(_b, struct btree, lock.dep_map); 570 571 return -cmp_int(a->level, b->level) ?: bkey_cmp(&a->key, &b->key); 572 } 573 574 static void btree_lock_print_fn(const struct lockdep_map *map) 575 { 576 const struct btree *b = container_of(map, struct btree, lock.dep_map); 577 578 printk(KERN_CONT " l=%u %llu:%llu", b->level, 579 KEY_INODE(&b->key), KEY_OFFSET(&b->key)); 580 } 581 #endif 582 583 static struct btree *mca_bucket_alloc(struct cache_set *c, 584 struct bkey *k, gfp_t gfp) 585 { 586 /* 587 * kzalloc() is necessary here for initialization, 588 * see code comments in bch_btree_keys_init(). 
 */
	struct btree *b = kzalloc(sizeof(struct btree), gfp);

	if (!b)
		return NULL;

	init_rwsem(&b->lock);
	lock_set_cmp_fn(&b->lock, btree_lock_cmp_fn, btree_lock_print_fn);
	mutex_init(&b->write_lock);
	lockdep_set_novalidate_class(&b->write_lock);
	INIT_LIST_HEAD(&b->list);
	INIT_DELAYED_WORK(&b->work, btree_node_write_work);
	b->c = c;
	sema_init(&b->io_mutex, 1);

	mca_data_alloc(b, k, gfp);
	return b;
}

static int mca_reap(struct btree *b, unsigned int min_order, bool flush)
{
	struct closure cl;

	closure_init_stack(&cl);
	lockdep_assert_held(&b->c->bucket_lock);

	if (!down_write_trylock(&b->lock))
		return -ENOMEM;

	BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data);

	if (b->keys.page_order < min_order)
		goto out_unlock;

	if (!flush) {
		if (btree_node_dirty(b))
			goto out_unlock;

		if (down_trylock(&b->io_mutex))
			goto out_unlock;
		up(&b->io_mutex);
	}

retry:
	/*
	 * BTREE_NODE_dirty might be cleared in btree_flush_write() by
	 * __bch_btree_node_write(). To avoid an extra flush, acquire
	 * b->write_lock before checking the BTREE_NODE_dirty bit.
	 */
	mutex_lock(&b->write_lock);
	/*
	 * If this btree node is selected in btree_flush_write() by journal
	 * code, delay and retry until the node is flushed by journal code
	 * and the BTREE_NODE_journal_flush bit is cleared by btree_flush_write().
	 */
	if (btree_node_journal_flush(b)) {
		pr_debug("bnode %p is flushing by journal, retry\n", b);
		mutex_unlock(&b->write_lock);
		udelay(1);
		goto retry;
	}

	if (btree_node_dirty(b))
		__bch_btree_node_write(b, &cl);
	mutex_unlock(&b->write_lock);

	closure_sync(&cl);

	/* wait for any in flight btree write */
	down(&b->io_mutex);
	up(&b->io_mutex);

	return 0;
out_unlock:
	rw_unlock(true, b);
	return -ENOMEM;
}

static unsigned long bch_mca_scan(struct shrinker *shrink,
				  struct shrink_control *sc)
{
	struct cache_set *c = container_of(shrink, struct cache_set, shrink);
	struct btree *b, *t;
	unsigned long i, nr = sc->nr_to_scan;
	unsigned long freed = 0;
	unsigned int btree_cache_used;

	if (c->shrinker_disabled)
		return SHRINK_STOP;

	if (c->btree_cache_alloc_lock)
		return SHRINK_STOP;

	/* Return -1 if we can't do anything right now */
	if (sc->gfp_mask & __GFP_IO)
		mutex_lock(&c->bucket_lock);
	else if (!mutex_trylock(&c->bucket_lock))
		return -1;

	/*
	 * It's _really_ critical that we don't free too many btree nodes - we
	 * have to always leave ourselves a reserve.  The reserve is how we
	 * guarantee that allocating memory for a new btree node can always
	 * succeed, so that inserting keys into the btree can always succeed and
	 * IO can always make forward progress:
	 */
	nr /= c->btree_pages;
	if (nr == 0)
		nr = 1;
	nr = min_t(unsigned long, nr, mca_can_free(c));

	i = 0;
	btree_cache_used = c->btree_cache_used;
	list_for_each_entry_safe_reverse(b, t, &c->btree_cache_freeable, list) {
		if (nr <= 0)
			goto out;

		if (!mca_reap(b, 0, false)) {
			mca_data_free(b);
			rw_unlock(true, b);
			freed++;
		}
		nr--;
		i++;
	}

	list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) {
		if (nr <= 0 || i >= btree_cache_used)
			goto out;

		if (!mca_reap(b, 0, false)) {
			mca_bucket_free(b);
			mca_data_free(b);
			rw_unlock(true, b);
			freed++;
		}

		nr--;
		i++;
	}
out:
	mutex_unlock(&c->bucket_lock);
	return freed * c->btree_pages;
}

static unsigned long bch_mca_count(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	struct cache_set *c = container_of(shrink, struct cache_set, shrink);

	if (c->shrinker_disabled)
		return 0;

	if (c->btree_cache_alloc_lock)
		return 0;

	return mca_can_free(c) * c->btree_pages;
}

void bch_btree_cache_free(struct cache_set *c)
{
	struct btree *b;
	struct closure cl;

	closure_init_stack(&cl);

	if (c->shrink.list.next)
		unregister_shrinker(&c->shrink);

	mutex_lock(&c->bucket_lock);

#ifdef CONFIG_BCACHE_DEBUG
	if (c->verify_data)
		list_move(&c->verify_data->list, &c->btree_cache);

	free_pages((unsigned long) c->verify_ondisk, ilog2(meta_bucket_pages(&c->cache->sb)));
#endif

	list_splice(&c->btree_cache_freeable,
		    &c->btree_cache);

	while (!list_empty(&c->btree_cache)) {
		b = list_first_entry(&c->btree_cache, struct btree, list);

		/*
		 * This function is called by cache_set_free(), there is no I/O
		 * request on the cache now, so it is unnecessary to acquire
		 * b->write_lock before clearing BTREE_NODE_dirty anymore.
		 */
		if (btree_node_dirty(b)) {
			btree_complete_write(b, btree_current_write(b));
			clear_bit(BTREE_NODE_dirty, &b->flags);
		}
		mca_data_free(b);
	}

	while (!list_empty(&c->btree_cache_freed)) {
		b = list_first_entry(&c->btree_cache_freed,
				     struct btree, list);
		list_del(&b->list);
		cancel_delayed_work_sync(&b->work);
		kfree(b);
	}

	mutex_unlock(&c->bucket_lock);
}

int bch_btree_cache_alloc(struct cache_set *c)
{
	unsigned int i;

	for (i = 0; i < mca_reserve(c); i++)
		if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
			return -ENOMEM;

	list_splice_init(&c->btree_cache,
			 &c->btree_cache_freeable);

#ifdef CONFIG_BCACHE_DEBUG
	mutex_init(&c->verify_lock);

	c->verify_ondisk = (void *)
		__get_free_pages(GFP_KERNEL|__GFP_COMP,
				 ilog2(meta_bucket_pages(&c->cache->sb)));
	if (!c->verify_ondisk) {
		/*
		 * Don't worry about the mca_reserve() buckets
		 * allocated in the previous for-loop, they will be
		 * handled properly in bch_cache_set_unregister().
		 */
		return -ENOMEM;
	}

	c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);

	if (c->verify_data &&
	    c->verify_data->keys.set->data)
		list_del_init(&c->verify_data->list);
	else
		c->verify_data = NULL;
#endif

	c->shrink.count_objects = bch_mca_count;
	c->shrink.scan_objects = bch_mca_scan;
	c->shrink.seeks = 4;
	c->shrink.batch = c->btree_pages * 2;

	if (register_shrinker(&c->shrink, "md-bcache:%pU", c->set_uuid))
		pr_warn("bcache: %s: could not register shrinker\n",
			__func__);

	return 0;
}

/* Btree in memory cache - hash table */

static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k)
{
	return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)];
}

static struct btree *mca_find(struct cache_set *c, struct bkey *k)
{
	struct btree *b;

	rcu_read_lock();
	hlist_for_each_entry_rcu(b, mca_hash(c, k), hash)
		if (PTR_HASH(c, &b->key) == PTR_HASH(c, k))
			goto out;
	b = NULL;
out:
	rcu_read_unlock();
	return b;
}

static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op)
{
	spin_lock(&c->btree_cannibalize_lock);
	if (likely(c->btree_cache_alloc_lock == NULL)) {
		c->btree_cache_alloc_lock = current;
	} else if (c->btree_cache_alloc_lock != current) {
		if (op)
			prepare_to_wait(&c->btree_cache_wait, &op->wait,
					TASK_UNINTERRUPTIBLE);
		spin_unlock(&c->btree_cannibalize_lock);
		return -EINTR;
	}
	spin_unlock(&c->btree_cannibalize_lock);

	return 0;
}

static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
				     struct bkey *k)
{
	struct btree *b;

	trace_bcache_btree_cache_cannibalize(c);

	if (mca_cannibalize_lock(c, op))
		return ERR_PTR(-EINTR);

	list_for_each_entry_reverse(b, &c->btree_cache, list)
		if (!mca_reap(b, btree_order(k), false))
			return b;

	list_for_each_entry_reverse(b, &c->btree_cache, list)
		if (!mca_reap(b, btree_order(k), true))
			return b;

	WARN(1, "btree cache cannibalize failed\n");
	return ERR_PTR(-ENOMEM);
}

/*
 * We can only have one thread cannibalizing other cached btree nodes at a time,
 * or we'll deadlock. We use an open coded mutex to ensure that, which a
 * cannibalize_bucket() will take. This means every time we unlock the root of
 * the btree, we need to release this lock if we have it held.
 */
void bch_cannibalize_unlock(struct cache_set *c)
{
	spin_lock(&c->btree_cannibalize_lock);
	if (c->btree_cache_alloc_lock == current) {
		c->btree_cache_alloc_lock = NULL;
		wake_up(&c->btree_cache_wait);
	}
	spin_unlock(&c->btree_cannibalize_lock);
}

static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op,
			       struct bkey *k, int level)
{
	struct btree *b;

	BUG_ON(current->bio_list);

	lockdep_assert_held(&c->bucket_lock);

	if (mca_find(c, k))
		return NULL;

	/* btree_free() doesn't free memory; it sticks the node on the end of
	 * the list. Check if there's any freed nodes there:
	 */
	list_for_each_entry(b, &c->btree_cache_freeable, list)
		if (!mca_reap(b, btree_order(k), false))
			goto out;

	/* We never free struct btree itself, just the memory that holds the on
	 * disk node.
	 * Check the freed list before allocating a new one:
	 */
	list_for_each_entry(b, &c->btree_cache_freed, list)
		if (!mca_reap(b, 0, false)) {
			mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
			if (!b->keys.set[0].data)
				goto err;
			else
				goto out;
		}

	b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO);
	if (!b)
		goto err;

	BUG_ON(!down_write_trylock(&b->lock));
	if (!b->keys.set->data)
		goto err;
out:
	BUG_ON(b->io_mutex.count != 1);

	bkey_copy(&b->key, k);
	list_move(&b->list, &c->btree_cache);
	hlist_del_init_rcu(&b->hash);
	hlist_add_head_rcu(&b->hash, mca_hash(c, k));

	lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
	b->parent = (void *) ~0UL;
	b->flags = 0;
	b->written = 0;
	b->level = level;

	if (!b->level)
		bch_btree_keys_init(&b->keys, &bch_extent_keys_ops,
				    &b->c->expensive_debug_checks);
	else
		bch_btree_keys_init(&b->keys, &bch_btree_keys_ops,
				    &b->c->expensive_debug_checks);

	return b;
err:
	if (b)
		rw_unlock(true, b);

	b = mca_cannibalize(c, op, k);
	if (!IS_ERR(b))
		goto out;

	return b;
}

/*
 * bch_btree_node_get - find a btree node in the cache and lock it, reading it
 * in from disk if necessary.
 *
 * If IO is necessary and running under submit_bio_noacct, returns -EAGAIN.
 *
 * The btree node will have either a read or a write lock held, depending on
 * level and op->lock.
 *
 * Note: Only an error code or a btree pointer will be returned; it is
 * unnecessary for callers to check for a NULL pointer.
 */
struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
				 struct bkey *k, int level, bool write,
				 struct btree *parent)
{
	int i = 0;
	struct btree *b;

	BUG_ON(level < 0);
retry:
	b = mca_find(c, k);

	if (!b) {
		if (current->bio_list)
			return ERR_PTR(-EAGAIN);

		mutex_lock(&c->bucket_lock);
		b = mca_alloc(c, op, k, level);
		mutex_unlock(&c->bucket_lock);

		if (!b)
			goto retry;
		if (IS_ERR(b))
			return b;

		bch_btree_node_read(b);

		if (!write)
			downgrade_write(&b->lock);
	} else {
		rw_lock(write, b, level);
		if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) {
			rw_unlock(write, b);
			goto retry;
		}
		BUG_ON(b->level != level);
	}

	if (btree_node_io_error(b)) {
		rw_unlock(write, b);
		return ERR_PTR(-EIO);
	}

	BUG_ON(!b->written);

	b->parent = parent;

	for (; i <= b->keys.nsets && b->keys.set[i].size; i++) {
		prefetch(b->keys.set[i].tree);
		prefetch(b->keys.set[i].data);
	}

	for (; i <= b->keys.nsets; i++)
		prefetch(b->keys.set[i].data);

	return b;
}

static void btree_node_prefetch(struct btree *parent, struct bkey *k)
{
	struct btree *b;

	mutex_lock(&parent->c->bucket_lock);
	b = mca_alloc(parent->c, NULL, k, parent->level - 1);
	mutex_unlock(&parent->c->bucket_lock);

	if (!IS_ERR_OR_NULL(b)) {
		b->parent = parent;
		bch_btree_node_read(b);
		rw_unlock(true, b);
	}
}

/* Btree alloc */

static void btree_node_free(struct btree *b)
{
	trace_bcache_btree_node_free(b);

	BUG_ON(b == b->c->root);

retry:
	mutex_lock(&b->write_lock);
	/*
	 * If the btree node is selected and flushing in btree_flush_write(),
	 * delay and retry until the BTREE_NODE_journal_flush bit is cleared;
	 * only then is it safe to free the btree node here. Otherwise freeing
	 * it would race with btree_flush_write().
	 */
	if (btree_node_journal_flush(b)) {
		mutex_unlock(&b->write_lock);
		pr_debug("bnode %p journal_flush set, retry\n", b);
		udelay(1);
		goto retry;
	}

	if (btree_node_dirty(b)) {
		btree_complete_write(b, btree_current_write(b));
		clear_bit(BTREE_NODE_dirty, &b->flags);
	}

	mutex_unlock(&b->write_lock);

	cancel_delayed_work(&b->work);

	mutex_lock(&b->c->bucket_lock);
	bch_bucket_free(b->c, &b->key);
	mca_bucket_free(b);
	mutex_unlock(&b->c->bucket_lock);
}

/*
 * Only an error code or a btree pointer will be returned; it is unnecessary
 * for callers to check for a NULL pointer.
 */
struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
				     int level, bool wait,
				     struct btree *parent)
{
	BKEY_PADDED(key) k;
	struct btree *b;

	mutex_lock(&c->bucket_lock);
retry:
	/* return ERR_PTR(-EAGAIN) when it fails */
	b = ERR_PTR(-EAGAIN);
	if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, wait))
		goto err;

	bkey_put(c, &k.key);
	SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);

	b = mca_alloc(c, op, &k.key, level);
	if (IS_ERR(b))
		goto err_free;

	if (!b) {
		cache_bug(c,
			  "Tried to allocate bucket that was in btree cache");
		goto retry;
	}

	b->parent = parent;
	bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->cache->sb));

	mutex_unlock(&c->bucket_lock);

	trace_bcache_btree_node_alloc(b);
	return b;
err_free:
	bch_bucket_free(c, &k.key);
err:
	mutex_unlock(&c->bucket_lock);

	trace_bcache_btree_node_alloc_fail(c);
	return b;
}

static struct btree *bch_btree_node_alloc(struct cache_set *c,
					  struct btree_op *op, int level,
					  struct btree *parent)
{
	return __bch_btree_node_alloc(c, op, level, op != NULL, parent);
}

static struct btree *btree_node_alloc_replacement(struct btree *b,
						  struct btree_op *op)
{
	struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent);

	if (!IS_ERR(n)) {
		mutex_lock(&n->write_lock);
		bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
		bkey_copy_key(&n->key, &b->key);
		mutex_unlock(&n->write_lock);
	}

	return n;
}

static void make_btree_freeing_key(struct btree *b, struct bkey *k)
{
	unsigned int i;

	mutex_lock(&b->c->bucket_lock);

	atomic_inc(&b->c->prio_blocked);

	bkey_copy(k, &b->key);
	bkey_copy_key(k, &ZERO_KEY);

	for (i = 0; i < KEY_PTRS(k); i++)
		SET_PTR_GEN(k, i,
			    bch_inc_gen(b->c->cache,
					PTR_BUCKET(b->c, &b->key, i)));

	mutex_unlock(&b->c->bucket_lock);
}

static int btree_check_reserve(struct btree *b, struct btree_op *op)
{
	struct cache_set *c = b->c;
	struct cache *ca = c->cache;
	unsigned int reserve = (c->root->level - b->level) * 2 + 1;

	mutex_lock(&c->bucket_lock);

	if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
		if (op)
			prepare_to_wait(&c->btree_cache_wait, &op->wait,
					TASK_UNINTERRUPTIBLE);
		mutex_unlock(&c->bucket_lock);
		return -EINTR;
	}

	mutex_unlock(&c->bucket_lock);

	return mca_cannibalize_lock(b->c, op);
}

/* Garbage collection */

static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
				    struct bkey *k)
{
	uint8_t stale = 0;
	unsigned int i;
	struct bucket *g;

	/*
	 * ptr_invalid() can't return true for the keys that mark btree nodes as
	 * freed, but since ptr_bad() returns true we'll never actually use them
	 * for anything and thus we don't want to mark their pointers here
	 */
	if (!bkey_cmp(k, &ZERO_KEY))
		return stale;

	for (i = 0; i < KEY_PTRS(k); i++) {
		if (!ptr_available(c, k, i))
			continue;

		g = PTR_BUCKET(c, k, i);

		if (gen_after(g->last_gc, PTR_GEN(k, i)))
			g->last_gc = PTR_GEN(k, i);

		if (ptr_stale(c, k, i)) {
			stale = max(stale, ptr_stale(c, k, i));
			continue;
		}

		cache_bug_on(GC_MARK(g) &&
			     (GC_MARK(g) == GC_MARK_METADATA) != (level != 0),
			     c, "inconsistent ptrs: mark = %llu, level = %i",
			     GC_MARK(g), level);

		if (level)
			SET_GC_MARK(g, GC_MARK_METADATA);
		else if (KEY_DIRTY(k))
			SET_GC_MARK(g, GC_MARK_DIRTY);
		else if (!GC_MARK(g))
			SET_GC_MARK(g, GC_MARK_RECLAIMABLE);

		/* guard against overflow */
		SET_GC_SECTORS_USED(g, min_t(unsigned int,
					     GC_SECTORS_USED(g) + KEY_SIZE(k),
					     MAX_GC_SECTORS_USED));

		BUG_ON(!GC_SECTORS_USED(g));
	}

	return stale;
}

#define btree_mark_key(b, k)	__bch_btree_mark_key(b->c, b->level, k)

void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
{
	unsigned int i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i) &&
		    !ptr_stale(c, k, i)) {
			struct bucket *b = PTR_BUCKET(c, k, i);

			b->gen = PTR_GEN(k, i);

			if (level && bkey_cmp(k, &ZERO_KEY))
				b->prio = BTREE_PRIO;
			else if (!level && b->prio == BTREE_PRIO)
				b->prio = INITIAL_PRIO;
		}

	__bch_btree_mark_key(c, level, k);
}

void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats)
{
	stats->in_use = (c->nbuckets - c->avail_nbuckets) * 100 / c->nbuckets;
}

static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
{
	uint8_t stale = 0;
	unsigned int keys = 0, good_keys = 0;
	struct bkey *k;
	struct btree_iter_stack iter;
	struct bset_tree *t;

	gc->nodes++;

	for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
		stale = max(stale, btree_mark_key(b, k));
		keys++;

		if (bch_ptr_bad(&b->keys, k))
			continue;

		gc->key_bytes += bkey_u64s(k);
		gc->nkeys++;
		good_keys++;

		gc->data += KEY_SIZE(k);
	}

	for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++)
		btree_bug_on(t->size &&
			     bset_written(&b->keys, t) &&
			     bkey_cmp(&b->key, &t->end) < 0,
			     b, "found short btree key in gc");

	if (b->c->gc_always_rewrite)
		return true;

	if (stale > 10)
		return true;

	if ((keys - good_keys) * 2 > keys)
		return true;

	return false;
}

#define GC_MERGE_NODES	4U

struct gc_merge_info {
	struct btree	*b;
	unsigned int	keys;
};

static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
				 struct keylist *insert_keys,
				 atomic_t *journal_ref,
				 struct bkey *replace_key);

static int btree_gc_coalesce(struct btree *b, struct btree_op
*op, 1357 struct gc_stat *gc, struct gc_merge_info *r) 1358 { 1359 unsigned int i, nodes = 0, keys = 0, blocks; 1360 struct btree *new_nodes[GC_MERGE_NODES]; 1361 struct keylist keylist; 1362 struct closure cl; 1363 struct bkey *k; 1364 1365 bch_keylist_init(&keylist); 1366 1367 if (btree_check_reserve(b, NULL)) 1368 return 0; 1369 1370 memset(new_nodes, 0, sizeof(new_nodes)); 1371 closure_init_stack(&cl); 1372 1373 while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b)) 1374 keys += r[nodes++].keys; 1375 1376 blocks = btree_default_blocks(b->c) * 2 / 3; 1377 1378 if (nodes < 2 || 1379 __set_blocks(b->keys.set[0].data, keys, 1380 block_bytes(b->c->cache)) > blocks * (nodes - 1)) 1381 return 0; 1382 1383 for (i = 0; i < nodes; i++) { 1384 new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL); 1385 if (IS_ERR(new_nodes[i])) 1386 goto out_nocoalesce; 1387 } 1388 1389 /* 1390 * We have to check the reserve here, after we've allocated our new 1391 * nodes, to make sure the insert below will succeed - we also check 1392 * before as an optimization to potentially avoid a bunch of expensive 1393 * allocs/sorts 1394 */ 1395 if (btree_check_reserve(b, NULL)) 1396 goto out_nocoalesce; 1397 1398 for (i = 0; i < nodes; i++) 1399 mutex_lock(&new_nodes[i]->write_lock); 1400 1401 for (i = nodes - 1; i > 0; --i) { 1402 struct bset *n1 = btree_bset_first(new_nodes[i]); 1403 struct bset *n2 = btree_bset_first(new_nodes[i - 1]); 1404 struct bkey *k, *last = NULL; 1405 1406 keys = 0; 1407 1408 if (i > 1) { 1409 for (k = n2->start; 1410 k < bset_bkey_last(n2); 1411 k = bkey_next(k)) { 1412 if (__set_blocks(n1, n1->keys + keys + 1413 bkey_u64s(k), 1414 block_bytes(b->c->cache)) > blocks) 1415 break; 1416 1417 last = k; 1418 keys += bkey_u64s(k); 1419 } 1420 } else { 1421 /* 1422 * Last node we're not getting rid of - we're getting 1423 * rid of the node at r[0]. 
Have to try and fit all of 1424 * the remaining keys into this node; we can't ensure 1425 * they will always fit due to rounding and variable 1426 * length keys (shouldn't be possible in practice, 1427 * though) 1428 */ 1429 if (__set_blocks(n1, n1->keys + n2->keys, 1430 block_bytes(b->c->cache)) > 1431 btree_blocks(new_nodes[i])) 1432 goto out_unlock_nocoalesce; 1433 1434 keys = n2->keys; 1435 /* Take the key of the node we're getting rid of */ 1436 last = &r->b->key; 1437 } 1438 1439 BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c->cache)) > 1440 btree_blocks(new_nodes[i])); 1441 1442 if (last) 1443 bkey_copy_key(&new_nodes[i]->key, last); 1444 1445 memcpy(bset_bkey_last(n1), 1446 n2->start, 1447 (void *) bset_bkey_idx(n2, keys) - (void *) n2->start); 1448 1449 n1->keys += keys; 1450 r[i].keys = n1->keys; 1451 1452 memmove(n2->start, 1453 bset_bkey_idx(n2, keys), 1454 (void *) bset_bkey_last(n2) - 1455 (void *) bset_bkey_idx(n2, keys)); 1456 1457 n2->keys -= keys; 1458 1459 if (__bch_keylist_realloc(&keylist, 1460 bkey_u64s(&new_nodes[i]->key))) 1461 goto out_unlock_nocoalesce; 1462 1463 bch_btree_node_write(new_nodes[i], &cl); 1464 bch_keylist_add(&keylist, &new_nodes[i]->key); 1465 } 1466 1467 for (i = 0; i < nodes; i++) 1468 mutex_unlock(&new_nodes[i]->write_lock); 1469 1470 closure_sync(&cl); 1471 1472 /* We emptied out this node */ 1473 BUG_ON(btree_bset_first(new_nodes[0])->keys); 1474 btree_node_free(new_nodes[0]); 1475 rw_unlock(true, new_nodes[0]); 1476 new_nodes[0] = NULL; 1477 1478 for (i = 0; i < nodes; i++) { 1479 if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key))) 1480 goto out_nocoalesce; 1481 1482 make_btree_freeing_key(r[i].b, keylist.top); 1483 bch_keylist_push(&keylist); 1484 } 1485 1486 bch_btree_insert_node(b, op, &keylist, NULL, NULL); 1487 BUG_ON(!bch_keylist_empty(&keylist)); 1488 1489 for (i = 0; i < nodes; i++) { 1490 btree_node_free(r[i].b); 1491 rw_unlock(true, r[i].b); 1492 1493 r[i].b = new_nodes[i]; 1494 } 1495 1496 memmove(r, r + 1, sizeof(r[0]) * (nodes - 1)); 1497 r[nodes - 1].b = ERR_PTR(-EINTR); 1498 1499 trace_bcache_btree_gc_coalesce(nodes); 1500 gc->nodes--; 1501 1502 bch_keylist_free(&keylist); 1503 1504 /* Invalidated our iterator */ 1505 return -EINTR; 1506 1507 out_unlock_nocoalesce: 1508 for (i = 0; i < nodes; i++) 1509 mutex_unlock(&new_nodes[i]->write_lock); 1510 1511 out_nocoalesce: 1512 closure_sync(&cl); 1513 1514 while ((k = bch_keylist_pop(&keylist))) 1515 if (!bkey_cmp(k, &ZERO_KEY)) 1516 atomic_dec(&b->c->prio_blocked); 1517 bch_keylist_free(&keylist); 1518 1519 for (i = 0; i < nodes; i++) 1520 if (!IS_ERR_OR_NULL(new_nodes[i])) { 1521 btree_node_free(new_nodes[i]); 1522 rw_unlock(true, new_nodes[i]); 1523 } 1524 return 0; 1525 } 1526 1527 static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op, 1528 struct btree *replace) 1529 { 1530 struct keylist keys; 1531 struct btree *n; 1532 1533 if (btree_check_reserve(b, NULL)) 1534 return 0; 1535 1536 n = btree_node_alloc_replacement(replace, NULL); 1537 if (IS_ERR(n)) 1538 return 0; 1539 1540 /* recheck reserve after allocating replacement node */ 1541 if (btree_check_reserve(b, NULL)) { 1542 btree_node_free(n); 1543 rw_unlock(true, n); 1544 return 0; 1545 } 1546 1547 bch_btree_node_write_sync(n); 1548 1549 bch_keylist_init(&keys); 1550 bch_keylist_add(&keys, &n->key); 1551 1552 make_btree_freeing_key(replace, keys.top); 1553 bch_keylist_push(&keys); 1554 1555 bch_btree_insert_node(b, op, &keys, NULL, NULL); 1556 BUG_ON(!bch_keylist_empty(&keys)); 1557 1558 
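	/*
	 * The replacement node has been written and its key inserted into the
	 * parent, so the old node can now be freed.
	 */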
	btree_node_free(replace);
	rw_unlock(true, n);

	/* Invalidated our iterator */
	return -EINTR;
}

static unsigned int btree_gc_count_keys(struct btree *b)
{
	struct bkey *k;
	struct btree_iter_stack iter;
	unsigned int ret = 0;

	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
		ret += bkey_u64s(k);

	return ret;
}

static size_t btree_gc_min_nodes(struct cache_set *c)
{
	size_t min_nodes;

	/*
	 * Incremental GC pauses for 100ms whenever front-side I/O arrives, so
	 * if GC only processed a constant number (100) of nodes per pass, a
	 * large btree would keep GC running for a very long time and the
	 * front-side I/Os would exhaust the free buckets (no new bucket can be
	 * allocated during GC) and block again. Instead of a constant, GC
	 * therefore scales the batch size with the number of btree nodes by
	 * dividing the work into a constant number (100) of passes: with many
	 * btree nodes each pass processes more nodes, with few nodes it
	 * processes fewer (but never less than MIN_GC_NODES).
	 */
	min_nodes = c->gc_stats.nodes / MAX_GC_TIMES;
	if (min_nodes < MIN_GC_NODES)
		min_nodes = MIN_GC_NODES;

	return min_nodes;
}


static int btree_gc_recurse(struct btree *b, struct btree_op *op,
			    struct closure *writes, struct gc_stat *gc)
{
	int ret = 0;
	bool should_rewrite;
	struct bkey *k;
	struct btree_iter_stack iter;
	struct gc_merge_info r[GC_MERGE_NODES];
	struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1;

	bch_btree_iter_stack_init(&b->keys, &iter, &b->c->gc_done);

	for (i = r; i < r + ARRAY_SIZE(r); i++)
		i->b = ERR_PTR(-EINTR);

	while (1) {
		k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
					       bch_ptr_bad);
		if (k) {
			r->b = bch_btree_node_get(b->c, op, k, b->level - 1,
						  true, b);
			if (IS_ERR(r->b)) {
				ret = PTR_ERR(r->b);
				break;
			}

			r->keys = btree_gc_count_keys(r->b);

			ret = btree_gc_coalesce(b, op, gc, r);
			if (ret)
				break;
		}

		if (!last->b)
			break;

		if (!IS_ERR(last->b)) {
			should_rewrite = btree_gc_mark_node(last->b, gc);
			if (should_rewrite) {
				ret = btree_gc_rewrite_node(b, op, last->b);
				if (ret)
					break;
			}

			if (last->b->level) {
				ret = btree_gc_recurse(last->b, op, writes, gc);
				if (ret)
					break;
			}

			bkey_copy_key(&b->c->gc_done, &last->b->key);

			/*
			 * Must flush leaf nodes before gc ends, since replace
			 * operations aren't journalled
			 */
			mutex_lock(&last->b->write_lock);
			if (btree_node_dirty(last->b))
				bch_btree_node_write(last->b, writes);
			mutex_unlock(&last->b->write_lock);
			rw_unlock(true, last->b);
		}

		memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1));
		r->b = NULL;

		if (atomic_read(&b->c->search_inflight) &&
		    gc->nodes >= gc->nodes_pre + btree_gc_min_nodes(b->c)) {
			gc->nodes_pre = gc->nodes;
			ret = -EAGAIN;
			break;
		}

		if (need_resched()) {
			ret = -EAGAIN;
			break;
		}
	}

	for (i = r; i < r + ARRAY_SIZE(r); i++)
		if (!IS_ERR_OR_NULL(i->b)) {
			mutex_lock(&i->b->write_lock);
			if (btree_node_dirty(i->b))
				bch_btree_node_write(i->b, writes);
			mutex_unlock(&i->b->write_lock);
rw_unlock(true, i->b); 1689 } 1690 1691 return ret; 1692 } 1693 1694 static int bch_btree_gc_root(struct btree *b, struct btree_op *op, 1695 struct closure *writes, struct gc_stat *gc) 1696 { 1697 struct btree *n = NULL; 1698 int ret = 0; 1699 bool should_rewrite; 1700 1701 should_rewrite = btree_gc_mark_node(b, gc); 1702 if (should_rewrite) { 1703 n = btree_node_alloc_replacement(b, NULL); 1704 1705 if (!IS_ERR(n)) { 1706 bch_btree_node_write_sync(n); 1707 1708 bch_btree_set_root(n); 1709 btree_node_free(b); 1710 rw_unlock(true, n); 1711 1712 return -EINTR; 1713 } 1714 } 1715 1716 __bch_btree_mark_key(b->c, b->level + 1, &b->key); 1717 1718 if (b->level) { 1719 ret = btree_gc_recurse(b, op, writes, gc); 1720 if (ret) 1721 return ret; 1722 } 1723 1724 bkey_copy_key(&b->c->gc_done, &b->key); 1725 1726 return ret; 1727 } 1728 1729 static void btree_gc_start(struct cache_set *c) 1730 { 1731 struct cache *ca; 1732 struct bucket *b; 1733 1734 if (!c->gc_mark_valid) 1735 return; 1736 1737 mutex_lock(&c->bucket_lock); 1738 1739 c->gc_mark_valid = 0; 1740 c->gc_done = ZERO_KEY; 1741 1742 ca = c->cache; 1743 for_each_bucket(b, ca) { 1744 b->last_gc = b->gen; 1745 if (!atomic_read(&b->pin)) { 1746 SET_GC_MARK(b, 0); 1747 SET_GC_SECTORS_USED(b, 0); 1748 } 1749 } 1750 1751 mutex_unlock(&c->bucket_lock); 1752 } 1753 1754 static void bch_btree_gc_finish(struct cache_set *c) 1755 { 1756 struct bucket *b; 1757 struct cache *ca; 1758 unsigned int i, j; 1759 uint64_t *k; 1760 1761 mutex_lock(&c->bucket_lock); 1762 1763 set_gc_sectors(c); 1764 c->gc_mark_valid = 1; 1765 c->need_gc = 0; 1766 1767 for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++) 1768 SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i), 1769 GC_MARK_METADATA); 1770 1771 /* don't reclaim buckets to which writeback keys point */ 1772 rcu_read_lock(); 1773 for (i = 0; i < c->devices_max_used; i++) { 1774 struct bcache_device *d = c->devices[i]; 1775 struct cached_dev *dc; 1776 struct keybuf_key *w, *n; 1777 1778 if (!d || UUID_FLASH_ONLY(&c->uuids[i])) 1779 continue; 1780 dc = container_of(d, struct cached_dev, disk); 1781 1782 spin_lock(&dc->writeback_keys.lock); 1783 rbtree_postorder_for_each_entry_safe(w, n, 1784 &dc->writeback_keys.keys, node) 1785 for (j = 0; j < KEY_PTRS(&w->key); j++) 1786 SET_GC_MARK(PTR_BUCKET(c, &w->key, j), 1787 GC_MARK_DIRTY); 1788 spin_unlock(&dc->writeback_keys.lock); 1789 } 1790 rcu_read_unlock(); 1791 1792 c->avail_nbuckets = 0; 1793 1794 ca = c->cache; 1795 ca->invalidate_needs_gc = 0; 1796 1797 for (k = ca->sb.d; k < ca->sb.d + ca->sb.keys; k++) 1798 SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA); 1799 1800 for (k = ca->prio_buckets; 1801 k < ca->prio_buckets + prio_buckets(ca) * 2; k++) 1802 SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA); 1803 1804 for_each_bucket(b, ca) { 1805 c->need_gc = max(c->need_gc, bucket_gc_gen(b)); 1806 1807 if (atomic_read(&b->pin)) 1808 continue; 1809 1810 BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b)); 1811 1812 if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE) 1813 c->avail_nbuckets++; 1814 } 1815 1816 mutex_unlock(&c->bucket_lock); 1817 } 1818 1819 static void bch_btree_gc(struct cache_set *c) 1820 { 1821 int ret; 1822 struct gc_stat stats; 1823 struct closure writes; 1824 struct btree_op op; 1825 uint64_t start_time = local_clock(); 1826 1827 trace_bcache_gc_start(c); 1828 1829 memset(&stats, 0, sizeof(struct gc_stat)); 1830 closure_init_stack(&writes); 1831 bch_btree_op_init(&op, SHRT_MAX); 1832 1833 btree_gc_start(c); 1834 1835 /* if CACHE_SET_IO_DISABLE set, gc thread should stop 
too */
	do {
		ret = bcache_btree_root(gc_root, c, &op, &writes, &stats);
		closure_sync(&writes);
		cond_resched();

		if (ret == -EAGAIN)
			schedule_timeout_interruptible(msecs_to_jiffies
						       (GC_SLEEP_MS));
		else if (ret)
			pr_warn("gc failed!\n");
	} while (ret && !test_bit(CACHE_SET_IO_DISABLE, &c->flags));

	bch_btree_gc_finish(c);
	wake_up_allocators(c);

	bch_time_stats_update(&c->btree_gc_time, start_time);

	stats.key_bytes *= sizeof(uint64_t);
	stats.data <<= 9;
	bch_update_bucket_in_use(c, &stats);
	memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));

	trace_bcache_gc_end(c);

	bch_moving_gc(c);
}

static bool gc_should_run(struct cache_set *c)
{
	struct cache *ca = c->cache;

	if (ca->invalidate_needs_gc)
		return true;

	if (atomic_read(&c->sectors_to_gc) < 0)
		return true;

	return false;
}

static int bch_gc_thread(void *arg)
{
	struct cache_set *c = arg;

	while (1) {
		wait_event_interruptible(c->gc_wait,
					 kthread_should_stop() ||
					 test_bit(CACHE_SET_IO_DISABLE, &c->flags) ||
					 gc_should_run(c));

		if (kthread_should_stop() ||
		    test_bit(CACHE_SET_IO_DISABLE, &c->flags))
			break;

		set_gc_sectors(c);
		bch_btree_gc(c);
	}

	wait_for_kthread_stop();
	return 0;
}

int bch_gc_thread_start(struct cache_set *c)
{
	c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
	return PTR_ERR_OR_ZERO(c->gc_thread);
}

/* Initial partial gc */

static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
{
	int ret = 0;
	struct bkey *k, *p = NULL;
	struct btree_iter_stack iter;

	for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
		bch_initial_mark_key(b->c, b->level, k);

	bch_initial_mark_key(b->c, b->level + 1, &b->key);

	if (b->level) {
		bch_btree_iter_stack_init(&b->keys, &iter, NULL);

		do {
			k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
						       bch_ptr_bad);
			if (k) {
				btree_node_prefetch(b, k);
				/*
				 * initialize c->gc_stats.nodes
				 * for incremental GC
				 */
				b->c->gc_stats.nodes++;
			}

			if (p)
				ret = bcache_btree(check_recurse, p, b, op);

			p = k;
		} while (p && !ret);
	}

	return ret;
}


static int bch_btree_check_thread(void *arg)
{
	int ret;
	struct btree_check_info *info = arg;
	struct btree_check_state *check_state = info->state;
	struct cache_set *c = check_state->c;
	struct btree_iter_stack iter;
	struct bkey *k, *p;
	int cur_idx, prev_idx, skip_nr;

	k = p = NULL;
	cur_idx = prev_idx = 0;
	ret = 0;

	/* root node keys are checked before the threads are created */
	bch_btree_iter_stack_init(&c->root->keys, &iter, NULL);
	k = bch_btree_iter_next_filter(&iter.iter, &c->root->keys, bch_ptr_bad);
	BUG_ON(!k);

	p = k;
	while (k) {
		/*
		 * Fetch a root node key index, skip the keys which
		 * should be fetched by other threads, then check the
		 * sub-tree indexed by the fetched key.
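		 * (skip_nr below is the distance from the previously handled
		 * index, so each thread works on a disjoint set of sub-trees.)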
		 */
		spin_lock(&check_state->idx_lock);
		cur_idx = check_state->key_idx;
		check_state->key_idx++;
		spin_unlock(&check_state->idx_lock);

		skip_nr = cur_idx - prev_idx;

		while (skip_nr) {
			k = bch_btree_iter_next_filter(&iter.iter,
						       &c->root->keys,
						       bch_ptr_bad);
			if (k)
				p = k;
			else {
				/*
				 * No more keys to check in the root node;
				 * the current checking threads are enough,
				 * stop creating more.
				 */
				atomic_set(&check_state->enough, 1);
				/* Update check_state->enough earlier */
				smp_mb__after_atomic();
				goto out;
			}
			skip_nr--;
			cond_resched();
		}

		if (p) {
			struct btree_op op;

			btree_node_prefetch(c->root, p);
			c->gc_stats.nodes++;
			bch_btree_op_init(&op, 0);
			ret = bcache_btree(check_recurse, p, c->root, &op);
			/*
			 * The op may have been added to cache_set's
			 * btree_cache_wait in mca_cannibalize(); make sure it
			 * is removed from that list and btree_cache_alloc_lock
			 * is released before the op memory is freed.
			 * Otherwise the btree_cache_wait list would be corrupted.
			 */
			bch_cannibalize_unlock(c);
			finish_wait(&c->btree_cache_wait, &(&op)->wait);
			if (ret)
				goto out;
		}
		p = NULL;
		prev_idx = cur_idx;
		cond_resched();
	}

out:
	info->result = ret;
	/* update check_state->started among all CPUs */
	smp_mb__before_atomic();
	if (atomic_dec_and_test(&check_state->started))
		wake_up(&check_state->wait);

	return ret;
}



static int bch_btree_chkthread_nr(void)
{
	int n = num_online_cpus()/2;

	if (n == 0)
		n = 1;
	else if (n > BCH_BTR_CHKTHREAD_MAX)
		n = BCH_BTR_CHKTHREAD_MAX;

	return n;
}

int bch_btree_check(struct cache_set *c)
{
	int ret = 0;
	int i;
	struct bkey *k = NULL;
	struct btree_iter_stack iter;
	struct btree_check_state check_state;

	/* check and mark root node keys */
	for_each_key_filter(&c->root->keys, k, &iter, bch_ptr_invalid)
		bch_initial_mark_key(c, c->root->level, k);

	bch_initial_mark_key(c, c->root->level + 1, &c->root->key);

	if (c->root->level == 0)
		return 0;

	memset(&check_state, 0, sizeof(struct btree_check_state));
	check_state.c = c;
	check_state.total_threads = bch_btree_chkthread_nr();
	check_state.key_idx = 0;
	spin_lock_init(&check_state.idx_lock);
	atomic_set(&check_state.started, 0);
	atomic_set(&check_state.enough, 0);
	init_waitqueue_head(&check_state.wait);

	rw_lock(0, c->root, c->root->level);
	/*
	 * Run multiple threads to check btree nodes in parallel; if
	 * check_state.enough is non-zero, the currently running check
	 * threads are enough, and it is unnecessary to create more.
2077 */ 2078 for (i = 0; i < check_state.total_threads; i++) { 2079 /* fetch latest check_state.enough earlier */ 2080 smp_mb__before_atomic(); 2081 if (atomic_read(&check_state.enough)) 2082 break; 2083 2084 check_state.infos[i].result = 0; 2085 check_state.infos[i].state = &check_state; 2086 2087 check_state.infos[i].thread = 2088 kthread_run(bch_btree_check_thread, 2089 &check_state.infos[i], 2090 "bch_btrchk[%d]", i); 2091 if (IS_ERR(check_state.infos[i].thread)) { 2092 pr_err("fails to run thread bch_btrchk[%d]\n", i); 2093 for (--i; i >= 0; i--) 2094 kthread_stop(check_state.infos[i].thread); 2095 ret = -ENOMEM; 2096 goto out; 2097 } 2098 atomic_inc(&check_state.started); 2099 } 2100 2101 /* 2102 * Must wait for all threads to stop. 2103 */ 2104 wait_event(check_state.wait, atomic_read(&check_state.started) == 0); 2105 2106 for (i = 0; i < check_state.total_threads; i++) { 2107 if (check_state.infos[i].result) { 2108 ret = check_state.infos[i].result; 2109 goto out; 2110 } 2111 } 2112 2113 out: 2114 rw_unlock(0, c->root); 2115 return ret; 2116 } 2117 2118 void bch_initial_gc_finish(struct cache_set *c) 2119 { 2120 struct cache *ca = c->cache; 2121 struct bucket *b; 2122 2123 bch_btree_gc_finish(c); 2124 2125 mutex_lock(&c->bucket_lock); 2126 2127 /* 2128 * We need to put some unused buckets directly on the prio freelist in 2129 * order to get the allocator thread started - it needs freed buckets in 2130 * order to rewrite the prios and gens, and it needs to rewrite prios 2131 * and gens in order to free buckets. 2132 * 2133 * This is only safe for buckets that have no live data in them, which 2134 * there should always be some of. 2135 */ 2136 for_each_bucket(b, ca) { 2137 if (fifo_full(&ca->free[RESERVE_PRIO]) && 2138 fifo_full(&ca->free[RESERVE_BTREE])) 2139 break; 2140 2141 if (bch_can_invalidate_bucket(ca, b) && 2142 !GC_MARK(b)) { 2143 __bch_invalidate_one_bucket(ca, b); 2144 if (!fifo_push(&ca->free[RESERVE_PRIO], 2145 b - ca->buckets)) 2146 fifo_push(&ca->free[RESERVE_BTREE], 2147 b - ca->buckets); 2148 } 2149 } 2150 2151 mutex_unlock(&c->bucket_lock); 2152 } 2153 2154 /* Btree insertion */ 2155 2156 static bool btree_insert_key(struct btree *b, struct bkey *k, 2157 struct bkey *replace_key) 2158 { 2159 unsigned int status; 2160 2161 BUG_ON(bkey_cmp(k, &b->key) > 0); 2162 2163 status = bch_btree_insert_key(&b->keys, k, replace_key); 2164 if (status != BTREE_INSERT_STATUS_NO_INSERT) { 2165 bch_check_keys(&b->keys, "%u for %s", status, 2166 replace_key ? 
"replace" : "insert"); 2167 2168 trace_bcache_btree_insert_key(b, k, replace_key != NULL, 2169 status); 2170 return true; 2171 } else 2172 return false; 2173 } 2174 2175 static size_t insert_u64s_remaining(struct btree *b) 2176 { 2177 long ret = bch_btree_keys_u64s_remaining(&b->keys); 2178 2179 /* 2180 * Might land in the middle of an existing extent and have to split it 2181 */ 2182 if (b->keys.ops->is_extents) 2183 ret -= KEY_MAX_U64S; 2184 2185 return max(ret, 0L); 2186 } 2187 2188 static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op, 2189 struct keylist *insert_keys, 2190 struct bkey *replace_key) 2191 { 2192 bool ret = false; 2193 int oldsize = bch_count_data(&b->keys); 2194 2195 while (!bch_keylist_empty(insert_keys)) { 2196 struct bkey *k = insert_keys->keys; 2197 2198 if (bkey_u64s(k) > insert_u64s_remaining(b)) 2199 break; 2200 2201 if (bkey_cmp(k, &b->key) <= 0) { 2202 if (!b->level) 2203 bkey_put(b->c, k); 2204 2205 ret |= btree_insert_key(b, k, replace_key); 2206 bch_keylist_pop_front(insert_keys); 2207 } else if (bkey_cmp(&START_KEY(k), &b->key) < 0) { 2208 BKEY_PADDED(key) temp; 2209 bkey_copy(&temp.key, insert_keys->keys); 2210 2211 bch_cut_back(&b->key, &temp.key); 2212 bch_cut_front(&b->key, insert_keys->keys); 2213 2214 ret |= btree_insert_key(b, &temp.key, replace_key); 2215 break; 2216 } else { 2217 break; 2218 } 2219 } 2220 2221 if (!ret) 2222 op->insert_collision = true; 2223 2224 BUG_ON(!bch_keylist_empty(insert_keys) && b->level); 2225 2226 BUG_ON(bch_count_data(&b->keys) < oldsize); 2227 return ret; 2228 } 2229 2230 static int btree_split(struct btree *b, struct btree_op *op, 2231 struct keylist *insert_keys, 2232 struct bkey *replace_key) 2233 { 2234 bool split; 2235 struct btree *n1, *n2 = NULL, *n3 = NULL; 2236 uint64_t start_time = local_clock(); 2237 struct closure cl; 2238 struct keylist parent_keys; 2239 2240 closure_init_stack(&cl); 2241 bch_keylist_init(&parent_keys); 2242 2243 if (btree_check_reserve(b, op)) { 2244 if (!b->level) 2245 return -EINTR; 2246 else 2247 WARN(1, "insufficient reserve for split\n"); 2248 } 2249 2250 n1 = btree_node_alloc_replacement(b, op); 2251 if (IS_ERR(n1)) 2252 goto err; 2253 2254 split = set_blocks(btree_bset_first(n1), 2255 block_bytes(n1->c->cache)) > (btree_blocks(b) * 4) / 5; 2256 2257 if (split) { 2258 unsigned int keys = 0; 2259 2260 trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys); 2261 2262 n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent); 2263 if (IS_ERR(n2)) 2264 goto err_free1; 2265 2266 if (!b->parent) { 2267 n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL); 2268 if (IS_ERR(n3)) 2269 goto err_free2; 2270 } 2271 2272 mutex_lock(&n1->write_lock); 2273 mutex_lock(&n2->write_lock); 2274 2275 bch_btree_insert_keys(n1, op, insert_keys, replace_key); 2276 2277 /* 2278 * Has to be a linear search because we don't have an auxiliary 2279 * search tree yet 2280 */ 2281 2282 while (keys < (btree_bset_first(n1)->keys * 3) / 5) 2283 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), 2284 keys)); 2285 2286 bkey_copy_key(&n1->key, 2287 bset_bkey_idx(btree_bset_first(n1), keys)); 2288 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys)); 2289 2290 btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys; 2291 btree_bset_first(n1)->keys = keys; 2292 2293 memcpy(btree_bset_first(n2)->start, 2294 bset_bkey_last(btree_bset_first(n1)), 2295 btree_bset_first(n2)->keys * sizeof(uint64_t)); 2296 2297 bkey_copy_key(&n2->key, &b->key); 2298 2299 bch_keylist_add(&parent_keys, 
&n2->key); 2300 bch_btree_node_write(n2, &cl); 2301 mutex_unlock(&n2->write_lock); 2302 rw_unlock(true, n2); 2303 } else { 2304 trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys); 2305 2306 mutex_lock(&n1->write_lock); 2307 bch_btree_insert_keys(n1, op, insert_keys, replace_key); 2308 } 2309 2310 bch_keylist_add(&parent_keys, &n1->key); 2311 bch_btree_node_write(n1, &cl); 2312 mutex_unlock(&n1->write_lock); 2313 2314 if (n3) { 2315 /* Depth increases, make a new root */ 2316 mutex_lock(&n3->write_lock); 2317 bkey_copy_key(&n3->key, &MAX_KEY); 2318 bch_btree_insert_keys(n3, op, &parent_keys, NULL); 2319 bch_btree_node_write(n3, &cl); 2320 mutex_unlock(&n3->write_lock); 2321 2322 closure_sync(&cl); 2323 bch_btree_set_root(n3); 2324 rw_unlock(true, n3); 2325 } else if (!b->parent) { 2326 /* Root filled up but didn't need to be split */ 2327 closure_sync(&cl); 2328 bch_btree_set_root(n1); 2329 } else { 2330 /* Split a non root node */ 2331 closure_sync(&cl); 2332 make_btree_freeing_key(b, parent_keys.top); 2333 bch_keylist_push(&parent_keys); 2334 2335 bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL); 2336 BUG_ON(!bch_keylist_empty(&parent_keys)); 2337 } 2338 2339 btree_node_free(b); 2340 rw_unlock(true, n1); 2341 2342 bch_time_stats_update(&b->c->btree_split_time, start_time); 2343 2344 return 0; 2345 err_free2: 2346 bkey_put(b->c, &n2->key); 2347 btree_node_free(n2); 2348 rw_unlock(true, n2); 2349 err_free1: 2350 bkey_put(b->c, &n1->key); 2351 btree_node_free(n1); 2352 rw_unlock(true, n1); 2353 err: 2354 WARN(1, "bcache: btree split failed (level %u)", b->level); 2355 2356 if (n3 == ERR_PTR(-EAGAIN) || 2357 n2 == ERR_PTR(-EAGAIN) || 2358 n1 == ERR_PTR(-EAGAIN)) 2359 return -EAGAIN; 2360 2361 return -ENOMEM; 2362 } 2363 2364 static int bch_btree_insert_node(struct btree *b, struct btree_op *op, 2365 struct keylist *insert_keys, 2366 atomic_t *journal_ref, 2367 struct bkey *replace_key) 2368 { 2369 struct closure cl; 2370 2371 BUG_ON(b->level && replace_key); 2372 2373 closure_init_stack(&cl); 2374 2375 mutex_lock(&b->write_lock); 2376 2377 if (write_block(b) != btree_bset_last(b) && 2378 b->keys.last_set_unwritten) 2379 bch_btree_init_next(b); /* just wrote a set */ 2380 2381 if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) { 2382 mutex_unlock(&b->write_lock); 2383 goto split; 2384 } 2385 2386 BUG_ON(write_block(b) != btree_bset_last(b)); 2387 2388 if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) { 2389 if (!b->level) 2390 bch_btree_leaf_dirty(b, journal_ref); 2391 else 2392 bch_btree_node_write(b, &cl); 2393 } 2394 2395 mutex_unlock(&b->write_lock); 2396 2397 /* wait for btree node write if necessary, after unlock */ 2398 closure_sync(&cl); 2399 2400 return 0; 2401 split: 2402 if (current->bio_list) { 2403 op->lock = b->c->root->level + 1; 2404 return -EAGAIN; 2405 } else if (op->lock <= b->c->root->level) { 2406 op->lock = b->c->root->level + 1; 2407 return -EINTR; 2408 } else { 2409 /* Invalidated all iterators */ 2410 int ret = btree_split(b, op, insert_keys, replace_key); 2411 2412 if (bch_keylist_empty(insert_keys)) 2413 return 0; 2414 else if (!ret) 2415 return -EINTR; 2416 return ret; 2417 } 2418 } 2419 2420 int bch_btree_insert_check_key(struct btree *b, struct btree_op *op, 2421 struct bkey *check_key) 2422 { 2423 int ret = -EINTR; 2424 uint64_t btree_ptr = b->key.ptr[0]; 2425 unsigned long seq = b->seq; 2426 struct keylist insert; 2427 bool upgrade = op->lock == -1; 2428 2429 bch_keylist_init(&insert); 2430 2431 if (upgrade) { 2432 
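/*
 * Upgrade from a read lock to a write lock: drop the read lock, take
 * the write lock, then verify via the saved bucket pointer and
 * sequence number that the node was not freed or reused while it was
 * unlocked.  Only our own write-lock bump (seq + 1) is acceptable;
 * anything else means someone else got in first, so we back off and
 * let the caller retry with -EINTR.
 */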
rw_unlock(false, b); 2433 rw_lock(true, b, b->level); 2434 2435 if (b->key.ptr[0] != btree_ptr || 2436 b->seq != seq + 1) { 2437 op->lock = b->level; 2438 goto out; 2439 } 2440 } 2441 2442 SET_KEY_PTRS(check_key, 1); 2443 get_random_bytes(&check_key->ptr[0], sizeof(uint64_t)); 2444 2445 SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV); 2446 2447 bch_keylist_add(&insert, check_key); 2448 2449 ret = bch_btree_insert_node(b, op, &insert, NULL, NULL); 2450 2451 BUG_ON(!ret && !bch_keylist_empty(&insert)); 2452 out: 2453 if (upgrade) 2454 downgrade_write(&b->lock); 2455 return ret; 2456 } 2457 2458 struct btree_insert_op { 2459 struct btree_op op; 2460 struct keylist *keys; 2461 atomic_t *journal_ref; 2462 struct bkey *replace_key; 2463 }; 2464 2465 static int btree_insert_fn(struct btree_op *b_op, struct btree *b) 2466 { 2467 struct btree_insert_op *op = container_of(b_op, 2468 struct btree_insert_op, op); 2469 2470 int ret = bch_btree_insert_node(b, &op->op, op->keys, 2471 op->journal_ref, op->replace_key); 2472 if (ret && !bch_keylist_empty(op->keys)) 2473 return ret; 2474 else 2475 return MAP_DONE; 2476 } 2477 2478 int bch_btree_insert(struct cache_set *c, struct keylist *keys, 2479 atomic_t *journal_ref, struct bkey *replace_key) 2480 { 2481 struct btree_insert_op op; 2482 int ret = 0; 2483 2484 BUG_ON(current->bio_list); 2485 BUG_ON(bch_keylist_empty(keys)); 2486 2487 bch_btree_op_init(&op.op, 0); 2488 op.keys = keys; 2489 op.journal_ref = journal_ref; 2490 op.replace_key = replace_key; 2491 2492 while (!ret && !bch_keylist_empty(keys)) { 2493 op.op.lock = 0; 2494 ret = bch_btree_map_leaf_nodes(&op.op, c, 2495 &START_KEY(keys->keys), 2496 btree_insert_fn); 2497 } 2498 2499 if (ret) { 2500 struct bkey *k; 2501 2502 pr_err("error %i\n", ret); 2503 2504 while ((k = bch_keylist_pop(keys))) 2505 bkey_put(c, k); 2506 } else if (op.op.insert_collision) 2507 ret = -ESRCH; 2508 2509 return ret; 2510 } 2511 2512 void bch_btree_set_root(struct btree *b) 2513 { 2514 unsigned int i; 2515 struct closure cl; 2516 2517 closure_init_stack(&cl); 2518 2519 trace_bcache_btree_set_root(b); 2520 2521 BUG_ON(!b->written); 2522 2523 for (i = 0; i < KEY_PTRS(&b->key); i++) 2524 BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO); 2525 2526 mutex_lock(&b->c->bucket_lock); 2527 list_del_init(&b->list); 2528 mutex_unlock(&b->c->bucket_lock); 2529 2530 b->c->root = b; 2531 2532 bch_journal_meta(b->c, &cl); 2533 closure_sync(&cl); 2534 } 2535 2536 /* Map across nodes or keys */ 2537 2538 static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op, 2539 struct bkey *from, 2540 btree_map_nodes_fn *fn, int flags) 2541 { 2542 int ret = MAP_CONTINUE; 2543 2544 if (b->level) { 2545 struct bkey *k; 2546 struct btree_iter_stack iter; 2547 2548 bch_btree_iter_stack_init(&b->keys, &iter, from); 2549 2550 while ((k = bch_btree_iter_next_filter(&iter.iter, &b->keys, 2551 bch_ptr_bad))) { 2552 ret = bcache_btree(map_nodes_recurse, k, b, 2553 op, from, fn, flags); 2554 from = NULL; 2555 2556 if (ret != MAP_CONTINUE) 2557 return ret; 2558 } 2559 } 2560 2561 if (!b->level || flags == MAP_ALL_NODES) 2562 ret = fn(op, b); 2563 2564 return ret; 2565 } 2566 2567 int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c, 2568 struct bkey *from, btree_map_nodes_fn *fn, int flags) 2569 { 2570 return bcache_btree_root(map_nodes_recurse, c, op, from, fn, flags); 2571 } 2572 2573 int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op, 2574 struct bkey *from, btree_map_keys_fn *fn, 2575 int flags) 2576 { 2577 
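/*
 * Iterate keys in sorted order starting from 'from': on a leaf node
 * each good key is handed to fn(), on an interior node we recurse
 * into the child the key points to.  'from' only constrains the
 * first key visited and is cleared afterwards; any return value
 * other than MAP_CONTINUE aborts the walk.  With MAP_END_KEY a
 * zero-sized key at the end of a leaf is also passed to fn().
 */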
int ret = MAP_CONTINUE; 2578 struct bkey *k; 2579 struct btree_iter_stack iter; 2580 2581 bch_btree_iter_stack_init(&b->keys, &iter, from); 2582 2583 while ((k = bch_btree_iter_next_filter(&iter.iter, &b->keys, 2584 bch_ptr_bad))) { 2585 ret = !b->level 2586 ? fn(op, b, k) 2587 : bcache_btree(map_keys_recurse, k, 2588 b, op, from, fn, flags); 2589 from = NULL; 2590 2591 if (ret != MAP_CONTINUE) 2592 return ret; 2593 } 2594 2595 if (!b->level && (flags & MAP_END_KEY)) 2596 ret = fn(op, b, &KEY(KEY_INODE(&b->key), 2597 KEY_OFFSET(&b->key), 0)); 2598 2599 return ret; 2600 } 2601 2602 int bch_btree_map_keys(struct btree_op *op, struct cache_set *c, 2603 struct bkey *from, btree_map_keys_fn *fn, int flags) 2604 { 2605 return bcache_btree_root(map_keys_recurse, c, op, from, fn, flags); 2606 } 2607 2608 /* Keybuf code */ 2609 2610 static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r) 2611 { 2612 /* Overlapping keys compare equal */ 2613 if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0) 2614 return -1; 2615 if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0) 2616 return 1; 2617 return 0; 2618 } 2619 2620 static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l, 2621 struct keybuf_key *r) 2622 { 2623 return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1); 2624 } 2625 2626 struct refill { 2627 struct btree_op op; 2628 unsigned int nr_found; 2629 struct keybuf *buf; 2630 struct bkey *end; 2631 keybuf_pred_fn *pred; 2632 }; 2633 2634 static int refill_keybuf_fn(struct btree_op *op, struct btree *b, 2635 struct bkey *k) 2636 { 2637 struct refill *refill = container_of(op, struct refill, op); 2638 struct keybuf *buf = refill->buf; 2639 int ret = MAP_CONTINUE; 2640 2641 if (bkey_cmp(k, refill->end) > 0) { 2642 ret = MAP_DONE; 2643 goto out; 2644 } 2645 2646 if (!KEY_SIZE(k)) /* end key */ 2647 goto out; 2648 2649 if (refill->pred(buf, k)) { 2650 struct keybuf_key *w; 2651 2652 spin_lock(&buf->lock); 2653 2654 w = array_alloc(&buf->freelist); 2655 if (!w) { 2656 spin_unlock(&buf->lock); 2657 return MAP_DONE; 2658 } 2659 2660 w->private = NULL; 2661 bkey_copy(&w->key, k); 2662 2663 if (RB_INSERT(&buf->keys, w, node, keybuf_cmp)) 2664 array_free(&buf->freelist, w); 2665 else 2666 refill->nr_found++; 2667 2668 if (array_freelist_empty(&buf->freelist)) 2669 ret = MAP_DONE; 2670 2671 spin_unlock(&buf->lock); 2672 } 2673 out: 2674 buf->last_scanned = *k; 2675 return ret; 2676 } 2677 2678 void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf, 2679 struct bkey *end, keybuf_pred_fn *pred) 2680 { 2681 struct bkey start = buf->last_scanned; 2682 struct refill refill; 2683 2684 cond_resched(); 2685 2686 bch_btree_op_init(&refill.op, -1); 2687 refill.nr_found = 0; 2688 refill.buf = buf; 2689 refill.end = end; 2690 refill.pred = pred; 2691 2692 bch_btree_map_keys(&refill.op, c, &buf->last_scanned, 2693 refill_keybuf_fn, MAP_END_KEY); 2694 2695 trace_bcache_keyscan(refill.nr_found, 2696 KEY_INODE(&start), KEY_OFFSET(&start), 2697 KEY_INODE(&buf->last_scanned), 2698 KEY_OFFSET(&buf->last_scanned)); 2699 2700 spin_lock(&buf->lock); 2701 2702 if (!RB_EMPTY_ROOT(&buf->keys)) { 2703 struct keybuf_key *w; 2704 2705 w = RB_FIRST(&buf->keys, struct keybuf_key, node); 2706 buf->start = START_KEY(&w->key); 2707 2708 w = RB_LAST(&buf->keys, struct keybuf_key, node); 2709 buf->end = w->key; 2710 } else { 2711 buf->start = MAX_KEY; 2712 buf->end = MAX_KEY; 2713 } 2714 2715 spin_unlock(&buf->lock); 2716 } 2717 2718 static void __bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w) 2719 { 2720 
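/*
 * Caller must hold buf->lock; bch_keybuf_del() below is the locking
 * wrapper.  Remove the key from the rbtree and return the
 * keybuf_key to the freelist.
 */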
rb_erase(&w->node, &buf->keys); 2721 array_free(&buf->freelist, w); 2722 } 2723 2724 void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w) 2725 { 2726 spin_lock(&buf->lock); 2727 __bch_keybuf_del(buf, w); 2728 spin_unlock(&buf->lock); 2729 } 2730 2731 bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start, 2732 struct bkey *end) 2733 { 2734 bool ret = false; 2735 struct keybuf_key *p, *w, s; 2736 2737 s.key = *start; 2738 2739 if (bkey_cmp(end, &buf->start) <= 0 || 2740 bkey_cmp(start, &buf->end) >= 0) 2741 return false; 2742 2743 spin_lock(&buf->lock); 2744 w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp); 2745 2746 while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) { 2747 p = w; 2748 w = RB_NEXT(w, node); 2749 2750 if (p->private) 2751 ret = true; 2752 else 2753 __bch_keybuf_del(buf, p); 2754 } 2755 2756 spin_unlock(&buf->lock); 2757 return ret; 2758 } 2759 2760 struct keybuf_key *bch_keybuf_next(struct keybuf *buf) 2761 { 2762 struct keybuf_key *w; 2763 2764 spin_lock(&buf->lock); 2765 2766 w = RB_FIRST(&buf->keys, struct keybuf_key, node); 2767 2768 while (w && w->private) 2769 w = RB_NEXT(w, node); 2770 2771 if (w) 2772 w->private = ERR_PTR(-EINTR); 2773 2774 spin_unlock(&buf->lock); 2775 return w; 2776 } 2777 2778 struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c, 2779 struct keybuf *buf, 2780 struct bkey *end, 2781 keybuf_pred_fn *pred) 2782 { 2783 struct keybuf_key *ret; 2784 2785 while (1) { 2786 ret = bch_keybuf_next(buf); 2787 if (ret) 2788 break; 2789 2790 if (bkey_cmp(&buf->last_scanned, end) >= 0) { 2791 pr_debug("scan finished\n"); 2792 break; 2793 } 2794 2795 bch_refill_keybuf(c, buf, end, pred); 2796 } 2797 2798 return ret; 2799 } 2800 2801 void bch_keybuf_init(struct keybuf *buf) 2802 { 2803 buf->last_scanned = MAX_KEY; 2804 buf->keys = RB_ROOT; 2805 2806 spin_lock_init(&buf->lock); 2807 array_allocator_init(&buf->freelist); 2808 } 2809 2810 void bch_btree_exit(void) 2811 { 2812 if (btree_io_wq) 2813 destroy_workqueue(btree_io_wq); 2814 } 2815 2816 int __init bch_btree_init(void) 2817 { 2818 btree_io_wq = alloc_workqueue("bch_btree_io", WQ_MEM_RECLAIM, 0); 2819 if (!btree_io_wq) 2820 return -ENOMEM; 2821 2822 return 0; 2823 } 2824
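
/*
 * Note: btree_io_wq is allocated with WQ_MEM_RECLAIM because btree
 * node writes may be required to make forward progress during memory
 * reclaim; bch_btree_exit() tears the workqueue down on unload.
 */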