// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 *
 * Uses a block device as cache for other block devices; optimized for SSDs.
 * All allocation is done in buckets, which should match the erase block size
 * of the device.
 *
 * Buckets containing cached data are kept on a heap sorted by priority;
 * bucket priority is increased on cache hit, and periodically all the buckets
 * on the heap have their priority scaled down. This currently is just used as
 * an LRU but in the future should allow for more intelligent heuristics.
 *
 * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
 * counter. Garbage collection is used to remove stale pointers.
 *
 * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
 * as keys are inserted we only sort the pages that have not yet been written.
 * When garbage collection is run, we resort the entire node.
 *
 * All configuration is done via sysfs; see Documentation/bcache.txt.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"

#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/sched/clock.h>
#include <linux/rculist.h>

#include <trace/events/bcache.h>

/*
 * Todo:
 * register_bcache: Return errors out to userspace correctly
 *
 * Writeback: don't undirty key until after a cache flush
 *
 * Create an iterator for key pointers
 *
 * On btree write error, mark bucket such that it won't be freed from the cache
 *
 * Journalling:
 *   Check for bad keys in replay
 *   Propagate barriers
 *   Refcount journal entries in journal_replay
 *
 * Garbage collection:
 *   Finish incremental gc
 *   Gc should free old UUIDs, data for invalid UUIDs
 *
 * Provide a way to list backing device UUIDs we have data cached for, and
 * probably how long it's been since we've seen them, and a way to invalidate
 * dirty data for devices that will never be attached again
 *
 * Keep 1 min/5 min/15 min statistics of how busy a block device has been, so
 * that based on that and how much dirty data we have we can keep writeback
 * from being starved
 *
 * Add a tracepoint or somesuch to watch for writeback starvation
 *
 * When btree depth > 1 and splitting an interior node, we have to make sure
 * alloc_bucket() cannot fail. This should be true but is not completely
 * obvious.
 *
 * Plugging?
 *
 * If data write is less than hard sector size of ssd, round up offset in open
 * bucket to the next whole sector
 *
 * Superblock needs to be fleshed out for multiple cache devices
 *
 * Add a sysfs tunable for the number of writeback IOs in flight
 *
 * Add a sysfs tunable for the number of open data buckets
 *
 * IO tracking: Can we track when one process is doing io on behalf of another?
 * IO tracking: Don't use just an average, weigh more recent stuff higher
 *
 * Test module load/unload
 */

#define MAX_NEED_GC		64
#define MAX_SAVE_PRIO		72

#define PTR_DIRTY_BIT		(((uint64_t) 1 << 36))

#define PTR_HASH(c, k)						\
	(((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))

#define insert_lock(s, b)	((b)->level <= (s)->lock)

/*
 * These macros are for recursing down the btree - they handle the details of
 * locking and looking up nodes in the cache for you. They're best treated as
 * mere syntax when reading code that uses them.
 *
 * op->lock determines whether we take a read or a write lock at a given depth.
 * If you've got a read lock and find that you need a write lock (i.e. you're
 * going to have to split), set op->lock and return -EINTR; btree_root() will
 * call you again and you'll have the correct lock.
 */

/**
 * btree - recurse down the btree on a specified key
 * @fn:		function to call, which will be passed the child node
 * @key:	key to recurse on
 * @b:		parent btree node
 * @op:		pointer to struct btree_op
 */
#define btree(fn, key, b, op, ...)					\
({									\
	int _r, l = (b)->level - 1;					\
	bool _w = l <= (op)->lock;					\
	struct btree *_child = bch_btree_node_get((b)->c, op, key, l,	\
						  _w, b);		\
	if (!IS_ERR(_child)) {						\
		_r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__);	\
		rw_unlock(_w, _child);					\
	} else								\
		_r = PTR_ERR(_child);					\
	_r;								\
})

/**
 * btree_root - call a function on the root of the btree
 * @fn:		function to call, which will be passed the child node
 * @c:		cache set
 * @op:		pointer to struct btree_op
 */
#define btree_root(fn, c, op, ...)					\
({									\
	int _r = -EINTR;						\
	do {								\
		struct btree *_b = (c)->root;				\
		bool _w = insert_lock(op, _b);				\
		rw_lock(_w, _b, _b->level);				\
		if (_b == (c)->root &&					\
		    _w == insert_lock(op, _b)) {			\
			_r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__);	\
		}							\
		rw_unlock(_w, _b);					\
		bch_cannibalize_unlock(c);				\
		if (_r == -EINTR)					\
			schedule();					\
	} while (_r == -EINTR);						\
									\
	finish_wait(&(c)->btree_cache_wait, &(op)->wait);		\
	_r;								\
})

static inline struct bset *write_block(struct btree *b)
{
	return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c);
}

static void bch_btree_init_next(struct btree *b)
{
	/* If not a leaf node, always sort */
	if (b->level && b->keys.nsets)
		bch_btree_sort(&b->keys, &b->c->sort);
	else
		bch_btree_sort_lazy(&b->keys, &b->c->sort);

	if (b->written < btree_blocks(b))
		bch_bset_init_next(&b->keys, write_block(b),
				   bset_magic(&b->c->sb));
}

/* Btree key manipulation */

void bkey_put(struct cache_set *c, struct bkey *k)
{
	unsigned i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i))
			atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
}

/* Btree IO */

static uint64_t btree_csum_set(struct btree *b, struct bset *i)
{
	uint64_t crc = b->key.ptr[0];
	void *data = (void *) i + 8, *end = bset_bkey_last(i);

	crc = bch_crc64_update(crc, data, end - data);
	return crc ^ 0xffffffffffffffffULL;
}
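
/*
 * Parse and validate the bsets of a just-read btree node: walk the node
 * block by block checking version, magic, bounds and checksum, push each
 * valid set onto an iterator, then sort everything back into one set and
 * prepare the next write block. Any inconsistency marks the node with an
 * IO error and flags the cache set.
 */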
void bch_btree_node_read_done(struct btree *b)
{
	const char *err = "bad btree header";
	struct bset *i = btree_bset_first(b);
	struct btree_iter *iter;

	iter = mempool_alloc(b->c->fill_iter, GFP_NOIO);
	iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
	iter->used = 0;

#ifdef CONFIG_BCACHE_DEBUG
	iter->b = &b->keys;
#endif

	if (!i->seq)
		goto err;

	for (;
	     b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq;
	     i = write_block(b)) {
		err = "unsupported bset version";
		if (i->version > BCACHE_BSET_VERSION)
			goto err;

		err = "bad btree header";
		if (b->written + set_blocks(i, block_bytes(b->c)) >
		    btree_blocks(b))
			goto err;

		err = "bad magic";
		if (i->magic != bset_magic(&b->c->sb))
			goto err;

		err = "bad checksum";
		switch (i->version) {
		case 0:
			if (i->csum != csum_set(i))
				goto err;
			break;
		case BCACHE_BSET_VERSION:
			if (i->csum != btree_csum_set(b, i))
				goto err;
			break;
		}

		err = "empty set";
		if (i != b->keys.set[0].data && !i->keys)
			goto err;

		bch_btree_iter_push(iter, i->start, bset_bkey_last(i));

		b->written += set_blocks(i, block_bytes(b->c));
	}

	err = "corrupted btree";
	for (i = write_block(b);
	     bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
	     i = ((void *) i) + block_bytes(b->c))
		if (i->seq == b->keys.set[0].data->seq)
			goto err;

	bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);

	i = b->keys.set[0].data;
	err = "short btree key";
	if (b->keys.set[0].size &&
	    bkey_cmp(&b->key, &b->keys.set[0].end) < 0)
		goto err;

	if (b->written < btree_blocks(b))
		bch_bset_init_next(&b->keys, write_block(b),
				   bset_magic(&b->c->sb));
out:
	mempool_free(iter, b->c->fill_iter);
	return;
err:
	set_btree_node_io_error(b);
	bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys",
			    err, PTR_BUCKET_NR(b->c, &b->key, 0),
			    bset_block_offset(b, i), i->keys);
	goto out;
}

static void btree_node_read_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;
	closure_put(cl);
}

static void bch_btree_node_read(struct btree *b)
{
	uint64_t start_time = local_clock();
	struct closure cl;
	struct bio *bio;

	trace_bcache_btree_read(b);

	closure_init_stack(&cl);

	bio = bch_bbio_alloc(b->c);
	bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
	bio->bi_end_io	= btree_node_read_endio;
	bio->bi_private	= &cl;
	bio->bi_opf = REQ_OP_READ | REQ_META;

	bch_bio_map(bio, b->keys.set[0].data);

	bch_submit_bbio(bio, b->c, &b->key, 0);
	closure_sync(&cl);

	if (bio->bi_status)
		set_btree_node_io_error(b);

	bch_bbio_free(bio, b->c);

	if (btree_node_io_error(b))
		goto err;

	bch_btree_node_read_done(b);
	bch_time_stats_update(&b->c->btree_read_time, start_time);

	return;
err:
	bch_cache_set_error(b->c, "io error reading bucket %zu",
			    PTR_BUCKET_NR(b->c, &b->key, 0));
}

static void btree_complete_write(struct btree *b, struct btree_write *w)
{
	if (w->prio_blocked &&
	    !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
		wake_up_allocators(b->c);

	if (w->journal) {
		atomic_dec_bug(w->journal);
		__closure_wake_up(&b->c->journal.wait);
	}

	w->prio_blocked	= 0;
	w->journal	= NULL;
}

static void btree_node_write_unlock(struct closure *cl)
{
	struct btree *b = container_of(cl, struct btree, io);

	up(&b->io_mutex);
}
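
/*
 * Completion path for btree node writes: release the bio, finish the
 * journal/prio accounting for the write that just finished, and
 * reschedule the delayed write work if the node was redirtied while the
 * write was in flight.
 */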
static void __btree_node_write_done(struct closure *cl)
{
	struct btree *b = container_of(cl, struct btree, io);
	struct btree_write *w = btree_prev_write(b);

	bch_bbio_free(b->bio, b->c);
	b->bio = NULL;
	btree_complete_write(b, w);

	if (btree_node_dirty(b))
		schedule_delayed_work(&b->work, 30 * HZ);

	closure_return_with_destructor(cl, btree_node_write_unlock);
}

static void btree_node_write_done(struct closure *cl)
{
	struct btree *b = container_of(cl, struct btree, io);

	bio_free_pages(b->bio);
	__btree_node_write_done(cl);
}

static void btree_node_write_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;
	struct btree *b = container_of(cl, struct btree, io);

	if (bio->bi_status)
		set_btree_node_io_error(b);

	bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree");
	closure_put(cl);
}
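
/*
 * Submit the current bset for writing. If a bounce buffer can be
 * allocated, the bset is copied into it so the in-memory node can keep
 * changing while the write is in flight; otherwise fall back to mapping
 * the bset directly and waiting for the write synchronously.
 */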
static void do_btree_node_write(struct btree *b)
{
	struct closure *cl = &b->io;
	struct bset *i = btree_bset_last(b);
	BKEY_PADDED(key) k;

	i->version	= BCACHE_BSET_VERSION;
	i->csum		= btree_csum_set(b, i);

	BUG_ON(b->bio);
	b->bio = bch_bbio_alloc(b->c);

	b->bio->bi_end_io	= btree_node_write_endio;
	b->bio->bi_private	= cl;
	b->bio->bi_iter.bi_size	= roundup(set_bytes(i), block_bytes(b->c));
	b->bio->bi_opf		= REQ_OP_WRITE | REQ_META | REQ_FUA;
	bch_bio_map(b->bio, i);

	/*
	 * If we're appending to a leaf node, we don't technically need FUA -
	 * this write just needs to be persisted before the next journal write,
	 * which will be marked FLUSH|FUA.
	 *
	 * Similarly if we're writing a new btree root - the pointer is going to
	 * be in the next journal entry.
	 *
	 * But if we're writing a new btree node (that isn't a root) or
	 * appending to a non leaf btree node, we need either FUA or a flush
	 * when we write the parent with the new pointer. FUA is cheaper than a
	 * flush, and writes appending to leaf nodes aren't blocking anything so
	 * just make all btree node writes FUA to keep things sane.
	 */

	bkey_copy(&k.key, &b->key);
	SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
		       bset_sector_offset(&b->keys, i));

	if (!bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
		int j;
		struct bio_vec *bv;
		void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));

		bio_for_each_segment_all(bv, b->bio, j)
			memcpy(page_address(bv->bv_page),
			       base + j * PAGE_SIZE, PAGE_SIZE);

		bch_submit_bbio(b->bio, b->c, &k.key, 0);

		continue_at(cl, btree_node_write_done, NULL);
	} else {
		b->bio->bi_vcnt = 0;
		bch_bio_map(b->bio, i);

		bch_submit_bbio(b->bio, b->c, &k.key, 0);

		closure_sync(cl);
		continue_at_nobarrier(cl, __btree_node_write_done, NULL);
	}
}

void __bch_btree_node_write(struct btree *b, struct closure *parent)
{
	struct bset *i = btree_bset_last(b);

	lockdep_assert_held(&b->write_lock);

	trace_bcache_btree_write(b);

	BUG_ON(current->bio_list);
	BUG_ON(b->written >= btree_blocks(b));
	BUG_ON(b->written && !i->keys);
	BUG_ON(btree_bset_first(b)->seq != i->seq);
	bch_check_keys(&b->keys, "writing");

	cancel_delayed_work(&b->work);

	/* If caller isn't waiting for write, parent refcount is cache set */
	down(&b->io_mutex);
	closure_init(&b->io, parent ?: &b->c->cl);

	clear_bit(BTREE_NODE_dirty, &b->flags);
	change_bit(BTREE_NODE_write_idx, &b->flags);

	do_btree_node_write(b);

	atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size,
			&PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);

	b->written += set_blocks(i, block_bytes(b->c));
}
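
/*
 * Wrapper around __bch_btree_node_write() that also verifies the node
 * when a multi-set node was just sorted down to a single set, and then
 * prepares the next write block.
 */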
void bch_btree_node_write(struct btree *b, struct closure *parent)
{
	unsigned nsets = b->keys.nsets;

	lockdep_assert_held(&b->lock);

	__bch_btree_node_write(b, parent);

	/*
	 * do verify if there was more than one set initially (i.e. we did a
	 * sort) and we sorted down to a single set:
	 */
	if (nsets && !b->keys.nsets)
		bch_btree_verify(b);

	bch_btree_init_next(b);
}

static void bch_btree_node_write_sync(struct btree *b)
{
	struct closure cl;

	closure_init_stack(&cl);

	mutex_lock(&b->write_lock);
	bch_btree_node_write(b, &cl);
	mutex_unlock(&b->write_lock);

	closure_sync(&cl);
}

static void btree_node_write_work(struct work_struct *w)
{
	struct btree *b = container_of(to_delayed_work(w), struct btree, work);

	mutex_lock(&b->write_lock);
	if (btree_node_dirty(b))
		__bch_btree_node_write(b, NULL);
	mutex_unlock(&b->write_lock);
}

static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
{
	struct bset *i = btree_bset_last(b);
	struct btree_write *w = btree_current_write(b);

	lockdep_assert_held(&b->write_lock);

	BUG_ON(!b->written);
	BUG_ON(!i->keys);

	if (!btree_node_dirty(b))
		schedule_delayed_work(&b->work, 30 * HZ);

	set_btree_node_dirty(b);

	if (journal_ref) {
		if (w->journal &&
		    journal_pin_cmp(b->c, w->journal, journal_ref)) {
			atomic_dec_bug(w->journal);
			w->journal = NULL;
		}

		if (!w->journal) {
			w->journal = journal_ref;
			atomic_inc(w->journal);
		}
	}

	/* Force write if set is too big */
	if (set_bytes(i) > PAGE_SIZE - 48 &&
	    !current->bio_list)
		bch_btree_node_write(b, NULL);
}

/*
 * Btree in memory cache - allocation/freeing
 * mca -> memory cache
 */

#define mca_reserve(c)	(((c->root && c->root->level)		\
			  ? c->root->level : 1) * 8 + 16)
#define mca_can_free(c)						\
	max_t(int, 0, c->btree_cache_used - mca_reserve(c))

static void mca_data_free(struct btree *b)
{
	BUG_ON(b->io_mutex.count != 1);

	bch_btree_keys_free(&b->keys);

	b->c->btree_cache_used--;
	list_move(&b->list, &b->c->btree_cache_freed);
}

static void mca_bucket_free(struct btree *b)
{
	BUG_ON(btree_node_dirty(b));

	b->key.ptr[0] = 0;
	hlist_del_init_rcu(&b->hash);
	list_move(&b->list, &b->c->btree_cache_freeable);
}

static unsigned btree_order(struct bkey *k)
{
	return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
}

static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
{
	if (!bch_btree_keys_alloc(&b->keys,
				  max_t(unsigned,
					ilog2(b->c->btree_pages),
					btree_order(k)),
				  gfp)) {
		b->c->btree_cache_used++;
		list_move(&b->list, &b->c->btree_cache);
	} else {
		list_move(&b->list, &b->c->btree_cache_freed);
	}
}

static struct btree *mca_bucket_alloc(struct cache_set *c,
				      struct bkey *k, gfp_t gfp)
{
	struct btree *b = kzalloc(sizeof(struct btree), gfp);
	if (!b)
		return NULL;

	init_rwsem(&b->lock);
	lockdep_set_novalidate_class(&b->lock);
	mutex_init(&b->write_lock);
	lockdep_set_novalidate_class(&b->write_lock);
	INIT_LIST_HEAD(&b->list);
	INIT_DELAYED_WORK(&b->work, btree_node_write_work);
	b->c = c;
	sema_init(&b->io_mutex, 1);

	mca_data_alloc(b, k, gfp);
	return b;
}
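
/*
 * Try to reclaim a cached node: try-lock it for write, skip it if it's
 * dirty or has IO in flight (unless @flush is set, in which case write
 * it out and wait), and return -ENOMEM if it can't be reclaimed right
 * now. On success the node is returned write locked.
 */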
static int mca_reap(struct btree *b, unsigned min_order, bool flush)
{
	struct closure cl;

	closure_init_stack(&cl);
	lockdep_assert_held(&b->c->bucket_lock);

	if (!down_write_trylock(&b->lock))
		return -ENOMEM;

	BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data);

	if (b->keys.page_order < min_order)
		goto out_unlock;

	if (!flush) {
		if (btree_node_dirty(b))
			goto out_unlock;

		if (down_trylock(&b->io_mutex))
			goto out_unlock;
		up(&b->io_mutex);
	}

	mutex_lock(&b->write_lock);
	if (btree_node_dirty(b))
		__bch_btree_node_write(b, &cl);
	mutex_unlock(&b->write_lock);

	closure_sync(&cl);

	/* wait for any in flight btree write */
	down(&b->io_mutex);
	up(&b->io_mutex);

	return 0;
out_unlock:
	rw_unlock(true, b);
	return -ENOMEM;
}

static unsigned long bch_mca_scan(struct shrinker *shrink,
				  struct shrink_control *sc)
{
	struct cache_set *c = container_of(shrink, struct cache_set, shrink);
	struct btree *b, *t;
	unsigned long i, nr = sc->nr_to_scan;
	unsigned long freed = 0;

	if (c->shrinker_disabled)
		return SHRINK_STOP;

	if (c->btree_cache_alloc_lock)
		return SHRINK_STOP;

	/* Return -1 if we can't do anything right now */
	if (sc->gfp_mask & __GFP_IO)
		mutex_lock(&c->bucket_lock);
	else if (!mutex_trylock(&c->bucket_lock))
		return -1;

	/*
	 * It's _really_ critical that we don't free too many btree nodes - we
	 * have to always leave ourselves a reserve. The reserve is how we
	 * guarantee that allocating memory for a new btree node can always
	 * succeed, so that inserting keys into the btree can always succeed and
	 * IO can always make forward progress:
	 */
	nr /= c->btree_pages;
	nr = min_t(unsigned long, nr, mca_can_free(c));

	i = 0;
	list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) {
		if (freed >= nr)
			break;

		if (++i > 3 &&
		    !mca_reap(b, 0, false)) {
			mca_data_free(b);
			rw_unlock(true, b);
			freed++;
		}
	}

	for (i = 0; (nr--) && i < c->btree_cache_used; i++) {
		if (list_empty(&c->btree_cache))
			goto out;

		b = list_first_entry(&c->btree_cache, struct btree, list);
		list_rotate_left(&c->btree_cache);

		if (!b->accessed &&
		    !mca_reap(b, 0, false)) {
			mca_bucket_free(b);
			mca_data_free(b);
			rw_unlock(true, b);
			freed++;
		} else
			b->accessed = 0;
	}
out:
	mutex_unlock(&c->bucket_lock);
	return freed;
}

static unsigned long bch_mca_count(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	struct cache_set *c = container_of(shrink, struct cache_set, shrink);

	if (c->shrinker_disabled)
		return 0;

	if (c->btree_cache_alloc_lock)
		return 0;

	return mca_can_free(c) * c->btree_pages;
}
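
/*
 * Tear down the node cache when the cache set is being freed: unregister
 * the shrinker, finish the bookkeeping for any still-dirty nodes, and
 * free every cached node.
 */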
void bch_btree_cache_free(struct cache_set *c)
{
	struct btree *b;
	struct closure cl;
	closure_init_stack(&cl);

	if (c->shrink.list.next)
		unregister_shrinker(&c->shrink);

	mutex_lock(&c->bucket_lock);

#ifdef CONFIG_BCACHE_DEBUG
	if (c->verify_data)
		list_move(&c->verify_data->list, &c->btree_cache);

	free_pages((unsigned long) c->verify_ondisk, ilog2(bucket_pages(c)));
#endif

	list_splice(&c->btree_cache_freeable,
		    &c->btree_cache);

	while (!list_empty(&c->btree_cache)) {
		b = list_first_entry(&c->btree_cache, struct btree, list);

		if (btree_node_dirty(b))
			btree_complete_write(b, btree_current_write(b));
		clear_bit(BTREE_NODE_dirty, &b->flags);

		mca_data_free(b);
	}

	while (!list_empty(&c->btree_cache_freed)) {
		b = list_first_entry(&c->btree_cache_freed,
				     struct btree, list);
		list_del(&b->list);
		cancel_delayed_work_sync(&b->work);
		kfree(b);
	}

	mutex_unlock(&c->bucket_lock);
}

int bch_btree_cache_alloc(struct cache_set *c)
{
	unsigned i;

	for (i = 0; i < mca_reserve(c); i++)
		if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
			return -ENOMEM;

	list_splice_init(&c->btree_cache,
			 &c->btree_cache_freeable);

#ifdef CONFIG_BCACHE_DEBUG
	mutex_init(&c->verify_lock);

	c->verify_ondisk = (void *)
		__get_free_pages(GFP_KERNEL, ilog2(bucket_pages(c)));

	c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);

	if (c->verify_data &&
	    c->verify_data->keys.set->data)
		list_del_init(&c->verify_data->list);
	else
		c->verify_data = NULL;
#endif

	c->shrink.count_objects = bch_mca_count;
	c->shrink.scan_objects = bch_mca_scan;
	c->shrink.seeks = 4;
	c->shrink.batch = c->btree_pages * 2;

	if (register_shrinker(&c->shrink))
		pr_warn("bcache: %s: could not register shrinker",
			__func__);

	return 0;
}

/* Btree in memory cache - hash table */

static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k)
{
	return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)];
}

static struct btree *mca_find(struct cache_set *c, struct bkey *k)
{
	struct btree *b;

	rcu_read_lock();
	hlist_for_each_entry_rcu(b, mca_hash(c, k), hash)
		if (PTR_HASH(c, &b->key) == PTR_HASH(c, k))
			goto out;
	b = NULL;
out:
	rcu_read_unlock();
	return b;
}

static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op)
{
	struct task_struct *old;

	old = cmpxchg(&c->btree_cache_alloc_lock, NULL, current);
	if (old && old != current) {
		if (op)
			prepare_to_wait(&c->btree_cache_wait, &op->wait,
					TASK_UNINTERRUPTIBLE);
		return -EINTR;
	}

	return 0;
}

static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
				     struct bkey *k)
{
	struct btree *b;

	trace_bcache_btree_cache_cannibalize(c);

	if (mca_cannibalize_lock(c, op))
		return ERR_PTR(-EINTR);

	list_for_each_entry_reverse(b, &c->btree_cache, list)
		if (!mca_reap(b, btree_order(k), false))
			return b;

	list_for_each_entry_reverse(b, &c->btree_cache, list)
		if (!mca_reap(b, btree_order(k), true))
			return b;

	WARN(1, "btree cache cannibalize failed\n");
	return ERR_PTR(-ENOMEM);
}

/*
 * We can only have one thread cannibalizing other cached btree nodes at a time,
 * or we'll deadlock. We use an open coded mutex to ensure that, which a
 * cannibalize_bucket() will take. This means every time we unlock the root of
 * the btree, we need to release this lock if we have it held.
 */
static void bch_cannibalize_unlock(struct cache_set *c)
{
	if (c->btree_cache_alloc_lock == current) {
		c->btree_cache_alloc_lock = NULL;
		wake_up(&c->btree_cache_wait);
	}
}
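
/*
 * Allocate an in-memory node for the bucket @k points to: reuse a
 * freeable/freed node if possible, allocate a fresh one otherwise, and
 * fall back to cannibalizing another cached node as a last resort.
 * Returns NULL if the node is already in the cache.
 */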
static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op,
			       struct bkey *k, int level)
{
	struct btree *b;

	BUG_ON(current->bio_list);

	lockdep_assert_held(&c->bucket_lock);

	if (mca_find(c, k))
		return NULL;

	/* btree_node_free() doesn't free memory; it sticks the node on the
	 * end of the list. Check if there are any freed nodes there:
	 */
	list_for_each_entry(b, &c->btree_cache_freeable, list)
		if (!mca_reap(b, btree_order(k), false))
			goto out;

	/* We never free struct btree itself, just the memory that holds the on
	 * disk node. Check the freed list before allocating a new one:
	 */
	list_for_each_entry(b, &c->btree_cache_freed, list)
		if (!mca_reap(b, 0, false)) {
			mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
			if (!b->keys.set[0].data)
				goto err;
			else
				goto out;
		}

	b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO);
	if (!b)
		goto err;

	BUG_ON(!down_write_trylock(&b->lock));
	if (!b->keys.set->data)
		goto err;
out:
	BUG_ON(b->io_mutex.count != 1);

	bkey_copy(&b->key, k);
	list_move(&b->list, &c->btree_cache);
	hlist_del_init_rcu(&b->hash);
	hlist_add_head_rcu(&b->hash, mca_hash(c, k));

	lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
	b->parent	= (void *) ~0UL;
	b->flags	= 0;
	b->written	= 0;
	b->level	= level;

	if (!b->level)
		bch_btree_keys_init(&b->keys, &bch_extent_keys_ops,
				    &b->c->expensive_debug_checks);
	else
		bch_btree_keys_init(&b->keys, &bch_btree_keys_ops,
				    &b->c->expensive_debug_checks);

	return b;
err:
	if (b)
		rw_unlock(true, b);

	b = mca_cannibalize(c, op, k);
	if (!IS_ERR(b))
		goto out;

	return b;
}

/**
 * bch_btree_node_get - find a btree node in the cache and lock it, reading it
 * in from disk if necessary.
 *
 * If IO is necessary and running under generic_make_request, returns -EAGAIN.
 *
 * The btree node will have either a read or a write lock held, depending on
 * level and op->lock.
 */
struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
				 struct bkey *k, int level, bool write,
				 struct btree *parent)
{
	int i = 0;
	struct btree *b;

	BUG_ON(level < 0);
retry:
	b = mca_find(c, k);

	if (!b) {
		if (current->bio_list)
			return ERR_PTR(-EAGAIN);

		mutex_lock(&c->bucket_lock);
		b = mca_alloc(c, op, k, level);
		mutex_unlock(&c->bucket_lock);

		if (!b)
			goto retry;
		if (IS_ERR(b))
			return b;

		bch_btree_node_read(b);

		if (!write)
			downgrade_write(&b->lock);
	} else {
		rw_lock(write, b, level);
		if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) {
			rw_unlock(write, b);
			goto retry;
		}
		BUG_ON(b->level != level);
	}

	b->parent = parent;
	b->accessed = 1;

	for (; i <= b->keys.nsets && b->keys.set[i].size; i++) {
		prefetch(b->keys.set[i].tree);
		prefetch(b->keys.set[i].data);
	}

	for (; i <= b->keys.nsets; i++)
		prefetch(b->keys.set[i].data);

	if (btree_node_io_error(b)) {
		rw_unlock(write, b);
		return ERR_PTR(-EIO);
	}

	BUG_ON(!b->written);

	return b;
}

static void btree_node_prefetch(struct btree *parent, struct bkey *k)
{
	struct btree *b;

	mutex_lock(&parent->c->bucket_lock);
	b = mca_alloc(parent->c, NULL, k, parent->level - 1);
	mutex_unlock(&parent->c->bucket_lock);

	if (!IS_ERR_OR_NULL(b)) {
		b->parent = parent;
		bch_btree_node_read(b);
		rw_unlock(true, b);
	}
}

/* Btree alloc */
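
/*
 * Free an on-disk node: return its bucket to the allocator and put the
 * in-memory node back on the freeable list. Never called on the root.
 */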
static void btree_node_free(struct btree *b)
{
	trace_bcache_btree_node_free(b);

	BUG_ON(b == b->c->root);

	mutex_lock(&b->write_lock);

	if (btree_node_dirty(b))
		btree_complete_write(b, btree_current_write(b));
	clear_bit(BTREE_NODE_dirty, &b->flags);

	mutex_unlock(&b->write_lock);

	cancel_delayed_work(&b->work);

	mutex_lock(&b->c->bucket_lock);
	bch_bucket_free(b->c, &b->key);
	mca_bucket_free(b);
	mutex_unlock(&b->c->bucket_lock);
}

struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
				     int level, bool wait,
				     struct btree *parent)
{
	BKEY_PADDED(key) k;
	struct btree *b = ERR_PTR(-EAGAIN);

	mutex_lock(&c->bucket_lock);
retry:
	if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait))
		goto err;

	bkey_put(c, &k.key);
	SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);

	b = mca_alloc(c, op, &k.key, level);
	if (IS_ERR(b))
		goto err_free;

	if (!b) {
		cache_bug(c,
			"Tried to allocate bucket that was in btree cache");
		goto retry;
	}

	b->accessed = 1;
	b->parent = parent;
	bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb));

	mutex_unlock(&c->bucket_lock);

	trace_bcache_btree_node_alloc(b);
	return b;
err_free:
	bch_bucket_free(c, &k.key);
err:
	mutex_unlock(&c->bucket_lock);

	trace_bcache_btree_node_alloc_fail(c);
	return b;
}

static struct btree *bch_btree_node_alloc(struct cache_set *c,
					  struct btree_op *op, int level,
					  struct btree *parent)
{
	return __bch_btree_node_alloc(c, op, level, op != NULL, parent);
}

static struct btree *btree_node_alloc_replacement(struct btree *b,
						  struct btree_op *op)
{
	struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent);
	if (!IS_ERR_OR_NULL(n)) {
		mutex_lock(&n->write_lock);
		bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
		bkey_copy_key(&n->key, &b->key);
		mutex_unlock(&n->write_lock);
	}

	return n;
}

static void make_btree_freeing_key(struct btree *b, struct bkey *k)
{
	unsigned i;

	mutex_lock(&b->c->bucket_lock);

	atomic_inc(&b->c->prio_blocked);

	bkey_copy(k, &b->key);
	bkey_copy_key(k, &ZERO_KEY);

	for (i = 0; i < KEY_PTRS(k); i++)
		SET_PTR_GEN(k, i,
			    bch_inc_gen(PTR_CACHE(b->c, &b->key, i),
					PTR_BUCKET(b->c, &b->key, i)));

	mutex_unlock(&b->c->bucket_lock);
}

static int btree_check_reserve(struct btree *b, struct btree_op *op)
{
	struct cache_set *c = b->c;
	struct cache *ca;
	unsigned i, reserve = (c->root->level - b->level) * 2 + 1;

	mutex_lock(&c->bucket_lock);

	for_each_cache(ca, c, i)
		if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
			if (op)
				prepare_to_wait(&c->btree_cache_wait, &op->wait,
						TASK_UNINTERRUPTIBLE);
			mutex_unlock(&c->bucket_lock);
			return -EINTR;
		}

	mutex_unlock(&c->bucket_lock);

	return mca_cannibalize_lock(b->c, op);
}

/* Garbage collection */
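
/*
 * Mark phase helper: update last_gc and the GC mark (metadata, dirty or
 * reclaimable) for every bucket @k points into, and return how stale the
 * most stale pointer was.
 */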
static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
				    struct bkey *k)
{
	uint8_t stale = 0;
	unsigned i;
	struct bucket *g;

	/*
	 * ptr_invalid() can't return true for the keys that mark btree nodes as
	 * freed, but since ptr_bad() returns true we'll never actually use them
	 * for anything and thus we don't want to mark their pointers here
	 */
	if (!bkey_cmp(k, &ZERO_KEY))
		return stale;

	for (i = 0; i < KEY_PTRS(k); i++) {
		if (!ptr_available(c, k, i))
			continue;

		g = PTR_BUCKET(c, k, i);

		if (gen_after(g->last_gc, PTR_GEN(k, i)))
			g->last_gc = PTR_GEN(k, i);

		if (ptr_stale(c, k, i)) {
			stale = max(stale, ptr_stale(c, k, i));
			continue;
		}

		cache_bug_on(GC_MARK(g) &&
			     (GC_MARK(g) == GC_MARK_METADATA) != (level != 0),
			     c, "inconsistent ptrs: mark = %llu, level = %i",
			     GC_MARK(g), level);

		if (level)
			SET_GC_MARK(g, GC_MARK_METADATA);
		else if (KEY_DIRTY(k))
			SET_GC_MARK(g, GC_MARK_DIRTY);
		else if (!GC_MARK(g))
			SET_GC_MARK(g, GC_MARK_RECLAIMABLE);

		/* guard against overflow */
		SET_GC_SECTORS_USED(g, min_t(unsigned,
					     GC_SECTORS_USED(g) + KEY_SIZE(k),
					     MAX_GC_SECTORS_USED));

		BUG_ON(!GC_SECTORS_USED(g));
	}

	return stale;
}

#define btree_mark_key(b, k)	__bch_btree_mark_key(b->c, b->level, k)

void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
{
	unsigned i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i) &&
		    !ptr_stale(c, k, i)) {
			struct bucket *b = PTR_BUCKET(c, k, i);

			b->gen = PTR_GEN(k, i);

			if (level && bkey_cmp(k, &ZERO_KEY))
				b->prio = BTREE_PRIO;
			else if (!level && b->prio == BTREE_PRIO)
				b->prio = INITIAL_PRIO;
		}

	__bch_btree_mark_key(c, level, k);
}

void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats)
{
	stats->in_use = (c->nbuckets - c->avail_nbuckets) * 100 / c->nbuckets;
}

static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
{
	uint8_t stale = 0;
	unsigned keys = 0, good_keys = 0;
	struct bkey *k;
	struct btree_iter iter;
	struct bset_tree *t;

	gc->nodes++;

	for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
		stale = max(stale, btree_mark_key(b, k));
		keys++;

		if (bch_ptr_bad(&b->keys, k))
			continue;

		gc->key_bytes += bkey_u64s(k);
		gc->nkeys++;
		good_keys++;

		gc->data += KEY_SIZE(k);
	}

	for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++)
		btree_bug_on(t->size &&
			     bset_written(&b->keys, t) &&
			     bkey_cmp(&b->key, &t->end) < 0,
			     b, "found short btree key in gc");

	if (b->c->gc_always_rewrite)
		return true;

	if (stale > 10)
		return true;

	if ((keys - good_keys) * 2 > keys)
		return true;

	return false;
}

#define GC_MERGE_NODES	4U

struct gc_merge_info {
	struct btree	*b;
	unsigned	keys;
};

static int bch_btree_insert_node(struct btree *, struct btree_op *,
				 struct keylist *, atomic_t *, struct bkey *);
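
/*
 * Try to merge up to GC_MERGE_NODES adjacent sibling nodes during gc:
 * only worth doing if the combined keys would fit in roughly two thirds
 * of the merged nodes' blocks. Returns -EINTR (invalidating the caller's
 * iterator) on success, 0 if nothing was coalesced.
 */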
static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
			     struct gc_stat *gc, struct gc_merge_info *r)
{
	unsigned i, nodes = 0, keys = 0, blocks;
	struct btree *new_nodes[GC_MERGE_NODES];
	struct keylist keylist;
	struct closure cl;
	struct bkey *k;

	bch_keylist_init(&keylist);

	if (btree_check_reserve(b, NULL))
		return 0;

	memset(new_nodes, 0, sizeof(new_nodes));
	closure_init_stack(&cl);

	while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
		keys += r[nodes++].keys;

	blocks = btree_default_blocks(b->c) * 2 / 3;

	if (nodes < 2 ||
	    __set_blocks(b->keys.set[0].data, keys,
			 block_bytes(b->c)) > blocks * (nodes - 1))
		return 0;

	for (i = 0; i < nodes; i++) {
		new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL);
		if (IS_ERR_OR_NULL(new_nodes[i]))
			goto out_nocoalesce;
	}

	/*
	 * We have to check the reserve here, after we've allocated our new
	 * nodes, to make sure the insert below will succeed - we also check
	 * before as an optimization to potentially avoid a bunch of expensive
	 * allocs/sorts
	 */
	if (btree_check_reserve(b, NULL))
		goto out_nocoalesce;

	for (i = 0; i < nodes; i++)
		mutex_lock(&new_nodes[i]->write_lock);

	for (i = nodes - 1; i > 0; --i) {
		struct bset *n1 = btree_bset_first(new_nodes[i]);
		struct bset *n2 = btree_bset_first(new_nodes[i - 1]);
		struct bkey *k, *last = NULL;

		keys = 0;

		if (i > 1) {
			for (k = n2->start;
			     k < bset_bkey_last(n2);
			     k = bkey_next(k)) {
				if (__set_blocks(n1, n1->keys + keys +
						 bkey_u64s(k),
						 block_bytes(b->c)) > blocks)
					break;

				last = k;
				keys += bkey_u64s(k);
			}
		} else {
			/*
			 * Last node we're not getting rid of - we're getting
			 * rid of the node at r[0]. Have to try and fit all of
			 * the remaining keys into this node; we can't ensure
			 * they will always fit due to rounding and variable
			 * length keys (shouldn't be possible in practice,
			 * though)
			 */
			if (__set_blocks(n1, n1->keys + n2->keys,
					 block_bytes(b->c)) >
			    btree_blocks(new_nodes[i]))
				goto out_nocoalesce;

			keys = n2->keys;
			/* Take the key of the node we're getting rid of */
			last = &r->b->key;
		}

		BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c)) >
		       btree_blocks(new_nodes[i]));

		if (last)
			bkey_copy_key(&new_nodes[i]->key, last);

		memcpy(bset_bkey_last(n1),
		       n2->start,
		       (void *) bset_bkey_idx(n2, keys) - (void *) n2->start);

		n1->keys += keys;
		r[i].keys = n1->keys;

		memmove(n2->start,
			bset_bkey_idx(n2, keys),
			(void *) bset_bkey_last(n2) -
			(void *) bset_bkey_idx(n2, keys));

		n2->keys -= keys;

		if (__bch_keylist_realloc(&keylist,
					  bkey_u64s(&new_nodes[i]->key)))
			goto out_nocoalesce;

		bch_btree_node_write(new_nodes[i], &cl);
		bch_keylist_add(&keylist, &new_nodes[i]->key);
	}

	for (i = 0; i < nodes; i++)
		mutex_unlock(&new_nodes[i]->write_lock);

	closure_sync(&cl);

	/* We emptied out this node */
	BUG_ON(btree_bset_first(new_nodes[0])->keys);
	btree_node_free(new_nodes[0]);
	rw_unlock(true, new_nodes[0]);
	new_nodes[0] = NULL;

	for (i = 0; i < nodes; i++) {
		if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key)))
			goto out_nocoalesce;

		make_btree_freeing_key(r[i].b, keylist.top);
		bch_keylist_push(&keylist);
	}

	bch_btree_insert_node(b, op, &keylist, NULL, NULL);
	BUG_ON(!bch_keylist_empty(&keylist));

	for (i = 0; i < nodes; i++) {
		btree_node_free(r[i].b);
		rw_unlock(true, r[i].b);

		r[i].b = new_nodes[i];
	}

	memmove(r, r + 1, sizeof(r[0]) * (nodes - 1));
	r[nodes - 1].b = ERR_PTR(-EINTR);

	trace_bcache_btree_gc_coalesce(nodes);
	gc->nodes--;

	bch_keylist_free(&keylist);

	/* Invalidated our iterator */
	return -EINTR;

out_nocoalesce:
	closure_sync(&cl);

	while ((k = bch_keylist_pop(&keylist)))
		if (!bkey_cmp(k, &ZERO_KEY))
			atomic_dec(&b->c->prio_blocked);

	/* Don't free the keylist until we've drained it */
	bch_keylist_free(&keylist);

	for (i = 0; i < nodes; i++)
		if (!IS_ERR_OR_NULL(new_nodes[i])) {
			btree_node_free(new_nodes[i]);
			rw_unlock(true, new_nodes[i]);
		}
	return 0;
}
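
/*
 * Rewrite a single node during gc (because it's mostly stale, or
 * gc_always_rewrite is set): allocate a replacement, write it out, and
 * swap it in via an insert into the parent.
 */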
static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
				 struct btree *replace)
{
	struct keylist keys;
	struct btree *n;

	if (btree_check_reserve(b, NULL))
		return 0;

	n = btree_node_alloc_replacement(replace, NULL);

	/* recheck reserve after allocating replacement node */
	if (btree_check_reserve(b, NULL)) {
		btree_node_free(n);
		rw_unlock(true, n);
		return 0;
	}

	bch_btree_node_write_sync(n);

	bch_keylist_init(&keys);
	bch_keylist_add(&keys, &n->key);

	make_btree_freeing_key(replace, keys.top);
	bch_keylist_push(&keys);

	bch_btree_insert_node(b, op, &keys, NULL, NULL);
	BUG_ON(!bch_keylist_empty(&keys));

	btree_node_free(replace);
	rw_unlock(true, n);

	/* Invalidated our iterator */
	return -EINTR;
}

static unsigned btree_gc_count_keys(struct btree *b)
{
	struct bkey *k;
	struct btree_iter iter;
	unsigned ret = 0;

	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
		ret += bkey_u64s(k);

	return ret;
}

static int btree_gc_recurse(struct btree *b, struct btree_op *op,
			    struct closure *writes, struct gc_stat *gc)
{
	int ret = 0;
	bool should_rewrite;
	struct bkey *k;
	struct btree_iter iter;
	struct gc_merge_info r[GC_MERGE_NODES];
	struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1;

	bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);

	for (i = r; i < r + ARRAY_SIZE(r); i++)
		i->b = ERR_PTR(-EINTR);

	while (1) {
		k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
		if (k) {
			r->b = bch_btree_node_get(b->c, op, k, b->level - 1,
						  true, b);
			if (IS_ERR(r->b)) {
				ret = PTR_ERR(r->b);
				break;
			}

			r->keys = btree_gc_count_keys(r->b);

			ret = btree_gc_coalesce(b, op, gc, r);
			if (ret)
				break;
		}

		if (!last->b)
			break;

		if (!IS_ERR(last->b)) {
			should_rewrite = btree_gc_mark_node(last->b, gc);
			if (should_rewrite) {
				ret = btree_gc_rewrite_node(b, op, last->b);
				if (ret)
					break;
			}

			if (last->b->level) {
				ret = btree_gc_recurse(last->b, op, writes, gc);
				if (ret)
					break;
			}

			bkey_copy_key(&b->c->gc_done, &last->b->key);

			/*
			 * Must flush leaf nodes before gc ends, since replace
			 * operations aren't journalled
			 */
			mutex_lock(&last->b->write_lock);
			if (btree_node_dirty(last->b))
				bch_btree_node_write(last->b, writes);
			mutex_unlock(&last->b->write_lock);
			rw_unlock(true, last->b);
		}

		memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1));
		r->b = NULL;

		if (need_resched()) {
			ret = -EAGAIN;
			break;
		}
	}

	for (i = r; i < r + ARRAY_SIZE(r); i++)
		if (!IS_ERR_OR_NULL(i->b)) {
			mutex_lock(&i->b->write_lock);
			if (btree_node_dirty(i->b))
				bch_btree_node_write(i->b, writes);
			mutex_unlock(&i->b->write_lock);
			rw_unlock(true, i->b);
		}

	return ret;
}
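
/*
 * gc entry point for the root node: possibly rewrite the root itself,
 * mark its key, then recurse into its children if it isn't a leaf.
 */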
static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
			     struct closure *writes, struct gc_stat *gc)
{
	struct btree *n = NULL;
	int ret = 0;
	bool should_rewrite;

	should_rewrite = btree_gc_mark_node(b, gc);
	if (should_rewrite) {
		n = btree_node_alloc_replacement(b, NULL);

		if (!IS_ERR_OR_NULL(n)) {
			bch_btree_node_write_sync(n);

			bch_btree_set_root(n);
			btree_node_free(b);
			rw_unlock(true, n);

			return -EINTR;
		}
	}

	__bch_btree_mark_key(b->c, b->level + 1, &b->key);

	if (b->level) {
		ret = btree_gc_recurse(b, op, writes, gc);
		if (ret)
			return ret;
	}

	bkey_copy_key(&b->c->gc_done, &b->key);

	return ret;
}

static void btree_gc_start(struct cache_set *c)
{
	struct cache *ca;
	struct bucket *b;
	unsigned i;

	if (!c->gc_mark_valid)
		return;

	mutex_lock(&c->bucket_lock);

	c->gc_mark_valid = 0;
	c->gc_done = ZERO_KEY;

	for_each_cache(ca, c, i)
		for_each_bucket(b, ca) {
			b->last_gc = b->gen;
			if (!atomic_read(&b->pin)) {
				SET_GC_MARK(b, 0);
				SET_GC_SECTORS_USED(b, 0);
			}
		}

	mutex_unlock(&c->bucket_lock);
}

static void bch_btree_gc_finish(struct cache_set *c)
{
	struct bucket *b;
	struct cache *ca;
	unsigned i;

	mutex_lock(&c->bucket_lock);

	set_gc_sectors(c);
	c->gc_mark_valid = 1;
	c->need_gc	= 0;

	for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++)
		SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
			    GC_MARK_METADATA);

	/* don't reclaim buckets to which writeback keys point */
	rcu_read_lock();
	for (i = 0; i < c->nr_uuids; i++) {
		struct bcache_device *d = c->devices[i];
		struct cached_dev *dc;
		struct keybuf_key *w, *n;
		unsigned j;

		if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
			continue;
		dc = container_of(d, struct cached_dev, disk);

		spin_lock(&dc->writeback_keys.lock);
		rbtree_postorder_for_each_entry_safe(w, n,
					&dc->writeback_keys.keys, node)
			for (j = 0; j < KEY_PTRS(&w->key); j++)
				SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
					    GC_MARK_DIRTY);
		spin_unlock(&dc->writeback_keys.lock);
	}
	rcu_read_unlock();

	c->avail_nbuckets = 0;
	for_each_cache(ca, c, i) {
		uint64_t *i;

		ca->invalidate_needs_gc = 0;

		for (i = ca->sb.d; i < ca->sb.d + ca->sb.keys; i++)
			SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);

		for (i = ca->prio_buckets;
		     i < ca->prio_buckets + prio_buckets(ca) * 2; i++)
			SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);

		for_each_bucket(b, ca) {
			c->need_gc	= max(c->need_gc, bucket_gc_gen(b));

			if (atomic_read(&b->pin))
				continue;

			BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));

			if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
				c->avail_nbuckets++;
		}
	}

	mutex_unlock(&c->bucket_lock);
}
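
/*
 * Run one full gc pass: reset the marks, walk the tree from the root
 * (retrying until the walk completes without -EAGAIN/-EINTR), then
 * recompute bucket state and kick the allocators and moving gc.
 */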
static void bch_btree_gc(struct cache_set *c)
{
	int ret;
	struct gc_stat stats;
	struct closure writes;
	struct btree_op op;
	uint64_t start_time = local_clock();

	trace_bcache_gc_start(c);

	memset(&stats, 0, sizeof(struct gc_stat));
	closure_init_stack(&writes);
	bch_btree_op_init(&op, SHRT_MAX);

	btree_gc_start(c);

	do {
		ret = btree_root(gc_root, c, &op, &writes, &stats);
		closure_sync(&writes);
		cond_resched();

		if (ret && ret != -EAGAIN)
			pr_warn("gc failed!");
	} while (ret);

	bch_btree_gc_finish(c);
	wake_up_allocators(c);

	bch_time_stats_update(&c->btree_gc_time, start_time);

	stats.key_bytes *= sizeof(uint64_t);
	stats.data	<<= 9;
	bch_update_bucket_in_use(c, &stats);
	memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));

	trace_bcache_gc_end(c);

	bch_moving_gc(c);
}

static bool gc_should_run(struct cache_set *c)
{
	struct cache *ca;
	unsigned i;

	for_each_cache(ca, c, i)
		if (ca->invalidate_needs_gc)
			return true;

	if (atomic_read(&c->sectors_to_gc) < 0)
		return true;

	return false;
}

static int bch_gc_thread(void *arg)
{
	struct cache_set *c = arg;

	while (1) {
		wait_event_interruptible(c->gc_wait,
			   kthread_should_stop() || gc_should_run(c));

		if (kthread_should_stop())
			break;

		set_gc_sectors(c);
		bch_btree_gc(c);
	}

	return 0;
}

int bch_gc_thread_start(struct cache_set *c)
{
	c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
	if (IS_ERR(c->gc_thread))
		return PTR_ERR(c->gc_thread);

	return 0;
}

/* Initial partial gc */

static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
{
	int ret = 0;
	struct bkey *k, *p = NULL;
	struct btree_iter iter;

	for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
		bch_initial_mark_key(b->c, b->level, k);

	bch_initial_mark_key(b->c, b->level + 1, &b->key);

	if (b->level) {
		bch_btree_iter_init(&b->keys, &iter, NULL);

		do {
			k = bch_btree_iter_next_filter(&iter, &b->keys,
						       bch_ptr_bad);
			if (k)
				btree_node_prefetch(b, k);

			if (p)
				ret = btree(check_recurse, p, b, op);

			p = k;
		} while (p && !ret);
	}

	return ret;
}

int bch_btree_check(struct cache_set *c)
{
	struct btree_op op;

	bch_btree_op_init(&op, SHRT_MAX);

	return btree_root(check_recurse, c, &op);
}
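
/*
 * Finish the initial gc after cache registration: in addition to the
 * normal gc bookkeeping, seed the prio freelist with known-unused
 * buckets so the allocator thread can start up.
 */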
void bch_initial_gc_finish(struct cache_set *c)
{
	struct cache *ca;
	struct bucket *b;
	unsigned i;

	bch_btree_gc_finish(c);

	mutex_lock(&c->bucket_lock);

	/*
	 * We need to put some unused buckets directly on the prio freelist in
	 * order to get the allocator thread started - it needs freed buckets in
	 * order to rewrite the prios and gens, and it needs to rewrite prios
	 * and gens in order to free buckets.
	 *
	 * This is only safe for buckets that have no live data in them, which
	 * there should always be some of.
	 */
	for_each_cache(ca, c, i) {
		for_each_bucket(b, ca) {
			if (fifo_full(&ca->free[RESERVE_PRIO]))
				break;

			if (bch_can_invalidate_bucket(ca, b) &&
			    !GC_MARK(b)) {
				__bch_invalidate_one_bucket(ca, b);
				fifo_push(&ca->free[RESERVE_PRIO],
					  b - ca->buckets);
			}
		}
	}

	mutex_unlock(&c->bucket_lock);
}

/* Btree insertion */

static bool btree_insert_key(struct btree *b, struct bkey *k,
			     struct bkey *replace_key)
{
	unsigned status;

	BUG_ON(bkey_cmp(k, &b->key) > 0);

	status = bch_btree_insert_key(&b->keys, k, replace_key);
	if (status != BTREE_INSERT_STATUS_NO_INSERT) {
		bch_check_keys(&b->keys, "%u for %s", status,
			       replace_key ? "replace" : "insert");

		trace_bcache_btree_insert_key(b, k, replace_key != NULL,
					      status);
		return true;
	} else
		return false;
}

static size_t insert_u64s_remaining(struct btree *b)
{
	long ret = bch_btree_keys_u64s_remaining(&b->keys);

	/*
	 * Might land in the middle of an existing extent and have to split it
	 */
	if (b->keys.ops->is_extents)
		ret -= KEY_MAX_U64S;

	return max(ret, 0L);
}

static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
				  struct keylist *insert_keys,
				  struct bkey *replace_key)
{
	bool ret = false;
	int oldsize = bch_count_data(&b->keys);

	while (!bch_keylist_empty(insert_keys)) {
		struct bkey *k = insert_keys->keys;

		if (bkey_u64s(k) > insert_u64s_remaining(b))
			break;

		if (bkey_cmp(k, &b->key) <= 0) {
			if (!b->level)
				bkey_put(b->c, k);

			ret |= btree_insert_key(b, k, replace_key);
			bch_keylist_pop_front(insert_keys);
		} else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
			BKEY_PADDED(key) temp;
			bkey_copy(&temp.key, insert_keys->keys);

			bch_cut_back(&b->key, &temp.key);
			bch_cut_front(&b->key, insert_keys->keys);

			ret |= btree_insert_key(b, &temp.key, replace_key);
			break;
		} else {
			break;
		}
	}

	if (!ret)
		op->insert_collision = true;

	BUG_ON(!bch_keylist_empty(insert_keys) && b->level);

	BUG_ON(bch_count_data(&b->keys) < oldsize);
	return ret;
}
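
/*
 * Split (or just compact) a node that can't fit the pending insert:
 * replace it with one new node, or with two siblings whose keys are then
 * inserted into the parent - growing the tree by one level if the root
 * itself was split.
 */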
static int btree_split(struct btree *b, struct btree_op *op,
		       struct keylist *insert_keys,
		       struct bkey *replace_key)
{
	bool split;
	struct btree *n1, *n2 = NULL, *n3 = NULL;
	uint64_t start_time = local_clock();
	struct closure cl;
	struct keylist parent_keys;

	closure_init_stack(&cl);
	bch_keylist_init(&parent_keys);

	if (btree_check_reserve(b, op)) {
		if (!b->level)
			return -EINTR;
		else
			WARN(1, "insufficient reserve for split\n");
	}

	n1 = btree_node_alloc_replacement(b, op);
	if (IS_ERR(n1))
		goto err;

	split = set_blocks(btree_bset_first(n1),
			   block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5;

	if (split) {
		unsigned keys = 0;

		trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);

		n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent);
		if (IS_ERR(n2))
			goto err_free1;

		if (!b->parent) {
			n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL);
			if (IS_ERR(n3))
				goto err_free2;
		}

		mutex_lock(&n1->write_lock);
		mutex_lock(&n2->write_lock);

		bch_btree_insert_keys(n1, op, insert_keys, replace_key);

		/*
		 * Has to be a linear search because we don't have an auxiliary
		 * search tree yet
		 */

		while (keys < (btree_bset_first(n1)->keys * 3) / 5)
			keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1),
							keys));

		bkey_copy_key(&n1->key,
			      bset_bkey_idx(btree_bset_first(n1), keys));
		keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys));

		btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys;
		btree_bset_first(n1)->keys = keys;

		memcpy(btree_bset_first(n2)->start,
		       bset_bkey_last(btree_bset_first(n1)),
		       btree_bset_first(n2)->keys * sizeof(uint64_t));

		bkey_copy_key(&n2->key, &b->key);

		bch_keylist_add(&parent_keys, &n2->key);
		bch_btree_node_write(n2, &cl);
		mutex_unlock(&n2->write_lock);
		rw_unlock(true, n2);
	} else {
		trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys);

		mutex_lock(&n1->write_lock);
		bch_btree_insert_keys(n1, op, insert_keys, replace_key);
	}

	bch_keylist_add(&parent_keys, &n1->key);
	bch_btree_node_write(n1, &cl);
	mutex_unlock(&n1->write_lock);

	if (n3) {
		/* Depth increases, make a new root */
		mutex_lock(&n3->write_lock);
		bkey_copy_key(&n3->key, &MAX_KEY);
		bch_btree_insert_keys(n3, op, &parent_keys, NULL);
		bch_btree_node_write(n3, &cl);
		mutex_unlock(&n3->write_lock);

		closure_sync(&cl);
		bch_btree_set_root(n3);
		rw_unlock(true, n3);
	} else if (!b->parent) {
		/* Root filled up but didn't need to be split */
		closure_sync(&cl);
		bch_btree_set_root(n1);
	} else {
		/* Split a non root node */
		closure_sync(&cl);
		make_btree_freeing_key(b, parent_keys.top);
		bch_keylist_push(&parent_keys);

		bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL);
		BUG_ON(!bch_keylist_empty(&parent_keys));
	}

	btree_node_free(b);
	rw_unlock(true, n1);

	bch_time_stats_update(&b->c->btree_split_time, start_time);

	return 0;
err_free2:
	bkey_put(b->c, &n2->key);
	btree_node_free(n2);
	rw_unlock(true, n2);
err_free1:
	bkey_put(b->c, &n1->key);
	btree_node_free(n1);
	rw_unlock(true, n1);
err:
	WARN(1, "bcache: btree split failed (level %u)", b->level);

	if (n3 == ERR_PTR(-EAGAIN) ||
	    n2 == ERR_PTR(-EAGAIN) ||
	    n1 == ERR_PTR(-EAGAIN))
		return -EAGAIN;

	return -ENOMEM;
}

static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
				 struct keylist *insert_keys,
				 atomic_t *journal_ref,
				 struct bkey *replace_key)
{
	struct closure cl;

	BUG_ON(b->level && replace_key);

	closure_init_stack(&cl);

	mutex_lock(&b->write_lock);

	if (write_block(b) != btree_bset_last(b) &&
	    b->keys.last_set_unwritten)
		bch_btree_init_next(b); /* just wrote a set */

	if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) {
		mutex_unlock(&b->write_lock);
		goto split;
	}

	BUG_ON(write_block(b) != btree_bset_last(b));

	if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
		if (!b->level)
			bch_btree_leaf_dirty(b, journal_ref);
		else
			bch_btree_node_write(b, &cl);
	}

	mutex_unlock(&b->write_lock);

	/* wait for btree node write if necessary, after unlock */
	closure_sync(&cl);

	return 0;
split:
	if (current->bio_list) {
		op->lock = b->c->root->level + 1;
		return -EAGAIN;
	} else if (op->lock <= b->c->root->level) {
		op->lock = b->c->root->level + 1;
		return -EINTR;
	} else {
		/* Invalidated all iterators */
		int ret = btree_split(b, op, insert_keys, replace_key);

		if (bch_keylist_empty(insert_keys))
			return 0;
		else if (!ret)
			return -EINTR;
		return ret;
	}
}
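
/*
 * Insert a sentinel "check" key whose single pointer is a random cookie
 * on PTR_CHECK_DEV. If the caller only holds a read lock (op->lock ==
 * -1), the node is relocked for write first, bailing out with -EINTR if
 * the node changed underneath us.
 */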
int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
			       struct bkey *check_key)
{
	int ret = -EINTR;
	uint64_t btree_ptr = b->key.ptr[0];
	unsigned long seq = b->seq;
	struct keylist insert;
	bool upgrade = op->lock == -1;

	bch_keylist_init(&insert);

	if (upgrade) {
		rw_unlock(false, b);
		rw_lock(true, b, b->level);

		if (b->key.ptr[0] != btree_ptr ||
		    b->seq != seq + 1) {
			op->lock = b->level;
			goto out;
		}
	}

	SET_KEY_PTRS(check_key, 1);
	get_random_bytes(&check_key->ptr[0], sizeof(uint64_t));

	SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV);

	bch_keylist_add(&insert, check_key);

	ret = bch_btree_insert_node(b, op, &insert, NULL, NULL);

	BUG_ON(!ret && !bch_keylist_empty(&insert));
out:
	if (upgrade)
		downgrade_write(&b->lock);
	return ret;
}

struct btree_insert_op {
	struct btree_op	op;
	struct keylist	*keys;
	atomic_t	*journal_ref;
	struct bkey	*replace_key;
};

static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
{
	struct btree_insert_op *op = container_of(b_op,
					struct btree_insert_op, op);

	int ret = bch_btree_insert_node(b, &op->op, op->keys,
					op->journal_ref, op->replace_key);
	if (ret && !bch_keylist_empty(op->keys))
		return ret;
	else
		return MAP_DONE;
}

int bch_btree_insert(struct cache_set *c, struct keylist *keys,
		     atomic_t *journal_ref, struct bkey *replace_key)
{
	struct btree_insert_op op;
	int ret = 0;

	BUG_ON(current->bio_list);
	BUG_ON(bch_keylist_empty(keys));

	bch_btree_op_init(&op.op, 0);
	op.keys		= keys;
	op.journal_ref	= journal_ref;
	op.replace_key	= replace_key;

	while (!ret && !bch_keylist_empty(keys)) {
		op.op.lock = 0;
		ret = bch_btree_map_leaf_nodes(&op.op, c,
					       &START_KEY(keys->keys),
					       btree_insert_fn);
	}

	if (ret) {
		struct bkey *k;

		pr_err("error %i", ret);

		while ((k = bch_keylist_pop(keys)))
			bkey_put(c, k);
	} else if (op.op.insert_collision)
		ret = -ESRCH;

	return ret;
}

void bch_btree_set_root(struct btree *b)
{
	unsigned i;
	struct closure cl;

	closure_init_stack(&cl);

	trace_bcache_btree_set_root(b);

	BUG_ON(!b->written);

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO);

	mutex_lock(&b->c->bucket_lock);
	list_del_init(&b->list);
	mutex_unlock(&b->c->bucket_lock);

	b->c->root = b;

	bch_journal_meta(b->c, &cl);
	closure_sync(&cl);
}

/* Map across nodes or keys */
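
/*
 * Recursively walk the tree starting from @from, calling @fn on every
 * leaf node, and on interior nodes as well when @flags is MAP_ALL_NODES.
 */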
		bch_btree_iter_init(&b->keys, &iter, from);

		while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
						       bch_ptr_bad))) {
			ret = btree(map_nodes_recurse, k, b,
				    op, from, fn, flags);
			from = NULL;

			if (ret != MAP_CONTINUE)
				return ret;
		}
	}

	if (!b->level || flags == MAP_ALL_NODES)
		ret = fn(op, b);

	return ret;
}

int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
			  struct bkey *from, btree_map_nodes_fn *fn, int flags)
{
	return btree_root(map_nodes_recurse, c, op, from, fn, flags);
}

static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
				      struct bkey *from, btree_map_keys_fn *fn,
				      int flags)
{
	int ret = MAP_CONTINUE;
	struct bkey *k;
	struct btree_iter iter;

	bch_btree_iter_init(&b->keys, &iter, from);

	while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
		ret = !b->level
			? fn(op, b, k)
			: btree(map_keys_recurse, k, b, op, from, fn, flags);
		from = NULL;

		if (ret != MAP_CONTINUE)
			return ret;
	}

	if (!b->level && (flags & MAP_END_KEY))
		ret = fn(op, b, &KEY(KEY_INODE(&b->key),
				     KEY_OFFSET(&b->key), 0));

	return ret;
}

int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
		       struct bkey *from, btree_map_keys_fn *fn, int flags)
{
	return btree_root(map_keys_recurse, c, op, from, fn, flags);
}

/* Keybuf code */

static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
{
	/* Overlapping keys compare equal */
	if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0)
		return -1;
	if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0)
		return 1;
	return 0;
}

static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
					    struct keybuf_key *r)
{
	return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
}

struct refill {
	struct btree_op	op;
	unsigned	nr_found;
	struct keybuf	*buf;
	struct bkey	*end;
	keybuf_pred_fn	*pred;
};

static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
			    struct bkey *k)
{
	struct refill *refill = container_of(op, struct refill, op);
	struct keybuf *buf = refill->buf;
	int ret = MAP_CONTINUE;

	if (bkey_cmp(k, refill->end) >= 0) {
		ret = MAP_DONE;
		goto out;
	}

	if (!KEY_SIZE(k)) /* end key */
		goto out;

	if (refill->pred(buf, k)) {
		struct keybuf_key *w;

		spin_lock(&buf->lock);

		w = array_alloc(&buf->freelist);
		if (!w) {
			spin_unlock(&buf->lock);
			return MAP_DONE;
		}

		w->private = NULL;
		bkey_copy(&w->key, k);

		if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
			array_free(&buf->freelist, w);
		else
			refill->nr_found++;

		if (array_freelist_empty(&buf->freelist))
			ret = MAP_DONE;

		spin_unlock(&buf->lock);
	}
out:
	buf->last_scanned = *k;
	return ret;
}

void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
		       struct bkey *end, keybuf_pred_fn *pred)
{
	struct bkey start = buf->last_scanned;
	struct refill refill;

	cond_resched();

	bch_btree_op_init(&refill.op, -1);
	refill.nr_found	= 0;
	refill.buf	= buf;
	refill.end	= end;
	refill.pred	= pred;

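	/*
	 * Walk the tree from last_scanned towards @end, collecting keys
	 * that match @pred until the freelist is exhausted.  MAP_END_KEY
	 * also passes a zero-size key at the end of each leaf to
	 * refill_keybuf_fn, so last_scanned advances even across ranges
	 * with no matching keys.
	 */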
	bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
			   refill_keybuf_fn, MAP_END_KEY);

	trace_bcache_keyscan(refill.nr_found,
			     KEY_INODE(&start), KEY_OFFSET(&start),
			     KEY_INODE(&buf->last_scanned),
			     KEY_OFFSET(&buf->last_scanned));

	spin_lock(&buf->lock);

	if (!RB_EMPTY_ROOT(&buf->keys)) {
		struct keybuf_key *w;
		w = RB_FIRST(&buf->keys, struct keybuf_key, node);
		buf->start	= START_KEY(&w->key);

		w = RB_LAST(&buf->keys, struct keybuf_key, node);
		buf->end	= w->key;
	} else {
		buf->start	= MAX_KEY;
		buf->end	= MAX_KEY;
	}

	spin_unlock(&buf->lock);
}

static void __bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
{
	rb_erase(&w->node, &buf->keys);
	array_free(&buf->freelist, w);
}

void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
{
	spin_lock(&buf->lock);
	__bch_keybuf_del(buf, w);
	spin_unlock(&buf->lock);
}

bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
				  struct bkey *end)
{
	bool ret = false;
	struct keybuf_key *p, *w, s;
	s.key = *start;

	if (bkey_cmp(end, &buf->start) <= 0 ||
	    bkey_cmp(start, &buf->end) >= 0)
		return false;

	spin_lock(&buf->lock);
	w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp);

	while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) {
		p = w;
		w = RB_NEXT(w, node);

		if (p->private)
			ret = true;
		else
			__bch_keybuf_del(buf, p);
	}

	spin_unlock(&buf->lock);
	return ret;
}

struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
{
	struct keybuf_key *w;
	spin_lock(&buf->lock);

	w = RB_FIRST(&buf->keys, struct keybuf_key, node);

	while (w && w->private)
		w = RB_NEXT(w, node);

	if (w)
		w->private = ERR_PTR(-EINTR);

	spin_unlock(&buf->lock);
	return w;
}

struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
					  struct keybuf *buf,
					  struct bkey *end,
					  keybuf_pred_fn *pred)
{
	struct keybuf_key *ret;

	while (1) {
		ret = bch_keybuf_next(buf);
		if (ret)
			break;

		if (bkey_cmp(&buf->last_scanned, end) >= 0) {
			pr_debug("scan finished");
			break;
		}

		bch_refill_keybuf(c, buf, end, pred);
	}

	return ret;
}

void bch_keybuf_init(struct keybuf *buf)
{
	buf->last_scanned	= MAX_KEY;
	buf->keys		= RB_ROOT;

	spin_lock_init(&buf->lock);
	array_allocator_init(&buf->freelist);
}
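
/*
 * Typical keybuf usage, as in writeback.c and movinggc.c: initialize
 * with bch_keybuf_init(), pull work with bch_keybuf_next_rescan()
 * (which refills from the btree as needed and marks each returned key
 * in flight via w->private), then call bch_keybuf_del() once the IO
 * for that key has completed.
 */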