/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 *
 * Uses a block device as cache for other block devices; optimized for SSDs.
 * All allocation is done in buckets, which should match the erase block size
 * of the device.
 *
 * Buckets containing cached data are kept on a heap sorted by priority;
 * bucket priority is increased on cache hit, and periodically all the buckets
 * on the heap have their priority scaled down. This currently is just used as
 * an LRU but in the future should allow for more intelligent heuristics.
 *
 * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
 * counter. Garbage collection is used to remove stale pointers.
 *
 * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
 * as keys are inserted we only sort the pages that have not yet been written.
 * When garbage collection is run, we resort the entire node.
 *
 * All configuration is done via sysfs; see Documentation/bcache.txt.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"

#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <trace/events/bcache.h>

/*
 * Todo:
 * register_bcache: Return errors out to userspace correctly
 *
 * Writeback: don't undirty key until after a cache flush
 *
 * Create an iterator for key pointers
 *
 * On btree write error, mark bucket such that it won't be freed from the cache
 *
 * Journalling:
 *   Check for bad keys in replay
 *   Propagate barriers
 *   Refcount journal entries in journal_replay
 *
 * Garbage collection:
 *   Finish incremental gc
 *   Gc should free old UUIDs, data for invalid UUIDs
 *
 * Provide a way to list backing device UUIDs we have data cached for, and
 * probably how long it's been since we've seen them, and a way to invalidate
 * dirty data for devices that will never be attached again
 *
 * Keep 1 min/5 min/15 min statistics of how busy a block device has been, so
 * that based on that and how much dirty data we have we can keep writeback
 * from being starved
 *
 * Add a tracepoint or somesuch to watch for writeback starvation
 *
 * When btree depth > 1 and splitting an interior node, we have to make sure
 * alloc_bucket() cannot fail. This should be true but is not completely
 * obvious.
 *
 * Plugging?
 *
 * If data write is less than hard sector size of ssd, round up offset in open
 * bucket to the next whole sector
 *
 * Superblock needs to be fleshed out for multiple cache devices
 *
 * Add a sysfs tunable for the number of writeback IOs in flight
 *
 * Add a sysfs tunable for the number of open data buckets
 *
 * IO tracking: Can we track when one process is doing io on behalf of another?
 * IO tracking: Don't use just an average, weigh more recent stuff higher
 *
 * Test module load/unload
 */

#define MAX_NEED_GC		64
#define MAX_SAVE_PRIO		72

#define PTR_DIRTY_BIT		(((uint64_t) 1 << 36))

#define PTR_HASH(c, k)						\
	(((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))

#define insert_lock(s, b)	((b)->level <= (s)->lock)

/*
 * These macros are for recursing down the btree - they handle the details of
 * locking and looking up nodes in the cache for you.
 * They're best treated as mere syntax when reading code that uses them.
 *
 * op->lock determines whether we take a read or a write lock at a given depth.
 * If you've got a read lock and find that you need a write lock (i.e. you're
 * going to have to split), set op->lock and return -EINTR; btree_root() will
 * call you again and you'll have the correct lock.
 */

/**
 * btree - recurse down the btree on a specified key
 * @fn:		function to call, which will be passed the child node
 * @key:	key to recurse on
 * @b:		parent btree node
 * @op:		pointer to struct btree_op
 */
#define btree(fn, key, b, op, ...)					\
({									\
	int _r, l = (b)->level - 1;					\
	bool _w = l <= (op)->lock;					\
	struct btree *_child = bch_btree_node_get((b)->c, op, key, l,	\
						  _w, b);		\
	if (!IS_ERR(_child)) {						\
		_r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__);	\
		rw_unlock(_w, _child);					\
	} else								\
		_r = PTR_ERR(_child);					\
	_r;								\
})

/**
 * btree_root - call a function on the root of the btree
 * @fn:		function to call, which will be passed the child node
 * @c:		cache set
 * @op:		pointer to struct btree_op
 */
#define btree_root(fn, c, op, ...)					\
({									\
	int _r = -EINTR;						\
	do {								\
		struct btree *_b = (c)->root;				\
		bool _w = insert_lock(op, _b);				\
		rw_lock(_w, _b, _b->level);				\
		if (_b == (c)->root &&					\
		    _w == insert_lock(op, _b)) {			\
			_r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__);	\
		}							\
		rw_unlock(_w, _b);					\
		bch_cannibalize_unlock(c);				\
		if (_r == -EINTR)					\
			schedule();					\
	} while (_r == -EINTR);						\
									\
	finish_wait(&(c)->btree_cache_wait, &(op)->wait);		\
	_r;								\
})

static inline struct bset *write_block(struct btree *b)
{
	return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c);
}

static void bch_btree_init_next(struct btree *b)
{
	/* If not a leaf node, always sort */
	if (b->level && b->keys.nsets)
		bch_btree_sort(&b->keys, &b->c->sort);
	else
		bch_btree_sort_lazy(&b->keys, &b->c->sort);

	if (b->written < btree_blocks(b))
		bch_bset_init_next(&b->keys, write_block(b),
				   bset_magic(&b->c->sb));

}

/* Btree key manipulation */

void bkey_put(struct cache_set *c, struct bkey *k)
{
	unsigned i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i))
			atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
}

/* Btree IO */

static uint64_t btree_csum_set(struct btree *b, struct bset *i)
{
	uint64_t crc = b->key.ptr[0];
	void *data = (void *) i + 8, *end = bset_bkey_last(i);

	crc = bch_crc64_update(crc, data, end - data);
	return crc ^ 0xffffffffffffffffULL;
}

void bch_btree_node_read_done(struct btree *b)
{
	const char *err = "bad btree header";
	struct bset *i = btree_bset_first(b);
	struct btree_iter *iter;

	iter = mempool_alloc(b->c->fill_iter, GFP_NOIO);
	iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
	iter->used = 0;

#ifdef CONFIG_BCACHE_DEBUG
	iter->b = &b->keys;
#endif

	if (!i->seq)
		goto err;

	for (;
	     b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq;
	     i = write_block(b)) {
		err = "unsupported bset version";
		if (i->version > BCACHE_BSET_VERSION)
			goto err;

		err = "bad btree header";
		if (b->written + set_blocks(i, block_bytes(b->c)) >
		    btree_blocks(b))
			goto err;
		err = "bad magic";
		if (i->magic != bset_magic(&b->c->sb))
			goto err;

		err = "bad checksum";
		switch (i->version) {
		case 0:
			if (i->csum != csum_set(i))
				goto err;
			break;
		case BCACHE_BSET_VERSION:
			if (i->csum != btree_csum_set(b, i))
				goto err;
			break;
		}

		err = "empty set";
		if (i != b->keys.set[0].data && !i->keys)
			goto err;

		bch_btree_iter_push(iter, i->start, bset_bkey_last(i));

		b->written += set_blocks(i, block_bytes(b->c));
	}

	err = "corrupted btree";
	for (i = write_block(b);
	     bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
	     i = ((void *) i) + block_bytes(b->c))
		if (i->seq == b->keys.set[0].data->seq)
			goto err;

	bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);

	i = b->keys.set[0].data;
	err = "short btree key";
	if (b->keys.set[0].size &&
	    bkey_cmp(&b->key, &b->keys.set[0].end) < 0)
		goto err;

	if (b->written < btree_blocks(b))
		bch_bset_init_next(&b->keys, write_block(b),
				   bset_magic(&b->c->sb));
out:
	mempool_free(iter, b->c->fill_iter);
	return;
err:
	set_btree_node_io_error(b);
	bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys",
			    err, PTR_BUCKET_NR(b->c, &b->key, 0),
			    bset_block_offset(b, i), i->keys);
	goto out;
}

static void btree_node_read_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;
	closure_put(cl);
}

static void bch_btree_node_read(struct btree *b)
{
	uint64_t start_time = local_clock();
	struct closure cl;
	struct bio *bio;

	trace_bcache_btree_read(b);

	closure_init_stack(&cl);

	bio = bch_bbio_alloc(b->c);
	bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
	bio->bi_end_io = btree_node_read_endio;
	bio->bi_private = &cl;
	bio_set_op_attrs(bio, REQ_OP_READ, REQ_META|READ_SYNC);

	bch_bio_map(bio, b->keys.set[0].data);

	bch_submit_bbio(bio, b->c, &b->key, 0);
	closure_sync(&cl);

	if (bio->bi_error)
		set_btree_node_io_error(b);

	bch_bbio_free(bio, b->c);

	if (btree_node_io_error(b))
		goto err;

	bch_btree_node_read_done(b);
	bch_time_stats_update(&b->c->btree_read_time, start_time);

	return;
err:
	bch_cache_set_error(b->c, "io error reading bucket %zu",
			    PTR_BUCKET_NR(b->c, &b->key, 0));
}

static void btree_complete_write(struct btree *b, struct btree_write *w)
{
	if (w->prio_blocked &&
	    !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
		wake_up_allocators(b->c);

	if (w->journal) {
		atomic_dec_bug(w->journal);
		__closure_wake_up(&b->c->journal.wait);
	}

	w->prio_blocked = 0;
	w->journal = NULL;
}

static void btree_node_write_unlock(struct closure *cl)
{
	struct btree *b = container_of(cl, struct btree, io);

	up(&b->io_mutex);
}

static void __btree_node_write_done(struct closure *cl)
{
	struct btree *b = container_of(cl, struct btree, io);
	struct btree_write *w = btree_prev_write(b);

	bch_bbio_free(b->bio, b->c);
	b->bio = NULL;
	btree_complete_write(b, w);

	if (btree_node_dirty(b))
		schedule_delayed_work(&b->work, 30 * HZ);

	closure_return_with_destructor(cl, btree_node_write_unlock);
}

static void btree_node_write_done(struct closure *cl)
{
	struct btree *b = container_of(cl, struct btree, io);

	bio_free_pages(b->bio);
	__btree_node_write_done(cl);
}

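/*
 * Write completion path: __btree_node_write_done() releases what the
 * just-completed write was holding - the bbio, and the prio_blocked count
 * and journal pin via btree_complete_write() - re-arms the delayed write
 * work if the node was redirtied in the meantime, and drops io_mutex through
 * the closure destructor. btree_node_write_done() is the variant used when
 * the write went through bounce pages (see do_btree_node_write() below) and
 * frees those pages first.
 */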
369 static void btree_node_write_endio(struct bio *bio) 370 { 371 struct closure *cl = bio->bi_private; 372 struct btree *b = container_of(cl, struct btree, io); 373 374 if (bio->bi_error) 375 set_btree_node_io_error(b); 376 377 bch_bbio_count_io_errors(b->c, bio, bio->bi_error, "writing btree"); 378 closure_put(cl); 379 } 380 381 static void do_btree_node_write(struct btree *b) 382 { 383 struct closure *cl = &b->io; 384 struct bset *i = btree_bset_last(b); 385 BKEY_PADDED(key) k; 386 387 i->version = BCACHE_BSET_VERSION; 388 i->csum = btree_csum_set(b, i); 389 390 BUG_ON(b->bio); 391 b->bio = bch_bbio_alloc(b->c); 392 393 b->bio->bi_end_io = btree_node_write_endio; 394 b->bio->bi_private = cl; 395 b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c)); 396 bio_set_op_attrs(b->bio, REQ_OP_WRITE, REQ_META|WRITE_SYNC|REQ_FUA); 397 bch_bio_map(b->bio, i); 398 399 /* 400 * If we're appending to a leaf node, we don't technically need FUA - 401 * this write just needs to be persisted before the next journal write, 402 * which will be marked FLUSH|FUA. 403 * 404 * Similarly if we're writing a new btree root - the pointer is going to 405 * be in the next journal entry. 406 * 407 * But if we're writing a new btree node (that isn't a root) or 408 * appending to a non leaf btree node, we need either FUA or a flush 409 * when we write the parent with the new pointer. FUA is cheaper than a 410 * flush, and writes appending to leaf nodes aren't blocking anything so 411 * just make all btree node writes FUA to keep things sane. 412 */ 413 414 bkey_copy(&k.key, &b->key); 415 SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) + 416 bset_sector_offset(&b->keys, i)); 417 418 if (!bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) { 419 int j; 420 struct bio_vec *bv; 421 void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1)); 422 423 bio_for_each_segment_all(bv, b->bio, j) 424 memcpy(page_address(bv->bv_page), 425 base + j * PAGE_SIZE, PAGE_SIZE); 426 427 bch_submit_bbio(b->bio, b->c, &k.key, 0); 428 429 continue_at(cl, btree_node_write_done, NULL); 430 } else { 431 b->bio->bi_vcnt = 0; 432 bch_bio_map(b->bio, i); 433 434 bch_submit_bbio(b->bio, b->c, &k.key, 0); 435 436 closure_sync(cl); 437 continue_at_nobarrier(cl, __btree_node_write_done, NULL); 438 } 439 } 440 441 void __bch_btree_node_write(struct btree *b, struct closure *parent) 442 { 443 struct bset *i = btree_bset_last(b); 444 445 lockdep_assert_held(&b->write_lock); 446 447 trace_bcache_btree_write(b); 448 449 BUG_ON(current->bio_list); 450 BUG_ON(b->written >= btree_blocks(b)); 451 BUG_ON(b->written && !i->keys); 452 BUG_ON(btree_bset_first(b)->seq != i->seq); 453 bch_check_keys(&b->keys, "writing"); 454 455 cancel_delayed_work(&b->work); 456 457 /* If caller isn't waiting for write, parent refcount is cache set */ 458 down(&b->io_mutex); 459 closure_init(&b->io, parent ?: &b->c->cl); 460 461 clear_bit(BTREE_NODE_dirty, &b->flags); 462 change_bit(BTREE_NODE_write_idx, &b->flags); 463 464 do_btree_node_write(b); 465 466 atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size, 467 &PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written); 468 469 b->written += set_blocks(i, block_bytes(b->c)); 470 } 471 472 void bch_btree_node_write(struct btree *b, struct closure *parent) 473 { 474 unsigned nsets = b->keys.nsets; 475 476 lockdep_assert_held(&b->lock); 477 478 __bch_btree_node_write(b, parent); 479 480 /* 481 * do verify if there was more than one set initially (i.e. 
we did a 482 * sort) and we sorted down to a single set: 483 */ 484 if (nsets && !b->keys.nsets) 485 bch_btree_verify(b); 486 487 bch_btree_init_next(b); 488 } 489 490 static void bch_btree_node_write_sync(struct btree *b) 491 { 492 struct closure cl; 493 494 closure_init_stack(&cl); 495 496 mutex_lock(&b->write_lock); 497 bch_btree_node_write(b, &cl); 498 mutex_unlock(&b->write_lock); 499 500 closure_sync(&cl); 501 } 502 503 static void btree_node_write_work(struct work_struct *w) 504 { 505 struct btree *b = container_of(to_delayed_work(w), struct btree, work); 506 507 mutex_lock(&b->write_lock); 508 if (btree_node_dirty(b)) 509 __bch_btree_node_write(b, NULL); 510 mutex_unlock(&b->write_lock); 511 } 512 513 static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref) 514 { 515 struct bset *i = btree_bset_last(b); 516 struct btree_write *w = btree_current_write(b); 517 518 lockdep_assert_held(&b->write_lock); 519 520 BUG_ON(!b->written); 521 BUG_ON(!i->keys); 522 523 if (!btree_node_dirty(b)) 524 schedule_delayed_work(&b->work, 30 * HZ); 525 526 set_btree_node_dirty(b); 527 528 if (journal_ref) { 529 if (w->journal && 530 journal_pin_cmp(b->c, w->journal, journal_ref)) { 531 atomic_dec_bug(w->journal); 532 w->journal = NULL; 533 } 534 535 if (!w->journal) { 536 w->journal = journal_ref; 537 atomic_inc(w->journal); 538 } 539 } 540 541 /* Force write if set is too big */ 542 if (set_bytes(i) > PAGE_SIZE - 48 && 543 !current->bio_list) 544 bch_btree_node_write(b, NULL); 545 } 546 547 /* 548 * Btree in memory cache - allocation/freeing 549 * mca -> memory cache 550 */ 551 552 #define mca_reserve(c) (((c->root && c->root->level) \ 553 ? c->root->level : 1) * 8 + 16) 554 #define mca_can_free(c) \ 555 max_t(int, 0, c->btree_cache_used - mca_reserve(c)) 556 557 static void mca_data_free(struct btree *b) 558 { 559 BUG_ON(b->io_mutex.count != 1); 560 561 bch_btree_keys_free(&b->keys); 562 563 b->c->btree_cache_used--; 564 list_move(&b->list, &b->c->btree_cache_freed); 565 } 566 567 static void mca_bucket_free(struct btree *b) 568 { 569 BUG_ON(btree_node_dirty(b)); 570 571 b->key.ptr[0] = 0; 572 hlist_del_init_rcu(&b->hash); 573 list_move(&b->list, &b->c->btree_cache_freeable); 574 } 575 576 static unsigned btree_order(struct bkey *k) 577 { 578 return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1); 579 } 580 581 static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp) 582 { 583 if (!bch_btree_keys_alloc(&b->keys, 584 max_t(unsigned, 585 ilog2(b->c->btree_pages), 586 btree_order(k)), 587 gfp)) { 588 b->c->btree_cache_used++; 589 list_move(&b->list, &b->c->btree_cache); 590 } else { 591 list_move(&b->list, &b->c->btree_cache_freed); 592 } 593 } 594 595 static struct btree *mca_bucket_alloc(struct cache_set *c, 596 struct bkey *k, gfp_t gfp) 597 { 598 struct btree *b = kzalloc(sizeof(struct btree), gfp); 599 if (!b) 600 return NULL; 601 602 init_rwsem(&b->lock); 603 lockdep_set_novalidate_class(&b->lock); 604 mutex_init(&b->write_lock); 605 lockdep_set_novalidate_class(&b->write_lock); 606 INIT_LIST_HEAD(&b->list); 607 INIT_DELAYED_WORK(&b->work, btree_node_write_work); 608 b->c = c; 609 sema_init(&b->io_mutex, 1); 610 611 mca_data_alloc(b, k, gfp); 612 return b; 613 } 614 615 static int mca_reap(struct btree *b, unsigned min_order, bool flush) 616 { 617 struct closure cl; 618 619 closure_init_stack(&cl); 620 lockdep_assert_held(&b->c->bucket_lock); 621 622 if (!down_write_trylock(&b->lock)) 623 return -ENOMEM; 624 625 BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data); 626 627 if 
(b->keys.page_order < min_order) 628 goto out_unlock; 629 630 if (!flush) { 631 if (btree_node_dirty(b)) 632 goto out_unlock; 633 634 if (down_trylock(&b->io_mutex)) 635 goto out_unlock; 636 up(&b->io_mutex); 637 } 638 639 mutex_lock(&b->write_lock); 640 if (btree_node_dirty(b)) 641 __bch_btree_node_write(b, &cl); 642 mutex_unlock(&b->write_lock); 643 644 closure_sync(&cl); 645 646 /* wait for any in flight btree write */ 647 down(&b->io_mutex); 648 up(&b->io_mutex); 649 650 return 0; 651 out_unlock: 652 rw_unlock(true, b); 653 return -ENOMEM; 654 } 655 656 static unsigned long bch_mca_scan(struct shrinker *shrink, 657 struct shrink_control *sc) 658 { 659 struct cache_set *c = container_of(shrink, struct cache_set, shrink); 660 struct btree *b, *t; 661 unsigned long i, nr = sc->nr_to_scan; 662 unsigned long freed = 0; 663 664 if (c->shrinker_disabled) 665 return SHRINK_STOP; 666 667 if (c->btree_cache_alloc_lock) 668 return SHRINK_STOP; 669 670 /* Return -1 if we can't do anything right now */ 671 if (sc->gfp_mask & __GFP_IO) 672 mutex_lock(&c->bucket_lock); 673 else if (!mutex_trylock(&c->bucket_lock)) 674 return -1; 675 676 /* 677 * It's _really_ critical that we don't free too many btree nodes - we 678 * have to always leave ourselves a reserve. The reserve is how we 679 * guarantee that allocating memory for a new btree node can always 680 * succeed, so that inserting keys into the btree can always succeed and 681 * IO can always make forward progress: 682 */ 683 nr /= c->btree_pages; 684 nr = min_t(unsigned long, nr, mca_can_free(c)); 685 686 i = 0; 687 list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) { 688 if (freed >= nr) 689 break; 690 691 if (++i > 3 && 692 !mca_reap(b, 0, false)) { 693 mca_data_free(b); 694 rw_unlock(true, b); 695 freed++; 696 } 697 } 698 699 for (i = 0; (nr--) && i < c->btree_cache_used; i++) { 700 if (list_empty(&c->btree_cache)) 701 goto out; 702 703 b = list_first_entry(&c->btree_cache, struct btree, list); 704 list_rotate_left(&c->btree_cache); 705 706 if (!b->accessed && 707 !mca_reap(b, 0, false)) { 708 mca_bucket_free(b); 709 mca_data_free(b); 710 rw_unlock(true, b); 711 freed++; 712 } else 713 b->accessed = 0; 714 } 715 out: 716 mutex_unlock(&c->bucket_lock); 717 return freed; 718 } 719 720 static unsigned long bch_mca_count(struct shrinker *shrink, 721 struct shrink_control *sc) 722 { 723 struct cache_set *c = container_of(shrink, struct cache_set, shrink); 724 725 if (c->shrinker_disabled) 726 return 0; 727 728 if (c->btree_cache_alloc_lock) 729 return 0; 730 731 return mca_can_free(c) * c->btree_pages; 732 } 733 734 void bch_btree_cache_free(struct cache_set *c) 735 { 736 struct btree *b; 737 struct closure cl; 738 closure_init_stack(&cl); 739 740 if (c->shrink.list.next) 741 unregister_shrinker(&c->shrink); 742 743 mutex_lock(&c->bucket_lock); 744 745 #ifdef CONFIG_BCACHE_DEBUG 746 if (c->verify_data) 747 list_move(&c->verify_data->list, &c->btree_cache); 748 749 free_pages((unsigned long) c->verify_ondisk, ilog2(bucket_pages(c))); 750 #endif 751 752 list_splice(&c->btree_cache_freeable, 753 &c->btree_cache); 754 755 while (!list_empty(&c->btree_cache)) { 756 b = list_first_entry(&c->btree_cache, struct btree, list); 757 758 if (btree_node_dirty(b)) 759 btree_complete_write(b, btree_current_write(b)); 760 clear_bit(BTREE_NODE_dirty, &b->flags); 761 762 mca_data_free(b); 763 } 764 765 while (!list_empty(&c->btree_cache_freed)) { 766 b = list_first_entry(&c->btree_cache_freed, 767 struct btree, list); 768 list_del(&b->list); 769 
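		/* make sure a pending delayed write can't run once the node is freed */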
cancel_delayed_work_sync(&b->work); 770 kfree(b); 771 } 772 773 mutex_unlock(&c->bucket_lock); 774 } 775 776 int bch_btree_cache_alloc(struct cache_set *c) 777 { 778 unsigned i; 779 780 for (i = 0; i < mca_reserve(c); i++) 781 if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL)) 782 return -ENOMEM; 783 784 list_splice_init(&c->btree_cache, 785 &c->btree_cache_freeable); 786 787 #ifdef CONFIG_BCACHE_DEBUG 788 mutex_init(&c->verify_lock); 789 790 c->verify_ondisk = (void *) 791 __get_free_pages(GFP_KERNEL, ilog2(bucket_pages(c))); 792 793 c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL); 794 795 if (c->verify_data && 796 c->verify_data->keys.set->data) 797 list_del_init(&c->verify_data->list); 798 else 799 c->verify_data = NULL; 800 #endif 801 802 c->shrink.count_objects = bch_mca_count; 803 c->shrink.scan_objects = bch_mca_scan; 804 c->shrink.seeks = 4; 805 c->shrink.batch = c->btree_pages * 2; 806 register_shrinker(&c->shrink); 807 808 return 0; 809 } 810 811 /* Btree in memory cache - hash table */ 812 813 static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k) 814 { 815 return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)]; 816 } 817 818 static struct btree *mca_find(struct cache_set *c, struct bkey *k) 819 { 820 struct btree *b; 821 822 rcu_read_lock(); 823 hlist_for_each_entry_rcu(b, mca_hash(c, k), hash) 824 if (PTR_HASH(c, &b->key) == PTR_HASH(c, k)) 825 goto out; 826 b = NULL; 827 out: 828 rcu_read_unlock(); 829 return b; 830 } 831 832 static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op) 833 { 834 struct task_struct *old; 835 836 old = cmpxchg(&c->btree_cache_alloc_lock, NULL, current); 837 if (old && old != current) { 838 if (op) 839 prepare_to_wait(&c->btree_cache_wait, &op->wait, 840 TASK_UNINTERRUPTIBLE); 841 return -EINTR; 842 } 843 844 return 0; 845 } 846 847 static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op, 848 struct bkey *k) 849 { 850 struct btree *b; 851 852 trace_bcache_btree_cache_cannibalize(c); 853 854 if (mca_cannibalize_lock(c, op)) 855 return ERR_PTR(-EINTR); 856 857 list_for_each_entry_reverse(b, &c->btree_cache, list) 858 if (!mca_reap(b, btree_order(k), false)) 859 return b; 860 861 list_for_each_entry_reverse(b, &c->btree_cache, list) 862 if (!mca_reap(b, btree_order(k), true)) 863 return b; 864 865 WARN(1, "btree cache cannibalize failed\n"); 866 return ERR_PTR(-ENOMEM); 867 } 868 869 /* 870 * We can only have one thread cannibalizing other cached btree nodes at a time, 871 * or we'll deadlock. We use an open coded mutex to ensure that, which a 872 * cannibalize_bucket() will take. This means every time we unlock the root of 873 * the btree, we need to release this lock if we have it held. 874 */ 875 static void bch_cannibalize_unlock(struct cache_set *c) 876 { 877 if (c->btree_cache_alloc_lock == current) { 878 c->btree_cache_alloc_lock = NULL; 879 wake_up(&c->btree_cache_wait); 880 } 881 } 882 883 static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op, 884 struct bkey *k, int level) 885 { 886 struct btree *b; 887 888 BUG_ON(current->bio_list); 889 890 lockdep_assert_held(&c->bucket_lock); 891 892 if (mca_find(c, k)) 893 return NULL; 894 895 /* btree_free() doesn't free memory; it sticks the node on the end of 896 * the list. 
Check if there's any freed nodes there: 897 */ 898 list_for_each_entry(b, &c->btree_cache_freeable, list) 899 if (!mca_reap(b, btree_order(k), false)) 900 goto out; 901 902 /* We never free struct btree itself, just the memory that holds the on 903 * disk node. Check the freed list before allocating a new one: 904 */ 905 list_for_each_entry(b, &c->btree_cache_freed, list) 906 if (!mca_reap(b, 0, false)) { 907 mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO); 908 if (!b->keys.set[0].data) 909 goto err; 910 else 911 goto out; 912 } 913 914 b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO); 915 if (!b) 916 goto err; 917 918 BUG_ON(!down_write_trylock(&b->lock)); 919 if (!b->keys.set->data) 920 goto err; 921 out: 922 BUG_ON(b->io_mutex.count != 1); 923 924 bkey_copy(&b->key, k); 925 list_move(&b->list, &c->btree_cache); 926 hlist_del_init_rcu(&b->hash); 927 hlist_add_head_rcu(&b->hash, mca_hash(c, k)); 928 929 lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_); 930 b->parent = (void *) ~0UL; 931 b->flags = 0; 932 b->written = 0; 933 b->level = level; 934 935 if (!b->level) 936 bch_btree_keys_init(&b->keys, &bch_extent_keys_ops, 937 &b->c->expensive_debug_checks); 938 else 939 bch_btree_keys_init(&b->keys, &bch_btree_keys_ops, 940 &b->c->expensive_debug_checks); 941 942 return b; 943 err: 944 if (b) 945 rw_unlock(true, b); 946 947 b = mca_cannibalize(c, op, k); 948 if (!IS_ERR(b)) 949 goto out; 950 951 return b; 952 } 953 954 /** 955 * bch_btree_node_get - find a btree node in the cache and lock it, reading it 956 * in from disk if necessary. 957 * 958 * If IO is necessary and running under generic_make_request, returns -EAGAIN. 959 * 960 * The btree node will have either a read or a write lock held, depending on 961 * level and op->lock. 962 */ 963 struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op, 964 struct bkey *k, int level, bool write, 965 struct btree *parent) 966 { 967 int i = 0; 968 struct btree *b; 969 970 BUG_ON(level < 0); 971 retry: 972 b = mca_find(c, k); 973 974 if (!b) { 975 if (current->bio_list) 976 return ERR_PTR(-EAGAIN); 977 978 mutex_lock(&c->bucket_lock); 979 b = mca_alloc(c, op, k, level); 980 mutex_unlock(&c->bucket_lock); 981 982 if (!b) 983 goto retry; 984 if (IS_ERR(b)) 985 return b; 986 987 bch_btree_node_read(b); 988 989 if (!write) 990 downgrade_write(&b->lock); 991 } else { 992 rw_lock(write, b, level); 993 if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) { 994 rw_unlock(write, b); 995 goto retry; 996 } 997 BUG_ON(b->level != level); 998 } 999 1000 b->parent = parent; 1001 b->accessed = 1; 1002 1003 for (; i <= b->keys.nsets && b->keys.set[i].size; i++) { 1004 prefetch(b->keys.set[i].tree); 1005 prefetch(b->keys.set[i].data); 1006 } 1007 1008 for (; i <= b->keys.nsets; i++) 1009 prefetch(b->keys.set[i].data); 1010 1011 if (btree_node_io_error(b)) { 1012 rw_unlock(write, b); 1013 return ERR_PTR(-EIO); 1014 } 1015 1016 BUG_ON(!b->written); 1017 1018 return b; 1019 } 1020 1021 static void btree_node_prefetch(struct btree *parent, struct bkey *k) 1022 { 1023 struct btree *b; 1024 1025 mutex_lock(&parent->c->bucket_lock); 1026 b = mca_alloc(parent->c, NULL, k, parent->level - 1); 1027 mutex_unlock(&parent->c->bucket_lock); 1028 1029 if (!IS_ERR_OR_NULL(b)) { 1030 b->parent = parent; 1031 bch_btree_node_read(b); 1032 rw_unlock(true, b); 1033 } 1034 } 1035 1036 /* Btree alloc */ 1037 1038 static void btree_node_free(struct btree *b) 1039 { 1040 trace_bcache_btree_node_free(b); 1041 1042 BUG_ON(b == b->c->root); 1043 1044 mutex_lock(&b->write_lock); 
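	/*
	 * The node is going away, so its contents never need to reach disk:
	 * just release the journal pin and prio_blocked count held for any
	 * pending write, and clear the dirty bit without writing.
	 */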
1045 1046 if (btree_node_dirty(b)) 1047 btree_complete_write(b, btree_current_write(b)); 1048 clear_bit(BTREE_NODE_dirty, &b->flags); 1049 1050 mutex_unlock(&b->write_lock); 1051 1052 cancel_delayed_work(&b->work); 1053 1054 mutex_lock(&b->c->bucket_lock); 1055 bch_bucket_free(b->c, &b->key); 1056 mca_bucket_free(b); 1057 mutex_unlock(&b->c->bucket_lock); 1058 } 1059 1060 struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op, 1061 int level, bool wait, 1062 struct btree *parent) 1063 { 1064 BKEY_PADDED(key) k; 1065 struct btree *b = ERR_PTR(-EAGAIN); 1066 1067 mutex_lock(&c->bucket_lock); 1068 retry: 1069 if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait)) 1070 goto err; 1071 1072 bkey_put(c, &k.key); 1073 SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS); 1074 1075 b = mca_alloc(c, op, &k.key, level); 1076 if (IS_ERR(b)) 1077 goto err_free; 1078 1079 if (!b) { 1080 cache_bug(c, 1081 "Tried to allocate bucket that was in btree cache"); 1082 goto retry; 1083 } 1084 1085 b->accessed = 1; 1086 b->parent = parent; 1087 bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb)); 1088 1089 mutex_unlock(&c->bucket_lock); 1090 1091 trace_bcache_btree_node_alloc(b); 1092 return b; 1093 err_free: 1094 bch_bucket_free(c, &k.key); 1095 err: 1096 mutex_unlock(&c->bucket_lock); 1097 1098 trace_bcache_btree_node_alloc_fail(c); 1099 return b; 1100 } 1101 1102 static struct btree *bch_btree_node_alloc(struct cache_set *c, 1103 struct btree_op *op, int level, 1104 struct btree *parent) 1105 { 1106 return __bch_btree_node_alloc(c, op, level, op != NULL, parent); 1107 } 1108 1109 static struct btree *btree_node_alloc_replacement(struct btree *b, 1110 struct btree_op *op) 1111 { 1112 struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent); 1113 if (!IS_ERR_OR_NULL(n)) { 1114 mutex_lock(&n->write_lock); 1115 bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort); 1116 bkey_copy_key(&n->key, &b->key); 1117 mutex_unlock(&n->write_lock); 1118 } 1119 1120 return n; 1121 } 1122 1123 static void make_btree_freeing_key(struct btree *b, struct bkey *k) 1124 { 1125 unsigned i; 1126 1127 mutex_lock(&b->c->bucket_lock); 1128 1129 atomic_inc(&b->c->prio_blocked); 1130 1131 bkey_copy(k, &b->key); 1132 bkey_copy_key(k, &ZERO_KEY); 1133 1134 for (i = 0; i < KEY_PTRS(k); i++) 1135 SET_PTR_GEN(k, i, 1136 bch_inc_gen(PTR_CACHE(b->c, &b->key, i), 1137 PTR_BUCKET(b->c, &b->key, i))); 1138 1139 mutex_unlock(&b->c->bucket_lock); 1140 } 1141 1142 static int btree_check_reserve(struct btree *b, struct btree_op *op) 1143 { 1144 struct cache_set *c = b->c; 1145 struct cache *ca; 1146 unsigned i, reserve = (c->root->level - b->level) * 2 + 1; 1147 1148 mutex_lock(&c->bucket_lock); 1149 1150 for_each_cache(ca, c, i) 1151 if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) { 1152 if (op) 1153 prepare_to_wait(&c->btree_cache_wait, &op->wait, 1154 TASK_UNINTERRUPTIBLE); 1155 mutex_unlock(&c->bucket_lock); 1156 return -EINTR; 1157 } 1158 1159 mutex_unlock(&c->bucket_lock); 1160 1161 return mca_cannibalize_lock(b->c, op); 1162 } 1163 1164 /* Garbage collection */ 1165 1166 static uint8_t __bch_btree_mark_key(struct cache_set *c, int level, 1167 struct bkey *k) 1168 { 1169 uint8_t stale = 0; 1170 unsigned i; 1171 struct bucket *g; 1172 1173 /* 1174 * ptr_invalid() can't return true for the keys that mark btree nodes as 1175 * freed, but since ptr_bad() returns true we'll never actually use them 1176 * for anything and thus we don't want mark their pointers here 1177 */ 1178 if 
(!bkey_cmp(k, &ZERO_KEY)) 1179 return stale; 1180 1181 for (i = 0; i < KEY_PTRS(k); i++) { 1182 if (!ptr_available(c, k, i)) 1183 continue; 1184 1185 g = PTR_BUCKET(c, k, i); 1186 1187 if (gen_after(g->last_gc, PTR_GEN(k, i))) 1188 g->last_gc = PTR_GEN(k, i); 1189 1190 if (ptr_stale(c, k, i)) { 1191 stale = max(stale, ptr_stale(c, k, i)); 1192 continue; 1193 } 1194 1195 cache_bug_on(GC_MARK(g) && 1196 (GC_MARK(g) == GC_MARK_METADATA) != (level != 0), 1197 c, "inconsistent ptrs: mark = %llu, level = %i", 1198 GC_MARK(g), level); 1199 1200 if (level) 1201 SET_GC_MARK(g, GC_MARK_METADATA); 1202 else if (KEY_DIRTY(k)) 1203 SET_GC_MARK(g, GC_MARK_DIRTY); 1204 else if (!GC_MARK(g)) 1205 SET_GC_MARK(g, GC_MARK_RECLAIMABLE); 1206 1207 /* guard against overflow */ 1208 SET_GC_SECTORS_USED(g, min_t(unsigned, 1209 GC_SECTORS_USED(g) + KEY_SIZE(k), 1210 MAX_GC_SECTORS_USED)); 1211 1212 BUG_ON(!GC_SECTORS_USED(g)); 1213 } 1214 1215 return stale; 1216 } 1217 1218 #define btree_mark_key(b, k) __bch_btree_mark_key(b->c, b->level, k) 1219 1220 void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k) 1221 { 1222 unsigned i; 1223 1224 for (i = 0; i < KEY_PTRS(k); i++) 1225 if (ptr_available(c, k, i) && 1226 !ptr_stale(c, k, i)) { 1227 struct bucket *b = PTR_BUCKET(c, k, i); 1228 1229 b->gen = PTR_GEN(k, i); 1230 1231 if (level && bkey_cmp(k, &ZERO_KEY)) 1232 b->prio = BTREE_PRIO; 1233 else if (!level && b->prio == BTREE_PRIO) 1234 b->prio = INITIAL_PRIO; 1235 } 1236 1237 __bch_btree_mark_key(c, level, k); 1238 } 1239 1240 static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc) 1241 { 1242 uint8_t stale = 0; 1243 unsigned keys = 0, good_keys = 0; 1244 struct bkey *k; 1245 struct btree_iter iter; 1246 struct bset_tree *t; 1247 1248 gc->nodes++; 1249 1250 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) { 1251 stale = max(stale, btree_mark_key(b, k)); 1252 keys++; 1253 1254 if (bch_ptr_bad(&b->keys, k)) 1255 continue; 1256 1257 gc->key_bytes += bkey_u64s(k); 1258 gc->nkeys++; 1259 good_keys++; 1260 1261 gc->data += KEY_SIZE(k); 1262 } 1263 1264 for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++) 1265 btree_bug_on(t->size && 1266 bset_written(&b->keys, t) && 1267 bkey_cmp(&b->key, &t->end) < 0, 1268 b, "found short btree key in gc"); 1269 1270 if (b->c->gc_always_rewrite) 1271 return true; 1272 1273 if (stale > 10) 1274 return true; 1275 1276 if ((keys - good_keys) * 2 > keys) 1277 return true; 1278 1279 return false; 1280 } 1281 1282 #define GC_MERGE_NODES 4U 1283 1284 struct gc_merge_info { 1285 struct btree *b; 1286 unsigned keys; 1287 }; 1288 1289 static int bch_btree_insert_node(struct btree *, struct btree_op *, 1290 struct keylist *, atomic_t *, struct bkey *); 1291 1292 static int btree_gc_coalesce(struct btree *b, struct btree_op *op, 1293 struct gc_stat *gc, struct gc_merge_info *r) 1294 { 1295 unsigned i, nodes = 0, keys = 0, blocks; 1296 struct btree *new_nodes[GC_MERGE_NODES]; 1297 struct keylist keylist; 1298 struct closure cl; 1299 struct bkey *k; 1300 1301 bch_keylist_init(&keylist); 1302 1303 if (btree_check_reserve(b, NULL)) 1304 return 0; 1305 1306 memset(new_nodes, 0, sizeof(new_nodes)); 1307 closure_init_stack(&cl); 1308 1309 while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b)) 1310 keys += r[nodes++].keys; 1311 1312 blocks = btree_default_blocks(b->c) * 2 / 3; 1313 1314 if (nodes < 2 || 1315 __set_blocks(b->keys.set[0].data, keys, 1316 block_bytes(b->c)) > blocks * (nodes - 1)) 1317 return 0; 1318 1319 for (i = 0; i < nodes; i++) { 1320 
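		/* allocate a replacement for each node being coalesced */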
new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL); 1321 if (IS_ERR_OR_NULL(new_nodes[i])) 1322 goto out_nocoalesce; 1323 } 1324 1325 /* 1326 * We have to check the reserve here, after we've allocated our new 1327 * nodes, to make sure the insert below will succeed - we also check 1328 * before as an optimization to potentially avoid a bunch of expensive 1329 * allocs/sorts 1330 */ 1331 if (btree_check_reserve(b, NULL)) 1332 goto out_nocoalesce; 1333 1334 for (i = 0; i < nodes; i++) 1335 mutex_lock(&new_nodes[i]->write_lock); 1336 1337 for (i = nodes - 1; i > 0; --i) { 1338 struct bset *n1 = btree_bset_first(new_nodes[i]); 1339 struct bset *n2 = btree_bset_first(new_nodes[i - 1]); 1340 struct bkey *k, *last = NULL; 1341 1342 keys = 0; 1343 1344 if (i > 1) { 1345 for (k = n2->start; 1346 k < bset_bkey_last(n2); 1347 k = bkey_next(k)) { 1348 if (__set_blocks(n1, n1->keys + keys + 1349 bkey_u64s(k), 1350 block_bytes(b->c)) > blocks) 1351 break; 1352 1353 last = k; 1354 keys += bkey_u64s(k); 1355 } 1356 } else { 1357 /* 1358 * Last node we're not getting rid of - we're getting 1359 * rid of the node at r[0]. Have to try and fit all of 1360 * the remaining keys into this node; we can't ensure 1361 * they will always fit due to rounding and variable 1362 * length keys (shouldn't be possible in practice, 1363 * though) 1364 */ 1365 if (__set_blocks(n1, n1->keys + n2->keys, 1366 block_bytes(b->c)) > 1367 btree_blocks(new_nodes[i])) 1368 goto out_nocoalesce; 1369 1370 keys = n2->keys; 1371 /* Take the key of the node we're getting rid of */ 1372 last = &r->b->key; 1373 } 1374 1375 BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c)) > 1376 btree_blocks(new_nodes[i])); 1377 1378 if (last) 1379 bkey_copy_key(&new_nodes[i]->key, last); 1380 1381 memcpy(bset_bkey_last(n1), 1382 n2->start, 1383 (void *) bset_bkey_idx(n2, keys) - (void *) n2->start); 1384 1385 n1->keys += keys; 1386 r[i].keys = n1->keys; 1387 1388 memmove(n2->start, 1389 bset_bkey_idx(n2, keys), 1390 (void *) bset_bkey_last(n2) - 1391 (void *) bset_bkey_idx(n2, keys)); 1392 1393 n2->keys -= keys; 1394 1395 if (__bch_keylist_realloc(&keylist, 1396 bkey_u64s(&new_nodes[i]->key))) 1397 goto out_nocoalesce; 1398 1399 bch_btree_node_write(new_nodes[i], &cl); 1400 bch_keylist_add(&keylist, &new_nodes[i]->key); 1401 } 1402 1403 for (i = 0; i < nodes; i++) 1404 mutex_unlock(&new_nodes[i]->write_lock); 1405 1406 closure_sync(&cl); 1407 1408 /* We emptied out this node */ 1409 BUG_ON(btree_bset_first(new_nodes[0])->keys); 1410 btree_node_free(new_nodes[0]); 1411 rw_unlock(true, new_nodes[0]); 1412 new_nodes[0] = NULL; 1413 1414 for (i = 0; i < nodes; i++) { 1415 if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key))) 1416 goto out_nocoalesce; 1417 1418 make_btree_freeing_key(r[i].b, keylist.top); 1419 bch_keylist_push(&keylist); 1420 } 1421 1422 bch_btree_insert_node(b, op, &keylist, NULL, NULL); 1423 BUG_ON(!bch_keylist_empty(&keylist)); 1424 1425 for (i = 0; i < nodes; i++) { 1426 btree_node_free(r[i].b); 1427 rw_unlock(true, r[i].b); 1428 1429 r[i].b = new_nodes[i]; 1430 } 1431 1432 memmove(r, r + 1, sizeof(r[0]) * (nodes - 1)); 1433 r[nodes - 1].b = ERR_PTR(-EINTR); 1434 1435 trace_bcache_btree_gc_coalesce(nodes); 1436 gc->nodes--; 1437 1438 bch_keylist_free(&keylist); 1439 1440 /* Invalidated our iterator */ 1441 return -EINTR; 1442 1443 out_nocoalesce: 1444 closure_sync(&cl); 1445 bch_keylist_free(&keylist); 1446 1447 while ((k = bch_keylist_pop(&keylist))) 1448 if (!bkey_cmp(k, &ZERO_KEY)) 1449 
atomic_dec(&b->c->prio_blocked); 1450 1451 for (i = 0; i < nodes; i++) 1452 if (!IS_ERR_OR_NULL(new_nodes[i])) { 1453 btree_node_free(new_nodes[i]); 1454 rw_unlock(true, new_nodes[i]); 1455 } 1456 return 0; 1457 } 1458 1459 static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op, 1460 struct btree *replace) 1461 { 1462 struct keylist keys; 1463 struct btree *n; 1464 1465 if (btree_check_reserve(b, NULL)) 1466 return 0; 1467 1468 n = btree_node_alloc_replacement(replace, NULL); 1469 1470 /* recheck reserve after allocating replacement node */ 1471 if (btree_check_reserve(b, NULL)) { 1472 btree_node_free(n); 1473 rw_unlock(true, n); 1474 return 0; 1475 } 1476 1477 bch_btree_node_write_sync(n); 1478 1479 bch_keylist_init(&keys); 1480 bch_keylist_add(&keys, &n->key); 1481 1482 make_btree_freeing_key(replace, keys.top); 1483 bch_keylist_push(&keys); 1484 1485 bch_btree_insert_node(b, op, &keys, NULL, NULL); 1486 BUG_ON(!bch_keylist_empty(&keys)); 1487 1488 btree_node_free(replace); 1489 rw_unlock(true, n); 1490 1491 /* Invalidated our iterator */ 1492 return -EINTR; 1493 } 1494 1495 static unsigned btree_gc_count_keys(struct btree *b) 1496 { 1497 struct bkey *k; 1498 struct btree_iter iter; 1499 unsigned ret = 0; 1500 1501 for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad) 1502 ret += bkey_u64s(k); 1503 1504 return ret; 1505 } 1506 1507 static int btree_gc_recurse(struct btree *b, struct btree_op *op, 1508 struct closure *writes, struct gc_stat *gc) 1509 { 1510 int ret = 0; 1511 bool should_rewrite; 1512 struct bkey *k; 1513 struct btree_iter iter; 1514 struct gc_merge_info r[GC_MERGE_NODES]; 1515 struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1; 1516 1517 bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done); 1518 1519 for (i = r; i < r + ARRAY_SIZE(r); i++) 1520 i->b = ERR_PTR(-EINTR); 1521 1522 while (1) { 1523 k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad); 1524 if (k) { 1525 r->b = bch_btree_node_get(b->c, op, k, b->level - 1, 1526 true, b); 1527 if (IS_ERR(r->b)) { 1528 ret = PTR_ERR(r->b); 1529 break; 1530 } 1531 1532 r->keys = btree_gc_count_keys(r->b); 1533 1534 ret = btree_gc_coalesce(b, op, gc, r); 1535 if (ret) 1536 break; 1537 } 1538 1539 if (!last->b) 1540 break; 1541 1542 if (!IS_ERR(last->b)) { 1543 should_rewrite = btree_gc_mark_node(last->b, gc); 1544 if (should_rewrite) { 1545 ret = btree_gc_rewrite_node(b, op, last->b); 1546 if (ret) 1547 break; 1548 } 1549 1550 if (last->b->level) { 1551 ret = btree_gc_recurse(last->b, op, writes, gc); 1552 if (ret) 1553 break; 1554 } 1555 1556 bkey_copy_key(&b->c->gc_done, &last->b->key); 1557 1558 /* 1559 * Must flush leaf nodes before gc ends, since replace 1560 * operations aren't journalled 1561 */ 1562 mutex_lock(&last->b->write_lock); 1563 if (btree_node_dirty(last->b)) 1564 bch_btree_node_write(last->b, writes); 1565 mutex_unlock(&last->b->write_lock); 1566 rw_unlock(true, last->b); 1567 } 1568 1569 memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1)); 1570 r->b = NULL; 1571 1572 if (need_resched()) { 1573 ret = -EAGAIN; 1574 break; 1575 } 1576 } 1577 1578 for (i = r; i < r + ARRAY_SIZE(r); i++) 1579 if (!IS_ERR_OR_NULL(i->b)) { 1580 mutex_lock(&i->b->write_lock); 1581 if (btree_node_dirty(i->b)) 1582 bch_btree_node_write(i->b, writes); 1583 mutex_unlock(&i->b->write_lock); 1584 rw_unlock(true, i->b); 1585 } 1586 1587 return ret; 1588 } 1589 1590 static int bch_btree_gc_root(struct btree *b, struct btree_op *op, 1591 struct closure *writes, struct gc_stat *gc) 1592 { 1593 struct btree *n = NULL; 
1594 int ret = 0; 1595 bool should_rewrite; 1596 1597 should_rewrite = btree_gc_mark_node(b, gc); 1598 if (should_rewrite) { 1599 n = btree_node_alloc_replacement(b, NULL); 1600 1601 if (!IS_ERR_OR_NULL(n)) { 1602 bch_btree_node_write_sync(n); 1603 1604 bch_btree_set_root(n); 1605 btree_node_free(b); 1606 rw_unlock(true, n); 1607 1608 return -EINTR; 1609 } 1610 } 1611 1612 __bch_btree_mark_key(b->c, b->level + 1, &b->key); 1613 1614 if (b->level) { 1615 ret = btree_gc_recurse(b, op, writes, gc); 1616 if (ret) 1617 return ret; 1618 } 1619 1620 bkey_copy_key(&b->c->gc_done, &b->key); 1621 1622 return ret; 1623 } 1624 1625 static void btree_gc_start(struct cache_set *c) 1626 { 1627 struct cache *ca; 1628 struct bucket *b; 1629 unsigned i; 1630 1631 if (!c->gc_mark_valid) 1632 return; 1633 1634 mutex_lock(&c->bucket_lock); 1635 1636 c->gc_mark_valid = 0; 1637 c->gc_done = ZERO_KEY; 1638 1639 for_each_cache(ca, c, i) 1640 for_each_bucket(b, ca) { 1641 b->last_gc = b->gen; 1642 if (!atomic_read(&b->pin)) { 1643 SET_GC_MARK(b, 0); 1644 SET_GC_SECTORS_USED(b, 0); 1645 } 1646 } 1647 1648 mutex_unlock(&c->bucket_lock); 1649 } 1650 1651 static size_t bch_btree_gc_finish(struct cache_set *c) 1652 { 1653 size_t available = 0; 1654 struct bucket *b; 1655 struct cache *ca; 1656 unsigned i; 1657 1658 mutex_lock(&c->bucket_lock); 1659 1660 set_gc_sectors(c); 1661 c->gc_mark_valid = 1; 1662 c->need_gc = 0; 1663 1664 for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++) 1665 SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i), 1666 GC_MARK_METADATA); 1667 1668 /* don't reclaim buckets to which writeback keys point */ 1669 rcu_read_lock(); 1670 for (i = 0; i < c->nr_uuids; i++) { 1671 struct bcache_device *d = c->devices[i]; 1672 struct cached_dev *dc; 1673 struct keybuf_key *w, *n; 1674 unsigned j; 1675 1676 if (!d || UUID_FLASH_ONLY(&c->uuids[i])) 1677 continue; 1678 dc = container_of(d, struct cached_dev, disk); 1679 1680 spin_lock(&dc->writeback_keys.lock); 1681 rbtree_postorder_for_each_entry_safe(w, n, 1682 &dc->writeback_keys.keys, node) 1683 for (j = 0; j < KEY_PTRS(&w->key); j++) 1684 SET_GC_MARK(PTR_BUCKET(c, &w->key, j), 1685 GC_MARK_DIRTY); 1686 spin_unlock(&dc->writeback_keys.lock); 1687 } 1688 rcu_read_unlock(); 1689 1690 for_each_cache(ca, c, i) { 1691 uint64_t *i; 1692 1693 ca->invalidate_needs_gc = 0; 1694 1695 for (i = ca->sb.d; i < ca->sb.d + ca->sb.keys; i++) 1696 SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA); 1697 1698 for (i = ca->prio_buckets; 1699 i < ca->prio_buckets + prio_buckets(ca) * 2; i++) 1700 SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA); 1701 1702 for_each_bucket(b, ca) { 1703 c->need_gc = max(c->need_gc, bucket_gc_gen(b)); 1704 1705 if (atomic_read(&b->pin)) 1706 continue; 1707 1708 BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b)); 1709 1710 if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE) 1711 available++; 1712 } 1713 } 1714 1715 mutex_unlock(&c->bucket_lock); 1716 return available; 1717 } 1718 1719 static void bch_btree_gc(struct cache_set *c) 1720 { 1721 int ret; 1722 unsigned long available; 1723 struct gc_stat stats; 1724 struct closure writes; 1725 struct btree_op op; 1726 uint64_t start_time = local_clock(); 1727 1728 trace_bcache_gc_start(c); 1729 1730 memset(&stats, 0, sizeof(struct gc_stat)); 1731 closure_init_stack(&writes); 1732 bch_btree_op_init(&op, SHRT_MAX); 1733 1734 btree_gc_start(c); 1735 1736 do { 1737 ret = btree_root(gc_root, c, &op, &writes, &stats); 1738 closure_sync(&writes); 1739 cond_resched(); 1740 1741 if (ret && ret != -EAGAIN) 1742 pr_warn("gc failed!"); 
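		/*
		 * -EAGAIN means the walk yielded to the scheduler; c->gc_done
		 * records how far it got, so the retry resumes from there.
		 */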
1743 } while (ret); 1744 1745 available = bch_btree_gc_finish(c); 1746 wake_up_allocators(c); 1747 1748 bch_time_stats_update(&c->btree_gc_time, start_time); 1749 1750 stats.key_bytes *= sizeof(uint64_t); 1751 stats.data <<= 9; 1752 stats.in_use = (c->nbuckets - available) * 100 / c->nbuckets; 1753 memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat)); 1754 1755 trace_bcache_gc_end(c); 1756 1757 bch_moving_gc(c); 1758 } 1759 1760 static int bch_gc_thread(void *arg) 1761 { 1762 struct cache_set *c = arg; 1763 struct cache *ca; 1764 unsigned i; 1765 1766 while (1) { 1767 again: 1768 bch_btree_gc(c); 1769 1770 set_current_state(TASK_INTERRUPTIBLE); 1771 if (kthread_should_stop()) 1772 break; 1773 1774 mutex_lock(&c->bucket_lock); 1775 1776 for_each_cache(ca, c, i) 1777 if (ca->invalidate_needs_gc) { 1778 mutex_unlock(&c->bucket_lock); 1779 set_current_state(TASK_RUNNING); 1780 goto again; 1781 } 1782 1783 mutex_unlock(&c->bucket_lock); 1784 1785 schedule(); 1786 } 1787 1788 return 0; 1789 } 1790 1791 int bch_gc_thread_start(struct cache_set *c) 1792 { 1793 c->gc_thread = kthread_create(bch_gc_thread, c, "bcache_gc"); 1794 if (IS_ERR(c->gc_thread)) 1795 return PTR_ERR(c->gc_thread); 1796 1797 set_task_state(c->gc_thread, TASK_INTERRUPTIBLE); 1798 return 0; 1799 } 1800 1801 /* Initial partial gc */ 1802 1803 static int bch_btree_check_recurse(struct btree *b, struct btree_op *op) 1804 { 1805 int ret = 0; 1806 struct bkey *k, *p = NULL; 1807 struct btree_iter iter; 1808 1809 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) 1810 bch_initial_mark_key(b->c, b->level, k); 1811 1812 bch_initial_mark_key(b->c, b->level + 1, &b->key); 1813 1814 if (b->level) { 1815 bch_btree_iter_init(&b->keys, &iter, NULL); 1816 1817 do { 1818 k = bch_btree_iter_next_filter(&iter, &b->keys, 1819 bch_ptr_bad); 1820 if (k) 1821 btree_node_prefetch(b, k); 1822 1823 if (p) 1824 ret = btree(check_recurse, p, b, op); 1825 1826 p = k; 1827 } while (p && !ret); 1828 } 1829 1830 return ret; 1831 } 1832 1833 int bch_btree_check(struct cache_set *c) 1834 { 1835 struct btree_op op; 1836 1837 bch_btree_op_init(&op, SHRT_MAX); 1838 1839 return btree_root(check_recurse, c, &op); 1840 } 1841 1842 void bch_initial_gc_finish(struct cache_set *c) 1843 { 1844 struct cache *ca; 1845 struct bucket *b; 1846 unsigned i; 1847 1848 bch_btree_gc_finish(c); 1849 1850 mutex_lock(&c->bucket_lock); 1851 1852 /* 1853 * We need to put some unused buckets directly on the prio freelist in 1854 * order to get the allocator thread started - it needs freed buckets in 1855 * order to rewrite the prios and gens, and it needs to rewrite prios 1856 * and gens in order to free buckets. 1857 * 1858 * This is only safe for buckets that have no live data in them, which 1859 * there should always be some of. 
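	 * After the initial mark pass those are the buckets whose GC_MARK is
	 * still unset, which is what the check below relies on.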
1860 */ 1861 for_each_cache(ca, c, i) { 1862 for_each_bucket(b, ca) { 1863 if (fifo_full(&ca->free[RESERVE_PRIO])) 1864 break; 1865 1866 if (bch_can_invalidate_bucket(ca, b) && 1867 !GC_MARK(b)) { 1868 __bch_invalidate_one_bucket(ca, b); 1869 fifo_push(&ca->free[RESERVE_PRIO], 1870 b - ca->buckets); 1871 } 1872 } 1873 } 1874 1875 mutex_unlock(&c->bucket_lock); 1876 } 1877 1878 /* Btree insertion */ 1879 1880 static bool btree_insert_key(struct btree *b, struct bkey *k, 1881 struct bkey *replace_key) 1882 { 1883 unsigned status; 1884 1885 BUG_ON(bkey_cmp(k, &b->key) > 0); 1886 1887 status = bch_btree_insert_key(&b->keys, k, replace_key); 1888 if (status != BTREE_INSERT_STATUS_NO_INSERT) { 1889 bch_check_keys(&b->keys, "%u for %s", status, 1890 replace_key ? "replace" : "insert"); 1891 1892 trace_bcache_btree_insert_key(b, k, replace_key != NULL, 1893 status); 1894 return true; 1895 } else 1896 return false; 1897 } 1898 1899 static size_t insert_u64s_remaining(struct btree *b) 1900 { 1901 long ret = bch_btree_keys_u64s_remaining(&b->keys); 1902 1903 /* 1904 * Might land in the middle of an existing extent and have to split it 1905 */ 1906 if (b->keys.ops->is_extents) 1907 ret -= KEY_MAX_U64S; 1908 1909 return max(ret, 0L); 1910 } 1911 1912 static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op, 1913 struct keylist *insert_keys, 1914 struct bkey *replace_key) 1915 { 1916 bool ret = false; 1917 int oldsize = bch_count_data(&b->keys); 1918 1919 while (!bch_keylist_empty(insert_keys)) { 1920 struct bkey *k = insert_keys->keys; 1921 1922 if (bkey_u64s(k) > insert_u64s_remaining(b)) 1923 break; 1924 1925 if (bkey_cmp(k, &b->key) <= 0) { 1926 if (!b->level) 1927 bkey_put(b->c, k); 1928 1929 ret |= btree_insert_key(b, k, replace_key); 1930 bch_keylist_pop_front(insert_keys); 1931 } else if (bkey_cmp(&START_KEY(k), &b->key) < 0) { 1932 BKEY_PADDED(key) temp; 1933 bkey_copy(&temp.key, insert_keys->keys); 1934 1935 bch_cut_back(&b->key, &temp.key); 1936 bch_cut_front(&b->key, insert_keys->keys); 1937 1938 ret |= btree_insert_key(b, &temp.key, replace_key); 1939 break; 1940 } else { 1941 break; 1942 } 1943 } 1944 1945 if (!ret) 1946 op->insert_collision = true; 1947 1948 BUG_ON(!bch_keylist_empty(insert_keys) && b->level); 1949 1950 BUG_ON(bch_count_data(&b->keys) < oldsize); 1951 return ret; 1952 } 1953 1954 static int btree_split(struct btree *b, struct btree_op *op, 1955 struct keylist *insert_keys, 1956 struct bkey *replace_key) 1957 { 1958 bool split; 1959 struct btree *n1, *n2 = NULL, *n3 = NULL; 1960 uint64_t start_time = local_clock(); 1961 struct closure cl; 1962 struct keylist parent_keys; 1963 1964 closure_init_stack(&cl); 1965 bch_keylist_init(&parent_keys); 1966 1967 if (btree_check_reserve(b, op)) { 1968 if (!b->level) 1969 return -EINTR; 1970 else 1971 WARN(1, "insufficient reserve for split\n"); 1972 } 1973 1974 n1 = btree_node_alloc_replacement(b, op); 1975 if (IS_ERR(n1)) 1976 goto err; 1977 1978 split = set_blocks(btree_bset_first(n1), 1979 block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5; 1980 1981 if (split) { 1982 unsigned keys = 0; 1983 1984 trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys); 1985 1986 n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent); 1987 if (IS_ERR(n2)) 1988 goto err_free1; 1989 1990 if (!b->parent) { 1991 n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL); 1992 if (IS_ERR(n3)) 1993 goto err_free2; 1994 } 1995 1996 mutex_lock(&n1->write_lock); 1997 mutex_lock(&n2->write_lock); 1998 1999 bch_btree_insert_keys(n1, op, 
insert_keys, replace_key); 2000 2001 /* 2002 * Has to be a linear search because we don't have an auxiliary 2003 * search tree yet 2004 */ 2005 2006 while (keys < (btree_bset_first(n1)->keys * 3) / 5) 2007 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), 2008 keys)); 2009 2010 bkey_copy_key(&n1->key, 2011 bset_bkey_idx(btree_bset_first(n1), keys)); 2012 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys)); 2013 2014 btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys; 2015 btree_bset_first(n1)->keys = keys; 2016 2017 memcpy(btree_bset_first(n2)->start, 2018 bset_bkey_last(btree_bset_first(n1)), 2019 btree_bset_first(n2)->keys * sizeof(uint64_t)); 2020 2021 bkey_copy_key(&n2->key, &b->key); 2022 2023 bch_keylist_add(&parent_keys, &n2->key); 2024 bch_btree_node_write(n2, &cl); 2025 mutex_unlock(&n2->write_lock); 2026 rw_unlock(true, n2); 2027 } else { 2028 trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys); 2029 2030 mutex_lock(&n1->write_lock); 2031 bch_btree_insert_keys(n1, op, insert_keys, replace_key); 2032 } 2033 2034 bch_keylist_add(&parent_keys, &n1->key); 2035 bch_btree_node_write(n1, &cl); 2036 mutex_unlock(&n1->write_lock); 2037 2038 if (n3) { 2039 /* Depth increases, make a new root */ 2040 mutex_lock(&n3->write_lock); 2041 bkey_copy_key(&n3->key, &MAX_KEY); 2042 bch_btree_insert_keys(n3, op, &parent_keys, NULL); 2043 bch_btree_node_write(n3, &cl); 2044 mutex_unlock(&n3->write_lock); 2045 2046 closure_sync(&cl); 2047 bch_btree_set_root(n3); 2048 rw_unlock(true, n3); 2049 } else if (!b->parent) { 2050 /* Root filled up but didn't need to be split */ 2051 closure_sync(&cl); 2052 bch_btree_set_root(n1); 2053 } else { 2054 /* Split a non root node */ 2055 closure_sync(&cl); 2056 make_btree_freeing_key(b, parent_keys.top); 2057 bch_keylist_push(&parent_keys); 2058 2059 bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL); 2060 BUG_ON(!bch_keylist_empty(&parent_keys)); 2061 } 2062 2063 btree_node_free(b); 2064 rw_unlock(true, n1); 2065 2066 bch_time_stats_update(&b->c->btree_split_time, start_time); 2067 2068 return 0; 2069 err_free2: 2070 bkey_put(b->c, &n2->key); 2071 btree_node_free(n2); 2072 rw_unlock(true, n2); 2073 err_free1: 2074 bkey_put(b->c, &n1->key); 2075 btree_node_free(n1); 2076 rw_unlock(true, n1); 2077 err: 2078 WARN(1, "bcache: btree split failed (level %u)", b->level); 2079 2080 if (n3 == ERR_PTR(-EAGAIN) || 2081 n2 == ERR_PTR(-EAGAIN) || 2082 n1 == ERR_PTR(-EAGAIN)) 2083 return -EAGAIN; 2084 2085 return -ENOMEM; 2086 } 2087 2088 static int bch_btree_insert_node(struct btree *b, struct btree_op *op, 2089 struct keylist *insert_keys, 2090 atomic_t *journal_ref, 2091 struct bkey *replace_key) 2092 { 2093 struct closure cl; 2094 2095 BUG_ON(b->level && replace_key); 2096 2097 closure_init_stack(&cl); 2098 2099 mutex_lock(&b->write_lock); 2100 2101 if (write_block(b) != btree_bset_last(b) && 2102 b->keys.last_set_unwritten) 2103 bch_btree_init_next(b); /* just wrote a set */ 2104 2105 if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) { 2106 mutex_unlock(&b->write_lock); 2107 goto split; 2108 } 2109 2110 BUG_ON(write_block(b) != btree_bset_last(b)); 2111 2112 if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) { 2113 if (!b->level) 2114 bch_btree_leaf_dirty(b, journal_ref); 2115 else 2116 bch_btree_node_write(b, &cl); 2117 } 2118 2119 mutex_unlock(&b->write_lock); 2120 2121 /* wait for btree node write if necessary, after unlock */ 2122 closure_sync(&cl); 2123 2124 return 0; 2125 split: 2126 if 
(current->bio_list) { 2127 op->lock = b->c->root->level + 1; 2128 return -EAGAIN; 2129 } else if (op->lock <= b->c->root->level) { 2130 op->lock = b->c->root->level + 1; 2131 return -EINTR; 2132 } else { 2133 /* Invalidated all iterators */ 2134 int ret = btree_split(b, op, insert_keys, replace_key); 2135 2136 if (bch_keylist_empty(insert_keys)) 2137 return 0; 2138 else if (!ret) 2139 return -EINTR; 2140 return ret; 2141 } 2142 } 2143 2144 int bch_btree_insert_check_key(struct btree *b, struct btree_op *op, 2145 struct bkey *check_key) 2146 { 2147 int ret = -EINTR; 2148 uint64_t btree_ptr = b->key.ptr[0]; 2149 unsigned long seq = b->seq; 2150 struct keylist insert; 2151 bool upgrade = op->lock == -1; 2152 2153 bch_keylist_init(&insert); 2154 2155 if (upgrade) { 2156 rw_unlock(false, b); 2157 rw_lock(true, b, b->level); 2158 2159 if (b->key.ptr[0] != btree_ptr || 2160 b->seq != seq + 1) { 2161 op->lock = b->level; 2162 goto out; 2163 } 2164 } 2165 2166 SET_KEY_PTRS(check_key, 1); 2167 get_random_bytes(&check_key->ptr[0], sizeof(uint64_t)); 2168 2169 SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV); 2170 2171 bch_keylist_add(&insert, check_key); 2172 2173 ret = bch_btree_insert_node(b, op, &insert, NULL, NULL); 2174 2175 BUG_ON(!ret && !bch_keylist_empty(&insert)); 2176 out: 2177 if (upgrade) 2178 downgrade_write(&b->lock); 2179 return ret; 2180 } 2181 2182 struct btree_insert_op { 2183 struct btree_op op; 2184 struct keylist *keys; 2185 atomic_t *journal_ref; 2186 struct bkey *replace_key; 2187 }; 2188 2189 static int btree_insert_fn(struct btree_op *b_op, struct btree *b) 2190 { 2191 struct btree_insert_op *op = container_of(b_op, 2192 struct btree_insert_op, op); 2193 2194 int ret = bch_btree_insert_node(b, &op->op, op->keys, 2195 op->journal_ref, op->replace_key); 2196 if (ret && !bch_keylist_empty(op->keys)) 2197 return ret; 2198 else 2199 return MAP_DONE; 2200 } 2201 2202 int bch_btree_insert(struct cache_set *c, struct keylist *keys, 2203 atomic_t *journal_ref, struct bkey *replace_key) 2204 { 2205 struct btree_insert_op op; 2206 int ret = 0; 2207 2208 BUG_ON(current->bio_list); 2209 BUG_ON(bch_keylist_empty(keys)); 2210 2211 bch_btree_op_init(&op.op, 0); 2212 op.keys = keys; 2213 op.journal_ref = journal_ref; 2214 op.replace_key = replace_key; 2215 2216 while (!ret && !bch_keylist_empty(keys)) { 2217 op.op.lock = 0; 2218 ret = bch_btree_map_leaf_nodes(&op.op, c, 2219 &START_KEY(keys->keys), 2220 btree_insert_fn); 2221 } 2222 2223 if (ret) { 2224 struct bkey *k; 2225 2226 pr_err("error %i", ret); 2227 2228 while ((k = bch_keylist_pop(keys))) 2229 bkey_put(c, k); 2230 } else if (op.op.insert_collision) 2231 ret = -ESRCH; 2232 2233 return ret; 2234 } 2235 2236 void bch_btree_set_root(struct btree *b) 2237 { 2238 unsigned i; 2239 struct closure cl; 2240 2241 closure_init_stack(&cl); 2242 2243 trace_bcache_btree_set_root(b); 2244 2245 BUG_ON(!b->written); 2246 2247 for (i = 0; i < KEY_PTRS(&b->key); i++) 2248 BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO); 2249 2250 mutex_lock(&b->c->bucket_lock); 2251 list_del_init(&b->list); 2252 mutex_unlock(&b->c->bucket_lock); 2253 2254 b->c->root = b; 2255 2256 bch_journal_meta(b->c, &cl); 2257 closure_sync(&cl); 2258 } 2259 2260 /* Map across nodes or keys */ 2261 2262 static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op, 2263 struct bkey *from, 2264 btree_map_nodes_fn *fn, int flags) 2265 { 2266 int ret = MAP_CONTINUE; 2267 2268 if (b->level) { 2269 struct bkey *k; 2270 struct btree_iter iter; 2271 2272 
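		/*
		 * Interior node: walk the child pointers at or after 'from'
		 * and recurse into each child in key order.
		 */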
void bch_btree_set_root(struct btree *b)
{
	unsigned i;
	struct closure cl;

	closure_init_stack(&cl);

	trace_bcache_btree_set_root(b);

	BUG_ON(!b->written);

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO);

	mutex_lock(&b->c->bucket_lock);
	list_del_init(&b->list);
	mutex_unlock(&b->c->bucket_lock);

	b->c->root = b;

	bch_journal_meta(b->c, &cl);
	closure_sync(&cl);
}

/* Map across nodes or keys */

static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
				       struct bkey *from,
				       btree_map_nodes_fn *fn, int flags)
{
	int ret = MAP_CONTINUE;

	if (b->level) {
		struct bkey *k;
		struct btree_iter iter;

		bch_btree_iter_init(&b->keys, &iter, from);

		while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
						       bch_ptr_bad))) {
			ret = btree(map_nodes_recurse, k, b,
				    op, from, fn, flags);
			from = NULL;

			if (ret != MAP_CONTINUE)
				return ret;
		}
	}

	if (!b->level || flags == MAP_ALL_NODES)
		ret = fn(op, b);

	return ret;
}

int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
			  struct bkey *from, btree_map_nodes_fn *fn, int flags)
{
	return btree_root(map_nodes_recurse, c, op, from, fn, flags);
}

static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
				      struct bkey *from, btree_map_keys_fn *fn,
				      int flags)
{
	int ret = MAP_CONTINUE;
	struct bkey *k;
	struct btree_iter iter;

	bch_btree_iter_init(&b->keys, &iter, from);

	while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
		ret = !b->level
			? fn(op, b, k)
			: btree(map_keys_recurse, k, b, op, from, fn, flags);
		from = NULL;

		if (ret != MAP_CONTINUE)
			return ret;
	}

	if (!b->level && (flags & MAP_END_KEY))
		ret = fn(op, b, &KEY(KEY_INODE(&b->key),
				     KEY_OFFSET(&b->key), 0));

	return ret;
}

int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
		       struct bkey *from, btree_map_keys_fn *fn, int flags)
{
	return btree_root(map_keys_recurse, c, op, from, fn, flags);
}
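
/*
 * The map functions are the intended way to walk the btree.  A sketch of a
 * key-mapping callback (the function name and what it counts are
 * hypothetical, not taken from this file):
 *
 *	static int count_dirty_fn(struct btree_op *op, struct btree *b,
 *				  struct bkey *k)
 *	{
 *		if (KEY_DIRTY(k))
 *			...;
 *		return MAP_CONTINUE;
 *	}
 *
 *	struct btree_op op;
 *
 *	bch_btree_op_init(&op, -1);
 *	bch_btree_map_keys(&op, c, &ZERO_KEY, count_dirty_fn, 0);
 *
 * Returning MAP_CONTINUE keeps walking, MAP_DONE stops the walk and is
 * propagated back to the caller.  MAP_END_KEY additionally passes a zero-size
 * key at the end of every leaf, which the keybuf refill below uses to advance
 * last_scanned to the end of each node.  __bch_btree_map_nodes() and
 * bch_btree_map_leaf_nodes() call their callback on whole nodes instead; see
 * btree_insert_fn() above.
 */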
/* Keybuf code */

static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
{
	/* Overlapping keys compare equal */
	if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0)
		return -1;
	if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0)
		return 1;
	return 0;
}

static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
					    struct keybuf_key *r)
{
	return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
}

struct refill {
	struct btree_op	op;
	unsigned	nr_found;
	struct keybuf	*buf;
	struct bkey	*end;
	keybuf_pred_fn	*pred;
};

static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
			    struct bkey *k)
{
	struct refill *refill = container_of(op, struct refill, op);
	struct keybuf *buf = refill->buf;
	int ret = MAP_CONTINUE;

	if (bkey_cmp(k, refill->end) >= 0) {
		ret = MAP_DONE;
		goto out;
	}

	if (!KEY_SIZE(k)) /* end key */
		goto out;

	if (refill->pred(buf, k)) {
		struct keybuf_key *w;

		spin_lock(&buf->lock);

		w = array_alloc(&buf->freelist);
		if (!w) {
			spin_unlock(&buf->lock);
			return MAP_DONE;
		}

		w->private = NULL;
		bkey_copy(&w->key, k);

		if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
			array_free(&buf->freelist, w);
		else
			refill->nr_found++;

		if (array_freelist_empty(&buf->freelist))
			ret = MAP_DONE;

		spin_unlock(&buf->lock);
	}
out:
	buf->last_scanned = *k;
	return ret;
}

void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
		       struct bkey *end, keybuf_pred_fn *pred)
{
	struct bkey start = buf->last_scanned;
	struct refill refill;

	cond_resched();

	bch_btree_op_init(&refill.op, -1);
	refill.nr_found	= 0;
	refill.buf	= buf;
	refill.end	= end;
	refill.pred	= pred;

	bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
			   refill_keybuf_fn, MAP_END_KEY);

	trace_bcache_keyscan(refill.nr_found,
			     KEY_INODE(&start), KEY_OFFSET(&start),
			     KEY_INODE(&buf->last_scanned),
			     KEY_OFFSET(&buf->last_scanned));

	spin_lock(&buf->lock);

	if (!RB_EMPTY_ROOT(&buf->keys)) {
		struct keybuf_key *w;
		w = RB_FIRST(&buf->keys, struct keybuf_key, node);
		buf->start	= START_KEY(&w->key);

		w = RB_LAST(&buf->keys, struct keybuf_key, node);
		buf->end	= w->key;
	} else {
		buf->start	= MAX_KEY;
		buf->end	= MAX_KEY;
	}

	spin_unlock(&buf->lock);
}

static void __bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
{
	rb_erase(&w->node, &buf->keys);
	array_free(&buf->freelist, w);
}

void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
{
	spin_lock(&buf->lock);
	__bch_keybuf_del(buf, w);
	spin_unlock(&buf->lock);
}

bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
				  struct bkey *end)
{
	bool ret = false;
	struct keybuf_key *p, *w, s;
	s.key = *start;

	if (bkey_cmp(end, &buf->start) <= 0 ||
	    bkey_cmp(start, &buf->end) >= 0)
		return false;

	spin_lock(&buf->lock);
	w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp);

	while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) {
		p = w;
		w = RB_NEXT(w, node);

		if (p->private)
			ret = true;
		else
			__bch_keybuf_del(buf, p);
	}

	spin_unlock(&buf->lock);
	return ret;
}

struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
{
	struct keybuf_key *w;
	spin_lock(&buf->lock);

	w = RB_FIRST(&buf->keys, struct keybuf_key, node);

	while (w && w->private)
		w = RB_NEXT(w, node);

	if (w)
		w->private = ERR_PTR(-EINTR);

	spin_unlock(&buf->lock);
	return w;
}

struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
					  struct keybuf *buf,
					  struct bkey *end,
					  keybuf_pred_fn *pred)
{
	struct keybuf_key *ret;

	while (1) {
		ret = bch_keybuf_next(buf);
		if (ret)
			break;

		if (bkey_cmp(&buf->last_scanned, end) >= 0) {
			pr_debug("scan finished");
			break;
		}

		bch_refill_keybuf(c, buf, end, pred);
	}

	return ret;
}

void bch_keybuf_init(struct keybuf *buf)
{
	buf->last_scanned	= MAX_KEY;
	buf->keys		= RB_ROOT;

	spin_lock_init(&buf->lock);
	array_allocator_init(&buf->freelist);
}
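
/*
 * Putting the keybuf pieces together: a consumer (writeback and moving gc use
 * this pattern) scans for interesting keys roughly as sketched below; the
 * predicate name is hypothetical:
 *
 *	struct keybuf buf;
 *	struct keybuf_key *w;
 *
 *	bch_keybuf_init(&buf);
 *	buf.last_scanned = ZERO_KEY;		(or wherever the scan starts)
 *
 *	while ((w = bch_keybuf_next_rescan(c, &buf, &MAX_KEY, my_pred))) {
 *		(do IO against w->key, stashing per-key state in w->private)
 *		bch_keybuf_del(&buf, w);
 *	}
 *
 * bch_keybuf_next() hands each buffered key out once, marking it in flight
 * via w->private, and bch_keybuf_check_overlapping() lets other code notice
 * or discard buffered keys that overlap a range it is about to overwrite.
 */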