btree.c: diff from 6f10f7d1b02b1bbc305f88d7696445dd38b13881 (old) to 1fae7cf05293d3a2c9e59c1bc59372322386467c (new)
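Every hunk in this diff makes the same one-line change: a blank line is inserted after the local variable declarations at the top of a function or block, which is why the new side's line numbers pull one further ahead at each successive hunk. This matches the style that scripts/checkpatch.pl reports as "WARNING: Missing a blank line after declarations"; a minimal before/after sketch follows the last hunk.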
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 *
 * Uses a block device as cache for other block devices; optimized for SSDs.
 * All allocation is done in buckets, which should match the erase block size
 * of the device.
 *

--- 273 unchanged lines hidden ---

@@ -282,16 +282,17 @@
 			err, PTR_BUCKET_NR(b->c, &b->key, 0),
 			bset_block_offset(b, i), i->keys);
 	goto out;
 }
 
 static void btree_node_read_endio(struct bio *bio)
 {
 	struct closure *cl = bio->bi_private;
+
 	closure_put(cl);
 }
 
 static void bch_btree_node_read(struct btree *b)
 {
 	uint64_t start_time = local_clock();
 	struct closure cl;
 	struct bio *bio;

--- 301 unchanged lines hidden ---

@@ -599,16 +600,17 @@
 		list_move(&b->list, &b->c->btree_cache_freed);
 	}
 }
 
 static struct btree *mca_bucket_alloc(struct cache_set *c,
 				      struct bkey *k, gfp_t gfp)
 {
 	struct btree *b = kzalloc(sizeof(struct btree), gfp);
+
 	if (!b)
 		return NULL;
 
 	init_rwsem(&b->lock);
 	lockdep_set_novalidate_class(&b->lock);
 	mutex_init(&b->write_lock);
 	lockdep_set_novalidate_class(&b->write_lock);
 	INIT_LIST_HEAD(&b->list);

--- 126 unchanged lines hidden ---

@@ -741,16 +743,17 @@
 
 	return mca_can_free(c) * c->btree_pages;
 }
 
 void bch_btree_cache_free(struct cache_set *c)
 {
 	struct btree *b;
 	struct closure cl;
+
 	closure_init_stack(&cl);
 
 	if (c->shrink.list.next)
 		unregister_shrinker(&c->shrink);
 
 	mutex_lock(&c->bucket_lock);
 
 #ifdef CONFIG_BCACHE_DEBUG

--- 362 unchanged lines hidden ---

@@ -1119,16 +1122,17 @@
 {
 	return __bch_btree_node_alloc(c, op, level, op != NULL, parent);
 }
 
 static struct btree *btree_node_alloc_replacement(struct btree *b,
 						  struct btree_op *op)
 {
 	struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent);
+
 	if (!IS_ERR_OR_NULL(n)) {
 		mutex_lock(&n->write_lock);
 		bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
 		bkey_copy_key(&n->key, &b->key);
 		mutex_unlock(&n->write_lock);
 	}
 
 	return n;

--- 1348 unchanged lines hidden ---

@@ -2483,16 +2487,17 @@
 		 KEY_INODE(&start), KEY_OFFSET(&start),
 		 KEY_INODE(&buf->last_scanned),
 		 KEY_OFFSET(&buf->last_scanned));
 
 	spin_lock(&buf->lock);
 
 	if (!RB_EMPTY_ROOT(&buf->keys)) {
 		struct keybuf_key *w;
+
 		w = RB_FIRST(&buf->keys, struct keybuf_key, node);
 		buf->start = START_KEY(&w->key);
 
 		w = RB_LAST(&buf->keys, struct keybuf_key, node);
 		buf->end = w->key;
 	} else {
 		buf->start = MAX_KEY;
 		buf->end = MAX_KEY;

--- 15 unchanged lines hidden ---

@@ -2514,16 +2519,17 @@
 	spin_unlock(&buf->lock);
 }
 
 bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
 				  struct bkey *end)
 {
 	bool ret = false;
 	struct keybuf_key *p, *w, s;
+
 	s.key = *start;
 
 	if (bkey_cmp(end, &buf->start) <= 0 ||
 	    bkey_cmp(start, &buf->end) >= 0)
 		return false;
 
 	spin_lock(&buf->lock);
 	w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp);

--- 10 unchanged lines hidden ---

@@ -2540,16 +2546,17 @@
 
 	spin_unlock(&buf->lock);
 	return ret;
 }
 
 struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
 {
 	struct keybuf_key *w;
+
 	spin_lock(&buf->lock);
 
 	w = RB_FIRST(&buf->keys, struct keybuf_key, node);
 
 	while (w && w->private)
 		w = RB_NEXT(w, node);
 
 	if (w)

--- 37 unchanged lines hidden ---
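For reference, a minimal before/after sketch of the style rule this diff applies. The functions below are hypothetical (count_positive_before/count_positive_after are not part of btree.c); only the blank-line placement matters.

/* Before: the first statement follows the declarations directly, which
 * checkpatch.pl flags as "Missing a blank line after declarations".
 * (Hypothetical example, not from btree.c.)
 */
static unsigned int count_positive_before(const int *buf, unsigned int n)
{
	unsigned int i, count = 0;
	for (i = 0; i < n; i++)
		if (buf[i] > 0)
			count++;
	return count;
}

/* After: one blank line separates the declarations from the first
 * statement, the same one-line insertion made in every hunk above.
 */
static unsigned int count_positive_after(const int *buf, unsigned int n)
{
	unsigned int i, count = 0;

	for (i = 0; i < n; i++)
		if (buf[i] > 0)
			count++;
	return count;
}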