alloc.c (6f10f7d1b02b1bbc305f88d7696445dd38b13881) → alloc.c (1fae7cf05293d3a2c9e59c1bc59372322386467c)
// SPDX-License-Identifier: GPL-2.0
/*
 * Primary bucket allocation code
 *
 * Copyright 2012 Google, Inc.
 *
 * Allocation in bcache is done in terms of buckets:
 *

--- 230 unchanged lines hidden ---

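/*
 * Random replacement policy: pick bucket indexes at random (random bytes
 * reduced into [first_bucket, nbuckets)) and invalidate eligible buckets
 * until free_inc is full.
 */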
static void invalidate_buckets_random(struct cache *ca)
{
        struct bucket *b;
        size_t checked = 0;

        while (!fifo_full(&ca->free_inc)) {
                size_t n;

                get_random_bytes(&n, sizeof(n));

                n %= (size_t) (ca->sb.nbuckets - ca->sb.first_bucket);
                n += ca->sb.first_bucket;

                b = ca->buckets + n;

                if (bch_can_invalidate_bucket(ca, b))

--- 254 unchanged lines hidden ---

        bkey_put(c, k);
        return -1;
}
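
/*
 * Locked wrapper around __bch_bucket_alloc_set(): bucket_lock is held for
 * the duration of the allocation.
 */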
int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
                         struct bkey *k, int n, bool wait)
{
        int ret;

        mutex_lock(&c->bucket_lock);
        ret = __bch_bucket_alloc_set(c, reserve, k, n, wait);
        mutex_unlock(&c->bucket_lock);
        return ret;
}

/* Sector allocator */

--- 176 unchanged lines hidden ---

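/*
 * Pre-allocate MAX_OPEN_BUCKETS struct open_bucket entries onto
 * c->data_buckets; returns -ENOMEM if any allocation fails.
 */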
int bch_open_buckets_alloc(struct cache_set *c)
{
        int i;

        spin_lock_init(&c->data_bucket_lock);

        for (i = 0; i < MAX_OPEN_BUCKETS; i++) {
                struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);

                if (!b)
                        return -ENOMEM;

                list_add(&b->list, &c->data_buckets);
        }

        return 0;
}
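
/*
 * Start the per-cache allocator thread ("bcache_allocator") and record it
 * in ca->alloc_thread; returns PTR_ERR() if kthread_run() fails.
 */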
int bch_cache_allocator_start(struct cache *ca)
{
        struct task_struct *k = kthread_run(bch_allocator_thread,
                                            ca, "bcache_allocator");
        if (IS_ERR(k))
                return PTR_ERR(k);

        ca->alloc_thread = k;
        return 0;
}