/*
 * Primary bucket allocation code
 *
 * Copyright 2012 Google, Inc.
 *
 * Allocation in bcache is done in terms of buckets:
 *
 * Each bucket has associated an 8 bit gen; this gen corresponds to the gen in
 * btree pointers - they must match for the pointer to be considered valid.
 *
 * Thus (assuming a bucket has no dirty data or metadata in it) we can reuse a
 * bucket simply by incrementing its gen.
 *
 * The gens (along with the priorities; it's really the gens that are
 * important, but the code is named as if it's the priorities) are written in
 * an arbitrary list of buckets on disk, with a pointer to them in the journal
 * header.
 *
 * When we invalidate a bucket, we have to write its new gen to disk and wait
 * for that write to complete before we use it - otherwise after a crash we
 * could have pointers that appeared to be good but pointed to data that had
 * been overwritten.
 *
 * Since the gens and priorities are all stored contiguously on disk, we can
 * batch this up: We fill up the free_inc list with freshly invalidated buckets,
 * call prio_write(), and when prio_write() finishes we pull buckets off the
 * free_inc list and optionally discard them.
 *
 * free_inc isn't the only freelist - if it was, we'd often have to sleep while
 * priorities and gens were being written before we could allocate. c->free is a
 * smaller freelist, and buckets on that list are always ready to be used.
 *
 * If we've got discards enabled, that happens when a bucket moves from the
 * free_inc list to the free list.
 *
 * There is another freelist, because sometimes we have buckets that we know
 * have nothing pointing into them - these we can reuse without waiting for
 * priorities to be rewritten. These come from freed btree nodes and buckets
 * that garbage collection discovered no longer had valid keys pointing into
 * them (because they were overwritten). That's the unused list - buckets on the
 * unused list move to the free list, optionally being discarded in the process.
 *
 * It's also important to ensure that gens don't wrap around - with respect to
 * either the oldest gen in the btree or the gen on disk. This is quite
 * difficult to do in practice, but we explicitly guard against it anyway - if
 * a bucket is in danger of wrapping around we simply skip invalidating it that
 * time around, and we garbage collect or rewrite the priorities sooner than we
 * would have otherwise.
 *
 * bch_bucket_alloc() allocates a single bucket from a specific cache.
 *
 * bch_bucket_alloc_set() allocates one or more buckets from different caches
 * out of a cache set.
 *
 * bch_allocator_thread() drives all the processes described above. It's woken
 * up from bch_bucket_alloc() and a few other places that need to make sure
 * free buckets are ready.
 *
 * invalidate_buckets_(lru|fifo|random)() find buckets that are available to be
 * invalidated, invalidate them, and stick them on the free_inc list - in lru,
 * fifo or random order.
 */

#include "bcache.h"
#include "btree.h"

#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/random.h>
#include <trace/events/bcache.h>

/* Bucket heap / gen */

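/*
 * bch_inc_gen() bumps a bucket's gen, which is how a bucket gets invalidated.
 * bucket_gc_gen() and bucket_disk_gen() measure how far the gen has advanced
 * since the last garbage collection pass and the last prio/gen write; the
 * running maxima kept in need_gc and need_save_prio track how close we are
 * to the wraparound limits, and bch_allocator_thread() forces a prio write
 * once need_save_prio grows large.
 */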
uint8_t bch_inc_gen(struct cache *ca, struct bucket *b)
{
	uint8_t ret = ++b->gen;

	ca->set->need_gc = max(ca->set->need_gc, bucket_gc_gen(b));
	WARN_ON_ONCE(ca->set->need_gc > BUCKET_GC_GEN_MAX);

	if (CACHE_SYNC(&ca->set->sb)) {
		ca->need_save_prio = max(ca->need_save_prio,
					 bucket_disk_gen(b));
		WARN_ON_ONCE(ca->need_save_prio > BUCKET_DISK_GEN_MAX);
	}

	return ret;
}

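/*
 * A minimal sketch of the pointer validity rule that bch_inc_gen() enforces:
 * a btree pointer is only usable while the gen it recorded still matches the
 * bucket's current gen.  The helper name is hypothetical and this assumes the
 * PTR_BUCKET()/PTR_GEN() accessors used elsewhere in bcache; ptr_stale()
 * expresses the same check as a gen difference.
 */
#if 0
static inline bool ptr_gen_matches(struct cache_set *c,
				   const struct bkey *k, unsigned ptr)
{
	/* any bch_inc_gen() on the bucket makes this comparison fail */
	return PTR_BUCKET(c, k, ptr)->gen == PTR_GEN(k, ptr);
}
#endif

/*
 * Bucket priorities approximate recency of writes: each time roughly
 * 1/1024th of the cache's capacity (nbuckets * bucket_size / 1024 sectors)
 * has been written, every bucket that's in use, not pinned and not a btree
 * bucket has its prio decremented, and c->min_prio is recomputed.
 */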
void bch_rescale_priorities(struct cache_set *c, int sectors)
{
	struct cache *ca;
	struct bucket *b;
	unsigned next = c->nbuckets * c->sb.bucket_size / 1024;
	unsigned i;
	int r;

	atomic_sub(sectors, &c->rescale);

	do {
		r = atomic_read(&c->rescale);

		if (r >= 0)
			return;
	} while (atomic_cmpxchg(&c->rescale, r, r + next) != r);

	mutex_lock(&c->bucket_lock);

	c->min_prio = USHRT_MAX;

	for_each_cache(ca, c, i)
		for_each_bucket(b, ca)
			if (b->prio &&
			    b->prio != BTREE_PRIO &&
			    !atomic_read(&b->pin)) {
				b->prio--;
				c->min_prio = min(c->min_prio, b->prio);
			}

	mutex_unlock(&c->bucket_lock);
}

/* Allocation */

static inline bool can_inc_bucket_gen(struct bucket *b)
{
	return bucket_gc_gen(b) < BUCKET_GC_GEN_MAX &&
		bucket_disk_gen(b) < BUCKET_DISK_GEN_MAX;
}

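/*
 * Stick a bucket that has nothing pointing into it (its GC mark and sectors
 * used must both be zero) on the unused list, pinning it so it won't be
 * invalidated again before it's reused; see the header comment for why such
 * buckets can skip the prio/gen write.  Under FIFO replacement the bucket is
 * only added if one of the reserve freelists (free[0..RESERVE_NONE-1]) still
 * has room.
 */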
bool bch_bucket_add_unused(struct cache *ca, struct bucket *b)
{
	BUG_ON(GC_MARK(b) || GC_SECTORS_USED(b));

	if (CACHE_REPLACEMENT(&ca->sb) == CACHE_REPLACEMENT_FIFO) {
		unsigned i;

		for (i = 0; i < RESERVE_NONE; i++)
			if (!fifo_full(&ca->free[i]))
				goto add;

		return false;
	}
add:
	b->prio = 0;

	if (can_inc_bucket_gen(b) &&
	    fifo_push(&ca->unused, b - ca->buckets)) {
		atomic_inc(&b->pin);
		return true;
	}

	return false;
}

static bool can_invalidate_bucket(struct cache *ca, struct bucket *b)
{
	return GC_MARK(b) == GC_MARK_RECLAIMABLE &&
		!atomic_read(&b->pin) &&
		can_inc_bucket_gen(b);
}

static void invalidate_one_bucket(struct cache *ca, struct bucket *b)
{
	bch_inc_gen(ca, b);
	b->prio = INITIAL_PRIO;
	atomic_inc(&b->pin);
	fifo_push(&ca->free_inc, b - ca->buckets);
}

/*
 * Determines the order in which we reuse buckets, smallest bucket_prio()
 * first: we also take into account the number of sectors of live data in that
 * bucket, and in order for that multiply to make sense we have to scale the
 * bucket priorities.
 *
 * Thus, we scale the bucket priorities so that the bucket with the smallest
 * prio is worth 1/8th of what INITIAL_PRIO is worth.
 */

#define bucket_prio(b)							\
({									\
	unsigned min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8;	\
									\
	(b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b);	\
})

#define bucket_max_cmp(l, r)	(bucket_prio(l) < bucket_prio(r))
#define bucket_min_cmp(l, r)	(bucket_prio(l) > bucket_prio(r))
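
/*
 * Worked example of the scaling above: with c->min_prio == 0, the coldest
 * bucket (prio == min_prio) weighs in at INITIAL_PRIO/8 per sector of live
 * data, while a freshly written bucket (prio == INITIAL_PRIO) weighs
 * 9 * INITIAL_PRIO / 8 per sector - so a nearly empty but recently written
 * bucket can still be reclaimed before a full but old one.
 */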

static void invalidate_buckets_lru(struct cache *ca)
{
	struct bucket *b;
	ssize_t i;

	ca->heap.used = 0;

	for_each_bucket(b, ca) {
		/*
		 * If we fill up the unused list and then return before adding
		 * anything to the free_inc list, we'll skip writing prios/gens
		 * and just go back to allocating from the unused list:
		 */
		if (fifo_full(&ca->unused))
			return;

		if (!can_invalidate_bucket(ca, b))
			continue;

		if (!GC_SECTORS_USED(b) &&
		    bch_bucket_add_unused(ca, b))
			continue;

		if (!heap_full(&ca->heap))
			heap_add(&ca->heap, b, bucket_max_cmp);
		else if (bucket_max_cmp(b, heap_peek(&ca->heap))) {
			ca->heap.data[0] = b;
			heap_sift(&ca->heap, 0, bucket_max_cmp);
		}
	}

	for (i = ca->heap.used / 2 - 1; i >= 0; --i)
		heap_sift(&ca->heap, i, bucket_min_cmp);

	while (!fifo_full(&ca->free_inc)) {
		if (!heap_pop(&ca->heap, b, bucket_min_cmp)) {
			/*
			 * We don't want to be calling invalidate_buckets()
			 * multiple times when it can't do anything
			 */
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}

		invalidate_one_bucket(ca, b);
	}
}

static void invalidate_buckets_fifo(struct cache *ca)
{
	struct bucket *b;
	size_t checked = 0;

	while (!fifo_full(&ca->free_inc)) {
		if (ca->fifo_last_bucket <  ca->sb.first_bucket ||
		    ca->fifo_last_bucket >= ca->sb.nbuckets)
			ca->fifo_last_bucket = ca->sb.first_bucket;

		b = ca->buckets + ca->fifo_last_bucket++;

		if (can_invalidate_bucket(ca, b))
			invalidate_one_bucket(ca, b);

		if (++checked >= ca->sb.nbuckets) {
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}
	}
}

static void invalidate_buckets_random(struct cache *ca)
{
	struct bucket *b;
	size_t checked = 0;

	while (!fifo_full(&ca->free_inc)) {
		size_t n;
		get_random_bytes(&n, sizeof(n));

		n %= (size_t) (ca->sb.nbuckets - ca->sb.first_bucket);
		n += ca->sb.first_bucket;

		b = ca->buckets + n;

		if (can_invalidate_bucket(ca, b))
			invalidate_one_bucket(ca, b);

		if (++checked >= ca->sb.nbuckets / 2) {
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}
	}
}

static void invalidate_buckets(struct cache *ca)
{
	if (ca->invalidate_needs_gc)
		return;

	switch (CACHE_REPLACEMENT(&ca->sb)) {
	case CACHE_REPLACEMENT_LRU:
		invalidate_buckets_lru(ca);
		break;
	case CACHE_REPLACEMENT_FIFO:
		invalidate_buckets_fifo(ca);
		break;
	case CACHE_REPLACEMENT_RANDOM:
		invalidate_buckets_random(ca);
		break;
	}

	trace_bcache_alloc_invalidate(ca);
}

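/*
 * Sleep until @cond becomes true, dropping bucket_lock while we sleep and
 * retaking it before rechecking.  Note the bare "return 0" when the kthread
 * is asked to stop: this macro can only be used from the allocator thread's
 * main function.
 */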
#define allocator_wait(ca, cond)					\
do {									\
	while (1) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (cond)						\
			break;						\
									\
		mutex_unlock(&(ca)->set->bucket_lock);			\
		if (kthread_should_stop())				\
			return 0;					\
									\
		try_to_freeze();					\
		schedule();						\
		mutex_lock(&(ca)->set->bucket_lock);			\
	}								\
	__set_current_state(TASK_RUNNING);				\
} while (0)

static bool bch_allocator_push(struct cache *ca, long bucket)
{
	unsigned i;

	/* Prios/gens are actually the most important reserve */
	if (fifo_push(&ca->free[RESERVE_PRIO], bucket))
		return true;

	for (i = 0; i < RESERVE_NR; i++)
		if (fifo_push(&ca->free[i], bucket))
			return true;

	return false;
}

static int bch_allocator_thread(void *arg)
{
	struct cache *ca = arg;

	mutex_lock(&ca->set->bucket_lock);

	while (1) {
		/*
		 * First, we pull buckets off of the unused and free_inc lists,
		 * possibly issue discards to them, then we add the bucket to
		 * the free list:
		 */
		while (1) {
			long bucket;

			if ((!atomic_read(&ca->set->prio_blocked) ||
			     !CACHE_SYNC(&ca->set->sb)) &&
			    !fifo_empty(&ca->unused))
				fifo_pop(&ca->unused, bucket);
			else if (!fifo_empty(&ca->free_inc))
				fifo_pop(&ca->free_inc, bucket);
			else
				break;

			if (ca->discard) {
				mutex_unlock(&ca->set->bucket_lock);
				blkdev_issue_discard(ca->bdev,
					bucket_to_sector(ca->set, bucket),
					ca->sb.block_size, GFP_KERNEL, 0);
				mutex_lock(&ca->set->bucket_lock);
			}

			allocator_wait(ca, bch_allocator_push(ca, bucket));
			wake_up(&ca->set->bucket_wait);
		}

		/*
		 * We've run out of free buckets; we need to find some buckets
		 * we can invalidate. First, invalidate them in memory and add
		 * them to the free_inc list:
		 */

		allocator_wait(ca, ca->set->gc_mark_valid &&
			       (ca->need_save_prio > 64 ||
				!ca->invalidate_needs_gc));
		invalidate_buckets(ca);

		/*
		 * Now, we write their new gens to disk so we can start writing
		 * new stuff to them:
		 */
		allocator_wait(ca, !atomic_read(&ca->set->prio_blocked));
		if (CACHE_SYNC(&ca->set->sb) &&
		    (!fifo_empty(&ca->free_inc) ||
		     ca->need_save_prio > 64))
			bch_prio_write(ca);
	}
}

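/*
 * Allocate a single bucket from cache @ca, popping from the RESERVE_NONE
 * freelist first and falling back to the @reserve freelist.  If both are
 * empty and @wait is set, sleep on bucket_wait until the allocator thread
 * refills them; otherwise return -1.  The returned bucket comes back pinned,
 * with its prio and GC mark set according to whether it was allocated for
 * metadata.
 */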
long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)
{
	DEFINE_WAIT(w);
	struct bucket *b;
	long r;

	/* fastpath */
	if (fifo_pop(&ca->free[RESERVE_NONE], r) ||
	    fifo_pop(&ca->free[reserve], r))
		goto out;

	if (!wait)
		return -1;

	do {
		prepare_to_wait(&ca->set->bucket_wait, &w,
				TASK_UNINTERRUPTIBLE);

		mutex_unlock(&ca->set->bucket_lock);
		schedule();
		mutex_lock(&ca->set->bucket_lock);
	} while (!fifo_pop(&ca->free[RESERVE_NONE], r) &&
		 !fifo_pop(&ca->free[reserve], r));

	finish_wait(&ca->set->bucket_wait, &w);
out:
	wake_up_process(ca->alloc_thread);

	if (expensive_debug_checks(ca->set)) {
		size_t iter;
		long i;
		unsigned j;

		for (iter = 0; iter < prio_buckets(ca) * 2; iter++)
			BUG_ON(ca->prio_buckets[iter] == (uint64_t) r);

		for (j = 0; j < RESERVE_NR; j++)
			fifo_for_each(i, &ca->free[j], iter)
				BUG_ON(i == r);
		fifo_for_each(i, &ca->free_inc, iter)
			BUG_ON(i == r);
		fifo_for_each(i, &ca->unused, iter)
			BUG_ON(i == r);
	}

	b = ca->buckets + r;

	BUG_ON(atomic_read(&b->pin) != 1);

	SET_GC_SECTORS_USED(b, ca->sb.bucket_size);

	if (reserve <= RESERVE_PRIO) {
		SET_GC_MARK(b, GC_MARK_METADATA);
		SET_GC_MOVE(b, 0);
		b->prio = BTREE_PRIO;
	} else {
		SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
		SET_GC_MOVE(b, 0);
		b->prio = INITIAL_PRIO;
	}

	return r;
}

void bch_bucket_free(struct cache_set *c, struct bkey *k)
{
	unsigned i;

	for (i = 0; i < KEY_PTRS(k); i++) {
		struct bucket *b = PTR_BUCKET(c, k, i);

		SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
		SET_GC_SECTORS_USED(b, 0);
		bch_bucket_add_unused(PTR_CACHE(c, k, i), b);
	}
}

int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
			   struct bkey *k, int n, bool wait)
{
	int i;

	lockdep_assert_held(&c->bucket_lock);
	BUG_ON(!n || n > c->caches_loaded || n > 8);

	bkey_init(k);

	/* sort by free space/prio of oldest data in caches */

	for (i = 0; i < n; i++) {
		struct cache *ca = c->cache_by_alloc[i];
		long b = bch_bucket_alloc(ca, reserve, wait);

		if (b == -1)
			goto err;

		k->ptr[i] = PTR(ca->buckets[b].gen,
				bucket_to_sector(c, b),
				ca->sb.nr_this_dev);

		SET_KEY_PTRS(k, i + 1);
	}

	return 0;
err:
	bch_bucket_free(c, k);
	bkey_put(c, k);
	return -1;
}

int bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
			 struct bkey *k, int n, bool wait)
{
	int ret;

	mutex_lock(&c->bucket_lock);
	ret = __bch_bucket_alloc_set(c, reserve, k, n, wait);
	mutex_unlock(&c->bucket_lock);
	return ret;
}
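
/*
 * Hypothetical caller sketch: grab one bucket's worth of space for metadata.
 * The helper name is made up, and RESERVE_BTREE is assumed to be one of the
 * reserve freelists defined alongside RESERVE_PRIO/RESERVE_NONE; BKEY_PADDED
 * gives the key room for its pointers, as bch_alloc_sectors() does below.
 */
#if 0
static int example_alloc_one_bucket(struct cache_set *c)
{
	BKEY_PADDED(key) tmp;

	if (bch_bucket_alloc_set(c, RESERVE_BTREE, &tmp.key, 1, true))
		return -1;

	/*
	 * tmp.key now carries a single pointer to a freshly allocated,
	 * pinned bucket; a real caller would fill it and insert the key
	 * into the btree, or bail out the way __bch_bucket_alloc_set()'s
	 * error path does (bch_bucket_free() + bkey_put() under
	 * bucket_lock).
	 */
	return 0;
}
#endif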

/* Sector allocator */

struct open_bucket {
	struct list_head	list;
	unsigned		last_write_point;
	unsigned		sectors_free;
	BKEY_PADDED(key);
};

/*
 * We keep multiple buckets open for writes, and try to segregate different
 * write streams for better cache utilization: first we look for a bucket where
 * the last write to it was sequential with the current write, and failing that
 * we look for a bucket that was last used by the same task.
 *
 * The idea is that if you've got multiple tasks pulling data into the cache at
 * the same time, you'll get better cache utilization if you try to segregate
 * their data and preserve locality.
 *
 * For example, say you're starting Firefox at the same time you're copying a
 * bunch of files. Firefox will likely end up being fairly hot and stay in the
 * cache awhile, but the data you copied might not be; if you wrote all that
 * data to the same buckets it'd get invalidated at the same time.
 *
 * Both of those tasks will be doing fairly random IO so we can't rely on
 * detecting sequential IO to segregate their data, but going off of the task
 * should be a sane heuristic.
 */
static struct open_bucket *pick_data_bucket(struct cache_set *c,
					    const struct bkey *search,
					    unsigned write_point,
					    struct bkey *alloc)
{
	struct open_bucket *ret, *ret_task = NULL;

	list_for_each_entry_reverse(ret, &c->data_buckets, list)
		if (!bkey_cmp(&ret->key, search))
			goto found;
		else if (ret->last_write_point == write_point)
			ret_task = ret;

	ret = ret_task ?: list_first_entry(&c->data_buckets,
					   struct open_bucket, list);
found:
	if (!ret->sectors_free && KEY_PTRS(alloc)) {
		ret->sectors_free = c->sb.bucket_size;
		bkey_copy(&ret->key, alloc);
		bkey_init(alloc);
	}

	if (!ret->sectors_free)
		ret = NULL;

	return ret;
}

/*
 * Allocates some space in the cache to write to, sets k to point to the newly
 * allocated space, and updates KEY_SIZE(k) and KEY_OFFSET(k) (to point to the
 * end of the newly allocated space).
 *
 * May allocate fewer sectors than @sectors; KEY_SIZE(k) indicates how many
 * sectors were actually allocated.
 *
 * If s->writeback is true, will not fail.
 */
bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
		       unsigned write_point, unsigned write_prio, bool wait)
{
	struct open_bucket *b;
	BKEY_PADDED(key) alloc;
	unsigned i;

	/*
	 * We might have to allocate a new bucket, which we can't do with a
	 * spinlock held. So if we have to allocate, we drop the lock, allocate
	 * and then retry. KEY_PTRS() indicates whether alloc points to
	 * allocated bucket(s).
	 */

	bkey_init(&alloc.key);
	spin_lock(&c->data_bucket_lock);

	while (!(b = pick_data_bucket(c, k, write_point, &alloc.key))) {
		unsigned watermark = write_prio
			? RESERVE_MOVINGGC
			: RESERVE_NONE;

		spin_unlock(&c->data_bucket_lock);

		if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, wait))
			return false;

		spin_lock(&c->data_bucket_lock);
	}

	/*
	 * If we had to allocate, we might race and not need to allocate the
	 * second time we call pick_data_bucket(). If we allocated a bucket but
	 * didn't use it, drop the refcount bch_bucket_alloc_set() took:
	 */
	if (KEY_PTRS(&alloc.key))
		bkey_put(c, &alloc.key);

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		EBUG_ON(ptr_stale(c, &b->key, i));

	/* Set up the pointer to the space we're allocating: */

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		k->ptr[i] = b->key.ptr[i];

	sectors = min(sectors, b->sectors_free);

	SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors);
	SET_KEY_SIZE(k, sectors);
	SET_KEY_PTRS(k, KEY_PTRS(&b->key));

	/*
	 * Move b to the end of the lru, and keep track of what this bucket was
	 * last used for:
	 */
	list_move_tail(&b->list, &c->data_buckets);
	bkey_copy_key(&b->key, k);
	b->last_write_point = write_point;

	b->sectors_free -= sectors;

	for (i = 0; i < KEY_PTRS(&b->key); i++) {
		SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);

		atomic_long_add(sectors,
				&PTR_CACHE(c, &b->key, i)->sectors_written);
	}

	if (b->sectors_free < c->sb.block_size)
		b->sectors_free = 0;

	/*
	 * k takes refcounts on the buckets it points to until it's inserted
	 * into the btree, but if we're done with this bucket we just transfer
	 * the refcount the open bucket was holding to k.
	 */
	if (b->sectors_free)
		for (i = 0; i < KEY_PTRS(&b->key); i++)
			atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin);

	spin_unlock(&c->data_bucket_lock);
	return true;
}

/* Init */

void bch_open_buckets_free(struct cache_set *c)
{
	struct open_bucket *b;

	while (!list_empty(&c->data_buckets)) {
		b = list_first_entry(&c->data_buckets,
				     struct open_bucket, list);
		list_del(&b->list);
		kfree(b);
	}
}

int bch_open_buckets_alloc(struct cache_set *c)
{
	int i;

	spin_lock_init(&c->data_bucket_lock);

	for (i = 0; i < 6; i++) {
		struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);
		if (!b)
			return -ENOMEM;

		list_add(&b->list, &c->data_buckets);
	}

	return 0;
}

int bch_cache_allocator_start(struct cache *ca)
{
	struct task_struct *k = kthread_run(bch_allocator_thread,
					    ca, "bcache_allocator");
	if (IS_ERR(k))
		return PTR_ERR(k);

	ca->alloc_thread = k;
	return 0;
}

int bch_cache_allocator_init(struct cache *ca)
{
	/*
	 * Reserve:
	 * Prio/gen writes first
	 * Then 8 for btree allocations
	 * Then half for the moving garbage collector
	 */
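	/*
	 * The #if 0 block below is the older per-cache watermark scheme that
	 * the comment above describes; the current code uses the fixed
	 * RESERVE_* freelists instead.
	 */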
#if 0
	ca->watermark[WATERMARK_PRIO] = 0;

	ca->watermark[WATERMARK_METADATA] = prio_buckets(ca);

	ca->watermark[WATERMARK_MOVINGGC] = 8 +
		ca->watermark[WATERMARK_METADATA];

	ca->watermark[WATERMARK_NONE] = ca->free.size / 2 +
		ca->watermark[WATERMARK_MOVINGGC];
#endif
	return 0;
}