xref: /openbmc/linux/drivers/md/bcache/btree.c (revision b24413180f5600bcb3bb70fbed5cf186b60864bd)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
4  *
5  * Uses a block device as cache for other block devices; optimized for SSDs.
6  * All allocation is done in buckets, which should match the erase block size
7  * of the device.
8  *
9  * Buckets containing cached data are kept on a heap sorted by priority;
10  * bucket priority is increased on cache hit, and periodically all the buckets
11  * on the heap have their priority scaled down. This currently is just used as
12  * an LRU but in the future should allow for more intelligent heuristics.
13  *
14  * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
15  * counter. Garbage collection is used to remove stale pointers.
16  *
17  * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
18  * as keys are inserted we only sort the pages that have not yet been written.
19  * When garbage collection is run, we resort the entire node.
20  *
21  * All configuration is done via sysfs; see Documentation/bcache.txt.
22  */
23 
24 #include "bcache.h"
25 #include "btree.h"
26 #include "debug.h"
27 #include "extents.h"
28 
29 #include <linux/slab.h>
30 #include <linux/bitops.h>
31 #include <linux/hash.h>
32 #include <linux/kthread.h>
33 #include <linux/prefetch.h>
34 #include <linux/random.h>
35 #include <linux/rcupdate.h>
36 #include <linux/sched/clock.h>
37 #include <linux/rculist.h>
38 
39 #include <trace/events/bcache.h>
40 
41 /*
42  * Todo:
43  * register_bcache: Return errors out to userspace correctly
44  *
45  * Writeback: don't undirty key until after a cache flush
46  *
47  * Create an iterator for key pointers
48  *
49  * On btree write error, mark bucket such that it won't be freed from the cache
50  *
51  * Journalling:
52  *   Check for bad keys in replay
53  *   Propagate barriers
54  *   Refcount journal entries in journal_replay
55  *
56  * Garbage collection:
57  *   Finish incremental gc
58  *   Gc should free old UUIDs, data for invalid UUIDs
59  *
60  * Provide a way to list backing device UUIDs we have data cached for, and
61  * probably how long it's been since we've seen them, and a way to invalidate
62  * dirty data for devices that will never be attached again
63  *
64  * Keep 1 min/5 min/15 min statistics of how busy a block device has been, so
65  * that based on that and how much dirty data we have we can keep writeback
66  * from being starved
67  *
68  * Add a tracepoint or somesuch to watch for writeback starvation
69  *
70  * When btree depth > 1 and splitting an interior node, we have to make sure
71  * alloc_bucket() cannot fail. This should be true but is not completely
72  * obvious.
73  *
74  * Plugging?
75  *
76  * If data write is less than hard sector size of ssd, round up offset in open
77  * bucket to the next whole sector
78  *
79  * Superblock needs to be fleshed out for multiple cache devices
80  *
81  * Add a sysfs tunable for the number of writeback IOs in flight
82  *
83  * Add a sysfs tunable for the number of open data buckets
84  *
85  * IO tracking: Can we track when one process is doing io on behalf of another?
86  * IO tracking: Don't use just an average, weigh more recent stuff higher
87  *
88  * Test module load/unload
89  */
90 
91 #define MAX_NEED_GC		64
92 #define MAX_SAVE_PRIO		72
93 
94 #define PTR_DIRTY_BIT		(((uint64_t) 1 << 36))
95 
96 #define PTR_HASH(c, k)							\
97 	(((k)->ptr[0] >> (c)->bucket_bits) | PTR_GEN(k, 0))
98 
99 #define insert_lock(s, b)	((b)->level <= (s)->lock)
100 
101 /*
102  * These macros are for recursing down the btree - they handle the details of
103  * locking and looking up nodes in the cache for you. They're best treated as
104  * mere syntax when reading code that uses them.
105  *
106  * op->lock determines whether we take a read or a write lock at a given depth.
107  * If you've got a read lock and find that you need a write lock (i.e. you're
108  * going to have to split), set op->lock and return -EINTR; btree_root() will
109  * call you again and you'll have the correct lock.
110  */
111 
112 /**
113  * btree - recurse down the btree on a specified key
114  * @fn:		function to call, which will be passed the child node
115  * @key:	key to recurse on
116  * @b:		parent btree node
117  * @op:		pointer to struct btree_op
118  */
119 #define btree(fn, key, b, op, ...)					\
120 ({									\
121 	int _r, l = (b)->level - 1;					\
122 	bool _w = l <= (op)->lock;					\
123 	struct btree *_child = bch_btree_node_get((b)->c, op, key, l,	\
124 						  _w, b);		\
125 	if (!IS_ERR(_child)) {						\
126 		_r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__);	\
127 		rw_unlock(_w, _child);					\
128 	} else								\
129 		_r = PTR_ERR(_child);					\
130 	_r;								\
131 })
132 
133 /**
134  * btree_root - call a function on the root of the btree
135  * @fn:		function to call, which will be passed the root node
136  * @c:		cache set
137  * @op:		pointer to struct btree_op
138  */
139 #define btree_root(fn, c, op, ...)					\
140 ({									\
141 	int _r = -EINTR;						\
142 	do {								\
143 		struct btree *_b = (c)->root;				\
144 		bool _w = insert_lock(op, _b);				\
145 		rw_lock(_w, _b, _b->level);				\
146 		if (_b == (c)->root &&					\
147 		    _w == insert_lock(op, _b)) {			\
148 			_r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__);	\
149 		}							\
150 		rw_unlock(_w, _b);					\
151 		bch_cannibalize_unlock(c);				\
152 		if (_r == -EINTR)					\
153 			schedule();					\
154 	} while (_r == -EINTR);						\
155 									\
156 	finish_wait(&(c)->btree_cache_wait, &(op)->wait);		\
157 	_r;								\
158 })
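/*
 * Illustrative sketch (editorial note, not driver code): a recursion written
 * against these macros follows the pattern used by check_recurse and gc_root
 * further down in this file, roughly:
 *
 *	static int bch_btree_example(struct btree *b, struct btree_op *op)
 *	{
 *		if (need_write_lock_but_have_read_lock) {
 *			op->lock = b->level;	// demand a write lock this deep
 *			return -EINTR;		// btree_root() retries with it
 *		}
 *
 *		return b->level ? btree(example, k, b, op) : 0;
 *	}
 *
 *	bch_btree_op_init(&op, SHRT_MAX);
 *	ret = btree_root(example, c, &op);
 *
 * "example", "k" and "need_write_lock_but_have_read_lock" are placeholders;
 * the real callers are bch_btree_check() and bch_btree_gc() below.
 */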
159 
160 static inline struct bset *write_block(struct btree *b)
161 {
162 	return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c);
163 }
164 
165 static void bch_btree_init_next(struct btree *b)
166 {
167 	/* If not a leaf node, always sort */
168 	if (b->level && b->keys.nsets)
169 		bch_btree_sort(&b->keys, &b->c->sort);
170 	else
171 		bch_btree_sort_lazy(&b->keys, &b->c->sort);
172 
173 	if (b->written < btree_blocks(b))
174 		bch_bset_init_next(&b->keys, write_block(b),
175 				   bset_magic(&b->c->sb));
176 
177 }
178 
179 /* Btree key manipulation */
180 
181 void bkey_put(struct cache_set *c, struct bkey *k)
182 {
183 	unsigned i;
184 
185 	for (i = 0; i < KEY_PTRS(k); i++)
186 		if (ptr_available(c, k, i))
187 			atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
188 }
189 
190 /* Btree IO */
191 
192 static uint64_t btree_csum_set(struct btree *b, struct bset *i)
193 {
194 	uint64_t crc = b->key.ptr[0];
195 	void *data = (void *) i + 8, *end = bset_bkey_last(i);
196 
197 	crc = bch_crc64_update(crc, data, end - data);
198 	return crc ^ 0xffffffffffffffffULL;
199 }
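/*
 * Editorial note: the "+ 8" above skips the csum field itself, the first
 * 8 bytes of struct bset, so the checksum covers everything from the magic
 * field up to the last key, seeded with the node's first pointer:
 *
 *	crc64(seed = b->key.ptr[0], &i->magic .. bset_bkey_last(i)) ^ ~0ULL
 */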
200 
201 void bch_btree_node_read_done(struct btree *b)
202 {
203 	const char *err = "bad btree header";
204 	struct bset *i = btree_bset_first(b);
205 	struct btree_iter *iter;
206 
207 	iter = mempool_alloc(b->c->fill_iter, GFP_NOIO);
208 	iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
209 	iter->used = 0;
210 
211 #ifdef CONFIG_BCACHE_DEBUG
212 	iter->b = &b->keys;
213 #endif
214 
215 	if (!i->seq)
216 		goto err;
217 
218 	for (;
219 	     b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq;
220 	     i = write_block(b)) {
221 		err = "unsupported bset version";
222 		if (i->version > BCACHE_BSET_VERSION)
223 			goto err;
224 
225 		err = "bad btree header";
226 		if (b->written + set_blocks(i, block_bytes(b->c)) >
227 		    btree_blocks(b))
228 			goto err;
229 
230 		err = "bad magic";
231 		if (i->magic != bset_magic(&b->c->sb))
232 			goto err;
233 
234 		err = "bad checksum";
235 		switch (i->version) {
236 		case 0:
237 			if (i->csum != csum_set(i))
238 				goto err;
239 			break;
240 		case BCACHE_BSET_VERSION:
241 			if (i->csum != btree_csum_set(b, i))
242 				goto err;
243 			break;
244 		}
245 
246 		err = "empty set";
247 		if (i != b->keys.set[0].data && !i->keys)
248 			goto err;
249 
250 		bch_btree_iter_push(iter, i->start, bset_bkey_last(i));
251 
252 		b->written += set_blocks(i, block_bytes(b->c));
253 	}
254 
255 	err = "corrupted btree";
256 	for (i = write_block(b);
257 	     bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
258 	     i = ((void *) i) + block_bytes(b->c))
259 		if (i->seq == b->keys.set[0].data->seq)
260 			goto err;
261 
262 	bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);
263 
264 	i = b->keys.set[0].data;
265 	err = "short btree key";
266 	if (b->keys.set[0].size &&
267 	    bkey_cmp(&b->key, &b->keys.set[0].end) < 0)
268 		goto err;
269 
270 	if (b->written < btree_blocks(b))
271 		bch_bset_init_next(&b->keys, write_block(b),
272 				   bset_magic(&b->c->sb));
273 out:
274 	mempool_free(iter, b->c->fill_iter);
275 	return;
276 err:
277 	set_btree_node_io_error(b);
278 	bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys",
279 			    err, PTR_BUCKET_NR(b->c, &b->key, 0),
280 			    bset_block_offset(b, i), i->keys);
281 	goto out;
282 }
283 
284 static void btree_node_read_endio(struct bio *bio)
285 {
286 	struct closure *cl = bio->bi_private;
287 	closure_put(cl);
288 }
289 
290 static void bch_btree_node_read(struct btree *b)
291 {
292 	uint64_t start_time = local_clock();
293 	struct closure cl;
294 	struct bio *bio;
295 
296 	trace_bcache_btree_read(b);
297 
298 	closure_init_stack(&cl);
299 
300 	bio = bch_bbio_alloc(b->c);
301 	bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
302 	bio->bi_end_io	= btree_node_read_endio;
303 	bio->bi_private	= &cl;
304 	bio->bi_opf = REQ_OP_READ | REQ_META;
305 
306 	bch_bio_map(bio, b->keys.set[0].data);
307 
308 	bch_submit_bbio(bio, b->c, &b->key, 0);
309 	closure_sync(&cl);
310 
311 	if (bio->bi_status)
312 		set_btree_node_io_error(b);
313 
314 	bch_bbio_free(bio, b->c);
315 
316 	if (btree_node_io_error(b))
317 		goto err;
318 
319 	bch_btree_node_read_done(b);
320 	bch_time_stats_update(&b->c->btree_read_time, start_time);
321 
322 	return;
323 err:
324 	bch_cache_set_error(b->c, "io error reading bucket %zu",
325 			    PTR_BUCKET_NR(b->c, &b->key, 0));
326 }
327 
328 static void btree_complete_write(struct btree *b, struct btree_write *w)
329 {
330 	if (w->prio_blocked &&
331 	    !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
332 		wake_up_allocators(b->c);
333 
334 	if (w->journal) {
335 		atomic_dec_bug(w->journal);
336 		__closure_wake_up(&b->c->journal.wait);
337 	}
338 
339 	w->prio_blocked	= 0;
340 	w->journal	= NULL;
341 }
342 
343 static void btree_node_write_unlock(struct closure *cl)
344 {
345 	struct btree *b = container_of(cl, struct btree, io);
346 
347 	up(&b->io_mutex);
348 }
349 
350 static void __btree_node_write_done(struct closure *cl)
351 {
352 	struct btree *b = container_of(cl, struct btree, io);
353 	struct btree_write *w = btree_prev_write(b);
354 
355 	bch_bbio_free(b->bio, b->c);
356 	b->bio = NULL;
357 	btree_complete_write(b, w);
358 
359 	if (btree_node_dirty(b))
360 		schedule_delayed_work(&b->work, 30 * HZ);
361 
362 	closure_return_with_destructor(cl, btree_node_write_unlock);
363 }
364 
365 static void btree_node_write_done(struct closure *cl)
366 {
367 	struct btree *b = container_of(cl, struct btree, io);
368 
369 	bio_free_pages(b->bio);
370 	__btree_node_write_done(cl);
371 }
372 
373 static void btree_node_write_endio(struct bio *bio)
374 {
375 	struct closure *cl = bio->bi_private;
376 	struct btree *b = container_of(cl, struct btree, io);
377 
378 	if (bio->bi_status)
379 		set_btree_node_io_error(b);
380 
381 	bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree");
382 	closure_put(cl);
383 }
384 
385 static void do_btree_node_write(struct btree *b)
386 {
387 	struct closure *cl = &b->io;
388 	struct bset *i = btree_bset_last(b);
389 	BKEY_PADDED(key) k;
390 
391 	i->version	= BCACHE_BSET_VERSION;
392 	i->csum		= btree_csum_set(b, i);
393 
394 	BUG_ON(b->bio);
395 	b->bio = bch_bbio_alloc(b->c);
396 
397 	b->bio->bi_end_io	= btree_node_write_endio;
398 	b->bio->bi_private	= cl;
399 	b->bio->bi_iter.bi_size	= roundup(set_bytes(i), block_bytes(b->c));
400 	b->bio->bi_opf		= REQ_OP_WRITE | REQ_META | REQ_FUA;
401 	bch_bio_map(b->bio, i);
402 
403 	/*
404 	 * If we're appending to a leaf node, we don't technically need FUA -
405 	 * this write just needs to be persisted before the next journal write,
406 	 * which will be marked FLUSH|FUA.
407 	 *
408 	 * Similarly if we're writing a new btree root - the pointer is going to
409 	 * be in the next journal entry.
410 	 *
411 	 * But if we're writing a new btree node (that isn't a root) or
412 	 * appending to a non leaf btree node, we need either FUA or a flush
413 	 * when we write the parent with the new pointer. FUA is cheaper than a
414 	 * flush, and writes appending to leaf nodes aren't blocking anything so
415 	 * just make all btree node writes FUA to keep things sane.
416 	 */
417 
418 	bkey_copy(&k.key, &b->key);
419 	SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
420 		       bset_sector_offset(&b->keys, i));
421 
422 	if (!bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
423 		int j;
424 		struct bio_vec *bv;
425 		void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
426 
427 		bio_for_each_segment_all(bv, b->bio, j)
428 			memcpy(page_address(bv->bv_page),
429 			       base + j * PAGE_SIZE, PAGE_SIZE);
430 
431 		bch_submit_bbio(b->bio, b->c, &k.key, 0);
432 
433 		continue_at(cl, btree_node_write_done, NULL);
434 	} else {
435 		b->bio->bi_vcnt = 0;
436 		bch_bio_map(b->bio, i);
437 
438 		bch_submit_bbio(b->bio, b->c, &k.key, 0);
439 
440 		closure_sync(cl);
441 		continue_at_nobarrier(cl, __btree_node_write_done, NULL);
442 	}
443 }
444 
445 void __bch_btree_node_write(struct btree *b, struct closure *parent)
446 {
447 	struct bset *i = btree_bset_last(b);
448 
449 	lockdep_assert_held(&b->write_lock);
450 
451 	trace_bcache_btree_write(b);
452 
453 	BUG_ON(current->bio_list);
454 	BUG_ON(b->written >= btree_blocks(b));
455 	BUG_ON(b->written && !i->keys);
456 	BUG_ON(btree_bset_first(b)->seq != i->seq);
457 	bch_check_keys(&b->keys, "writing");
458 
459 	cancel_delayed_work(&b->work);
460 
461 	/* If caller isn't waiting for write, parent refcount is cache set */
462 	down(&b->io_mutex);
463 	closure_init(&b->io, parent ?: &b->c->cl);
464 
465 	clear_bit(BTREE_NODE_dirty,	 &b->flags);
466 	change_bit(BTREE_NODE_write_idx, &b->flags);
467 
468 	do_btree_node_write(b);
469 
470 	atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size,
471 			&PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);
472 
473 	b->written += set_blocks(i, block_bytes(b->c));
474 }
475 
476 void bch_btree_node_write(struct btree *b, struct closure *parent)
477 {
478 	unsigned nsets = b->keys.nsets;
479 
480 	lockdep_assert_held(&b->lock);
481 
482 	__bch_btree_node_write(b, parent);
483 
484 	/*
485 	 * do verify if there was more than one set initially (i.e. we did a
486 	 * sort) and we sorted down to a single set:
487 	 */
488 	if (nsets && !b->keys.nsets)
489 		bch_btree_verify(b);
490 
491 	bch_btree_init_next(b);
492 }
493 
494 static void bch_btree_node_write_sync(struct btree *b)
495 {
496 	struct closure cl;
497 
498 	closure_init_stack(&cl);
499 
500 	mutex_lock(&b->write_lock);
501 	bch_btree_node_write(b, &cl);
502 	mutex_unlock(&b->write_lock);
503 
504 	closure_sync(&cl);
505 }
506 
507 static void btree_node_write_work(struct work_struct *w)
508 {
509 	struct btree *b = container_of(to_delayed_work(w), struct btree, work);
510 
511 	mutex_lock(&b->write_lock);
512 	if (btree_node_dirty(b))
513 		__bch_btree_node_write(b, NULL);
514 	mutex_unlock(&b->write_lock);
515 }
516 
517 static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
518 {
519 	struct bset *i = btree_bset_last(b);
520 	struct btree_write *w = btree_current_write(b);
521 
522 	lockdep_assert_held(&b->write_lock);
523 
524 	BUG_ON(!b->written);
525 	BUG_ON(!i->keys);
526 
527 	if (!btree_node_dirty(b))
528 		schedule_delayed_work(&b->work, 30 * HZ);
529 
530 	set_btree_node_dirty(b);
531 
532 	if (journal_ref) {
533 		if (w->journal &&
534 		    journal_pin_cmp(b->c, w->journal, journal_ref)) {
535 			atomic_dec_bug(w->journal);
536 			w->journal = NULL;
537 		}
538 
539 		if (!w->journal) {
540 			w->journal = journal_ref;
541 			atomic_inc(w->journal);
542 		}
543 	}
544 
545 	/* Force write if set is too big */
546 	if (set_bytes(i) > PAGE_SIZE - 48 &&
547 	    !current->bio_list)
548 		bch_btree_node_write(b, NULL);
549 }
550 
551 /*
552  * Btree in memory cache - allocation/freeing
553  * mca -> memory cache
554  */
555 
556 #define mca_reserve(c)	(((c->root && c->root->level)		\
557 			  ? c->root->level : 1) * 8 + 16)
558 #define mca_can_free(c)						\
559 	max_t(int, 0, c->btree_cache_used - mca_reserve(c))
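/*
 * Worked example of the reserve above (editorial, with assumed numbers): a
 * root at level 2 gives mca_reserve() = 2 * 8 + 16 = 32 nodes, so with 50
 * nodes in the cache mca_can_free() reports 18; before the root exists (or
 * with a level 0 root) the reserve is 1 * 8 + 16 = 24.
 */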
560 
561 static void mca_data_free(struct btree *b)
562 {
563 	BUG_ON(b->io_mutex.count != 1);
564 
565 	bch_btree_keys_free(&b->keys);
566 
567 	b->c->btree_cache_used--;
568 	list_move(&b->list, &b->c->btree_cache_freed);
569 }
570 
571 static void mca_bucket_free(struct btree *b)
572 {
573 	BUG_ON(btree_node_dirty(b));
574 
575 	b->key.ptr[0] = 0;
576 	hlist_del_init_rcu(&b->hash);
577 	list_move(&b->list, &b->c->btree_cache_freeable);
578 }
579 
580 static unsigned btree_order(struct bkey *k)
581 {
582 	return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
583 }
584 
585 static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
586 {
587 	if (!bch_btree_keys_alloc(&b->keys,
588 				  max_t(unsigned,
589 					ilog2(b->c->btree_pages),
590 					btree_order(k)),
591 				  gfp)) {
592 		b->c->btree_cache_used++;
593 		list_move(&b->list, &b->c->btree_cache);
594 	} else {
595 		list_move(&b->list, &b->c->btree_cache_freed);
596 	}
597 }
598 
599 static struct btree *mca_bucket_alloc(struct cache_set *c,
600 				      struct bkey *k, gfp_t gfp)
601 {
602 	struct btree *b = kzalloc(sizeof(struct btree), gfp);
603 	if (!b)
604 		return NULL;
605 
606 	init_rwsem(&b->lock);
607 	lockdep_set_novalidate_class(&b->lock);
608 	mutex_init(&b->write_lock);
609 	lockdep_set_novalidate_class(&b->write_lock);
610 	INIT_LIST_HEAD(&b->list);
611 	INIT_DELAYED_WORK(&b->work, btree_node_write_work);
612 	b->c = c;
613 	sema_init(&b->io_mutex, 1);
614 
615 	mca_data_alloc(b, k, gfp);
616 	return b;
617 }
618 
619 static int mca_reap(struct btree *b, unsigned min_order, bool flush)
620 {
621 	struct closure cl;
622 
623 	closure_init_stack(&cl);
624 	lockdep_assert_held(&b->c->bucket_lock);
625 
626 	if (!down_write_trylock(&b->lock))
627 		return -ENOMEM;
628 
629 	BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data);
630 
631 	if (b->keys.page_order < min_order)
632 		goto out_unlock;
633 
634 	if (!flush) {
635 		if (btree_node_dirty(b))
636 			goto out_unlock;
637 
638 		if (down_trylock(&b->io_mutex))
639 			goto out_unlock;
640 		up(&b->io_mutex);
641 	}
642 
643 	mutex_lock(&b->write_lock);
644 	if (btree_node_dirty(b))
645 		__bch_btree_node_write(b, &cl);
646 	mutex_unlock(&b->write_lock);
647 
648 	closure_sync(&cl);
649 
650 	/* wait for any in flight btree write */
651 	down(&b->io_mutex);
652 	up(&b->io_mutex);
653 
654 	return 0;
655 out_unlock:
656 	rw_unlock(true, b);
657 	return -ENOMEM;
658 }
659 
660 static unsigned long bch_mca_scan(struct shrinker *shrink,
661 				  struct shrink_control *sc)
662 {
663 	struct cache_set *c = container_of(shrink, struct cache_set, shrink);
664 	struct btree *b, *t;
665 	unsigned long i, nr = sc->nr_to_scan;
666 	unsigned long freed = 0;
667 
668 	if (c->shrinker_disabled)
669 		return SHRINK_STOP;
670 
671 	if (c->btree_cache_alloc_lock)
672 		return SHRINK_STOP;
673 
674 	/* Return -1 if we can't do anything right now */
675 	if (sc->gfp_mask & __GFP_IO)
676 		mutex_lock(&c->bucket_lock);
677 	else if (!mutex_trylock(&c->bucket_lock))
678 		return -1;
679 
680 	/*
681 	 * It's _really_ critical that we don't free too many btree nodes - we
682 	 * have to always leave ourselves a reserve. The reserve is how we
683 	 * guarantee that allocating memory for a new btree node can always
684 	 * succeed, so that inserting keys into the btree can always succeed and
685 	 * IO can always make forward progress:
686 	 */
687 	nr /= c->btree_pages;
688 	nr = min_t(unsigned long, nr, mca_can_free(c));
689 
690 	i = 0;
691 	list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) {
692 		if (freed >= nr)
693 			break;
694 
695 		if (++i > 3 &&
696 		    !mca_reap(b, 0, false)) {
697 			mca_data_free(b);
698 			rw_unlock(true, b);
699 			freed++;
700 		}
701 	}
702 
703 	for (i = 0; (nr--) && i < c->btree_cache_used; i++) {
704 		if (list_empty(&c->btree_cache))
705 			goto out;
706 
707 		b = list_first_entry(&c->btree_cache, struct btree, list);
708 		list_rotate_left(&c->btree_cache);
709 
710 		if (!b->accessed &&
711 		    !mca_reap(b, 0, false)) {
712 			mca_bucket_free(b);
713 			mca_data_free(b);
714 			rw_unlock(true, b);
715 			freed++;
716 		} else
717 			b->accessed = 0;
718 	}
719 out:
720 	mutex_unlock(&c->bucket_lock);
721 	return freed;
722 }
723 
724 static unsigned long bch_mca_count(struct shrinker *shrink,
725 				   struct shrink_control *sc)
726 {
727 	struct cache_set *c = container_of(shrink, struct cache_set, shrink);
728 
729 	if (c->shrinker_disabled)
730 		return 0;
731 
732 	if (c->btree_cache_alloc_lock)
733 		return 0;
734 
735 	return mca_can_free(c) * c->btree_pages;
736 }
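/*
 * Editorial note on units: bch_mca_count() reports freeable nodes scaled up
 * to pages (mca_can_free() * c->btree_pages), and bch_mca_scan() divides
 * sc->nr_to_scan back down by c->btree_pages before clamping it with
 * mca_can_free(). E.g. with btree_pages = 64, an nr_to_scan of 128 becomes a
 * target of just two nodes per scan call.
 */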
737 
738 void bch_btree_cache_free(struct cache_set *c)
739 {
740 	struct btree *b;
741 	struct closure cl;
742 	closure_init_stack(&cl);
743 
744 	if (c->shrink.list.next)
745 		unregister_shrinker(&c->shrink);
746 
747 	mutex_lock(&c->bucket_lock);
748 
749 #ifdef CONFIG_BCACHE_DEBUG
750 	if (c->verify_data)
751 		list_move(&c->verify_data->list, &c->btree_cache);
752 
753 	free_pages((unsigned long) c->verify_ondisk, ilog2(bucket_pages(c)));
754 #endif
755 
756 	list_splice(&c->btree_cache_freeable,
757 		    &c->btree_cache);
758 
759 	while (!list_empty(&c->btree_cache)) {
760 		b = list_first_entry(&c->btree_cache, struct btree, list);
761 
762 		if (btree_node_dirty(b))
763 			btree_complete_write(b, btree_current_write(b));
764 		clear_bit(BTREE_NODE_dirty, &b->flags);
765 
766 		mca_data_free(b);
767 	}
768 
769 	while (!list_empty(&c->btree_cache_freed)) {
770 		b = list_first_entry(&c->btree_cache_freed,
771 				     struct btree, list);
772 		list_del(&b->list);
773 		cancel_delayed_work_sync(&b->work);
774 		kfree(b);
775 	}
776 
777 	mutex_unlock(&c->bucket_lock);
778 }
779 
780 int bch_btree_cache_alloc(struct cache_set *c)
781 {
782 	unsigned i;
783 
784 	for (i = 0; i < mca_reserve(c); i++)
785 		if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
786 			return -ENOMEM;
787 
788 	list_splice_init(&c->btree_cache,
789 			 &c->btree_cache_freeable);
790 
791 #ifdef CONFIG_BCACHE_DEBUG
792 	mutex_init(&c->verify_lock);
793 
794 	c->verify_ondisk = (void *)
795 		__get_free_pages(GFP_KERNEL, ilog2(bucket_pages(c)));
796 
797 	c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);
798 
799 	if (c->verify_data &&
800 	    c->verify_data->keys.set->data)
801 		list_del_init(&c->verify_data->list);
802 	else
803 		c->verify_data = NULL;
804 #endif
805 
806 	c->shrink.count_objects = bch_mca_count;
807 	c->shrink.scan_objects = bch_mca_scan;
808 	c->shrink.seeks = 4;
809 	c->shrink.batch = c->btree_pages * 2;
810 	register_shrinker(&c->shrink);
811 
812 	return 0;
813 }
814 
815 /* Btree in memory cache - hash table */
816 
817 static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k)
818 {
819 	return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)];
820 }
821 
822 static struct btree *mca_find(struct cache_set *c, struct bkey *k)
823 {
824 	struct btree *b;
825 
826 	rcu_read_lock();
827 	hlist_for_each_entry_rcu(b, mca_hash(c, k), hash)
828 		if (PTR_HASH(c, &b->key) == PTR_HASH(c, k))
829 			goto out;
830 	b = NULL;
831 out:
832 	rcu_read_unlock();
833 	return b;
834 }
835 
836 static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op)
837 {
838 	struct task_struct *old;
839 
840 	old = cmpxchg(&c->btree_cache_alloc_lock, NULL, current);
841 	if (old && old != current) {
842 		if (op)
843 			prepare_to_wait(&c->btree_cache_wait, &op->wait,
844 					TASK_UNINTERRUPTIBLE);
845 		return -EINTR;
846 	}
847 
848 	return 0;
849 }
850 
851 static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
852 				     struct bkey *k)
853 {
854 	struct btree *b;
855 
856 	trace_bcache_btree_cache_cannibalize(c);
857 
858 	if (mca_cannibalize_lock(c, op))
859 		return ERR_PTR(-EINTR);
860 
861 	list_for_each_entry_reverse(b, &c->btree_cache, list)
862 		if (!mca_reap(b, btree_order(k), false))
863 			return b;
864 
865 	list_for_each_entry_reverse(b, &c->btree_cache, list)
866 		if (!mca_reap(b, btree_order(k), true))
867 			return b;
868 
869 	WARN(1, "btree cache cannibalize failed\n");
870 	return ERR_PTR(-ENOMEM);
871 }
872 
873 /*
874  * We can only have one thread cannibalizing other cached btree nodes at a time,
875  * or we'll deadlock. We use an open coded mutex to ensure that, which
876  * mca_cannibalize_lock() will take. This means every time we unlock the root of
877  * the btree, we need to release this lock if we have it held.
878  */
879 static void bch_cannibalize_unlock(struct cache_set *c)
880 {
881 	if (c->btree_cache_alloc_lock == current) {
882 		c->btree_cache_alloc_lock = NULL;
883 		wake_up(&c->btree_cache_wait);
884 	}
885 }
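/*
 * Editorial sketch of the locking protocol above: mca_cannibalize_lock()
 * claims c->btree_cache_alloc_lock with cmpxchg() and, if another task owns
 * it, parks the caller on c->btree_cache_wait (when an op is supplied) and
 * returns -EINTR. That error unwinds to btree_root(), which always calls
 * bch_cannibalize_unlock() before sleeping and retrying, so the "mutex" is
 * never held across a schedule() in that loop.
 */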
886 
887 static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op,
888 			       struct bkey *k, int level)
889 {
890 	struct btree *b;
891 
892 	BUG_ON(current->bio_list);
893 
894 	lockdep_assert_held(&c->bucket_lock);
895 
896 	if (mca_find(c, k))
897 		return NULL;
898 
899 	/* btree_node_free() doesn't free memory; it sticks the node on the end of
900 	 * the freeable list. Check if there are any freed nodes there:
901 	 */
902 	list_for_each_entry(b, &c->btree_cache_freeable, list)
903 		if (!mca_reap(b, btree_order(k), false))
904 			goto out;
905 
906 	/* We never free struct btree itself, just the memory that holds the on
907 	 * disk node. Check the freed list before allocating a new one:
908 	 */
909 	list_for_each_entry(b, &c->btree_cache_freed, list)
910 		if (!mca_reap(b, 0, false)) {
911 			mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
912 			if (!b->keys.set[0].data)
913 				goto err;
914 			else
915 				goto out;
916 		}
917 
918 	b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO);
919 	if (!b)
920 		goto err;
921 
922 	BUG_ON(!down_write_trylock(&b->lock));
923 	if (!b->keys.set->data)
924 		goto err;
925 out:
926 	BUG_ON(b->io_mutex.count != 1);
927 
928 	bkey_copy(&b->key, k);
929 	list_move(&b->list, &c->btree_cache);
930 	hlist_del_init_rcu(&b->hash);
931 	hlist_add_head_rcu(&b->hash, mca_hash(c, k));
932 
933 	lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
934 	b->parent	= (void *) ~0UL;
935 	b->flags	= 0;
936 	b->written	= 0;
937 	b->level	= level;
938 
939 	if (!b->level)
940 		bch_btree_keys_init(&b->keys, &bch_extent_keys_ops,
941 				    &b->c->expensive_debug_checks);
942 	else
943 		bch_btree_keys_init(&b->keys, &bch_btree_keys_ops,
944 				    &b->c->expensive_debug_checks);
945 
946 	return b;
947 err:
948 	if (b)
949 		rw_unlock(true, b);
950 
951 	b = mca_cannibalize(c, op, k);
952 	if (!IS_ERR(b))
953 		goto out;
954 
955 	return b;
956 }
957 
958 /**
959  * bch_btree_node_get - find a btree node in the cache and lock it, reading it
960  * in from disk if necessary.
961  *
962  * If IO is necessary and running under generic_make_request, returns -EAGAIN.
963  *
964  * The btree node will have either a read or a write lock held, depending on
965  * level and op->lock.
966  */
967 struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
968 				 struct bkey *k, int level, bool write,
969 				 struct btree *parent)
970 {
971 	int i = 0;
972 	struct btree *b;
973 
974 	BUG_ON(level < 0);
975 retry:
976 	b = mca_find(c, k);
977 
978 	if (!b) {
979 		if (current->bio_list)
980 			return ERR_PTR(-EAGAIN);
981 
982 		mutex_lock(&c->bucket_lock);
983 		b = mca_alloc(c, op, k, level);
984 		mutex_unlock(&c->bucket_lock);
985 
986 		if (!b)
987 			goto retry;
988 		if (IS_ERR(b))
989 			return b;
990 
991 		bch_btree_node_read(b);
992 
993 		if (!write)
994 			downgrade_write(&b->lock);
995 	} else {
996 		rw_lock(write, b, level);
997 		if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) {
998 			rw_unlock(write, b);
999 			goto retry;
1000 		}
1001 		BUG_ON(b->level != level);
1002 	}
1003 
1004 	b->parent = parent;
1005 	b->accessed = 1;
1006 
1007 	for (; i <= b->keys.nsets && b->keys.set[i].size; i++) {
1008 		prefetch(b->keys.set[i].tree);
1009 		prefetch(b->keys.set[i].data);
1010 	}
1011 
1012 	for (; i <= b->keys.nsets; i++)
1013 		prefetch(b->keys.set[i].data);
1014 
1015 	if (btree_node_io_error(b)) {
1016 		rw_unlock(write, b);
1017 		return ERR_PTR(-EIO);
1018 	}
1019 
1020 	BUG_ON(!b->written);
1021 
1022 	return b;
1023 }
1024 
1025 static void btree_node_prefetch(struct btree *parent, struct bkey *k)
1026 {
1027 	struct btree *b;
1028 
1029 	mutex_lock(&parent->c->bucket_lock);
1030 	b = mca_alloc(parent->c, NULL, k, parent->level - 1);
1031 	mutex_unlock(&parent->c->bucket_lock);
1032 
1033 	if (!IS_ERR_OR_NULL(b)) {
1034 		b->parent = parent;
1035 		bch_btree_node_read(b);
1036 		rw_unlock(true, b);
1037 	}
1038 }
1039 
1040 /* Btree alloc */
1041 
1042 static void btree_node_free(struct btree *b)
1043 {
1044 	trace_bcache_btree_node_free(b);
1045 
1046 	BUG_ON(b == b->c->root);
1047 
1048 	mutex_lock(&b->write_lock);
1049 
1050 	if (btree_node_dirty(b))
1051 		btree_complete_write(b, btree_current_write(b));
1052 	clear_bit(BTREE_NODE_dirty, &b->flags);
1053 
1054 	mutex_unlock(&b->write_lock);
1055 
1056 	cancel_delayed_work(&b->work);
1057 
1058 	mutex_lock(&b->c->bucket_lock);
1059 	bch_bucket_free(b->c, &b->key);
1060 	mca_bucket_free(b);
1061 	mutex_unlock(&b->c->bucket_lock);
1062 }
1063 
1064 struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
1065 				     int level, bool wait,
1066 				     struct btree *parent)
1067 {
1068 	BKEY_PADDED(key) k;
1069 	struct btree *b = ERR_PTR(-EAGAIN);
1070 
1071 	mutex_lock(&c->bucket_lock);
1072 retry:
1073 	if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait))
1074 		goto err;
1075 
1076 	bkey_put(c, &k.key);
1077 	SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);
1078 
1079 	b = mca_alloc(c, op, &k.key, level);
1080 	if (IS_ERR(b))
1081 		goto err_free;
1082 
1083 	if (!b) {
1084 		cache_bug(c,
1085 			"Tried to allocate bucket that was in btree cache");
1086 		goto retry;
1087 	}
1088 
1089 	b->accessed = 1;
1090 	b->parent = parent;
1091 	bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb));
1092 
1093 	mutex_unlock(&c->bucket_lock);
1094 
1095 	trace_bcache_btree_node_alloc(b);
1096 	return b;
1097 err_free:
1098 	bch_bucket_free(c, &k.key);
1099 err:
1100 	mutex_unlock(&c->bucket_lock);
1101 
1102 	trace_bcache_btree_node_alloc_fail(c);
1103 	return b;
1104 }
1105 
1106 static struct btree *bch_btree_node_alloc(struct cache_set *c,
1107 					  struct btree_op *op, int level,
1108 					  struct btree *parent)
1109 {
1110 	return __bch_btree_node_alloc(c, op, level, op != NULL, parent);
1111 }
1112 
1113 static struct btree *btree_node_alloc_replacement(struct btree *b,
1114 						  struct btree_op *op)
1115 {
1116 	struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent);
1117 	if (!IS_ERR_OR_NULL(n)) {
1118 		mutex_lock(&n->write_lock);
1119 		bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
1120 		bkey_copy_key(&n->key, &b->key);
1121 		mutex_unlock(&n->write_lock);
1122 	}
1123 
1124 	return n;
1125 }
1126 
1127 static void make_btree_freeing_key(struct btree *b, struct bkey *k)
1128 {
1129 	unsigned i;
1130 
1131 	mutex_lock(&b->c->bucket_lock);
1132 
1133 	atomic_inc(&b->c->prio_blocked);
1134 
1135 	bkey_copy(k, &b->key);
1136 	bkey_copy_key(k, &ZERO_KEY);
1137 
1138 	for (i = 0; i < KEY_PTRS(k); i++)
1139 		SET_PTR_GEN(k, i,
1140 			    bch_inc_gen(PTR_CACHE(b->c, &b->key, i),
1141 					PTR_BUCKET(b->c, &b->key, i)));
1142 
1143 	mutex_unlock(&b->c->bucket_lock);
1144 }
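/*
 * Editorial note: a freeing key keeps the dying node's pointers (with their
 * generations bumped) but gets ZERO_KEY as its key. That is the marker
 * __bch_btree_mark_key() skips over, and what the out_nocoalesce path in
 * btree_gc_coalesce() matches with !bkey_cmp(k, &ZERO_KEY) when dropping the
 * prio_blocked reference taken above.
 */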
1145 
1146 static int btree_check_reserve(struct btree *b, struct btree_op *op)
1147 {
1148 	struct cache_set *c = b->c;
1149 	struct cache *ca;
1150 	unsigned i, reserve = (c->root->level - b->level) * 2 + 1;
1151 
1152 	mutex_lock(&c->bucket_lock);
1153 
1154 	for_each_cache(ca, c, i)
1155 		if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
1156 			if (op)
1157 				prepare_to_wait(&c->btree_cache_wait, &op->wait,
1158 						TASK_UNINTERRUPTIBLE);
1159 			mutex_unlock(&c->bucket_lock);
1160 			return -EINTR;
1161 		}
1162 
1163 	mutex_unlock(&c->bucket_lock);
1164 
1165 	return mca_cannibalize_lock(b->c, op);
1166 }
1167 
1168 /* Garbage collection */
1169 
1170 static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
1171 				    struct bkey *k)
1172 {
1173 	uint8_t stale = 0;
1174 	unsigned i;
1175 	struct bucket *g;
1176 
1177 	/*
1178 	 * ptr_invalid() can't return true for the keys that mark btree nodes as
1179 	 * freed, but since ptr_bad() returns true we'll never actually use them
1180  * for anything and thus we don't want to mark their pointers here
1181 	 */
1182 	if (!bkey_cmp(k, &ZERO_KEY))
1183 		return stale;
1184 
1185 	for (i = 0; i < KEY_PTRS(k); i++) {
1186 		if (!ptr_available(c, k, i))
1187 			continue;
1188 
1189 		g = PTR_BUCKET(c, k, i);
1190 
1191 		if (gen_after(g->last_gc, PTR_GEN(k, i)))
1192 			g->last_gc = PTR_GEN(k, i);
1193 
1194 		if (ptr_stale(c, k, i)) {
1195 			stale = max(stale, ptr_stale(c, k, i));
1196 			continue;
1197 		}
1198 
1199 		cache_bug_on(GC_MARK(g) &&
1200 			     (GC_MARK(g) == GC_MARK_METADATA) != (level != 0),
1201 			     c, "inconsistent ptrs: mark = %llu, level = %i",
1202 			     GC_MARK(g), level);
1203 
1204 		if (level)
1205 			SET_GC_MARK(g, GC_MARK_METADATA);
1206 		else if (KEY_DIRTY(k))
1207 			SET_GC_MARK(g, GC_MARK_DIRTY);
1208 		else if (!GC_MARK(g))
1209 			SET_GC_MARK(g, GC_MARK_RECLAIMABLE);
1210 
1211 		/* guard against overflow */
1212 		SET_GC_SECTORS_USED(g, min_t(unsigned,
1213 					     GC_SECTORS_USED(g) + KEY_SIZE(k),
1214 					     MAX_GC_SECTORS_USED));
1215 
1216 		BUG_ON(!GC_SECTORS_USED(g));
1217 	}
1218 
1219 	return stale;
1220 }
1221 
1222 #define btree_mark_key(b, k)	__bch_btree_mark_key(b->c, b->level, k)
1223 
1224 void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
1225 {
1226 	unsigned i;
1227 
1228 	for (i = 0; i < KEY_PTRS(k); i++)
1229 		if (ptr_available(c, k, i) &&
1230 		    !ptr_stale(c, k, i)) {
1231 			struct bucket *b = PTR_BUCKET(c, k, i);
1232 
1233 			b->gen = PTR_GEN(k, i);
1234 
1235 			if (level && bkey_cmp(k, &ZERO_KEY))
1236 				b->prio = BTREE_PRIO;
1237 			else if (!level && b->prio == BTREE_PRIO)
1238 				b->prio = INITIAL_PRIO;
1239 		}
1240 
1241 	__bch_btree_mark_key(c, level, k);
1242 }
1243 
1244 static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
1245 {
1246 	uint8_t stale = 0;
1247 	unsigned keys = 0, good_keys = 0;
1248 	struct bkey *k;
1249 	struct btree_iter iter;
1250 	struct bset_tree *t;
1251 
1252 	gc->nodes++;
1253 
1254 	for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
1255 		stale = max(stale, btree_mark_key(b, k));
1256 		keys++;
1257 
1258 		if (bch_ptr_bad(&b->keys, k))
1259 			continue;
1260 
1261 		gc->key_bytes += bkey_u64s(k);
1262 		gc->nkeys++;
1263 		good_keys++;
1264 
1265 		gc->data += KEY_SIZE(k);
1266 	}
1267 
1268 	for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++)
1269 		btree_bug_on(t->size &&
1270 			     bset_written(&b->keys, t) &&
1271 			     bkey_cmp(&b->key, &t->end) < 0,
1272 			     b, "found short btree key in gc");
1273 
1274 	if (b->c->gc_always_rewrite)
1275 		return true;
1276 
1277 	if (stale > 10)
1278 		return true;
1279 
1280 	if ((keys - good_keys) * 2 > keys)
1281 		return true;
1282 
1283 	return false;
1284 }
1285 
1286 #define GC_MERGE_NODES	4U
1287 
1288 struct gc_merge_info {
1289 	struct btree	*b;
1290 	unsigned	keys;
1291 };
1292 
1293 static int bch_btree_insert_node(struct btree *, struct btree_op *,
1294 				 struct keylist *, atomic_t *, struct bkey *);
1295 
1296 static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
1297 			     struct gc_stat *gc, struct gc_merge_info *r)
1298 {
1299 	unsigned i, nodes = 0, keys = 0, blocks;
1300 	struct btree *new_nodes[GC_MERGE_NODES];
1301 	struct keylist keylist;
1302 	struct closure cl;
1303 	struct bkey *k;
1304 
1305 	bch_keylist_init(&keylist);
1306 
1307 	if (btree_check_reserve(b, NULL))
1308 		return 0;
1309 
1310 	memset(new_nodes, 0, sizeof(new_nodes));
1311 	closure_init_stack(&cl);
1312 
1313 	while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
1314 		keys += r[nodes++].keys;
1315 
1316 	blocks = btree_default_blocks(b->c) * 2 / 3;
1317 
1318 	if (nodes < 2 ||
1319 	    __set_blocks(b->keys.set[0].data, keys,
1320 			 block_bytes(b->c)) > blocks * (nodes - 1))
1321 		return 0;
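	/*
	 * Editorial example with assumed geometry: if a node holds 64 blocks,
	 * blocks = 42 above, so coalescing e.g. 4 nodes is only attempted when
	 * every key fits in 42 * 3 = 126 blocks, i.e. when the result leaves
	 * each surviving node no more than about two thirds full.
	 */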
1322 
1323 	for (i = 0; i < nodes; i++) {
1324 		new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL);
1325 		if (IS_ERR_OR_NULL(new_nodes[i]))
1326 			goto out_nocoalesce;
1327 	}
1328 
1329 	/*
1330 	 * We have to check the reserve here, after we've allocated our new
1331 	 * nodes, to make sure the insert below will succeed - we also check
1332 	 * before as an optimization to potentially avoid a bunch of expensive
1333 	 * allocs/sorts
1334 	 */
1335 	if (btree_check_reserve(b, NULL))
1336 		goto out_nocoalesce;
1337 
1338 	for (i = 0; i < nodes; i++)
1339 		mutex_lock(&new_nodes[i]->write_lock);
1340 
1341 	for (i = nodes - 1; i > 0; --i) {
1342 		struct bset *n1 = btree_bset_first(new_nodes[i]);
1343 		struct bset *n2 = btree_bset_first(new_nodes[i - 1]);
1344 		struct bkey *k, *last = NULL;
1345 
1346 		keys = 0;
1347 
1348 		if (i > 1) {
1349 			for (k = n2->start;
1350 			     k < bset_bkey_last(n2);
1351 			     k = bkey_next(k)) {
1352 				if (__set_blocks(n1, n1->keys + keys +
1353 						 bkey_u64s(k),
1354 						 block_bytes(b->c)) > blocks)
1355 					break;
1356 
1357 				last = k;
1358 				keys += bkey_u64s(k);
1359 			}
1360 		} else {
1361 			/*
1362 			 * Last node we're not getting rid of - we're getting
1363 			 * rid of the node at r[0]. Have to try and fit all of
1364 			 * the remaining keys into this node; we can't ensure
1365 			 * they will always fit due to rounding and variable
1366 			 * length keys (shouldn't be possible in practice,
1367 			 * though)
1368 			 */
1369 			if (__set_blocks(n1, n1->keys + n2->keys,
1370 					 block_bytes(b->c)) >
1371 			    btree_blocks(new_nodes[i]))
1372 				goto out_nocoalesce;
1373 
1374 			keys = n2->keys;
1375 			/* Take the key of the node we're getting rid of */
1376 			last = &r->b->key;
1377 		}
1378 
1379 		BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c)) >
1380 		       btree_blocks(new_nodes[i]));
1381 
1382 		if (last)
1383 			bkey_copy_key(&new_nodes[i]->key, last);
1384 
1385 		memcpy(bset_bkey_last(n1),
1386 		       n2->start,
1387 		       (void *) bset_bkey_idx(n2, keys) - (void *) n2->start);
1388 
1389 		n1->keys += keys;
1390 		r[i].keys = n1->keys;
1391 
1392 		memmove(n2->start,
1393 			bset_bkey_idx(n2, keys),
1394 			(void *) bset_bkey_last(n2) -
1395 			(void *) bset_bkey_idx(n2, keys));
1396 
1397 		n2->keys -= keys;
1398 
1399 		if (__bch_keylist_realloc(&keylist,
1400 					  bkey_u64s(&new_nodes[i]->key)))
1401 			goto out_nocoalesce;
1402 
1403 		bch_btree_node_write(new_nodes[i], &cl);
1404 		bch_keylist_add(&keylist, &new_nodes[i]->key);
1405 	}
1406 
1407 	for (i = 0; i < nodes; i++)
1408 		mutex_unlock(&new_nodes[i]->write_lock);
1409 
1410 	closure_sync(&cl);
1411 
1412 	/* We emptied out this node */
1413 	BUG_ON(btree_bset_first(new_nodes[0])->keys);
1414 	btree_node_free(new_nodes[0]);
1415 	rw_unlock(true, new_nodes[0]);
1416 	new_nodes[0] = NULL;
1417 
1418 	for (i = 0; i < nodes; i++) {
1419 		if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key)))
1420 			goto out_nocoalesce;
1421 
1422 		make_btree_freeing_key(r[i].b, keylist.top);
1423 		bch_keylist_push(&keylist);
1424 	}
1425 
1426 	bch_btree_insert_node(b, op, &keylist, NULL, NULL);
1427 	BUG_ON(!bch_keylist_empty(&keylist));
1428 
1429 	for (i = 0; i < nodes; i++) {
1430 		btree_node_free(r[i].b);
1431 		rw_unlock(true, r[i].b);
1432 
1433 		r[i].b = new_nodes[i];
1434 	}
1435 
1436 	memmove(r, r + 1, sizeof(r[0]) * (nodes - 1));
1437 	r[nodes - 1].b = ERR_PTR(-EINTR);
1438 
1439 	trace_bcache_btree_gc_coalesce(nodes);
1440 	gc->nodes--;
1441 
1442 	bch_keylist_free(&keylist);
1443 
1444 	/* Invalidated our iterator */
1445 	return -EINTR;
1446 
1447 out_nocoalesce:
1448 	closure_sync(&cl);
1449 	while ((k = bch_keylist_pop(&keylist)))
1450 		if (!bkey_cmp(k, &ZERO_KEY))
1451 			atomic_dec(&b->c->prio_blocked);
1452 
1453 	bch_keylist_free(&keylist);
1454 
1455 	for (i = 0; i < nodes; i++)
1456 		if (!IS_ERR_OR_NULL(new_nodes[i])) {
1457 			btree_node_free(new_nodes[i]);
1458 			rw_unlock(true, new_nodes[i]);
1459 		}
1460 	return 0;
1461 }
1462 
1463 static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
1464 				 struct btree *replace)
1465 {
1466 	struct keylist keys;
1467 	struct btree *n;
1468 
1469 	if (btree_check_reserve(b, NULL))
1470 		return 0;
1471 
1472 	n = btree_node_alloc_replacement(replace, NULL);
1473 
1474 	/* recheck reserve after allocating replacement node */
1475 	if (btree_check_reserve(b, NULL)) {
1476 		btree_node_free(n);
1477 		rw_unlock(true, n);
1478 		return 0;
1479 	}
1480 
1481 	bch_btree_node_write_sync(n);
1482 
1483 	bch_keylist_init(&keys);
1484 	bch_keylist_add(&keys, &n->key);
1485 
1486 	make_btree_freeing_key(replace, keys.top);
1487 	bch_keylist_push(&keys);
1488 
1489 	bch_btree_insert_node(b, op, &keys, NULL, NULL);
1490 	BUG_ON(!bch_keylist_empty(&keys));
1491 
1492 	btree_node_free(replace);
1493 	rw_unlock(true, n);
1494 
1495 	/* Invalidated our iterator */
1496 	return -EINTR;
1497 }
1498 
1499 static unsigned btree_gc_count_keys(struct btree *b)
1500 {
1501 	struct bkey *k;
1502 	struct btree_iter iter;
1503 	unsigned ret = 0;
1504 
1505 	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
1506 		ret += bkey_u64s(k);
1507 
1508 	return ret;
1509 }
1510 
1511 static int btree_gc_recurse(struct btree *b, struct btree_op *op,
1512 			    struct closure *writes, struct gc_stat *gc)
1513 {
1514 	int ret = 0;
1515 	bool should_rewrite;
1516 	struct bkey *k;
1517 	struct btree_iter iter;
1518 	struct gc_merge_info r[GC_MERGE_NODES];
1519 	struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1;
1520 
1521 	bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);
1522 
1523 	for (i = r; i < r + ARRAY_SIZE(r); i++)
1524 		i->b = ERR_PTR(-EINTR);
1525 
1526 	while (1) {
1527 		k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
1528 		if (k) {
1529 			r->b = bch_btree_node_get(b->c, op, k, b->level - 1,
1530 						  true, b);
1531 			if (IS_ERR(r->b)) {
1532 				ret = PTR_ERR(r->b);
1533 				break;
1534 			}
1535 
1536 			r->keys = btree_gc_count_keys(r->b);
1537 
1538 			ret = btree_gc_coalesce(b, op, gc, r);
1539 			if (ret)
1540 				break;
1541 		}
1542 
1543 		if (!last->b)
1544 			break;
1545 
1546 		if (!IS_ERR(last->b)) {
1547 			should_rewrite = btree_gc_mark_node(last->b, gc);
1548 			if (should_rewrite) {
1549 				ret = btree_gc_rewrite_node(b, op, last->b);
1550 				if (ret)
1551 					break;
1552 			}
1553 
1554 			if (last->b->level) {
1555 				ret = btree_gc_recurse(last->b, op, writes, gc);
1556 				if (ret)
1557 					break;
1558 			}
1559 
1560 			bkey_copy_key(&b->c->gc_done, &last->b->key);
1561 
1562 			/*
1563 			 * Must flush leaf nodes before gc ends, since replace
1564 			 * operations aren't journalled
1565 			 */
1566 			mutex_lock(&last->b->write_lock);
1567 			if (btree_node_dirty(last->b))
1568 				bch_btree_node_write(last->b, writes);
1569 			mutex_unlock(&last->b->write_lock);
1570 			rw_unlock(true, last->b);
1571 		}
1572 
1573 		memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1));
1574 		r->b = NULL;
1575 
1576 		if (need_resched()) {
1577 			ret = -EAGAIN;
1578 			break;
1579 		}
1580 	}
1581 
1582 	for (i = r; i < r + ARRAY_SIZE(r); i++)
1583 		if (!IS_ERR_OR_NULL(i->b)) {
1584 			mutex_lock(&i->b->write_lock);
1585 			if (btree_node_dirty(i->b))
1586 				bch_btree_node_write(i->b, writes);
1587 			mutex_unlock(&i->b->write_lock);
1588 			rw_unlock(true, i->b);
1589 		}
1590 
1591 	return ret;
1592 }
1593 
1594 static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
1595 			     struct closure *writes, struct gc_stat *gc)
1596 {
1597 	struct btree *n = NULL;
1598 	int ret = 0;
1599 	bool should_rewrite;
1600 
1601 	should_rewrite = btree_gc_mark_node(b, gc);
1602 	if (should_rewrite) {
1603 		n = btree_node_alloc_replacement(b, NULL);
1604 
1605 		if (!IS_ERR_OR_NULL(n)) {
1606 			bch_btree_node_write_sync(n);
1607 
1608 			bch_btree_set_root(n);
1609 			btree_node_free(b);
1610 			rw_unlock(true, n);
1611 
1612 			return -EINTR;
1613 		}
1614 	}
1615 
1616 	__bch_btree_mark_key(b->c, b->level + 1, &b->key);
1617 
1618 	if (b->level) {
1619 		ret = btree_gc_recurse(b, op, writes, gc);
1620 		if (ret)
1621 			return ret;
1622 	}
1623 
1624 	bkey_copy_key(&b->c->gc_done, &b->key);
1625 
1626 	return ret;
1627 }
1628 
1629 static void btree_gc_start(struct cache_set *c)
1630 {
1631 	struct cache *ca;
1632 	struct bucket *b;
1633 	unsigned i;
1634 
1635 	if (!c->gc_mark_valid)
1636 		return;
1637 
1638 	mutex_lock(&c->bucket_lock);
1639 
1640 	c->gc_mark_valid = 0;
1641 	c->gc_done = ZERO_KEY;
1642 
1643 	for_each_cache(ca, c, i)
1644 		for_each_bucket(b, ca) {
1645 			b->last_gc = b->gen;
1646 			if (!atomic_read(&b->pin)) {
1647 				SET_GC_MARK(b, 0);
1648 				SET_GC_SECTORS_USED(b, 0);
1649 			}
1650 		}
1651 
1652 	mutex_unlock(&c->bucket_lock);
1653 }
1654 
1655 static size_t bch_btree_gc_finish(struct cache_set *c)
1656 {
1657 	size_t available = 0;
1658 	struct bucket *b;
1659 	struct cache *ca;
1660 	unsigned i;
1661 
1662 	mutex_lock(&c->bucket_lock);
1663 
1664 	set_gc_sectors(c);
1665 	c->gc_mark_valid = 1;
1666 	c->need_gc	= 0;
1667 
1668 	for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++)
1669 		SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
1670 			    GC_MARK_METADATA);
1671 
1672 	/* don't reclaim buckets to which writeback keys point */
1673 	rcu_read_lock();
1674 	for (i = 0; i < c->nr_uuids; i++) {
1675 		struct bcache_device *d = c->devices[i];
1676 		struct cached_dev *dc;
1677 		struct keybuf_key *w, *n;
1678 		unsigned j;
1679 
1680 		if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
1681 			continue;
1682 		dc = container_of(d, struct cached_dev, disk);
1683 
1684 		spin_lock(&dc->writeback_keys.lock);
1685 		rbtree_postorder_for_each_entry_safe(w, n,
1686 					&dc->writeback_keys.keys, node)
1687 			for (j = 0; j < KEY_PTRS(&w->key); j++)
1688 				SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
1689 					    GC_MARK_DIRTY);
1690 		spin_unlock(&dc->writeback_keys.lock);
1691 	}
1692 	rcu_read_unlock();
1693 
1694 	for_each_cache(ca, c, i) {
1695 		uint64_t *i;
1696 
1697 		ca->invalidate_needs_gc = 0;
1698 
1699 		for (i = ca->sb.d; i < ca->sb.d + ca->sb.keys; i++)
1700 			SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
1701 
1702 		for (i = ca->prio_buckets;
1703 		     i < ca->prio_buckets + prio_buckets(ca) * 2; i++)
1704 			SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
1705 
1706 		for_each_bucket(b, ca) {
1707 			c->need_gc	= max(c->need_gc, bucket_gc_gen(b));
1708 
1709 			if (atomic_read(&b->pin))
1710 				continue;
1711 
1712 			BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
1713 
1714 			if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
1715 				available++;
1716 		}
1717 	}
1718 
1719 	mutex_unlock(&c->bucket_lock);
1720 	return available;
1721 }
1722 
1723 static void bch_btree_gc(struct cache_set *c)
1724 {
1725 	int ret;
1726 	unsigned long available;
1727 	struct gc_stat stats;
1728 	struct closure writes;
1729 	struct btree_op op;
1730 	uint64_t start_time = local_clock();
1731 
1732 	trace_bcache_gc_start(c);
1733 
1734 	memset(&stats, 0, sizeof(struct gc_stat));
1735 	closure_init_stack(&writes);
1736 	bch_btree_op_init(&op, SHRT_MAX);
1737 
1738 	btree_gc_start(c);
1739 
1740 	do {
1741 		ret = btree_root(gc_root, c, &op, &writes, &stats);
1742 		closure_sync(&writes);
1743 		cond_resched();
1744 
1745 		if (ret && ret != -EAGAIN)
1746 			pr_warn("gc failed!");
1747 	} while (ret);
1748 
1749 	available = bch_btree_gc_finish(c);
1750 	wake_up_allocators(c);
1751 
1752 	bch_time_stats_update(&c->btree_gc_time, start_time);
1753 
1754 	stats.key_bytes *= sizeof(uint64_t);
1755 	stats.data	<<= 9;
1756 	stats.in_use	= (c->nbuckets - available) * 100 / c->nbuckets;
1757 	memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));
1758 
1759 	trace_bcache_gc_end(c);
1760 
1761 	bch_moving_gc(c);
1762 }
1763 
1764 static bool gc_should_run(struct cache_set *c)
1765 {
1766 	struct cache *ca;
1767 	unsigned i;
1768 
1769 	for_each_cache(ca, c, i)
1770 		if (ca->invalidate_needs_gc)
1771 			return true;
1772 
1773 	if (atomic_read(&c->sectors_to_gc) < 0)
1774 		return true;
1775 
1776 	return false;
1777 }
1778 
1779 static int bch_gc_thread(void *arg)
1780 {
1781 	struct cache_set *c = arg;
1782 
1783 	while (1) {
1784 		wait_event_interruptible(c->gc_wait,
1785 			   kthread_should_stop() || gc_should_run(c));
1786 
1787 		if (kthread_should_stop())
1788 			break;
1789 
1790 		set_gc_sectors(c);
1791 		bch_btree_gc(c);
1792 	}
1793 
1794 	return 0;
1795 }
1796 
1797 int bch_gc_thread_start(struct cache_set *c)
1798 {
1799 	c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
1800 	if (IS_ERR(c->gc_thread))
1801 		return PTR_ERR(c->gc_thread);
1802 
1803 	return 0;
1804 }
1805 
1806 /* Initial partial gc */
1807 
1808 static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
1809 {
1810 	int ret = 0;
1811 	struct bkey *k, *p = NULL;
1812 	struct btree_iter iter;
1813 
1814 	for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
1815 		bch_initial_mark_key(b->c, b->level, k);
1816 
1817 	bch_initial_mark_key(b->c, b->level + 1, &b->key);
1818 
1819 	if (b->level) {
1820 		bch_btree_iter_init(&b->keys, &iter, NULL);
1821 
1822 		do {
1823 			k = bch_btree_iter_next_filter(&iter, &b->keys,
1824 						       bch_ptr_bad);
1825 			if (k)
1826 				btree_node_prefetch(b, k);
1827 
1828 			if (p)
1829 				ret = btree(check_recurse, p, b, op);
1830 
1831 			p = k;
1832 		} while (p && !ret);
1833 	}
1834 
1835 	return ret;
1836 }
1837 
1838 int bch_btree_check(struct cache_set *c)
1839 {
1840 	struct btree_op op;
1841 
1842 	bch_btree_op_init(&op, SHRT_MAX);
1843 
1844 	return btree_root(check_recurse, c, &op);
1845 }
1846 
1847 void bch_initial_gc_finish(struct cache_set *c)
1848 {
1849 	struct cache *ca;
1850 	struct bucket *b;
1851 	unsigned i;
1852 
1853 	bch_btree_gc_finish(c);
1854 
1855 	mutex_lock(&c->bucket_lock);
1856 
1857 	/*
1858 	 * We need to put some unused buckets directly on the prio freelist in
1859 	 * order to get the allocator thread started - it needs freed buckets in
1860 	 * order to rewrite the prios and gens, and it needs to rewrite prios
1861 	 * and gens in order to free buckets.
1862 	 *
1863 	 * This is only safe for buckets that have no live data in them, which
1864 	 * there should always be some of.
1865 	 */
1866 	for_each_cache(ca, c, i) {
1867 		for_each_bucket(b, ca) {
1868 			if (fifo_full(&ca->free[RESERVE_PRIO]))
1869 				break;
1870 
1871 			if (bch_can_invalidate_bucket(ca, b) &&
1872 			    !GC_MARK(b)) {
1873 				__bch_invalidate_one_bucket(ca, b);
1874 				fifo_push(&ca->free[RESERVE_PRIO],
1875 					  b - ca->buckets);
1876 			}
1877 		}
1878 	}
1879 
1880 	mutex_unlock(&c->bucket_lock);
1881 }
1882 
1883 /* Btree insertion */
1884 
1885 static bool btree_insert_key(struct btree *b, struct bkey *k,
1886 			     struct bkey *replace_key)
1887 {
1888 	unsigned status;
1889 
1890 	BUG_ON(bkey_cmp(k, &b->key) > 0);
1891 
1892 	status = bch_btree_insert_key(&b->keys, k, replace_key);
1893 	if (status != BTREE_INSERT_STATUS_NO_INSERT) {
1894 		bch_check_keys(&b->keys, "%u for %s", status,
1895 			       replace_key ? "replace" : "insert");
1896 
1897 		trace_bcache_btree_insert_key(b, k, replace_key != NULL,
1898 					      status);
1899 		return true;
1900 	} else
1901 		return false;
1902 }
1903 
1904 static size_t insert_u64s_remaining(struct btree *b)
1905 {
1906 	long ret = bch_btree_keys_u64s_remaining(&b->keys);
1907 
1908 	/*
1909 	 * Might land in the middle of an existing extent and have to split it
1910 	 */
1911 	if (b->keys.ops->is_extents)
1912 		ret -= KEY_MAX_U64S;
1913 
1914 	return max(ret, 0L);
1915 }
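/*
 * Editorial example (hypothetical numbers): an insert can land in the middle
 * of an existing extent and split it, costing up to KEY_MAX_U64S extra u64s.
 * So if a leaf has 7 u64s of raw space left and KEY_MAX_U64S is 8, this
 * returns 0 and bch_btree_insert_node() takes the split path rather than
 * risking overflowing the node.
 */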
1916 
1917 static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
1918 				  struct keylist *insert_keys,
1919 				  struct bkey *replace_key)
1920 {
1921 	bool ret = false;
1922 	int oldsize = bch_count_data(&b->keys);
1923 
1924 	while (!bch_keylist_empty(insert_keys)) {
1925 		struct bkey *k = insert_keys->keys;
1926 
1927 		if (bkey_u64s(k) > insert_u64s_remaining(b))
1928 			break;
1929 
1930 		if (bkey_cmp(k, &b->key) <= 0) {
1931 			if (!b->level)
1932 				bkey_put(b->c, k);
1933 
1934 			ret |= btree_insert_key(b, k, replace_key);
1935 			bch_keylist_pop_front(insert_keys);
1936 		} else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
1937 			BKEY_PADDED(key) temp;
1938 			bkey_copy(&temp.key, insert_keys->keys);
1939 
1940 			bch_cut_back(&b->key, &temp.key);
1941 			bch_cut_front(&b->key, insert_keys->keys);
1942 
1943 			ret |= btree_insert_key(b, &temp.key, replace_key);
1944 			break;
1945 		} else {
1946 			break;
1947 		}
1948 	}
1949 
1950 	if (!ret)
1951 		op->insert_collision = true;
1952 
1953 	BUG_ON(!bch_keylist_empty(insert_keys) && b->level);
1954 
1955 	BUG_ON(bch_count_data(&b->keys) < oldsize);
1956 	return ret;
1957 }
1958 
1959 static int btree_split(struct btree *b, struct btree_op *op,
1960 		       struct keylist *insert_keys,
1961 		       struct bkey *replace_key)
1962 {
1963 	bool split;
1964 	struct btree *n1, *n2 = NULL, *n3 = NULL;
1965 	uint64_t start_time = local_clock();
1966 	struct closure cl;
1967 	struct keylist parent_keys;
1968 
1969 	closure_init_stack(&cl);
1970 	bch_keylist_init(&parent_keys);
1971 
1972 	if (btree_check_reserve(b, op)) {
1973 		if (!b->level)
1974 			return -EINTR;
1975 		else
1976 			WARN(1, "insufficient reserve for split\n");
1977 	}
1978 
1979 	n1 = btree_node_alloc_replacement(b, op);
1980 	if (IS_ERR(n1))
1981 		goto err;
1982 
1983 	split = set_blocks(btree_bset_first(n1),
1984 			   block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5;
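	/*
	 * Editorial note: everything was just sorted into n1 by
	 * btree_node_alloc_replacement(), so we only split if the compacted
	 * result is still more than 4/5 of a node; otherwise n1 simply
	 * replaces b as-is.
	 */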
1985 
1986 	if (split) {
1987 		unsigned keys = 0;
1988 
1989 		trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);
1990 
1991 		n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent);
1992 		if (IS_ERR(n2))
1993 			goto err_free1;
1994 
1995 		if (!b->parent) {
1996 			n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL);
1997 			if (IS_ERR(n3))
1998 				goto err_free2;
1999 		}
2000 
2001 		mutex_lock(&n1->write_lock);
2002 		mutex_lock(&n2->write_lock);
2003 
2004 		bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2005 
2006 		/*
2007 		 * Has to be a linear search because we don't have an auxiliary
2008 		 * search tree yet
2009 		 */
2010 
2011 		while (keys < (btree_bset_first(n1)->keys * 3) / 5)
2012 			keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1),
2013 							keys));
2014 
2015 		bkey_copy_key(&n1->key,
2016 			      bset_bkey_idx(btree_bset_first(n1), keys));
2017 		keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys));
2018 
2019 		btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys;
2020 		btree_bset_first(n1)->keys = keys;
2021 
2022 		memcpy(btree_bset_first(n2)->start,
2023 		       bset_bkey_last(btree_bset_first(n1)),
2024 		       btree_bset_first(n2)->keys * sizeof(uint64_t));
2025 
2026 		bkey_copy_key(&n2->key, &b->key);
2027 
2028 		bch_keylist_add(&parent_keys, &n2->key);
2029 		bch_btree_node_write(n2, &cl);
2030 		mutex_unlock(&n2->write_lock);
2031 		rw_unlock(true, n2);
2032 	} else {
2033 		trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys);
2034 
2035 		mutex_lock(&n1->write_lock);
2036 		bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2037 	}
2038 
2039 	bch_keylist_add(&parent_keys, &n1->key);
2040 	bch_btree_node_write(n1, &cl);
2041 	mutex_unlock(&n1->write_lock);
2042 
2043 	if (n3) {
2044 		/* Depth increases, make a new root */
2045 		mutex_lock(&n3->write_lock);
2046 		bkey_copy_key(&n3->key, &MAX_KEY);
2047 		bch_btree_insert_keys(n3, op, &parent_keys, NULL);
2048 		bch_btree_node_write(n3, &cl);
2049 		mutex_unlock(&n3->write_lock);
2050 
2051 		closure_sync(&cl);
2052 		bch_btree_set_root(n3);
2053 		rw_unlock(true, n3);
2054 	} else if (!b->parent) {
2055 		/* Root filled up but didn't need to be split */
2056 		closure_sync(&cl);
2057 		bch_btree_set_root(n1);
2058 	} else {
2059 		/* Split a non-root node */
2060 		closure_sync(&cl);
2061 		make_btree_freeing_key(b, parent_keys.top);
2062 		bch_keylist_push(&parent_keys);
2063 
2064 		bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL);
2065 		BUG_ON(!bch_keylist_empty(&parent_keys));
2066 	}
2067 
2068 	btree_node_free(b);
2069 	rw_unlock(true, n1);
2070 
2071 	bch_time_stats_update(&b->c->btree_split_time, start_time);
2072 
2073 	return 0;
2074 err_free2:
2075 	bkey_put(b->c, &n2->key);
2076 	btree_node_free(n2);
2077 	rw_unlock(true, n2);
2078 err_free1:
2079 	bkey_put(b->c, &n1->key);
2080 	btree_node_free(n1);
2081 	rw_unlock(true, n1);
2082 err:
2083 	WARN(1, "bcache: btree split failed (level %u)\n", b->level);
2084 
2085 	if (n3 == ERR_PTR(-EAGAIN) ||
2086 	    n2 == ERR_PTR(-EAGAIN) ||
2087 	    n1 == ERR_PTR(-EAGAIN))
2088 		return -EAGAIN;
2089 
2090 	return -ENOMEM;
2091 }
2092 
2093 static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
2094 				 struct keylist *insert_keys,
2095 				 atomic_t *journal_ref,
2096 				 struct bkey *replace_key)
2097 {
2098 	struct closure cl;
2099 
2100 	BUG_ON(b->level && replace_key);
2101 
2102 	closure_init_stack(&cl);
2103 
2104 	mutex_lock(&b->write_lock);
2105 
2106 	if (write_block(b) != btree_bset_last(b) &&
2107 	    b->keys.last_set_unwritten)
2108 		bch_btree_init_next(b); /* just wrote a set */
2109 
2110 	if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) {
2111 		mutex_unlock(&b->write_lock);
2112 		goto split;
2113 	}
2114 
2115 	BUG_ON(write_block(b) != btree_bset_last(b));
2116 
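	/*
	 * Leaf updates are journalled and the node left dirty; interior
	 * node updates are written out immediately.
	 */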
2117 	if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
2118 		if (!b->level)
2119 			bch_btree_leaf_dirty(b, journal_ref);
2120 		else
2121 			bch_btree_node_write(b, &cl);
2122 	}
2123 
2124 	mutex_unlock(&b->write_lock);
2125 
2126 	/* wait for btree node write if necessary, after unlock */
2127 	closure_sync(&cl);
2128 
2129 	return 0;
2130 split:
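	/*
	 * The node is full and has to be split.  If we're inside
	 * generic_make_request() (current->bio_list is set) we can't block
	 * to do that here, so back off with -EAGAIN; if we don't hold write
	 * locks high enough up the tree, raise op->lock and return -EINTR
	 * so the traversal is retried.
	 */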
2131 	if (current->bio_list) {
2132 		op->lock = b->c->root->level + 1;
2133 		return -EAGAIN;
2134 	} else if (op->lock <= b->c->root->level) {
2135 		op->lock = b->c->root->level + 1;
2136 		return -EINTR;
2137 	} else {
2138 		/* Invalidated all iterators */
2139 		int ret = btree_split(b, op, insert_keys, replace_key);
2140 
2141 		if (bch_keylist_empty(insert_keys))
2142 			return 0;
2143 		else if (!ret)
2144 			return -EINTR;
2145 		return ret;
2146 	}
2147 }
2148 
2149 int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
2150 			       struct bkey *check_key)
2151 {
2152 	int ret = -EINTR;
2153 	uint64_t btree_ptr = b->key.ptr[0];
2154 	unsigned long seq = b->seq;
2155 	struct keylist insert;
2156 	bool upgrade = op->lock == -1;
2157 
2158 	bch_keylist_init(&insert);
2159 
2160 	if (upgrade) {
2161 		rw_unlock(false, b);
2162 		rw_lock(true, b, b->level);
2163 
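		/* Make sure the node wasn't reused while it was unlocked */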
2164 		if (b->key.ptr[0] != btree_ptr ||
2165 		    b->seq != seq + 1) {
2166 			op->lock = b->level;
2167 			goto out;
2168 		}
2169 	}
2170 
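	/* Give the check key one random pointer on the reserved check device */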
2171 	SET_KEY_PTRS(check_key, 1);
2172 	get_random_bytes(&check_key->ptr[0], sizeof(uint64_t));
2173 
2174 	SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV);
2175 
2176 	bch_keylist_add(&insert, check_key);
2177 
2178 	ret = bch_btree_insert_node(b, op, &insert, NULL, NULL);
2179 
2180 	BUG_ON(!ret && !bch_keylist_empty(&insert));
2181 out:
2182 	if (upgrade)
2183 		downgrade_write(&b->lock);
2184 	return ret;
2185 }
2186 
2187 struct btree_insert_op {
2188 	struct btree_op	op;
2189 	struct keylist	*keys;
2190 	atomic_t	*journal_ref;
2191 	struct bkey	*replace_key;
2192 };
2193 
2194 static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
2195 {
2196 	struct btree_insert_op *op = container_of(b_op,
2197 					struct btree_insert_op, op);
2198 
2199 	int ret = bch_btree_insert_node(b, &op->op, op->keys,
2200 					op->journal_ref, op->replace_key);
2201 	if (ret && !bch_keylist_empty(op->keys))
2202 		return ret;
2203 	else
2204 		return MAP_DONE;
2205 }
2206 
2207 int bch_btree_insert(struct cache_set *c, struct keylist *keys,
2208 		     atomic_t *journal_ref, struct bkey *replace_key)
2209 {
2210 	struct btree_insert_op op;
2211 	int ret = 0;
2212 
2213 	BUG_ON(current->bio_list);
2214 	BUG_ON(bch_keylist_empty(keys));
2215 
2216 	bch_btree_op_init(&op.op, 0);
2217 	op.keys		= keys;
2218 	op.journal_ref	= journal_ref;
2219 	op.replace_key	= replace_key;
2220 
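	/* Map over leaf nodes, retrying until every key has been inserted */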
2221 	while (!ret && !bch_keylist_empty(keys)) {
2222 		op.op.lock = 0;
2223 		ret = bch_btree_map_leaf_nodes(&op.op, c,
2224 					       &START_KEY(keys->keys),
2225 					       btree_insert_fn);
2226 	}
2227 
2228 	if (ret) {
2229 		struct bkey *k;
2230 
2231 		pr_err("error %i\n", ret);
2232 
2233 		while ((k = bch_keylist_pop(keys)))
2234 			bkey_put(c, k);
2235 	} else if (op.op.insert_collision)
2236 		ret = -ESRCH;
2237 
2238 	return ret;
2239 }
2240 
2241 void bch_btree_set_root(struct btree *b)
2242 {
2243 	unsigned i;
2244 	struct closure cl;
2245 
2246 	closure_init_stack(&cl);
2247 
2248 	trace_bcache_btree_set_root(b);
2249 
2250 	BUG_ON(!b->written);
2251 
2252 	for (i = 0; i < KEY_PTRS(&b->key); i++)
2253 		BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO);
2254 
2255 	mutex_lock(&b->c->bucket_lock);
2256 	list_del_init(&b->list);
2257 	mutex_unlock(&b->c->bucket_lock);
2258 
2259 	b->c->root = b;
2260 
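	/* Journal a metadata write so the new root's location is persisted */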
2261 	bch_journal_meta(b->c, &cl);
2262 	closure_sync(&cl);
2263 }
2264 
2265 /* Map across nodes or keys */
2266 
2267 static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
2268 				       struct bkey *from,
2269 				       btree_map_nodes_fn *fn, int flags)
2270 {
2271 	int ret = MAP_CONTINUE;
2272 
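	/* For interior nodes, recurse into each child in key order */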
2273 	if (b->level) {
2274 		struct bkey *k;
2275 		struct btree_iter iter;
2276 
2277 		bch_btree_iter_init(&b->keys, &iter, from);
2278 
2279 		while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
2280 						       bch_ptr_bad))) {
2281 			ret = btree(map_nodes_recurse, k, b,
2282 				    op, from, fn, flags);
2283 			from = NULL;
2284 
2285 			if (ret != MAP_CONTINUE)
2286 				return ret;
2287 		}
2288 	}
2289 
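	/* fn runs on every leaf, and on interior nodes too for MAP_ALL_NODES */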
2290 	if (!b->level || flags == MAP_ALL_NODES)
2291 		ret = fn(op, b);
2292 
2293 	return ret;
2294 }
2295 
2296 int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
2297 			  struct bkey *from, btree_map_nodes_fn *fn, int flags)
2298 {
2299 	return btree_root(map_nodes_recurse, c, op, from, fn, flags);
2300 }
2301 
2302 static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
2303 				      struct bkey *from, btree_map_keys_fn *fn,
2304 				      int flags)
2305 {
2306 	int ret = MAP_CONTINUE;
2307 	struct bkey *k;
2308 	struct btree_iter iter;
2309 
2310 	bch_btree_iter_init(&b->keys, &iter, from);
2311 
2312 	while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
2313 		ret = !b->level
2314 			? fn(op, b, k)
2315 			: btree(map_keys_recurse, k, b, op, from, fn, flags);
2316 		from = NULL;
2317 
2318 		if (ret != MAP_CONTINUE)
2319 			return ret;
2320 	}
2321 
2322 	if (!b->level && (flags & MAP_END_KEY))
2323 		ret = fn(op, b, &KEY(KEY_INODE(&b->key),
2324 				     KEY_OFFSET(&b->key), 0));
2325 
2326 	return ret;
2327 }
2328 
2329 int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
2330 		       struct bkey *from, btree_map_keys_fn *fn, int flags)
2331 {
2332 	return btree_root(map_keys_recurse, c, op, from, fn, flags);
2333 }
2334 
2335 /* Keybuf code */
2336 
2337 static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
2338 {
2339 	/* Overlapping keys compare equal */
2340 	if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0)
2341 		return -1;
2342 	if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0)
2343 		return 1;
2344 	return 0;
2345 }
2346 
2347 static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
2348 					    struct keybuf_key *r)
2349 {
2350 	return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
2351 }
2352 
2353 struct refill {
2354 	struct btree_op	op;
2355 	unsigned	nr_found;
2356 	struct keybuf	*buf;
2357 	struct bkey	*end;
2358 	keybuf_pred_fn	*pred;
2359 };
2360 
2361 static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
2362 			    struct bkey *k)
2363 {
2364 	struct refill *refill = container_of(op, struct refill, op);
2365 	struct keybuf *buf = refill->buf;
2366 	int ret = MAP_CONTINUE;
2367 
2368 	if (bkey_cmp(k, refill->end) >= 0) {
2369 		ret = MAP_DONE;
2370 		goto out;
2371 	}
2372 
2373 	if (!KEY_SIZE(k)) /* end key */
2374 		goto out;
2375 
2376 	if (refill->pred(buf, k)) {
2377 		struct keybuf_key *w;
2378 
2379 		spin_lock(&buf->lock);
2380 
2381 		w = array_alloc(&buf->freelist);
2382 		if (!w) {
2383 			spin_unlock(&buf->lock);
2384 			return MAP_DONE;
2385 		}
2386 
2387 		w->private = NULL;
2388 		bkey_copy(&w->key, k);
2389 
2390 		if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
2391 			array_free(&buf->freelist, w);
2392 		else
2393 			refill->nr_found++;
2394 
2395 		if (array_freelist_empty(&buf->freelist))
2396 			ret = MAP_DONE;
2397 
2398 		spin_unlock(&buf->lock);
2399 	}
2400 out:
2401 	buf->last_scanned = *k;
2402 	return ret;
2403 }
2404 
2405 void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
2406 		       struct bkey *end, keybuf_pred_fn *pred)
2407 {
2408 	struct bkey start = buf->last_scanned;
2409 	struct refill refill;
2410 
2411 	cond_resched();
2412 
2413 	bch_btree_op_init(&refill.op, -1);
2414 	refill.nr_found	= 0;
2415 	refill.buf	= buf;
2416 	refill.end	= end;
2417 	refill.pred	= pred;
2418 
2419 	bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
2420 			   refill_keybuf_fn, MAP_END_KEY);
2421 
2422 	trace_bcache_keyscan(refill.nr_found,
2423 			     KEY_INODE(&start), KEY_OFFSET(&start),
2424 			     KEY_INODE(&buf->last_scanned),
2425 			     KEY_OFFSET(&buf->last_scanned));
2426 
2427 	spin_lock(&buf->lock);
2428 
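	/* Record the range of keys currently held in the buffer */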
2429 	if (!RB_EMPTY_ROOT(&buf->keys)) {
2430 		struct keybuf_key *w;
2431 		w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2432 		buf->start	= START_KEY(&w->key);
2433 
2434 		w = RB_LAST(&buf->keys, struct keybuf_key, node);
2435 		buf->end	= w->key;
2436 	} else {
2437 		buf->start	= MAX_KEY;
2438 		buf->end	= MAX_KEY;
2439 	}
2440 
2441 	spin_unlock(&buf->lock);
2442 }
2443 
2444 static void __bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2445 {
2446 	rb_erase(&w->node, &buf->keys);
2447 	array_free(&buf->freelist, w);
2448 }
2449 
2450 void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2451 {
2452 	spin_lock(&buf->lock);
2453 	__bch_keybuf_del(buf, w);
2454 	spin_unlock(&buf->lock);
2455 }
2456 
2457 bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
2458 				  struct bkey *end)
2459 {
2460 	bool ret = false;
2461 	struct keybuf_key *p, *w, s;
2462 	s.key = *start;
2463 
2464 	if (bkey_cmp(end, &buf->start) <= 0 ||
2465 	    bkey_cmp(start, &buf->end) >= 0)
2466 		return false;
2467 
2468 	spin_lock(&buf->lock);
2469 	w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp);
2470 
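	/* Walk overlapping keys: drop idle ones, flag any still in use */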
2471 	while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) {
2472 		p = w;
2473 		w = RB_NEXT(w, node);
2474 
2475 		if (p->private)
2476 			ret = true;
2477 		else
2478 			__bch_keybuf_del(buf, p);
2479 	}
2480 
2481 	spin_unlock(&buf->lock);
2482 	return ret;
2483 }
2484 
2485 struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
2486 {
2487 	struct keybuf_key *w;
2488 	spin_lock(&buf->lock);
2489 
2490 	w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2491 
2492 	while (w && w->private)
2493 		w = RB_NEXT(w, node);
2494 
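	/* Claim it so that concurrent callers skip this key */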
2495 	if (w)
2496 		w->private = ERR_PTR(-EINTR);
2497 
2498 	spin_unlock(&buf->lock);
2499 	return w;
2500 }
2501 
2502 struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
2503 					  struct keybuf *buf,
2504 					  struct bkey *end,
2505 					  keybuf_pred_fn *pred)
2506 {
2507 	struct keybuf_key *ret;
2508 
2509 	while (1) {
2510 		ret = bch_keybuf_next(buf);
2511 		if (ret)
2512 			break;
2513 
2514 		if (bkey_cmp(&buf->last_scanned, end) >= 0) {
2515 			pr_debug("scan finished\n");
2516 			break;
2517 		}
2518 
2519 		bch_refill_keybuf(c, buf, end, pred);
2520 	}
2521 
2522 	return ret;
2523 }
2524 
2525 void bch_keybuf_init(struct keybuf *buf)
2526 {
2527 	buf->last_scanned	= MAX_KEY;
2528 	buf->keys		= RB_ROOT;
2529 
2530 	spin_lock_init(&buf->lock);
2531 	array_allocator_init(&buf->freelist);
2532 }
2533