/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 *
 * Uses a block device as cache for other block devices; optimized for SSDs.
 * All allocation is done in buckets, which should match the erase block size
 * of the device.
 *
 * Buckets containing cached data are kept on a heap sorted by priority;
 * bucket priority is increased on cache hit, and periodically all the buckets
 * on the heap have their priority scaled down. This currently is just used as
 * an LRU but in the future should allow for more intelligent heuristics.
 *
 * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
 * counter. Garbage collection is used to remove stale pointers.
 *
 * Indexing is done via a btree; nodes are not necessarily fully sorted;
 * rather, as keys are inserted we only sort the pages that have not yet been
 * written. When garbage collection is run, we re-sort the entire node.
 *
 * All configuration is done via sysfs; see Documentation/bcache.txt.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"

#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/sched/clock.h>
#include <trace/events/bcache.h>

/*
 * Todo:
 * register_bcache: Return errors out to userspace correctly
 *
 * Writeback: don't undirty key until after a cache flush
 *
 * Create an iterator for key pointers
 *
 * On btree write error, mark bucket such that it won't be freed from the cache
 *
 * Journalling:
 *   Check for bad keys in replay
 *   Propagate barriers
 *   Refcount journal entries in journal_replay
 *
 * Garbage collection:
 *   Finish incremental gc
 *   Gc should free old UUIDs, data for invalid UUIDs
 *
 * Provide a way to list backing device UUIDs we have data cached for, and
 * probably how long it's been since we've seen them, and a way to invalidate
 * dirty data for devices that will never be attached again
 *
 * Keep 1 min/5 min/15 min statistics of how busy a block device has been, so
 * that based on that and how much dirty data we have we can keep writeback
 * from being starved
 *
 * Add a tracepoint or somesuch to watch for writeback starvation
 *
 * When btree depth > 1 and splitting an interior node, we have to make sure
 * alloc_bucket() cannot fail. This should be true but is not completely
 * obvious.
 *
 * Plugging?
 *
 * If data write is less than hard sector size of ssd, round up offset in open
 * bucket to the next whole sector
 *
 * Superblock needs to be fleshed out for multiple cache devices
 *
 * Add a sysfs tunable for the number of writeback IOs in flight
 *
 * Add a sysfs tunable for the number of open data buckets
 *
 * IO tracking: Can we track when one process is doing io on behalf of another?
 * IO tracking: Don't use just an average, weigh more recent stuff higher
 *
 * Test module load/unload
 */

#define MAX_NEED_GC		64
#define MAX_SAVE_PRIO		72

#define PTR_DIRTY_BIT		(((uint64_t) 1 << 36))

#define PTR_HASH(c, k)							\
	(((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))

#define insert_lock(s, b)	((b)->level <= (s)->lock)

/*
 * These macros are for recursing down the btree - they handle the details of
 * locking and looking up nodes in the cache for you. They're best treated as
 * mere syntax when reading code that uses them.
 *
 * op->lock determines whether we take a read or a write lock at a given depth.
 * If you've got a read lock and find that you need a write lock (i.e. you're
 * going to have to split), set op->lock and return -EINTR; btree_root() will
 * call you again and you'll have the correct lock.
 */

/**
 * btree - recurse down the btree on a specified key
 * @fn:		function to call, which will be passed the child node
 * @key:	key to recurse on
 * @b:		parent btree node
 * @op:		pointer to struct btree_op
 */
#define btree(fn, key, b, op, ...)					\
({									\
	int _r, l = (b)->level - 1;					\
	bool _w = l <= (op)->lock;					\
	struct btree *_child = bch_btree_node_get((b)->c, op, key, l,	\
						  _w, b);		\
	if (!IS_ERR(_child)) {						\
		_r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__);	\
		rw_unlock(_w, _child);					\
	} else								\
		_r = PTR_ERR(_child);					\
	_r;								\
})
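
/*
 * Illustrative sketch, not part of the driver: a recursive traversal built
 * on btree() follows the -EINTR convention described above. The function
 * below is hypothetical (bch_btree_check_recurse() later in this file is a
 * real caller of the same shape); note fn expands to bch_btree_ ## fn:
 *
 *	static int bch_btree_map_recurse(struct btree *b, struct btree_op *op)
 *	{
 *		struct bkey *k;
 *		struct btree_iter iter;
 *		int ret = 0;
 *
 *		bch_btree_iter_init(&b->keys, &iter, NULL);
 *
 *		while (!ret &&
 *		       (k = bch_btree_iter_next_filter(&iter, &b->keys,
 *						       bch_ptr_bad)))
 *			if (b->level)
 *				ret = btree(map_recurse, k, b, op);
 *
 *		return ret;
 *	}
 *
 * btree() looks up and locks the child that k points to (a write lock only
 * if the child's level is <= op->lock), calls bch_btree_map_recurse() on it,
 * and unlocks it before handing back the callee's return value.
 */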

/**
 * btree_root - call a function on the root of the btree
 * @fn:		function to call, which will be passed the root node
 * @c:		cache set
 * @op:		pointer to struct btree_op
 */
#define btree_root(fn, c, op, ...)					\
({									\
	int _r = -EINTR;						\
	do {								\
		struct btree *_b = (c)->root;				\
		bool _w = insert_lock(op, _b);				\
		rw_lock(_w, _b, _b->level);				\
		if (_b == (c)->root &&					\
		    _w == insert_lock(op, _b)) {			\
			_r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__);	\
		}							\
		rw_unlock(_w, _b);					\
		bch_cannibalize_unlock(c);				\
		if (_r == -EINTR)					\
			schedule();					\
	} while (_r == -EINTR);						\
									\
	finish_wait(&(c)->btree_cache_wait, &(op)->wait);		\
	_r;								\
})
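
/*
 * For example, garbage collection below calls
 * btree_root(gc_root, c, &op, &writes, &stats), which locks c->root and
 * expands to bch_btree_gc_root(_b, &op, &writes, &stats), retrying (and
 * releasing the cannibalize lock) for as long as the callee returns -EINTR,
 * e.g. after a lock upgrade or a node split invalidated the traversal.
 */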

static inline struct bset *write_block(struct btree *b)
{
	return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c);
}

static void bch_btree_init_next(struct btree *b)
{
	/* If not a leaf node, always sort */
	if (b->level && b->keys.nsets)
		bch_btree_sort(&b->keys, &b->c->sort);
	else
		bch_btree_sort_lazy(&b->keys, &b->c->sort);

	if (b->written < btree_blocks(b))
		bch_bset_init_next(&b->keys, write_block(b),
				   bset_magic(&b->c->sb));
}

/* Btree key manipulation */

void bkey_put(struct cache_set *c, struct bkey *k)
{
	unsigned i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i))
			atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
}

/* Btree IO */

static uint64_t btree_csum_set(struct btree *b, struct bset *i)
{
	uint64_t crc = b->key.ptr[0];
	void *data = (void *) i + 8, *end = bset_bkey_last(i);

	crc = bch_crc64_update(crc, data, end - data);
	return crc ^ 0xffffffffffffffffULL;
}
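
/*
 * Note on btree_csum_set(): the crc is seeded with the node's first pointer,
 * so identical bset contents checksum differently in different buckets, and
 * it deliberately skips the first 8 bytes of the bset - the csum field
 * itself. A rough sketch of the layout it covers, following the bset
 * definition in the bcache on-disk format headers:
 *
 *	struct bset {
 *		__u64	csum;		// not covered by the crc
 *		__u64	magic;		// covered from here ...
 *		__u64	seq;
 *		__u32	version;
 *		__u32	keys;
 *		...			// ... through bset_bkey_last()
 *	};
 */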

void bch_btree_node_read_done(struct btree *b)
{
	const char *err = "bad btree header";
	struct bset *i = btree_bset_first(b);
	struct btree_iter *iter;

	iter = mempool_alloc(b->c->fill_iter, GFP_NOIO);
	iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
	iter->used = 0;

#ifdef CONFIG_BCACHE_DEBUG
	iter->b = &b->keys;
#endif

	if (!i->seq)
		goto err;

	for (;
	     b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq;
	     i = write_block(b)) {
		err = "unsupported bset version";
		if (i->version > BCACHE_BSET_VERSION)
			goto err;

		err = "bad btree header";
		if (b->written + set_blocks(i, block_bytes(b->c)) >
		    btree_blocks(b))
			goto err;

		err = "bad magic";
		if (i->magic != bset_magic(&b->c->sb))
			goto err;

		err = "bad checksum";
		switch (i->version) {
		case 0:
			if (i->csum != csum_set(i))
				goto err;
			break;
		case BCACHE_BSET_VERSION:
			if (i->csum != btree_csum_set(b, i))
				goto err;
			break;
		}

		err = "empty set";
		if (i != b->keys.set[0].data && !i->keys)
			goto err;

		bch_btree_iter_push(iter, i->start, bset_bkey_last(i));

		b->written += set_blocks(i, block_bytes(b->c));
	}

	err = "corrupted btree";
	for (i = write_block(b);
	     bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
	     i = ((void *) i) + block_bytes(b->c))
		if (i->seq == b->keys.set[0].data->seq)
			goto err;

	bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);

	i = b->keys.set[0].data;
	err = "short btree key";
	if (b->keys.set[0].size &&
	    bkey_cmp(&b->key, &b->keys.set[0].end) < 0)
		goto err;

	if (b->written < btree_blocks(b))
		bch_bset_init_next(&b->keys, write_block(b),
				   bset_magic(&b->c->sb));
out:
	mempool_free(iter, b->c->fill_iter);
	return;
err:
	set_btree_node_io_error(b);
	bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys",
			    err, PTR_BUCKET_NR(b->c, &b->key, 0),
			    bset_block_offset(b, i), i->keys);
	goto out;
}

static void btree_node_read_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;
	closure_put(cl);
}

static void bch_btree_node_read(struct btree *b)
{
	uint64_t start_time = local_clock();
	struct closure cl;
	struct bio *bio;

	trace_bcache_btree_read(b);

	closure_init_stack(&cl);

	bio = bch_bbio_alloc(b->c);
	bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
	bio->bi_end_io	= btree_node_read_endio;
	bio->bi_private	= &cl;
	bio->bi_opf = REQ_OP_READ | REQ_META;

	bch_bio_map(bio, b->keys.set[0].data);

	bch_submit_bbio(bio, b->c, &b->key, 0);
	closure_sync(&cl);

	if (bio->bi_error)
		set_btree_node_io_error(b);

	bch_bbio_free(bio, b->c);

	if (btree_node_io_error(b))
		goto err;

	bch_btree_node_read_done(b);
	bch_time_stats_update(&b->c->btree_read_time, start_time);

	return;
err:
	bch_cache_set_error(b->c, "io error reading bucket %zu",
			    PTR_BUCKET_NR(b->c, &b->key, 0));
}

static void btree_complete_write(struct btree *b, struct btree_write *w)
{
	if (w->prio_blocked &&
	    !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
		wake_up_allocators(b->c);

	if (w->journal) {
		atomic_dec_bug(w->journal);
		__closure_wake_up(&b->c->journal.wait);
	}

	w->prio_blocked	= 0;
	w->journal	= NULL;
}

static void btree_node_write_unlock(struct closure *cl)
{
	struct btree *b = container_of(cl, struct btree, io);

	up(&b->io_mutex);
}

static void __btree_node_write_done(struct closure *cl)
{
	struct btree *b = container_of(cl, struct btree, io);
	struct btree_write *w = btree_prev_write(b);

	bch_bbio_free(b->bio, b->c);
	b->bio = NULL;
	btree_complete_write(b, w);

	if (btree_node_dirty(b))
		schedule_delayed_work(&b->work, 30 * HZ);

	closure_return_with_destructor(cl, btree_node_write_unlock);
}

static void btree_node_write_done(struct closure *cl)
{
	struct btree *b = container_of(cl, struct btree, io);

	bio_free_pages(b->bio);
	__btree_node_write_done(cl);
}

static void btree_node_write_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;
	struct btree *b = container_of(cl, struct btree, io);

	if (bio->bi_error)
		set_btree_node_io_error(b);

	bch_bbio_count_io_errors(b->c, bio, bio->bi_error, "writing btree");
	closure_put(cl);
}

static void do_btree_node_write(struct btree *b)
{
	struct closure *cl = &b->io;
	struct bset *i = btree_bset_last(b);
	BKEY_PADDED(key) k;

	i->version	= BCACHE_BSET_VERSION;
	i->csum		= btree_csum_set(b, i);

	BUG_ON(b->bio);
	b->bio = bch_bbio_alloc(b->c);

	b->bio->bi_end_io	= btree_node_write_endio;
	b->bio->bi_private	= cl;
	b->bio->bi_iter.bi_size	= roundup(set_bytes(i), block_bytes(b->c));
	b->bio->bi_opf		= REQ_OP_WRITE | REQ_META | REQ_FUA;
	bch_bio_map(b->bio, i);

	/*
	 * If we're appending to a leaf node, we don't technically need FUA -
	 * this write just needs to be persisted before the next journal write,
	 * which will be marked FLUSH|FUA.
	 *
	 * Similarly if we're writing a new btree root - the pointer is going to
	 * be in the next journal entry.
	 *
	 * But if we're writing a new btree node (that isn't a root) or
	 * appending to a non leaf btree node, we need either FUA or a flush
	 * when we write the parent with the new pointer. FUA is cheaper than a
	 * flush, and writes appending to leaf nodes aren't blocking anything so
	 * just make all btree node writes FUA to keep things sane.
	 */

	bkey_copy(&k.key, &b->key);
	SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
		       bset_sector_offset(&b->keys, i));

	if (!bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
		int j;
		struct bio_vec *bv;
		void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));

		bio_for_each_segment_all(bv, b->bio, j)
			memcpy(page_address(bv->bv_page),
			       base + j * PAGE_SIZE, PAGE_SIZE);

		bch_submit_bbio(b->bio, b->c, &k.key, 0);

		continue_at(cl, btree_node_write_done, NULL);
	} else {
		b->bio->bi_vcnt = 0;
		bch_bio_map(b->bio, i);

		bch_submit_bbio(b->bio, b->c, &k.key, 0);

		closure_sync(cl);
		continue_at_nobarrier(cl, __btree_node_write_done, NULL);
	}
}
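
/*
 * do_btree_node_write() has two submission paths: if bounce pages can be
 * allocated without sleeping, the bset is memcpy()d into them and the write
 * completes asynchronously (btree_node_write_done() frees the pages);
 * otherwise the bio is mapped directly over the in-memory bset and the write
 * is waited on synchronously, so the bset can't be touched while the IO is
 * in flight.
 */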

void __bch_btree_node_write(struct btree *b, struct closure *parent)
{
	struct bset *i = btree_bset_last(b);

	lockdep_assert_held(&b->write_lock);

	trace_bcache_btree_write(b);

	BUG_ON(current->bio_list);
	BUG_ON(b->written >= btree_blocks(b));
	BUG_ON(b->written && !i->keys);
	BUG_ON(btree_bset_first(b)->seq != i->seq);
	bch_check_keys(&b->keys, "writing");

	cancel_delayed_work(&b->work);

	/* If caller isn't waiting for write, parent refcount is cache set */
	down(&b->io_mutex);
	closure_init(&b->io, parent ?: &b->c->cl);

	clear_bit(BTREE_NODE_dirty,	 &b->flags);
	change_bit(BTREE_NODE_write_idx, &b->flags);

	do_btree_node_write(b);

	atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size,
			&PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);

	b->written += set_blocks(i, block_bytes(b->c));
}

void bch_btree_node_write(struct btree *b, struct closure *parent)
{
	unsigned nsets = b->keys.nsets;

	lockdep_assert_held(&b->lock);

	__bch_btree_node_write(b, parent);

	/*
	 * do verify if there was more than one set initially (i.e. we did a
	 * sort) and we sorted down to a single set:
	 */
	if (nsets && !b->keys.nsets)
		bch_btree_verify(b);

	bch_btree_init_next(b);
}

static void bch_btree_node_write_sync(struct btree *b)
{
	struct closure cl;

	closure_init_stack(&cl);

	mutex_lock(&b->write_lock);
	bch_btree_node_write(b, &cl);
	mutex_unlock(&b->write_lock);

	closure_sync(&cl);
}

static void btree_node_write_work(struct work_struct *w)
{
	struct btree *b = container_of(to_delayed_work(w), struct btree, work);

	mutex_lock(&b->write_lock);
	if (btree_node_dirty(b))
		__bch_btree_node_write(b, NULL);
	mutex_unlock(&b->write_lock);
}

static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
{
	struct bset *i = btree_bset_last(b);
	struct btree_write *w = btree_current_write(b);

	lockdep_assert_held(&b->write_lock);

	BUG_ON(!b->written);
	BUG_ON(!i->keys);

	if (!btree_node_dirty(b))
		schedule_delayed_work(&b->work, 30 * HZ);

	set_btree_node_dirty(b);

	if (journal_ref) {
		if (w->journal &&
		    journal_pin_cmp(b->c, w->journal, journal_ref)) {
			atomic_dec_bug(w->journal);
			w->journal = NULL;
		}

		if (!w->journal) {
			w->journal = journal_ref;
			atomic_inc(w->journal);
		}
	}

	/* Force write if set is too big */
	if (set_bytes(i) > PAGE_SIZE - 48 &&
	    !current->bio_list)
		bch_btree_node_write(b, NULL);
}

/*
 * Btree in memory cache - allocation/freeing
 * mca -> memory cache
 */

#define mca_reserve(c)	(((c->root && c->root->level)		\
			  ? c->root->level : 1) * 8 + 16)
#define mca_can_free(c)						\
	max_t(int, 0, c->btree_cache_used - mca_reserve(c))
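
/*
 * Worked example: with a three level tree (c->root->level == 2) the reserve
 * is 2 * 8 + 16 = 32 cached nodes, and mca_can_free() only lets the shrinker
 * reclaim whatever the cache holds beyond those 32.
 */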

static void mca_data_free(struct btree *b)
{
	BUG_ON(b->io_mutex.count != 1);

	bch_btree_keys_free(&b->keys);

	b->c->btree_cache_used--;
	list_move(&b->list, &b->c->btree_cache_freed);
}

static void mca_bucket_free(struct btree *b)
{
	BUG_ON(btree_node_dirty(b));

	b->key.ptr[0] = 0;
	hlist_del_init_rcu(&b->hash);
	list_move(&b->list, &b->c->btree_cache_freeable);
}

static unsigned btree_order(struct bkey *k)
{
	return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
}
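
/*
 * Example: a btree node key with KEY_SIZE() of 32 sectors spans
 * 32 / PAGE_SECTORS = 4 pages with 4k pages, so btree_order() returns
 * ilog2(4) = 2, i.e. an order 2 page allocation.
 */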

static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
{
	if (!bch_btree_keys_alloc(&b->keys,
				  max_t(unsigned,
					ilog2(b->c->btree_pages),
					btree_order(k)),
				  gfp)) {
		b->c->btree_cache_used++;
		list_move(&b->list, &b->c->btree_cache);
	} else {
		list_move(&b->list, &b->c->btree_cache_freed);
	}
}

static struct btree *mca_bucket_alloc(struct cache_set *c,
				      struct bkey *k, gfp_t gfp)
{
	struct btree *b = kzalloc(sizeof(struct btree), gfp);
	if (!b)
		return NULL;

	init_rwsem(&b->lock);
	lockdep_set_novalidate_class(&b->lock);
	mutex_init(&b->write_lock);
	lockdep_set_novalidate_class(&b->write_lock);
	INIT_LIST_HEAD(&b->list);
	INIT_DELAYED_WORK(&b->work, btree_node_write_work);
	b->c = c;
	sema_init(&b->io_mutex, 1);

	mca_data_alloc(b, k, gfp);
	return b;
}

static int mca_reap(struct btree *b, unsigned min_order, bool flush)
{
	struct closure cl;

	closure_init_stack(&cl);
	lockdep_assert_held(&b->c->bucket_lock);

	if (!down_write_trylock(&b->lock))
		return -ENOMEM;

	BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data);

	if (b->keys.page_order < min_order)
		goto out_unlock;

	if (!flush) {
		if (btree_node_dirty(b))
			goto out_unlock;

		if (down_trylock(&b->io_mutex))
			goto out_unlock;
		up(&b->io_mutex);
	}

	mutex_lock(&b->write_lock);
	if (btree_node_dirty(b))
		__bch_btree_node_write(b, &cl);
	mutex_unlock(&b->write_lock);

	closure_sync(&cl);

	/* wait for any in flight btree write */
	down(&b->io_mutex);
	up(&b->io_mutex);

	return 0;
out_unlock:
	rw_unlock(true, b);
	return -ENOMEM;
}

static unsigned long bch_mca_scan(struct shrinker *shrink,
				  struct shrink_control *sc)
{
	struct cache_set *c = container_of(shrink, struct cache_set, shrink);
	struct btree *b, *t;
	unsigned long i, nr = sc->nr_to_scan;
	unsigned long freed = 0;

	if (c->shrinker_disabled)
		return SHRINK_STOP;

	if (c->btree_cache_alloc_lock)
		return SHRINK_STOP;

	/* Return -1 if we can't do anything right now */
	if (sc->gfp_mask & __GFP_IO)
		mutex_lock(&c->bucket_lock);
	else if (!mutex_trylock(&c->bucket_lock))
		return -1;

	/*
	 * It's _really_ critical that we don't free too many btree nodes - we
	 * have to always leave ourselves a reserve. The reserve is how we
	 * guarantee that allocating memory for a new btree node can always
	 * succeed, so that inserting keys into the btree can always succeed and
	 * IO can always make forward progress:
	 */
	nr /= c->btree_pages;
	nr = min_t(unsigned long, nr, mca_can_free(c));

	i = 0;
	list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) {
		if (freed >= nr)
			break;

		if (++i > 3 &&
		    !mca_reap(b, 0, false)) {
			mca_data_free(b);
			rw_unlock(true, b);
			freed++;
		}
	}

	for (i = 0; (nr--) && i < c->btree_cache_used; i++) {
		if (list_empty(&c->btree_cache))
			goto out;

		b = list_first_entry(&c->btree_cache, struct btree, list);
		list_rotate_left(&c->btree_cache);

		if (!b->accessed &&
		    !mca_reap(b, 0, false)) {
			mca_bucket_free(b);
			mca_data_free(b);
			rw_unlock(true, b);
			freed++;
		} else
			b->accessed = 0;
	}
out:
	mutex_unlock(&c->bucket_lock);
	return freed;
}

static unsigned long bch_mca_count(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	struct cache_set *c = container_of(shrink, struct cache_set, shrink);

	if (c->shrinker_disabled)
		return 0;

	if (c->btree_cache_alloc_lock)
		return 0;

	return mca_can_free(c) * c->btree_pages;
}

void bch_btree_cache_free(struct cache_set *c)
{
	struct btree *b;
	struct closure cl;
	closure_init_stack(&cl);

	if (c->shrink.list.next)
		unregister_shrinker(&c->shrink);

	mutex_lock(&c->bucket_lock);

#ifdef CONFIG_BCACHE_DEBUG
	if (c->verify_data)
		list_move(&c->verify_data->list, &c->btree_cache);

	free_pages((unsigned long) c->verify_ondisk, ilog2(bucket_pages(c)));
#endif

	list_splice(&c->btree_cache_freeable,
		    &c->btree_cache);

	while (!list_empty(&c->btree_cache)) {
		b = list_first_entry(&c->btree_cache, struct btree, list);

		if (btree_node_dirty(b))
			btree_complete_write(b, btree_current_write(b));
		clear_bit(BTREE_NODE_dirty, &b->flags);

		mca_data_free(b);
	}

	while (!list_empty(&c->btree_cache_freed)) {
		b = list_first_entry(&c->btree_cache_freed,
				     struct btree, list);
		list_del(&b->list);
		cancel_delayed_work_sync(&b->work);
		kfree(b);
	}

	mutex_unlock(&c->bucket_lock);
}

int bch_btree_cache_alloc(struct cache_set *c)
{
	unsigned i;

	for (i = 0; i < mca_reserve(c); i++)
		if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
			return -ENOMEM;

	list_splice_init(&c->btree_cache,
			 &c->btree_cache_freeable);

#ifdef CONFIG_BCACHE_DEBUG
	mutex_init(&c->verify_lock);

	c->verify_ondisk = (void *)
		__get_free_pages(GFP_KERNEL, ilog2(bucket_pages(c)));

	c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);

	if (c->verify_data &&
	    c->verify_data->keys.set->data)
		list_del_init(&c->verify_data->list);
	else
		c->verify_data = NULL;
#endif

	c->shrink.count_objects = bch_mca_count;
	c->shrink.scan_objects = bch_mca_scan;
	c->shrink.seeks = 4;
	c->shrink.batch = c->btree_pages * 2;
	register_shrinker(&c->shrink);

	return 0;
}

/* Btree in memory cache - hash table */

static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k)
{
	return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)];
}

static struct btree *mca_find(struct cache_set *c, struct bkey *k)
{
	struct btree *b;

	rcu_read_lock();
	hlist_for_each_entry_rcu(b, mca_hash(c, k), hash)
		if (PTR_HASH(c, &b->key) == PTR_HASH(c, k))
			goto out;
	b = NULL;
out:
	rcu_read_unlock();
	return b;
}

static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op)
{
	struct task_struct *old;

	old = cmpxchg(&c->btree_cache_alloc_lock, NULL, current);
	if (old && old != current) {
		if (op)
			prepare_to_wait(&c->btree_cache_wait, &op->wait,
					TASK_UNINTERRUPTIBLE);
		return -EINTR;
	}

	return 0;
}

static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
				     struct bkey *k)
{
	struct btree *b;

	trace_bcache_btree_cache_cannibalize(c);

	if (mca_cannibalize_lock(c, op))
		return ERR_PTR(-EINTR);

	list_for_each_entry_reverse(b, &c->btree_cache, list)
		if (!mca_reap(b, btree_order(k), false))
			return b;

	list_for_each_entry_reverse(b, &c->btree_cache, list)
		if (!mca_reap(b, btree_order(k), true))
			return b;

	WARN(1, "btree cache cannibalize failed\n");
	return ERR_PTR(-ENOMEM);
}

/*
 * We can only have one thread cannibalizing other cached btree nodes at a time,
 * or we'll deadlock. We use an open coded mutex to ensure that, which
 * mca_cannibalize() will take. This means every time we unlock the root of
 * the btree, we need to release this lock if we have it held.
 */
static void bch_cannibalize_unlock(struct cache_set *c)
{
	if (c->btree_cache_alloc_lock == current) {
		c->btree_cache_alloc_lock = NULL;
		wake_up(&c->btree_cache_wait);
	}
}
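
/*
 * The open coded mutex is c->btree_cache_alloc_lock: it's taken via
 * mca_cannibalize_lock() (from mca_cannibalize() and btree_check_reserve())
 * and dropped here. btree_root() calls bch_cannibalize_unlock() on every
 * pass around its retry loop, which is what guarantees the release
 * described above.
 */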

static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op,
			       struct bkey *k, int level)
{
	struct btree *b;

	BUG_ON(current->bio_list);

	lockdep_assert_held(&c->bucket_lock);

	if (mca_find(c, k))
		return NULL;

	/* btree_node_free() doesn't free memory; it sticks the node on the end
	 * of the list. Check if there are any freed nodes there:
	 */
	list_for_each_entry(b, &c->btree_cache_freeable, list)
		if (!mca_reap(b, btree_order(k), false))
			goto out;

	/* We never free struct btree itself, just the memory that holds the on
	 * disk node. Check the freed list before allocating a new one:
	 */
	list_for_each_entry(b, &c->btree_cache_freed, list)
		if (!mca_reap(b, 0, false)) {
			mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
			if (!b->keys.set[0].data)
				goto err;
			else
				goto out;
		}

	b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO);
	if (!b)
		goto err;

	BUG_ON(!down_write_trylock(&b->lock));
	if (!b->keys.set->data)
		goto err;
out:
	BUG_ON(b->io_mutex.count != 1);

	bkey_copy(&b->key, k);
	list_move(&b->list, &c->btree_cache);
	hlist_del_init_rcu(&b->hash);
	hlist_add_head_rcu(&b->hash, mca_hash(c, k));

	lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
	b->parent	= (void *) ~0UL;
	b->flags	= 0;
	b->written	= 0;
	b->level	= level;

	if (!b->level)
		bch_btree_keys_init(&b->keys, &bch_extent_keys_ops,
				    &b->c->expensive_debug_checks);
	else
		bch_btree_keys_init(&b->keys, &bch_btree_keys_ops,
				    &b->c->expensive_debug_checks);

	return b;
err:
	if (b)
		rw_unlock(true, b);

	b = mca_cannibalize(c, op, k);
	if (!IS_ERR(b))
		goto out;

	return b;
}

/**
 * bch_btree_node_get - find a btree node in the cache and lock it, reading it
 * in from disk if necessary.
 *
 * If IO is necessary and running under generic_make_request, returns -EAGAIN.
 *
 * The btree node will have either a read or a write lock held, depending on
 * level and op->lock.
 */
struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
				 struct bkey *k, int level, bool write,
				 struct btree *parent)
{
	int i = 0;
	struct btree *b;

	BUG_ON(level < 0);
retry:
	b = mca_find(c, k);

	if (!b) {
		if (current->bio_list)
			return ERR_PTR(-EAGAIN);

		mutex_lock(&c->bucket_lock);
		b = mca_alloc(c, op, k, level);
		mutex_unlock(&c->bucket_lock);

		if (!b)
			goto retry;
		if (IS_ERR(b))
			return b;

		bch_btree_node_read(b);

		if (!write)
			downgrade_write(&b->lock);
	} else {
		rw_lock(write, b, level);
		if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) {
			rw_unlock(write, b);
			goto retry;
		}
		BUG_ON(b->level != level);
	}

	b->parent = parent;
	b->accessed = 1;

	for (; i <= b->keys.nsets && b->keys.set[i].size; i++) {
		prefetch(b->keys.set[i].tree);
		prefetch(b->keys.set[i].data);
	}

	for (; i <= b->keys.nsets; i++)
		prefetch(b->keys.set[i].data);

	if (btree_node_io_error(b)) {
		rw_unlock(write, b);
		return ERR_PTR(-EIO);
	}

	BUG_ON(!b->written);

	return b;
}

static void btree_node_prefetch(struct btree *parent, struct bkey *k)
{
	struct btree *b;

	mutex_lock(&parent->c->bucket_lock);
	b = mca_alloc(parent->c, NULL, k, parent->level - 1);
	mutex_unlock(&parent->c->bucket_lock);

	if (!IS_ERR_OR_NULL(b)) {
		b->parent = parent;
		bch_btree_node_read(b);
		rw_unlock(true, b);
	}
}

/* Btree alloc */

static void btree_node_free(struct btree *b)
{
	trace_bcache_btree_node_free(b);

	BUG_ON(b == b->c->root);

	mutex_lock(&b->write_lock);

	if (btree_node_dirty(b))
		btree_complete_write(b, btree_current_write(b));
	clear_bit(BTREE_NODE_dirty, &b->flags);

	mutex_unlock(&b->write_lock);

	cancel_delayed_work(&b->work);

	mutex_lock(&b->c->bucket_lock);
	bch_bucket_free(b->c, &b->key);
	mca_bucket_free(b);
	mutex_unlock(&b->c->bucket_lock);
}

struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
				     int level, bool wait,
				     struct btree *parent)
{
	BKEY_PADDED(key) k;
	struct btree *b = ERR_PTR(-EAGAIN);

	mutex_lock(&c->bucket_lock);
retry:
	if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait))
		goto err;

	bkey_put(c, &k.key);
	SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);

	b = mca_alloc(c, op, &k.key, level);
	if (IS_ERR(b))
		goto err_free;

	if (!b) {
		cache_bug(c,
			"Tried to allocate bucket that was in btree cache");
		goto retry;
	}

	b->accessed = 1;
	b->parent = parent;
	bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb));

	mutex_unlock(&c->bucket_lock);

	trace_bcache_btree_node_alloc(b);
	return b;
err_free:
	bch_bucket_free(c, &k.key);
err:
	mutex_unlock(&c->bucket_lock);

	trace_bcache_btree_node_alloc_fail(c);
	return b;
}

static struct btree *bch_btree_node_alloc(struct cache_set *c,
					  struct btree_op *op, int level,
					  struct btree *parent)
{
	return __bch_btree_node_alloc(c, op, level, op != NULL, parent);
}

static struct btree *btree_node_alloc_replacement(struct btree *b,
						  struct btree_op *op)
{
	struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent);
	if (!IS_ERR_OR_NULL(n)) {
		mutex_lock(&n->write_lock);
		bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
		bkey_copy_key(&n->key, &b->key);
		mutex_unlock(&n->write_lock);
	}

	return n;
}

static void make_btree_freeing_key(struct btree *b, struct bkey *k)
{
	unsigned i;

	mutex_lock(&b->c->bucket_lock);

	atomic_inc(&b->c->prio_blocked);

	bkey_copy(k, &b->key);
	bkey_copy_key(k, &ZERO_KEY);

	for (i = 0; i < KEY_PTRS(k); i++)
		SET_PTR_GEN(k, i,
			    bch_inc_gen(PTR_CACHE(b->c, &b->key, i),
					PTR_BUCKET(b->c, &b->key, i)));

	mutex_unlock(&b->c->bucket_lock);
}

static int btree_check_reserve(struct btree *b, struct btree_op *op)
{
	struct cache_set *c = b->c;
	struct cache *ca;
	unsigned i, reserve = (c->root->level - b->level) * 2 + 1;

	mutex_lock(&c->bucket_lock);

	for_each_cache(ca, c, i)
		if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
			if (op)
				prepare_to_wait(&c->btree_cache_wait, &op->wait,
						TASK_UNINTERRUPTIBLE);
			mutex_unlock(&c->bucket_lock);
			return -EINTR;
		}

	mutex_unlock(&c->bucket_lock);

	return mca_cannibalize_lock(b->c, op);
}
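
/*
 * Worked example: splitting a leaf under a root at level 2 requires
 * (2 - 0) * 2 + 1 = 5 free buckets per cache - enough for two new nodes at
 * each level that may split, plus one for a new root.
 */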

/* Garbage collection */

static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
				    struct bkey *k)
{
	uint8_t stale = 0;
	unsigned i;
	struct bucket *g;

	/*
	 * ptr_invalid() can't return true for the keys that mark btree nodes as
	 * freed, but since ptr_bad() returns true we'll never actually use them
	 * for anything and thus we don't want to mark their pointers here
	 */
	if (!bkey_cmp(k, &ZERO_KEY))
		return stale;

	for (i = 0; i < KEY_PTRS(k); i++) {
		if (!ptr_available(c, k, i))
			continue;

		g = PTR_BUCKET(c, k, i);

		if (gen_after(g->last_gc, PTR_GEN(k, i)))
			g->last_gc = PTR_GEN(k, i);

		if (ptr_stale(c, k, i)) {
			stale = max(stale, ptr_stale(c, k, i));
			continue;
		}

		cache_bug_on(GC_MARK(g) &&
			     (GC_MARK(g) == GC_MARK_METADATA) != (level != 0),
			     c, "inconsistent ptrs: mark = %llu, level = %i",
			     GC_MARK(g), level);

		if (level)
			SET_GC_MARK(g, GC_MARK_METADATA);
		else if (KEY_DIRTY(k))
			SET_GC_MARK(g, GC_MARK_DIRTY);
		else if (!GC_MARK(g))
			SET_GC_MARK(g, GC_MARK_RECLAIMABLE);

		/* guard against overflow */
		SET_GC_SECTORS_USED(g, min_t(unsigned,
					     GC_SECTORS_USED(g) + KEY_SIZE(k),
					     MAX_GC_SECTORS_USED));

		BUG_ON(!GC_SECTORS_USED(g));
	}

	return stale;
}

#define btree_mark_key(b, k)	__bch_btree_mark_key(b->c, b->level, k)

void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
{
	unsigned i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i) &&
		    !ptr_stale(c, k, i)) {
			struct bucket *b = PTR_BUCKET(c, k, i);

			b->gen = PTR_GEN(k, i);

			if (level && bkey_cmp(k, &ZERO_KEY))
				b->prio = BTREE_PRIO;
			else if (!level && b->prio == BTREE_PRIO)
				b->prio = INITIAL_PRIO;
		}

	__bch_btree_mark_key(c, level, k);
}

static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
{
	uint8_t stale = 0;
	unsigned keys = 0, good_keys = 0;
	struct bkey *k;
	struct btree_iter iter;
	struct bset_tree *t;

	gc->nodes++;

	for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
		stale = max(stale, btree_mark_key(b, k));
		keys++;

		if (bch_ptr_bad(&b->keys, k))
			continue;

		gc->key_bytes += bkey_u64s(k);
		gc->nkeys++;
		good_keys++;

		gc->data += KEY_SIZE(k);
	}

	for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++)
		btree_bug_on(t->size &&
			     bset_written(&b->keys, t) &&
			     bkey_cmp(&b->key, &t->end) < 0,
			     b, "found short btree key in gc");

	if (b->c->gc_always_rewrite)
		return true;

	if (stale > 10)
		return true;

	if ((keys - good_keys) * 2 > keys)
		return true;

	return false;
}

#define GC_MERGE_NODES	4U

struct gc_merge_info {
	struct btree	*b;
	unsigned	keys;
};

static int bch_btree_insert_node(struct btree *, struct btree_op *,
				 struct keylist *, atomic_t *, struct bkey *);

static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
			     struct gc_stat *gc, struct gc_merge_info *r)
{
	unsigned i, nodes = 0, keys = 0, blocks;
	struct btree *new_nodes[GC_MERGE_NODES];
	struct keylist keylist;
	struct closure cl;
	struct bkey *k;

	bch_keylist_init(&keylist);

	if (btree_check_reserve(b, NULL))
		return 0;

	memset(new_nodes, 0, sizeof(new_nodes));
	closure_init_stack(&cl);

	while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
		keys += r[nodes++].keys;

	blocks = btree_default_blocks(b->c) * 2 / 3;

	if (nodes < 2 ||
	    __set_blocks(b->keys.set[0].data, keys,
			 block_bytes(b->c)) > blocks * (nodes - 1))
		return 0;

	for (i = 0; i < nodes; i++) {
		new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL);
		if (IS_ERR_OR_NULL(new_nodes[i]))
			goto out_nocoalesce;
	}

	/*
	 * We have to check the reserve here, after we've allocated our new
	 * nodes, to make sure the insert below will succeed - we also check
	 * before as an optimization to potentially avoid a bunch of expensive
	 * allocs/sorts
	 */
	if (btree_check_reserve(b, NULL))
		goto out_nocoalesce;

	for (i = 0; i < nodes; i++)
		mutex_lock(&new_nodes[i]->write_lock);

	for (i = nodes - 1; i > 0; --i) {
		struct bset *n1 = btree_bset_first(new_nodes[i]);
		struct bset *n2 = btree_bset_first(new_nodes[i - 1]);
		struct bkey *k, *last = NULL;

		keys = 0;

		if (i > 1) {
			for (k = n2->start;
			     k < bset_bkey_last(n2);
			     k = bkey_next(k)) {
				if (__set_blocks(n1, n1->keys + keys +
						 bkey_u64s(k),
						 block_bytes(b->c)) > blocks)
					break;

				last = k;
				keys += bkey_u64s(k);
			}
		} else {
			/*
			 * Last node we're not getting rid of - we're getting
			 * rid of the node at r[0]. Have to try and fit all of
			 * the remaining keys into this node; we can't ensure
			 * they will always fit due to rounding and variable
			 * length keys (shouldn't be possible in practice,
			 * though)
			 */
			if (__set_blocks(n1, n1->keys + n2->keys,
					 block_bytes(b->c)) >
			    btree_blocks(new_nodes[i]))
				goto out_nocoalesce;

			keys = n2->keys;
			/* Take the key of the node we're getting rid of */
			last = &r->b->key;
		}

		BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c)) >
		       btree_blocks(new_nodes[i]));

		if (last)
			bkey_copy_key(&new_nodes[i]->key, last);

		memcpy(bset_bkey_last(n1),
		       n2->start,
		       (void *) bset_bkey_idx(n2, keys) - (void *) n2->start);

		n1->keys += keys;
		r[i].keys = n1->keys;

		memmove(n2->start,
			bset_bkey_idx(n2, keys),
			(void *) bset_bkey_last(n2) -
			(void *) bset_bkey_idx(n2, keys));

		n2->keys -= keys;

		if (__bch_keylist_realloc(&keylist,
					  bkey_u64s(&new_nodes[i]->key)))
			goto out_nocoalesce;

		bch_btree_node_write(new_nodes[i], &cl);
		bch_keylist_add(&keylist, &new_nodes[i]->key);
	}

	for (i = 0; i < nodes; i++)
		mutex_unlock(&new_nodes[i]->write_lock);

	closure_sync(&cl);

	/* We emptied out this node */
	BUG_ON(btree_bset_first(new_nodes[0])->keys);
	btree_node_free(new_nodes[0]);
	rw_unlock(true, new_nodes[0]);
	new_nodes[0] = NULL;

	for (i = 0; i < nodes; i++) {
		if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key)))
			goto out_nocoalesce;

		make_btree_freeing_key(r[i].b, keylist.top);
		bch_keylist_push(&keylist);
	}

	bch_btree_insert_node(b, op, &keylist, NULL, NULL);
	BUG_ON(!bch_keylist_empty(&keylist));

	for (i = 0; i < nodes; i++) {
		btree_node_free(r[i].b);
		rw_unlock(true, r[i].b);

		r[i].b = new_nodes[i];
	}

	memmove(r, r + 1, sizeof(r[0]) * (nodes - 1));
	r[nodes - 1].b = ERR_PTR(-EINTR);

	trace_bcache_btree_gc_coalesce(nodes);
	gc->nodes--;

	bch_keylist_free(&keylist);

	/* Invalidated our iterator */
	return -EINTR;

out_nocoalesce:
	closure_sync(&cl);

	while ((k = bch_keylist_pop(&keylist)))
		if (!bkey_cmp(k, &ZERO_KEY))
			atomic_dec(&b->c->prio_blocked);
	bch_keylist_free(&keylist);

	for (i = 0; i < nodes; i++)
		if (!IS_ERR_OR_NULL(new_nodes[i])) {
			btree_node_free(new_nodes[i]);
			rw_unlock(true, new_nodes[i]);
		}
	return 0;
}

static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
				 struct btree *replace)
{
	struct keylist keys;
	struct btree *n;

	if (btree_check_reserve(b, NULL))
		return 0;

	n = btree_node_alloc_replacement(replace, NULL);
	if (IS_ERR_OR_NULL(n))
		return 0;

	/* recheck reserve after allocating replacement node */
	if (btree_check_reserve(b, NULL)) {
		btree_node_free(n);
		rw_unlock(true, n);
		return 0;
	}

	bch_btree_node_write_sync(n);

	bch_keylist_init(&keys);
	bch_keylist_add(&keys, &n->key);

	make_btree_freeing_key(replace, keys.top);
	bch_keylist_push(&keys);

	bch_btree_insert_node(b, op, &keys, NULL, NULL);
	BUG_ON(!bch_keylist_empty(&keys));

	btree_node_free(replace);
	rw_unlock(true, n);

	/* Invalidated our iterator */
	return -EINTR;
}

static unsigned btree_gc_count_keys(struct btree *b)
{
	struct bkey *k;
	struct btree_iter iter;
	unsigned ret = 0;

	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
		ret += bkey_u64s(k);

	return ret;
}

static int btree_gc_recurse(struct btree *b, struct btree_op *op,
			    struct closure *writes, struct gc_stat *gc)
{
	int ret = 0;
	bool should_rewrite;
	struct bkey *k;
	struct btree_iter iter;
	struct gc_merge_info r[GC_MERGE_NODES];
	struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1;

	bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);

	for (i = r; i < r + ARRAY_SIZE(r); i++)
		i->b = ERR_PTR(-EINTR);

	while (1) {
		k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
		if (k) {
			r->b = bch_btree_node_get(b->c, op, k, b->level - 1,
						  true, b);
			if (IS_ERR(r->b)) {
				ret = PTR_ERR(r->b);
				break;
			}

			r->keys = btree_gc_count_keys(r->b);

			ret = btree_gc_coalesce(b, op, gc, r);
			if (ret)
				break;
		}

		if (!last->b)
			break;

		if (!IS_ERR(last->b)) {
			should_rewrite = btree_gc_mark_node(last->b, gc);
			if (should_rewrite) {
				ret = btree_gc_rewrite_node(b, op, last->b);
				if (ret)
					break;
			}

			if (last->b->level) {
				ret = btree_gc_recurse(last->b, op, writes, gc);
				if (ret)
					break;
			}

			bkey_copy_key(&b->c->gc_done, &last->b->key);

			/*
			 * Must flush leaf nodes before gc ends, since replace
			 * operations aren't journalled
			 */
			mutex_lock(&last->b->write_lock);
			if (btree_node_dirty(last->b))
				bch_btree_node_write(last->b, writes);
			mutex_unlock(&last->b->write_lock);
			rw_unlock(true, last->b);
		}

		memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1));
		r->b = NULL;

		if (need_resched()) {
			ret = -EAGAIN;
			break;
		}
	}

	for (i = r; i < r + ARRAY_SIZE(r); i++)
		if (!IS_ERR_OR_NULL(i->b)) {
			mutex_lock(&i->b->write_lock);
			if (btree_node_dirty(i->b))
				bch_btree_node_write(i->b, writes);
			mutex_unlock(&i->b->write_lock);
			rw_unlock(true, i->b);
		}

	return ret;
}

static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
			     struct closure *writes, struct gc_stat *gc)
{
	struct btree *n = NULL;
	int ret = 0;
	bool should_rewrite;

	should_rewrite = btree_gc_mark_node(b, gc);
	if (should_rewrite) {
		n = btree_node_alloc_replacement(b, NULL);

		if (!IS_ERR_OR_NULL(n)) {
			bch_btree_node_write_sync(n);

			bch_btree_set_root(n);
			btree_node_free(b);
			rw_unlock(true, n);

			return -EINTR;
		}
	}

	__bch_btree_mark_key(b->c, b->level + 1, &b->key);

	if (b->level) {
		ret = btree_gc_recurse(b, op, writes, gc);
		if (ret)
			return ret;
	}

	bkey_copy_key(&b->c->gc_done, &b->key);

	return ret;
}

static void btree_gc_start(struct cache_set *c)
{
	struct cache *ca;
	struct bucket *b;
	unsigned i;

	if (!c->gc_mark_valid)
		return;

	mutex_lock(&c->bucket_lock);

	c->gc_mark_valid = 0;
	c->gc_done = ZERO_KEY;

	for_each_cache(ca, c, i)
		for_each_bucket(b, ca) {
			b->last_gc = b->gen;
			if (!atomic_read(&b->pin)) {
				SET_GC_MARK(b, 0);
				SET_GC_SECTORS_USED(b, 0);
			}
		}

	mutex_unlock(&c->bucket_lock);
}

static size_t bch_btree_gc_finish(struct cache_set *c)
{
	size_t available = 0;
	struct bucket *b;
	struct cache *ca;
	unsigned i;

	mutex_lock(&c->bucket_lock);

	set_gc_sectors(c);
	c->gc_mark_valid = 1;
	c->need_gc	= 0;

	for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++)
		SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
			    GC_MARK_METADATA);

	/* don't reclaim buckets to which writeback keys point */
	rcu_read_lock();
	for (i = 0; i < c->nr_uuids; i++) {
		struct bcache_device *d = c->devices[i];
		struct cached_dev *dc;
		struct keybuf_key *w, *n;
		unsigned j;

		if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
			continue;
		dc = container_of(d, struct cached_dev, disk);

		spin_lock(&dc->writeback_keys.lock);
		rbtree_postorder_for_each_entry_safe(w, n,
					&dc->writeback_keys.keys, node)
			for (j = 0; j < KEY_PTRS(&w->key); j++)
				SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
					    GC_MARK_DIRTY);
		spin_unlock(&dc->writeback_keys.lock);
	}
	rcu_read_unlock();

	for_each_cache(ca, c, i) {
		uint64_t *i;

		ca->invalidate_needs_gc = 0;

		for (i = ca->sb.d; i < ca->sb.d + ca->sb.keys; i++)
			SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);

		for (i = ca->prio_buckets;
		     i < ca->prio_buckets + prio_buckets(ca) * 2; i++)
			SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);

		for_each_bucket(b, ca) {
			c->need_gc	= max(c->need_gc, bucket_gc_gen(b));

			if (atomic_read(&b->pin))
				continue;

			BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));

			if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
				available++;
		}
	}

	mutex_unlock(&c->bucket_lock);
	return available;
}

static void bch_btree_gc(struct cache_set *c)
{
	int ret;
	unsigned long available;
	struct gc_stat stats;
	struct closure writes;
	struct btree_op op;
	uint64_t start_time = local_clock();

	trace_bcache_gc_start(c);

	memset(&stats, 0, sizeof(struct gc_stat));
	closure_init_stack(&writes);
	bch_btree_op_init(&op, SHRT_MAX);

	btree_gc_start(c);

	do {
		ret = btree_root(gc_root, c, &op, &writes, &stats);
		closure_sync(&writes);
		cond_resched();

		if (ret && ret != -EAGAIN)
			pr_warn("gc failed!\n");
	} while (ret);

	available = bch_btree_gc_finish(c);
	wake_up_allocators(c);

	bch_time_stats_update(&c->btree_gc_time, start_time);

	stats.key_bytes *= sizeof(uint64_t);
	stats.data	<<= 9;
	stats.in_use	= (c->nbuckets - available) * 100 / c->nbuckets;
	memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));

	trace_bcache_gc_end(c);

	bch_moving_gc(c);
}

static bool gc_should_run(struct cache_set *c)
{
	struct cache *ca;
	unsigned i;

	for_each_cache(ca, c, i)
		if (ca->invalidate_needs_gc)
			return true;

	if (atomic_read(&c->sectors_to_gc) < 0)
		return true;

	return false;
}

static int bch_gc_thread(void *arg)
{
	struct cache_set *c = arg;

	while (1) {
		wait_event_interruptible(c->gc_wait,
			   kthread_should_stop() || gc_should_run(c));

		if (kthread_should_stop())
			break;

		set_gc_sectors(c);
		bch_btree_gc(c);
	}

	return 0;
}

int bch_gc_thread_start(struct cache_set *c)
{
	c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
	if (IS_ERR(c->gc_thread))
		return PTR_ERR(c->gc_thread);

	return 0;
}

/* Initial partial gc */

static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
{
	int ret = 0;
	struct bkey *k, *p = NULL;
	struct btree_iter iter;

	for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
		bch_initial_mark_key(b->c, b->level, k);

	bch_initial_mark_key(b->c, b->level + 1, &b->key);

	if (b->level) {
		bch_btree_iter_init(&b->keys, &iter, NULL);

		do {
			k = bch_btree_iter_next_filter(&iter, &b->keys,
						       bch_ptr_bad);
			if (k)
				btree_node_prefetch(b, k);

			if (p)
				ret = btree(check_recurse, p, b, op);

			p = k;
		} while (p && !ret);
	}

	return ret;
}

int bch_btree_check(struct cache_set *c)
{
	struct btree_op op;

	bch_btree_op_init(&op, SHRT_MAX);

	return btree_root(check_recurse, c, &op);
}

void bch_initial_gc_finish(struct cache_set *c)
{
	struct cache *ca;
	struct bucket *b;
	unsigned i;

	bch_btree_gc_finish(c);

	mutex_lock(&c->bucket_lock);

	/*
	 * We need to put some unused buckets directly on the prio freelist in
	 * order to get the allocator thread started - it needs freed buckets in
	 * order to rewrite the prios and gens, and it needs to rewrite prios
	 * and gens in order to free buckets.
	 *
	 * This is only safe for buckets that have no live data in them, which
	 * there should always be some of.
	 */
	for_each_cache(ca, c, i) {
		for_each_bucket(b, ca) {
			if (fifo_full(&ca->free[RESERVE_PRIO]))
				break;

			if (bch_can_invalidate_bucket(ca, b) &&
			    !GC_MARK(b)) {
				__bch_invalidate_one_bucket(ca, b);
				fifo_push(&ca->free[RESERVE_PRIO],
					  b - ca->buckets);
			}
		}
	}

	mutex_unlock(&c->bucket_lock);
}

/* Btree insertion */

static bool btree_insert_key(struct btree *b, struct bkey *k,
			     struct bkey *replace_key)
{
	unsigned status;

	BUG_ON(bkey_cmp(k, &b->key) > 0);

	status = bch_btree_insert_key(&b->keys, k, replace_key);
	if (status != BTREE_INSERT_STATUS_NO_INSERT) {
		bch_check_keys(&b->keys, "%u for %s", status,
			       replace_key ? "replace" : "insert");

		trace_bcache_btree_insert_key(b, k, replace_key != NULL,
					      status);
		return true;
	} else
		return false;
}

static size_t insert_u64s_remaining(struct btree *b)
{
	long ret = bch_btree_keys_u64s_remaining(&b->keys);

	/*
	 * Might land in the middle of an existing extent and have to split it
	 */
	if (b->keys.ops->is_extents)
		ret -= KEY_MAX_U64S;

	return max(ret, 0L);
}
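
/*
 * For example, an insert that lands in the middle of an existing extent
 * leaves up to two surviving fragments of that extent, i.e. one more key
 * than we started with - hence the extra KEY_MAX_U64S (the largest possible
 * key) of headroom reserved on extent btrees.
 */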
1913 
1914 static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
1915 				  struct keylist *insert_keys,
1916 				  struct bkey *replace_key)
1917 {
1918 	bool ret = false;
1919 	int oldsize = bch_count_data(&b->keys);
1920 
1921 	while (!bch_keylist_empty(insert_keys)) {
1922 		struct bkey *k = insert_keys->keys;
1923 
1924 		if (bkey_u64s(k) > insert_u64s_remaining(b))
1925 			break;
1926 
1927 		if (bkey_cmp(k, &b->key) <= 0) {
1928 			if (!b->level)
1929 				bkey_put(b->c, k);
1930 
1931 			ret |= btree_insert_key(b, k, replace_key);
1932 			bch_keylist_pop_front(insert_keys);
1933 		} else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
1934 			BKEY_PADDED(key) temp;
1935 			bkey_copy(&temp.key, insert_keys->keys);
1936 
1937 			bch_cut_back(&b->key, &temp.key);
1938 			bch_cut_front(&b->key, insert_keys->keys);
1939 
1940 			ret |= btree_insert_key(b, &temp.key, replace_key);
1941 			break;
1942 		} else {
1943 			break;
1944 		}
1945 	}
1946 
1947 	if (!ret)
1948 		op->insert_collision = true;
1949 
1950 	BUG_ON(!bch_keylist_empty(insert_keys) && b->level);
1951 
1952 	BUG_ON(bch_count_data(&b->keys) < oldsize);
1953 	return ret;
1954 }
1955 
1956 static int btree_split(struct btree *b, struct btree_op *op,
1957 		       struct keylist *insert_keys,
1958 		       struct bkey *replace_key)
1959 {
1960 	bool split;
1961 	struct btree *n1, *n2 = NULL, *n3 = NULL;
1962 	uint64_t start_time = local_clock();
1963 	struct closure cl;
1964 	struct keylist parent_keys;
1965 
1966 	closure_init_stack(&cl);
1967 	bch_keylist_init(&parent_keys);
1968 
1969 	if (btree_check_reserve(b, op)) {
1970 		if (!b->level)
1971 			return -EINTR;
1972 		else
1973 			WARN(1, "insufficient reserve for split\n");
1974 	}
1975 
1976 	n1 = btree_node_alloc_replacement(b, op);
1977 	if (IS_ERR(n1))
1978 		goto err;
1979 
1980 	split = set_blocks(btree_bset_first(n1),
1981 			   block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5;
1982 
1983 	if (split) {
1984 		unsigned keys = 0;
1985 
1986 		trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);
1987 
1988 		n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent);
1989 		if (IS_ERR(n2))
1990 			goto err_free1;
1991 
1992 		if (!b->parent) {
1993 			n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL);
1994 			if (IS_ERR(n3))
1995 				goto err_free2;
1996 		}
1997 
1998 		mutex_lock(&n1->write_lock);
1999 		mutex_lock(&n2->write_lock);
2000 
2001 		bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2002 
2003 		/*
2004 		 * Has to be a linear search because we don't have an auxiliary
2005 		 * search tree yet
2006 		 */
2007 
2008 		while (keys < (btree_bset_first(n1)->keys * 3) / 5)
2009 			keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1),
2010 							keys));
2011 
2012 		bkey_copy_key(&n1->key,
2013 			      bset_bkey_idx(btree_bset_first(n1), keys));
2014 		keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys));
2015 
2016 		btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys;
2017 		btree_bset_first(n1)->keys = keys;
2018 
2019 		memcpy(btree_bset_first(n2)->start,
2020 		       bset_bkey_last(btree_bset_first(n1)),
2021 		       btree_bset_first(n2)->keys * sizeof(uint64_t));
2022 
2023 		bkey_copy_key(&n2->key, &b->key);
2024 
2025 		bch_keylist_add(&parent_keys, &n2->key);
2026 		bch_btree_node_write(n2, &cl);
2027 		mutex_unlock(&n2->write_lock);
2028 		rw_unlock(true, n2);
2029 	} else {
2030 		trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys);
2031 
2032 		mutex_lock(&n1->write_lock);
2033 		bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2034 	}
2035 
2036 	bch_keylist_add(&parent_keys, &n1->key);
2037 	bch_btree_node_write(n1, &cl);
2038 	mutex_unlock(&n1->write_lock);
2039 
2040 	if (n3) {
2041 		/* Depth increases, make a new root */
2042 		mutex_lock(&n3->write_lock);
2043 		bkey_copy_key(&n3->key, &MAX_KEY);
2044 		bch_btree_insert_keys(n3, op, &parent_keys, NULL);
2045 		bch_btree_node_write(n3, &cl);
2046 		mutex_unlock(&n3->write_lock);
2047 
2048 		closure_sync(&cl);
2049 		bch_btree_set_root(n3);
2050 		rw_unlock(true, n3);
2051 	} else if (!b->parent) {
2052 		/* Root filled up but didn't need to be split */
2053 		closure_sync(&cl);
2054 		bch_btree_set_root(n1);
2055 	} else {
2056 		/* Split a non-root node */
2057 		closure_sync(&cl);
2058 		make_btree_freeing_key(b, parent_keys.top);
2059 		bch_keylist_push(&parent_keys);
2060 
2061 		bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL);
2062 		BUG_ON(!bch_keylist_empty(&parent_keys));
2063 	}
2064 
2065 	btree_node_free(b);
2066 	rw_unlock(true, n1);
2067 
2068 	bch_time_stats_update(&b->c->btree_split_time, start_time);
2069 
2070 	return 0;
2071 err_free2:
2072 	bkey_put(b->c, &n2->key);
2073 	btree_node_free(n2);
2074 	rw_unlock(true, n2);
2075 err_free1:
2076 	bkey_put(b->c, &n1->key);
2077 	btree_node_free(n1);
2078 	rw_unlock(true, n1);
2079 err:
2080 	WARN(1, "bcache: btree split failed (level %u)\n", b->level);
2081 
2082 	if (n3 == ERR_PTR(-EAGAIN) ||
2083 	    n2 == ERR_PTR(-EAGAIN) ||
2084 	    n1 == ERR_PTR(-EAGAIN))
2085 		return -EAGAIN;
2086 
2087 	return -ENOMEM;
2088 }
2089 
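/*
 * bch_btree_insert_node(): insert as many keys from @insert_keys into @b
 * as fit. Leaf inserts are marked dirty against @journal_ref; interior
 * nodes are written out synchronously. Returns 0 on success, -EAGAIN if
 * @b needs a split but we can't block (current->bio_list is set, i.e.
 * we're under generic_make_request()), or -EINTR if the caller should
 * retry the traversal with the bigger lock now recorded in op->lock.
 */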
2090 static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
2091 				 struct keylist *insert_keys,
2092 				 atomic_t *journal_ref,
2093 				 struct bkey *replace_key)
2094 {
2095 	struct closure cl;
2096 
2097 	BUG_ON(b->level && replace_key);
2098 
2099 	closure_init_stack(&cl);
2100 
2101 	mutex_lock(&b->write_lock);
2102 
2103 	if (write_block(b) != btree_bset_last(b) &&
2104 	    b->keys.last_set_unwritten)
2105 		bch_btree_init_next(b); /* just wrote a set */
2106 
2107 	if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) {
2108 		mutex_unlock(&b->write_lock);
2109 		goto split;
2110 	}
2111 
2112 	BUG_ON(write_block(b) != btree_bset_last(b));
2113 
2114 	if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
2115 		if (!b->level)
2116 			bch_btree_leaf_dirty(b, journal_ref);
2117 		else
2118 			bch_btree_node_write(b, &cl);
2119 	}
2120 
2121 	mutex_unlock(&b->write_lock);
2122 
2123 	/* wait for btree node write if necessary, after unlock */
2124 	closure_sync(&cl);
2125 
2126 	return 0;
2127 split:
2128 	if (current->bio_list) {
2129 		op->lock = b->c->root->level + 1;
2130 		return -EAGAIN;
2131 	} else if (op->lock <= b->c->root->level) {
2132 		op->lock = b->c->root->level + 1;
2133 		return -EINTR;
2134 	} else {
2135 		/* Invalidated all iterators */
2136 		int ret = btree_split(b, op, insert_keys, replace_key);
2137 
2138 		if (bch_keylist_empty(insert_keys))
2139 			return 0;
2140 		else if (!ret)
2141 			return -EINTR;
2142 		return ret;
2143 	}
2144 }
2145 
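/*
 * bch_btree_insert_check_key(): used on cache miss to insert a "check
 * key" covering @check_key's range. The pointer is filled with random
 * bytes and pointed at PTR_CHECK_DEV, so it never references real cached
 * data; a later insert that replaces it can then detect intervening
 * writes to the range. If the caller only held a read lock (op->lock ==
 * -1) we upgrade it, returning -EINTR if the node was touched while the
 * lock was dropped.
 */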
2146 int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
2147 			       struct bkey *check_key)
2148 {
2149 	int ret = -EINTR;
2150 	uint64_t btree_ptr = b->key.ptr[0];
2151 	unsigned long seq = b->seq;
2152 	struct keylist insert;
2153 	bool upgrade = op->lock == -1;
2154 
2155 	bch_keylist_init(&insert);
2156 
2157 	if (upgrade) {
2158 		rw_unlock(false, b);
2159 		rw_lock(true, b, b->level);
2160 
2161 		if (b->key.ptr[0] != btree_ptr ||
2162 		    b->seq != seq + 1) {
2163 			op->lock = b->level;
2164 			goto out;
2165 		}
2166 	}
2167 
2168 	SET_KEY_PTRS(check_key, 1);
2169 	get_random_bytes(&check_key->ptr[0], sizeof(uint64_t));
2170 
2171 	SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV);
2172 
2173 	bch_keylist_add(&insert, check_key);
2174 
2175 	ret = bch_btree_insert_node(b, op, &insert, NULL, NULL);
2176 
2177 	BUG_ON(!ret && !bch_keylist_empty(&insert));
2178 out:
2179 	if (upgrade)
2180 		downgrade_write(&b->lock);
2181 	return ret;
2182 }
2183 
2184 struct btree_insert_op {
2185 	struct btree_op	op;
2186 	struct keylist	*keys;
2187 	atomic_t	*journal_ref;
2188 	struct bkey	*replace_key;
2189 };
2190 
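/*
 * btree_insert_fn(): leaf node callback for bch_btree_insert(). Returns
 * MAP_DONE once this leaf has taken what it can (the caller's loop then
 * restarts the traversal at the first remaining key), or the error if
 * the insert failed with keys still left over.
 */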
2191 static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
2192 {
2193 	struct btree_insert_op *op = container_of(b_op,
2194 					struct btree_insert_op, op);
2195 
2196 	int ret = bch_btree_insert_node(b, &op->op, op->keys,
2197 					op->journal_ref, op->replace_key);
2198 	if (ret && !bch_keylist_empty(op->keys))
2199 		return ret;
2200 	else
2201 		return MAP_DONE;
2202 }
2203 
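/*
 * bch_btree_insert(): top level insert. Walks the relevant leaves via
 * bch_btree_map_leaf_nodes() until @keys is drained; on error whatever
 * is left on @keys is dropped. Returns -ESRCH when an insert didn't take
 * effect (a replace_key collision).
 *
 * A sketch of typical usage (roughly how journal replay feeds single
 * keys in, given a key k and journal ref):
 *
 *	struct keylist keys;
 *
 *	bch_keylist_init(&keys);
 *	bkey_copy(keys.top, k);
 *	bch_keylist_push(&keys);
 *	ret = bch_btree_insert(c, &keys, journal_ref, NULL);
 */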
2204 int bch_btree_insert(struct cache_set *c, struct keylist *keys,
2205 		     atomic_t *journal_ref, struct bkey *replace_key)
2206 {
2207 	struct btree_insert_op op;
2208 	int ret = 0;
2209 
2210 	BUG_ON(current->bio_list);
2211 	BUG_ON(bch_keylist_empty(keys));
2212 
2213 	bch_btree_op_init(&op.op, 0);
2214 	op.keys		= keys;
2215 	op.journal_ref	= journal_ref;
2216 	op.replace_key	= replace_key;
2217 
2218 	while (!ret && !bch_keylist_empty(keys)) {
2219 		op.op.lock = 0;
2220 		ret = bch_btree_map_leaf_nodes(&op.op, c,
2221 					       &START_KEY(keys->keys),
2222 					       btree_insert_fn);
2223 	}
2224 
2225 	if (ret) {
2226 		struct bkey *k;
2227 
2228 		pr_err("error %i", ret);
2229 
2230 		while ((k = bch_keylist_pop(keys)))
2231 			bkey_put(c, k);
2232 	} else if (op.op.insert_collision)
2233 		ret = -ESRCH;
2234 
2235 	return ret;
2236 }
2237 
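/*
 * bch_btree_set_root(): make @b the root. @b must already be written out
 * and its buckets pinned at BTREE_PRIO; the new root location is
 * journalled (and the journal write waited on) before we return.
 */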
2238 void bch_btree_set_root(struct btree *b)
2239 {
2240 	unsigned i;
2241 	struct closure cl;
2242 
2243 	closure_init_stack(&cl);
2244 
2245 	trace_bcache_btree_set_root(b);
2246 
2247 	BUG_ON(!b->written);
2248 
2249 	for (i = 0; i < KEY_PTRS(&b->key); i++)
2250 		BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO);
2251 
2252 	mutex_lock(&b->c->bucket_lock);
2253 	list_del_init(&b->list);
2254 	mutex_unlock(&b->c->bucket_lock);
2255 
2256 	b->c->root = b;
2257 
2258 	bch_journal_meta(b->c, &cl);
2259 	closure_sync(&cl);
2260 }
2261 
2262 /* Map across nodes or keys */
2263 
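/*
 * The map functions walk the btree from @from on, calling @fn on each
 * node or key; @fn returns MAP_CONTINUE to keep going or MAP_DONE to
 * stop. MAP_ALL_NODES visits interior nodes too (after their children),
 * and with MAP_END_KEY, bch_btree_map_keys() also passes @fn a zero-size
 * key at each leaf's end. refill_keybuf_fn() below is a typical map_keys
 * callback.
 */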
2264 static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
2265 				       struct bkey *from,
2266 				       btree_map_nodes_fn *fn, int flags)
2267 {
2268 	int ret = MAP_CONTINUE;
2269 
2270 	if (b->level) {
2271 		struct bkey *k;
2272 		struct btree_iter iter;
2273 
2274 		bch_btree_iter_init(&b->keys, &iter, from);
2275 
2276 		while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
2277 						       bch_ptr_bad))) {
2278 			ret = btree(map_nodes_recurse, k, b,
2279 				    op, from, fn, flags);
2280 			from = NULL;
2281 
2282 			if (ret != MAP_CONTINUE)
2283 				return ret;
2284 		}
2285 	}
2286 
2287 	if (!b->level || flags == MAP_ALL_NODES)
2288 		ret = fn(op, b);
2289 
2290 	return ret;
2291 }
2292 
2293 int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
2294 			  struct bkey *from, btree_map_nodes_fn *fn, int flags)
2295 {
2296 	return btree_root(map_nodes_recurse, c, op, from, fn, flags);
2297 }
2298 
2299 static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
2300 				      struct bkey *from, btree_map_keys_fn *fn,
2301 				      int flags)
2302 {
2303 	int ret = MAP_CONTINUE;
2304 	struct bkey *k;
2305 	struct btree_iter iter;
2306 
2307 	bch_btree_iter_init(&b->keys, &iter, from);
2308 
2309 	while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
2310 		ret = !b->level
2311 			? fn(op, b, k)
2312 			: btree(map_keys_recurse, k, b, op, from, fn, flags);
2313 		from = NULL;
2314 
2315 		if (ret != MAP_CONTINUE)
2316 			return ret;
2317 	}
2318 
2319 	if (!b->level && (flags & MAP_END_KEY))
2320 		ret = fn(op, b, &KEY(KEY_INODE(&b->key),
2321 				     KEY_OFFSET(&b->key), 0));
2322 
2323 	return ret;
2324 }
2325 
2326 int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
2327 		       struct bkey *from, btree_map_keys_fn *fn, int flags)
2328 {
2329 	return btree_root(map_keys_recurse, c, op, from, fn, flags);
2330 }
2331 
2332 /* Keybuf code */
2333 
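/*
 * A keybuf holds keys matching a predicate, sorted in an rbtree and
 * refilled from the btree on demand; writeback and moving gc use keybufs
 * to scan for work. ->private flags a key as claimed by a consumer, and
 * the comparison functions ensure two overlapping keys are never both in
 * the buffer at once.
 */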
2334 static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
2335 {
2336 	/* Overlapping keys compare equal */
2337 	if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0)
2338 		return -1;
2339 	if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0)
2340 		return 1;
2341 	return 0;
2342 }
2343 
2344 static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
2345 					    struct keybuf_key *r)
2346 {
2347 	return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
2348 }
2349 
2350 struct refill {
2351 	struct btree_op	op;
2352 	unsigned	nr_found;
2353 	struct keybuf	*buf;
2354 	struct bkey	*end;
2355 	keybuf_pred_fn	*pred;
2356 };
2357 
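/*
 * refill_keybuf_fn(): map_keys callback for bch_refill_keybuf(). Copies
 * keys matching the predicate into the keybuf until we reach the end of
 * the range or run out of freelist entries; keys overlapping something
 * already buffered are skipped (RB_INSERT fails via keybuf_cmp()).
 */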
2358 static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
2359 			    struct bkey *k)
2360 {
2361 	struct refill *refill = container_of(op, struct refill, op);
2362 	struct keybuf *buf = refill->buf;
2363 	int ret = MAP_CONTINUE;
2364 
2365 	if (bkey_cmp(k, refill->end) >= 0) {
2366 		ret = MAP_DONE;
2367 		goto out;
2368 	}
2369 
2370 	if (!KEY_SIZE(k)) /* end key */
2371 		goto out;
2372 
2373 	if (refill->pred(buf, k)) {
2374 		struct keybuf_key *w;
2375 
2376 		spin_lock(&buf->lock);
2377 
2378 		w = array_alloc(&buf->freelist);
2379 		if (!w) {
2380 			spin_unlock(&buf->lock);
2381 			return MAP_DONE;
2382 		}
2383 
2384 		w->private = NULL;
2385 		bkey_copy(&w->key, k);
2386 
2387 		if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
2388 			array_free(&buf->freelist, w);
2389 		else
2390 			refill->nr_found++;
2391 
2392 		if (array_freelist_empty(&buf->freelist))
2393 			ret = MAP_DONE;
2394 
2395 		spin_unlock(&buf->lock);
2396 	}
2397 out:
2398 	buf->last_scanned = *k;
2399 	return ret;
2400 }
2401 
2402 void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
2403 		       struct bkey *end, keybuf_pred_fn *pred)
2404 {
2405 	struct bkey start = buf->last_scanned;
2406 	struct refill refill;
2407 
2408 	cond_resched();
2409 
2410 	bch_btree_op_init(&refill.op, -1);
2411 	refill.nr_found	= 0;
2412 	refill.buf	= buf;
2413 	refill.end	= end;
2414 	refill.pred	= pred;
2415 
2416 	bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
2417 			   refill_keybuf_fn, MAP_END_KEY);
2418 
2419 	trace_bcache_keyscan(refill.nr_found,
2420 			     KEY_INODE(&start), KEY_OFFSET(&start),
2421 			     KEY_INODE(&buf->last_scanned),
2422 			     KEY_OFFSET(&buf->last_scanned));
2423 
2424 	spin_lock(&buf->lock);
2425 
2426 	if (!RB_EMPTY_ROOT(&buf->keys)) {
2427 		struct keybuf_key *w;
2428 		w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2429 		buf->start	= START_KEY(&w->key);
2430 
2431 		w = RB_LAST(&buf->keys, struct keybuf_key, node);
2432 		buf->end	= w->key;
2433 	} else {
2434 		buf->start	= MAX_KEY;
2435 		buf->end	= MAX_KEY;
2436 	}
2437 
2438 	spin_unlock(&buf->lock);
2439 }
2440 
2441 static void __bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2442 {
2443 	rb_erase(&w->node, &buf->keys);
2444 	array_free(&buf->freelist, w);
2445 }
2446 
2447 void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2448 {
2449 	spin_lock(&buf->lock);
2450 	__bch_keybuf_del(buf, w);
2451 	spin_unlock(&buf->lock);
2452 }
2453 
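/*
 * bch_keybuf_check_overlapping(): called before @start..@end is
 * overwritten. Drops any unclaimed buffered keys overlapping the range,
 * and returns true if a claimed key (->private set) overlaps it.
 */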
2454 bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
2455 				  struct bkey *end)
2456 {
2457 	bool ret = false;
2458 	struct keybuf_key *p, *w, s;
2459 	s.key = *start;
2460 
2461 	if (bkey_cmp(end, &buf->start) <= 0 ||
2462 	    bkey_cmp(start, &buf->end) >= 0)
2463 		return false;
2464 
2465 	spin_lock(&buf->lock);
2466 	w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp);
2467 
2468 	while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) {
2469 		p = w;
2470 		w = RB_NEXT(w, node);
2471 
2472 		if (p->private)
2473 			ret = true;
2474 		else
2475 			__bch_keybuf_del(buf, p);
2476 	}
2477 
2478 	spin_unlock(&buf->lock);
2479 	return ret;
2480 }
2481 
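/*
 * bch_keybuf_next(): claim the first unclaimed key in the buffer by
 * setting ->private, or return NULL if every key is already claimed.
 */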
2482 struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
2483 {
2484 	struct keybuf_key *w;
2485 	spin_lock(&buf->lock);
2486 
2487 	w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2488 
2489 	while (w && w->private)
2490 		w = RB_NEXT(w, node);
2491 
2492 	if (w)
2493 		w->private = ERR_PTR(-EINTR);
2494 
2495 	spin_unlock(&buf->lock);
2496 	return w;
2497 }
2498 
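/*
 * bch_keybuf_next_rescan(): like bch_keybuf_next(), but refill the
 * buffer from the btree (up to @end) whenever it runs dry; returns NULL
 * once the scan reaches @end with nothing left to hand out.
 */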
2499 struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
2500 					  struct keybuf *buf,
2501 					  struct bkey *end,
2502 					  keybuf_pred_fn *pred)
2503 {
2504 	struct keybuf_key *ret;
2505 
2506 	while (1) {
2507 		ret = bch_keybuf_next(buf);
2508 		if (ret)
2509 			break;
2510 
2511 		if (bkey_cmp(&buf->last_scanned, end) >= 0) {
2512 			pr_debug("scan finished");
2513 			break;
2514 		}
2515 
2516 		bch_refill_keybuf(c, buf, end, pred);
2517 	}
2518 
2519 	return ret;
2520 }
2521 
2522 void bch_keybuf_init(struct keybuf *buf)
2523 {
2524 	buf->last_scanned	= MAX_KEY;
2525 	buf->keys		= RB_ROOT;
2526 
2527 	spin_lock_init(&buf->lock);
2528 	array_allocator_init(&buf->freelist);
2529 }
2530