// SPDX-License-Identifier: GPL-2.0
/*
 * bcache journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"

#include <trace/events/bcache.h>

/*
 * Journal replay/recovery:
 *
 * This code is all driven from run_cache_set(); we first read the journal
 * entries, do some other stuff, then we mark all the keys in the journal
 * entries (same as garbage collection would), then we replay them - reinserting
 * them into the cache in precisely the same order as they appear in the
 * journal.
 *
 * We only journal keys that go in leaf nodes, which simplifies things quite a
 * bit.
 */

static void journal_read_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;

	closure_put(cl);
}

static int journal_read_bucket(struct cache *ca, struct list_head *list,
			       unsigned int bucket_index)
{
	struct journal_device *ja = &ca->journal;
	struct bio *bio = &ja->bio;

	struct journal_replay *i;
	struct jset *j, *data = ca->set->journal.w[0].data;
	struct closure cl;
	unsigned int len, left, offset = 0;
	int ret = 0;
	sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);

	closure_init_stack(&cl);

	pr_debug("reading %u", bucket_index);

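	/*
	 * Read the bucket in chunks of at most PAGE_SECTORS << JSET_BITS
	 * sectors (the size of the w[0] buffer borrowed above), parsing
	 * each jset found; "reread" restarts the read at the current
	 * offset when an entry turns out to extend past what is in memory.
	 */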
	while (offset < ca->sb.bucket_size) {
reread:		left = ca->sb.bucket_size - offset;
		len = min_t(unsigned int, left, PAGE_SECTORS << JSET_BITS);

		bio_reset(bio);
		bio->bi_iter.bi_sector	= bucket + offset;
		bio_set_dev(bio, ca->bdev);
		bio->bi_iter.bi_size	= len << 9;

		bio->bi_end_io	= journal_read_endio;
		bio->bi_private = &cl;
		bio_set_op_attrs(bio, REQ_OP_READ, 0);
		bch_bio_map(bio, data);

		closure_bio_submit(ca->set, bio, &cl);
		closure_sync(&cl);

		/*
		 * This function could be simpler now since we no longer write
		 * journal entries that overlap bucket boundaries; this means
		 * the start of a bucket will always have a valid journal entry
		 * if it has any journal entries at all.
		 */

		j = data;
		while (len) {
			struct list_head *where;
			size_t blocks, bytes = set_bytes(j);

			if (j->magic != jset_magic(&ca->sb)) {
				pr_debug("%u: bad magic", bucket_index);
				return ret;
			}

			if (bytes > left << 9 ||
			    bytes > PAGE_SIZE << JSET_BITS) {
				pr_info("%u: too big, %zu bytes, offset %u",
					bucket_index, bytes, offset);
				return ret;
			}

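			/*
			 * The entry extends past the chunk we've read so
			 * far: re-read from this offset so the whole entry
			 * is in the buffer before validating its checksum.
			 */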
			if (bytes > len << 9)
				goto reread;

			if (j->csum != csum_set(j)) {
				pr_info("%u: bad csum, %zu bytes, offset %u",
					bucket_index, bytes, offset);
				return ret;
			}

			blocks = set_blocks(j, block_bytes(ca->set));

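			/*
			 * Keep the replay list sorted by sequence number:
			 * first drop entries at the head older than this
			 * entry's last_seq (they would never be replayed),
			 * then scan backwards for the insertion point,
			 * skipping duplicates already read from another
			 * bucket or device.
			 */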
			while (!list_empty(list)) {
				i = list_first_entry(list,
					struct journal_replay, list);
				if (i->j.seq >= j->last_seq)
					break;
				list_del(&i->list);
				kfree(i);
			}

			list_for_each_entry_reverse(i, list, list) {
				if (j->seq == i->j.seq)
					goto next_set;

				if (j->seq < i->j.last_seq)
					goto next_set;

				if (j->seq > i->j.seq) {
					where = &i->list;
					goto add;
				}
			}

			where = list;
add:
			i = kmalloc(offsetof(struct journal_replay, j) +
				    bytes, GFP_KERNEL);
			if (!i)
				return -ENOMEM;
			memcpy(&i->j, j, bytes);
			list_add(&i->list, where);
			ret = 1;

			ja->seq[bucket_index] = j->seq;
next_set:
			offset	+= blocks * ca->sb.block_size;
			len	-= blocks * ca->sb.block_size;
			j = ((void *) j) + blocks * block_bytes(ca);
		}
	}

	return ret;
}

int bch_journal_read(struct cache_set *c, struct list_head *list)
{
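/*
 * read_bucket() reads one journal bucket, marks it as examined in the
 * bitmap (so it is never read twice), and evaluates to nonzero iff the
 * bucket contained journal entries.
 */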
#define read_bucket(b)							\
	({								\
		ret = journal_read_bucket(ca, list, b);			\
		__set_bit(b, bitmap);					\
		if (ret < 0)						\
			return ret;					\
		ret;							\
	})

	struct cache *ca;
	unsigned int iter;
	int ret = 0;

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;
		DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS);
		unsigned int i, l, r, m;
		uint64_t seq;

		bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
		pr_debug("%u journal buckets", ca->sb.njournal_buckets);

		/*
		 * Read journal buckets ordered by golden ratio hash to quickly
		 * find a sequence of buckets with valid journal entries
		 */
		for (i = 0; i < ca->sb.njournal_buckets; i++) {
			/*
			 * We must try index 0 first, for correctness: the
			 * journal buckets form a circular buffer which may
			 * have wrapped around.
			 */
			l = (i * 2654435769U) % ca->sb.njournal_buckets;

			if (test_bit(l, bitmap))
				break;

			if (read_bucket(l))
				goto bsearch;
		}

		/*
		 * If that fails, check all the buckets we haven't checked
		 * already
		 */
		pr_debug("falling back to linear search");

		for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets);
		     l < ca->sb.njournal_buckets;
		     l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets,
					    l + 1))
			if (read_bucket(l))
				goto bsearch;

		/* no journal entries on this device? */
		if (l == ca->sb.njournal_buckets)
			continue;
bsearch:
		BUG_ON(list_empty(list));

		/* Binary search */
		m = l;
		r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
		pr_debug("starting binary search, l %u r %u", l, r);

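		/*
		 * Bisect between l (known to hold entries) and r: after
		 * reading bucket m, the newest seq on the replay list only
		 * changes if m held entries newer than everything seen so
		 * far, i.e. the journal's write head is at or past m.
		 */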
		while (l + 1 < r) {
			seq = list_entry(list->prev, struct journal_replay,
					 list)->j.seq;

			m = (l + r) >> 1;
			read_bucket(m);

			if (seq != list_entry(list->prev, struct journal_replay,
					      list)->j.seq)
				l = m;
			else
				r = m;
		}

		/*
		 * Read buckets in reverse order until we stop finding more
		 * journal entries
		 */
		pr_debug("finishing up: m %u njournal_buckets %u",
			 m, ca->sb.njournal_buckets);
		l = m;

		while (1) {
			if (!l--)
				l = ca->sb.njournal_buckets - 1;

			if (l == m)
				break;

			if (test_bit(l, bitmap))
				continue;

			if (!read_bucket(l))
				break;
		}

		seq = 0;

		for (i = 0; i < ca->sb.njournal_buckets; i++)
			if (ja->seq[i] > seq) {
				seq = ja->seq[i];
				/*
				 * When journal_reclaim() goes to allocate for
				 * the first time, it'll use the bucket after
				 * ja->cur_idx
				 */
				ja->cur_idx = i;
				ja->last_idx = ja->discard_idx = (i + 1) %
					ca->sb.njournal_buckets;
			}
	}

	if (!list_empty(list))
		c->journal.seq = list_entry(list->prev,
					    struct journal_replay,
					    list)->j.seq;

	return ret;
#undef read_bucket
}

void bch_journal_mark(struct cache_set *c, struct list_head *list)
{
	atomic_t p = { 0 };
	struct bkey *k;
	struct journal_replay *i;
	struct journal *j = &c->journal;
	uint64_t last = j->seq;

	/*
	 * journal.pin should never fill up - we never write a journal
	 * entry when it would fill up. But if for some reason it does, we
	 * iterate over the list in reverse order so that we can just skip that
	 * refcount instead of bugging.
	 */

	list_for_each_entry_reverse(i, list, list) {
		BUG_ON(last < i->j.seq);
		i->pin = NULL;

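		/*
		 * Push a zeroed pin for every sequence number between this
		 * entry and the newer one processed last, so fifo slots in
		 * journal.pin stay in one-to-one correspondence with
		 * sequence numbers.
		 */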
		while (last-- != i->j.seq)
			if (fifo_free(&j->pin) > 1) {
				fifo_push_front(&j->pin, p);
				atomic_set(&fifo_front(&j->pin), 0);
			}

		if (fifo_free(&j->pin) > 1) {
			fifo_push_front(&j->pin, p);
			i->pin = &fifo_front(&j->pin);
			atomic_set(i->pin, 1);
		}

		for (k = i->j.start;
		     k < bset_bkey_last(&i->j);
		     k = bkey_next(k))
			if (!__bch_extent_invalid(c, k)) {
				unsigned int j;

				for (j = 0; j < KEY_PTRS(k); j++)
					if (ptr_available(c, k, j))
						atomic_inc(&PTR_BUCKET(c, k, j)->pin);

				bch_initial_mark_key(c, 0, k);
			}
	}
}

static bool is_discard_enabled(struct cache_set *s)
{
	struct cache *ca;
	unsigned int i;

	for_each_cache(ca, s, i)
		if (ca->discard)
			return true;

	return false;
}

int bch_journal_replay(struct cache_set *s, struct list_head *list)
{
	int ret = 0, keys = 0, entries = 0;
	struct bkey *k;
	struct journal_replay *i =
		list_entry(list->prev, struct journal_replay, list);

	uint64_t start = i->j.last_seq, end = i->j.seq, n = start;
	struct keylist keylist;

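	/*
	 * Sequence numbers must be contiguous from start to end; a gap is
	 * only tolerable at the front, where entries may legitimately have
	 * been discarded by do_journal_discard() before the crash.
	 */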
	list_for_each_entry(i, list, list) {
		BUG_ON(i->pin && atomic_read(i->pin) != 1);

		if (n != i->j.seq) {
			if (n == start && is_discard_enabled(s)) {
				pr_info("bcache: journal entries %llu-%llu may be discarded! (replaying %llu-%llu)",
					n, i->j.seq - 1, start, end);
			} else {
				pr_err("bcache: journal entries %llu-%llu missing! (replaying %llu-%llu)",
					n, i->j.seq - 1, start, end);
				ret = -EIO;
				goto err;
			}
		}

		for (k = i->j.start;
		     k < bset_bkey_last(&i->j);
		     k = bkey_next(k)) {
			trace_bcache_journal_replay_key(k);

			bch_keylist_init_single(&keylist, k);

			ret = bch_btree_insert(s, &keylist, i->pin, NULL);
			if (ret)
				goto err;

			BUG_ON(!bch_keylist_empty(&keylist));
			keys++;

			cond_resched();
		}

		if (i->pin)
			atomic_dec(i->pin);
		n = i->j.seq + 1;
		entries++;
	}

	pr_info("journal replay done, %i keys in %i entries, seq %llu",
		keys, entries, end);
err:
	while (!list_empty(list)) {
		i = list_first_entry(list, struct journal_replay, list);
		list_del(&i->list);
		kfree(i);
	}

	return ret;
}

/* Journalling */
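
/*
 * These comparisons order btree nodes by the fifo index of the journal
 * entry they hold open: btree_flush_write() first fills a fixed-size
 * max-heap with the best candidates, then re-sifts it into a min-heap so
 * the node pinning the oldest journal entry is popped and written first.
 */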
#define journal_max_cmp(l, r) \
	(fifo_idx(&(c)->journal.pin, btree_current_write(l)->journal) < \
	 fifo_idx(&(c)->journal.pin, btree_current_write(r)->journal))
#define journal_min_cmp(l, r) \
	(fifo_idx(&(c)->journal.pin, btree_current_write(l)->journal) > \
	 fifo_idx(&(c)->journal.pin, btree_current_write(r)->journal))

static void btree_flush_write(struct cache_set *c)
{
	/*
	 * Try to find the btree node that references the oldest journal
	 * entry; candidates are collected in the c->flush_btree heap:
	 */
	struct btree *b;
	int i;

	atomic_long_inc(&c->flush_write);

retry:
	spin_lock(&c->journal.lock);
	if (heap_empty(&c->flush_btree)) {
		for_each_cached_btree(b, c, i)
			if (btree_current_write(b)->journal) {
				if (!heap_full(&c->flush_btree))
					heap_add(&c->flush_btree, b,
						 journal_max_cmp);
				else if (journal_max_cmp(b,
					 heap_peek(&c->flush_btree))) {
					c->flush_btree.data[0] = b;
					heap_sift(&c->flush_btree, 0,
						  journal_max_cmp);
				}
			}

		for (i = c->flush_btree.used / 2 - 1; i >= 0; --i)
			heap_sift(&c->flush_btree, i, journal_min_cmp);
	}

	b = NULL;
	heap_pop(&c->flush_btree, b, journal_min_cmp);
	spin_unlock(&c->journal.lock);

	if (b) {
		mutex_lock(&b->write_lock);
		if (!btree_current_write(b)->journal) {
			mutex_unlock(&b->write_lock);
			/* We raced */
			atomic_long_inc(&c->retry_flush_write);
			goto retry;
		}

		__bch_btree_node_write(b, NULL);
		mutex_unlock(&b->write_lock);
	}
}

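/*
 * Oldest journal sequence number still pinned: the newest seq minus the
 * number of entries (pin refcounts) still outstanding, plus one.
 */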
#define last_seq(j)	((j)->seq - fifo_used(&(j)->pin) + 1)

static void journal_discard_endio(struct bio *bio)
{
	struct journal_device *ja =
		container_of(bio, struct journal_device, discard_bio);
	struct cache *ca = container_of(ja, struct cache, journal);

	atomic_set(&ja->discard_in_flight, DISCARD_DONE);

	closure_wake_up(&ca->set->journal.wait);
	closure_put(&ca->set->cl);
}

static void journal_discard_work(struct work_struct *work)
{
	struct journal_device *ja =
		container_of(work, struct journal_device, discard_work);

	submit_bio(&ja->discard_bio);
}

static void do_journal_discard(struct cache *ca)
{
	struct journal_device *ja = &ca->journal;
	struct bio *bio = &ja->discard_bio;

	if (!ca->discard) {
		ja->discard_idx = ja->last_idx;
		return;
	}

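	/*
	 * One discard is kept in flight at a time, walking discard_idx
	 * towards last_idx: READY submits the next bucket and moves to
	 * IN_FLIGHT; the endio handler sets DONE, which advances
	 * discard_idx and falls through to READY again.
	 */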
	switch (atomic_read(&ja->discard_in_flight)) {
	case DISCARD_IN_FLIGHT:
		return;

	case DISCARD_DONE:
		ja->discard_idx = (ja->discard_idx + 1) %
			ca->sb.njournal_buckets;

		atomic_set(&ja->discard_in_flight, DISCARD_READY);
		/* fallthrough */

	case DISCARD_READY:
		if (ja->discard_idx == ja->last_idx)
			return;

		atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);

		bio_init(bio, bio->bi_inline_vecs, 1);
		bio_set_op_attrs(bio, REQ_OP_DISCARD, 0);
		bio->bi_iter.bi_sector	= bucket_to_sector(ca->set,
						ca->sb.d[ja->discard_idx]);
		bio_set_dev(bio, ca->bdev);
		bio->bi_iter.bi_size	= bucket_bytes(ca);
		bio->bi_end_io		= journal_discard_endio;

		closure_get(&ca->set->cl);
		INIT_WORK(&ja->discard_work, journal_discard_work);
		queue_work(bch_journal_wq, &ja->discard_work);
	}
}

static void journal_reclaim(struct cache_set *c)
{
	struct bkey *k = &c->journal.key;
	struct cache *ca;
	uint64_t last_seq;
	unsigned int iter, n = 0;
	atomic_t p __maybe_unused;

	atomic_long_inc(&c->reclaim);

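	/*
	 * Pop fully-released pins off the front of the fifo; everything
	 * with seq < last_seq() is no longer needed, so the buckets
	 * holding it can be reused (and discarded) below.
	 */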
	while (!atomic_read(&fifo_front(&c->journal.pin)))
		fifo_pop(&c->journal.pin, p);

	last_seq = last_seq(&c->journal);

	/* Update last_idx */

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;

		while (ja->last_idx != ja->cur_idx &&
		       ja->seq[ja->last_idx] < last_seq)
			ja->last_idx = (ja->last_idx + 1) %
				ca->sb.njournal_buckets;
	}

	for_each_cache(ca, c, iter)
		do_journal_discard(ca);

	if (c->journal.blocks_free)
		goto out;

	/*
	 * Allocate:
	 * XXX: Sort by free journal space
	 */

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;
		unsigned int next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;

		/* No space available on this device */
		if (next == ja->discard_idx)
			continue;

		ja->cur_idx = next;
		k->ptr[n++] = MAKE_PTR(0,
				  bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
				  ca->sb.nr_this_dev);
	}

	if (n) {
		bkey_init(k);
		SET_KEY_PTRS(k, n);
		c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
	}
out:
	if (!journal_full(&c->journal))
		__closure_wake_up(&c->journal.wait);
}

void bch_journal_next(struct journal *j)
{
	atomic_t p = { 1 };

	j->cur = (j->cur == j->w)
		? &j->w[1]
		: &j->w[0];

	/*
	 * The fifo_push() needs to happen at the same time as j->seq is
	 * incremented for last_seq() to be calculated correctly
	 */
	BUG_ON(!fifo_push(&j->pin, p));
	atomic_set(&fifo_back(&j->pin), 1);

	j->cur->data->seq	= ++j->seq;
	j->cur->dirty		= false;
	j->cur->need_write	= false;
	j->cur->data->keys	= 0;

	if (fifo_full(&j->pin))
		pr_debug("journal_pin full (%zu)", fifo_used(&j->pin));
}

static void journal_write_endio(struct bio *bio)
{
	struct journal_write *w = bio->bi_private;

	cache_set_err_on(bio->bi_status, w->c, "journal io error");
	closure_put(&w->c->journal.io);
}

static void journal_write(struct closure *cl);

static void journal_write_done(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct journal_write *w = (j->cur == j->w)
		? &j->w[1]
		: &j->w[0];

	__closure_wake_up(&w->wait);
	continue_at_nobarrier(cl, journal_write, bch_journal_wq);
}

static void journal_write_unlock(struct closure *cl)
	__releases(&c->journal.lock)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);

	c->journal.io_in_flight = 0;
	spin_unlock(&c->journal.lock);
}

static void journal_write_unlocked(struct closure *cl)
	__releases(c->journal.lock)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);
	struct cache *ca;
	struct journal_write *w = c->journal.cur;
	struct bkey *k = &c->journal.key;
	unsigned int i, sectors = set_blocks(w->data, block_bytes(c)) *
		c->sb.block_size;

	struct bio *bio;
	struct bio_list list;

	bio_list_init(&list);

	if (!w->need_write) {
		closure_return_with_destructor(cl, journal_write_unlock);
		return;
	} else if (journal_full(&c->journal)) {
		journal_reclaim(c);
		spin_unlock(&c->journal.lock);

		btree_flush_write(c);
		continue_at(cl, journal_write, bch_journal_wq);
		return;
	}

	c->journal.blocks_free -= set_blocks(w->data, block_bytes(c));

	w->data->btree_level = c->root->level;

	bkey_copy(&w->data->btree_root, &c->root->key);
	bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);

	for_each_cache(ca, c, i)
		w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];

	w->data->magic		= jset_magic(&c->sb);
	w->data->version	= BCACHE_JSET_VERSION;
	w->data->last_seq	= last_seq(&c->journal);
	w->data->csum		= csum_set(w->data);

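	/*
	 * Queue one copy of the jset for each device holding a journal
	 * pointer; REQ_PREFLUSH|REQ_FUA ensures the entry (and the data it
	 * orders) is durable before journal_write_done() runs.
	 */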
	for (i = 0; i < KEY_PTRS(k); i++) {
		ca = PTR_CACHE(c, k, i);
		bio = &ca->journal.bio;

		atomic_long_add(sectors, &ca->meta_sectors_written);

		bio_reset(bio);
		bio->bi_iter.bi_sector	= PTR_OFFSET(k, i);
		bio_set_dev(bio, ca->bdev);
		bio->bi_iter.bi_size = sectors << 9;

		bio->bi_end_io	= journal_write_endio;
		bio->bi_private = w;
		bio_set_op_attrs(bio, REQ_OP_WRITE,
				 REQ_SYNC|REQ_META|REQ_PREFLUSH|REQ_FUA);
		bch_bio_map(bio, w->data);

		trace_bcache_journal_write(bio, w->data->keys);
		bio_list_add(&list, bio);

		SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + sectors);

		ca->journal.seq[ca->journal.cur_idx] = w->data->seq;
	}

	/* If KEY_PTRS(k) == 0, this jset would be written nowhere and lost */
	BUG_ON(i == 0);

	atomic_dec_bug(&fifo_back(&c->journal.pin));
	bch_journal_next(&c->journal);
	journal_reclaim(c);

	spin_unlock(&c->journal.lock);

	while ((bio = bio_list_pop(&list)))
		closure_bio_submit(c, bio, cl);

	continue_at(cl, journal_write_done, NULL);
}

static void journal_write(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);

	spin_lock(&c->journal.lock);
	journal_write_unlocked(cl);
}

static void journal_try_write(struct cache_set *c)
	__releases(c->journal.lock)
{
	struct closure *cl = &c->journal.io;
	struct journal_write *w = c->journal.cur;

	w->need_write = true;

	if (!c->journal.io_in_flight) {
		c->journal.io_in_flight = 1;
		closure_call(cl, journal_write_unlocked, NULL, &c->cl);
	} else {
		spin_unlock(&c->journal.lock);
	}
}

static struct journal_write *journal_wait_for_write(struct cache_set *c,
						    unsigned int nkeys)
	__acquires(&c->journal.lock)
{
	size_t sectors;
	struct closure cl;
	bool wait = false;

	closure_init_stack(&cl);

	spin_lock(&c->journal.lock);

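	/*
	 * Loop until the current journal write has room for nkeys more
	 * keys: if the entry is merely full, write it out; if the journal
	 * itself is out of buckets, reclaim and flush btree nodes to
	 * release pins, then retry with wait set.
	 */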
	while (1) {
		struct journal_write *w = c->journal.cur;

		sectors = __set_blocks(w->data, w->data->keys + nkeys,
				       block_bytes(c)) * c->sb.block_size;

		if (sectors <= min_t(size_t,
				     c->journal.blocks_free * c->sb.block_size,
				     PAGE_SECTORS << JSET_BITS))
			return w;

		if (wait)
			closure_wait(&c->journal.wait, &cl);

		if (!journal_full(&c->journal)) {
			if (wait)
				trace_bcache_journal_entry_full(c);

			/*
			 * XXX: If we were inserting so many keys that they
			 * won't fit in an _empty_ journal write, we'll
			 * deadlock. For now, handle this in
			 * bch_keylist_realloc() - but something to think about.
			 */
			BUG_ON(!w->data->keys);

			journal_try_write(c); /* unlocks */
		} else {
			if (wait)
				trace_bcache_journal_full(c);

			journal_reclaim(c);
			spin_unlock(&c->journal.lock);

			btree_flush_write(c);
		}

		closure_sync(&cl);
		spin_lock(&c->journal.lock);
		wait = true;
	}
}

static void journal_write_work(struct work_struct *work)
{
	struct cache_set *c = container_of(to_delayed_work(work),
					   struct cache_set,
					   journal.work);
	spin_lock(&c->journal.lock);
	if (c->journal.cur->dirty)
		journal_try_write(c);
	else
		spin_unlock(&c->journal.lock);
}

/*
 * Entry point to the journalling code - bio_insert() and btree_invalidate()
 * pass bch_journal() a list of keys to be journalled, and then
 * bch_journal() hands those same keys off to btree_insert_async()
 */
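/*
 * The returned pointer is a refcount on the journal entry the keys went
 * into; the btree insertion path drops it once the keys are persisted in
 * the btree, which lets journal_reclaim() reuse the entry's buckets.
 */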

atomic_t *bch_journal(struct cache_set *c,
		      struct keylist *keys,
		      struct closure *parent)
{
	struct journal_write *w;
	atomic_t *ret;

	if (!CACHE_SYNC(&c->sb))
		return NULL;

	w = journal_wait_for_write(c, bch_keylist_nkeys(keys));

	memcpy(bset_bkey_last(w->data), keys->keys, bch_keylist_bytes(keys));
	w->data->keys += bch_keylist_nkeys(keys);

	ret = &fifo_back(&c->journal.pin);
	atomic_inc(ret);

	if (parent) {
		closure_wait(&w->wait, parent);
		journal_try_write(c);
	} else if (!w->dirty) {
		w->dirty = true;
		schedule_delayed_work(&c->journal.work,
				      msecs_to_jiffies(c->journal_delay_ms));
		spin_unlock(&c->journal.lock);
	} else {
		spin_unlock(&c->journal.lock);
	}

	return ret;
}

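/*
 * Journal an empty keylist: this forces the current jset (which also
 * carries the btree root, uuid and prio bucket pointers) out to disk,
 * and the extra ref taken by bch_journal() is dropped immediately.
 */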
void bch_journal_meta(struct cache_set *c, struct closure *cl)
{
	struct keylist keys;
	atomic_t *ref;

	bch_keylist_init(&keys);

	ref = bch_journal(c, &keys, cl);
	if (ref)
		atomic_dec_bug(ref);
}

void bch_journal_free(struct cache_set *c)
{
	free_pages((unsigned long) c->journal.w[1].data, JSET_BITS);
	free_pages((unsigned long) c->journal.w[0].data, JSET_BITS);
	free_fifo(&c->journal.pin);
	free_heap(&c->flush_btree);
}

int bch_journal_alloc(struct cache_set *c)
{
	struct journal *j = &c->journal;

	spin_lock_init(&j->lock);
	INIT_DELAYED_WORK(&j->work, journal_write_work);

	c->journal_delay_ms = 100;

	j->w[0].c = c;
	j->w[1].c = c;

	if (!(init_heap(&c->flush_btree, 128, GFP_KERNEL)) ||
	    !(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
	    !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)) ||
	    !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)))
		return -ENOMEM;

	return 0;
}
881