xref: /openbmc/linux/drivers/md/bcache/journal.c (revision b34e08d5)
/*
 * bcache journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"

#include <trace/events/bcache.h>

/*
 * Journal replay/recovery:
 *
 * This code is all driven from run_cache_set(); we first read the journal
 * entries, do some other stuff, then we mark all the keys in the journal
 * entries (same as garbage collection would), then we replay them - reinserting
 * them into the cache in precisely the same order as they appear in the
 * journal.
 *
 * We only journal keys that go in leaf nodes, which simplifies things quite a
 * bit.
 */

static void journal_read_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;

	closure_put(cl);
}

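/*
 * Read the journal entries in one journal bucket, adding any that aren't
 * already on @list (which is kept sorted by sequence number).  Returns a
 * negative error on failure, 0 if the bucket held nothing new, and 1 if at
 * least one entry was added.
 */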
static int journal_read_bucket(struct cache *ca, struct list_head *list,
			       unsigned bucket_index)
{
	struct journal_device *ja = &ca->journal;
	struct bio *bio = &ja->bio;

	struct journal_replay *i;
	struct jset *j, *data = ca->set->journal.w[0].data;
	struct closure cl;
	unsigned len, left, offset = 0;
	int ret = 0;
	sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);

	closure_init_stack(&cl);

	pr_debug("reading %u", bucket_index);

	while (offset < ca->sb.bucket_size) {
reread:		left = ca->sb.bucket_size - offset;
		len = min_t(unsigned, left, PAGE_SECTORS << JSET_BITS);

		bio_reset(bio);
		bio->bi_iter.bi_sector	= bucket + offset;
		bio->bi_bdev	= ca->bdev;
		bio->bi_rw	= READ;
		bio->bi_iter.bi_size	= len << 9;

		bio->bi_end_io	= journal_read_endio;
		bio->bi_private = &cl;
		bch_bio_map(bio, data);

		closure_bio_submit(bio, &cl, ca);
		closure_sync(&cl);

		/*
		 * This function could be simpler now since we no longer write
		 * journal entries that overlap bucket boundaries; this means
		 * the start of a bucket will always have a valid journal entry
		 * if it has any journal entries at all.
		 */

		j = data;
		while (len) {
			struct list_head *where;
			size_t blocks, bytes = set_bytes(j);

			if (j->magic != jset_magic(&ca->sb)) {
				pr_debug("%u: bad magic", bucket_index);
				return ret;
			}

			if (bytes > left << 9 ||
			    bytes > PAGE_SIZE << JSET_BITS) {
				pr_info("%u: too big, %zu bytes, offset %u",
					bucket_index, bytes, offset);
				return ret;
			}

			if (bytes > len << 9)
				goto reread;

			if (j->csum != csum_set(j)) {
				pr_info("%u: bad csum, %zu bytes, offset %u",
					bucket_index, bytes, offset);
				return ret;
			}

			blocks = set_blocks(j, block_bytes(ca->set));

			while (!list_empty(list)) {
				i = list_first_entry(list,
					struct journal_replay, list);
				if (i->j.seq >= j->last_seq)
					break;
				list_del(&i->list);
				kfree(i);
			}

			list_for_each_entry_reverse(i, list, list) {
				if (j->seq == i->j.seq)
					goto next_set;

				if (j->seq < i->j.last_seq)
					goto next_set;

				if (j->seq > i->j.seq) {
					where = &i->list;
					goto add;
				}
			}

			where = list;
add:
			i = kmalloc(offsetof(struct journal_replay, j) +
				    bytes, GFP_KERNEL);
			if (!i)
				return -ENOMEM;
			memcpy(&i->j, j, bytes);
			list_add(&i->list, where);
			ret = 1;

			ja->seq[bucket_index] = j->seq;
next_set:
			offset	+= blocks * ca->sb.block_size;
			len	-= blocks * ca->sb.block_size;
			j = ((void *) j) + blocks * block_bytes(ca);
		}
	}

	return ret;
}

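/*
 * Find every journal entry on each cache device: probe buckets in golden
 * ratio hash order, fall back to a linear scan, then binary search for the
 * most recently written bucket and walk backwards from it until no more
 * entries turn up.
 */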
int bch_journal_read(struct cache_set *c, struct list_head *list)
{
#define read_bucket(b)							\
	({								\
		int ret = journal_read_bucket(ca, list, b);		\
		__set_bit(b, bitmap);					\
		if (ret < 0)						\
			return ret;					\
		ret;							\
	})

	struct cache *ca;
	unsigned iter;

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;
		unsigned long bitmap[SB_JOURNAL_BUCKETS / BITS_PER_LONG];
		unsigned i, l, r, m;
		uint64_t seq;

		bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
		pr_debug("%u journal buckets", ca->sb.njournal_buckets);

		/*
		 * Read journal buckets ordered by golden ratio hash to quickly
		 * find a sequence of buckets with valid journal entries
		 */
		for (i = 0; i < ca->sb.njournal_buckets; i++) {
			l = (i * 2654435769U) % ca->sb.njournal_buckets;

			if (test_bit(l, bitmap))
				break;

			if (read_bucket(l))
				goto bsearch;
		}

		/*
		 * If that fails, check all the buckets we haven't checked
		 * already
		 */
		pr_debug("falling back to linear search");

		for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets);
		     l < ca->sb.njournal_buckets;
		     l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets, l + 1))
			if (read_bucket(l))
				goto bsearch;

		if (list_empty(list))
			continue;
bsearch:
		/* Binary search */
		m = r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
		pr_debug("starting binary search, l %u r %u", l, r);

		while (l + 1 < r) {
			seq = list_entry(list->prev, struct journal_replay,
					 list)->j.seq;

			m = (l + r) >> 1;
			read_bucket(m);

			if (seq != list_entry(list->prev, struct journal_replay,
					      list)->j.seq)
				l = m;
			else
				r = m;
		}

		/*
		 * Read buckets in reverse order until we stop finding more
		 * journal entries
		 */
		pr_debug("finishing up: m %u njournal_buckets %u",
			 m, ca->sb.njournal_buckets);
		l = m;

		while (1) {
			if (!l--)
				l = ca->sb.njournal_buckets - 1;

			if (l == m)
				break;

			if (test_bit(l, bitmap))
				continue;

			if (!read_bucket(l))
				break;
		}

		seq = 0;

		for (i = 0; i < ca->sb.njournal_buckets; i++)
			if (ja->seq[i] > seq) {
				seq = ja->seq[i];
				/*
				 * When journal_reclaim() goes to allocate for
				 * the first time, it'll use the bucket after
				 * ja->cur_idx
				 */
				ja->cur_idx = i;
				ja->last_idx = ja->discard_idx = (i + 1) %
					ca->sb.njournal_buckets;
			}
	}

	if (!list_empty(list))
		c->journal.seq = list_entry(list->prev,
					    struct journal_replay,
					    list)->j.seq;

	return 0;
#undef read_bucket
}

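/*
 * Mark the keys in the journal entries we're about to replay, as garbage
 * collection would: pin the buckets they point into so they can't be reused
 * before replay finishes, and set up journal pins for the entries
 * themselves.
 */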
void bch_journal_mark(struct cache_set *c, struct list_head *list)
{
	atomic_t p = { 0 };
	struct bkey *k;
	struct journal_replay *i;
	struct journal *j = &c->journal;
	uint64_t last = j->seq;

	/*
	 * journal.pin should never fill up - we never write a journal
	 * entry when it would fill up. But if for some reason it does, we
	 * iterate over the list in reverse order so that we can just skip that
	 * refcount instead of bugging.
	 */

	list_for_each_entry_reverse(i, list, list) {
		BUG_ON(last < i->j.seq);
		i->pin = NULL;

		while (last-- != i->j.seq)
			if (fifo_free(&j->pin) > 1) {
				fifo_push_front(&j->pin, p);
				atomic_set(&fifo_front(&j->pin), 0);
			}

		if (fifo_free(&j->pin) > 1) {
			fifo_push_front(&j->pin, p);
			i->pin = &fifo_front(&j->pin);
			atomic_set(i->pin, 1);
		}

		for (k = i->j.start;
		     k < bset_bkey_last(&i->j);
		     k = bkey_next(k)) {
			unsigned j;

			for (j = 0; j < KEY_PTRS(k); j++)
				if (ptr_available(c, k, j))
					atomic_inc(&PTR_BUCKET(c, k, j)->pin);

			bch_initial_mark_key(c, 0, k);
		}
	}
}

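/*
 * Reinsert the keys from every journal entry on @list into the btree, in
 * order, dropping each entry's journal pin as its keys go in; the
 * journal_replay structures are freed before returning.
 */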
int bch_journal_replay(struct cache_set *s, struct list_head *list)
{
	int ret = 0, keys = 0, entries = 0;
	struct bkey *k;
	struct journal_replay *i =
		list_entry(list->prev, struct journal_replay, list);

	uint64_t start = i->j.last_seq, end = i->j.seq, n = start;
	struct keylist keylist;

	list_for_each_entry(i, list, list) {
		BUG_ON(i->pin && atomic_read(i->pin) != 1);

		cache_set_err_on(n != i->j.seq, s,
"bcache: journal entries %llu-%llu missing! (replaying %llu-%llu)",
				 n, i->j.seq - 1, start, end);

		for (k = i->j.start;
		     k < bset_bkey_last(&i->j);
		     k = bkey_next(k)) {
			trace_bcache_journal_replay_key(k);

			bch_keylist_init_single(&keylist, k);

			ret = bch_btree_insert(s, &keylist, i->pin, NULL);
			if (ret)
				goto err;

			BUG_ON(!bch_keylist_empty(&keylist));
			keys++;

			cond_resched();
		}

		if (i->pin)
			atomic_dec(i->pin);
		n = i->j.seq + 1;
		entries++;
	}

	pr_info("journal replay done, %i keys in %i entries, seq %llu",
		keys, entries, end);
err:
	while (!list_empty(list)) {
		i = list_first_entry(list, struct journal_replay, list);
		list_del(&i->list);
		kfree(i);
	}

	return ret;
}

/* Journalling */

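/*
 * Write out the dirty btree node holding the oldest journal pin, so the
 * journal entries it pins can be reclaimed.
 */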
static void btree_flush_write(struct cache_set *c)
{
	/*
	 * Try to find the btree node that references the oldest journal
	 * entry; best is our current candidate, and is locked if non-NULL:
	 */
	struct btree *b, *best;
	unsigned i;
retry:
	best = NULL;

	for_each_cached_btree(b, c, i)
		if (btree_current_write(b)->journal) {
			if (!best)
				best = b;
			else if (journal_pin_cmp(c,
					btree_current_write(best)->journal,
					btree_current_write(b)->journal)) {
				best = b;
			}
		}

	b = best;
	if (b) {
		mutex_lock(&b->write_lock);
		if (!btree_current_write(b)->journal) {
			mutex_unlock(&b->write_lock);
			/* We raced */
			goto retry;
		}

		__bch_btree_node_write(b, NULL);
		mutex_unlock(&b->write_lock);
	}
}

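/* Sequence number of the oldest journal entry still in the pin fifo */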
#define last_seq(j)	((j)->seq - fifo_used(&(j)->pin) + 1)

static void journal_discard_endio(struct bio *bio, int error)
{
	struct journal_device *ja =
		container_of(bio, struct journal_device, discard_bio);
	struct cache *ca = container_of(ja, struct cache, journal);

	atomic_set(&ja->discard_in_flight, DISCARD_DONE);

	closure_wake_up(&ca->set->journal.wait);
	closure_put(&ca->set->cl);
}

static void journal_discard_work(struct work_struct *work)
{
	struct journal_device *ja =
		container_of(work, struct journal_device, discard_work);

	submit_bio(0, &ja->discard_bio);
}

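/*
 * Issue discards, one bucket at a time, for journal buckets that have been
 * reclaimed (everything from discard_idx up to last_idx), driving the
 * DISCARD_READY -> DISCARD_IN_FLIGHT -> DISCARD_DONE state machine.
 */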
static void do_journal_discard(struct cache *ca)
{
	struct journal_device *ja = &ca->journal;
	struct bio *bio = &ja->discard_bio;

	if (!ca->discard) {
		ja->discard_idx = ja->last_idx;
		return;
	}

	switch (atomic_read(&ja->discard_in_flight)) {
	case DISCARD_IN_FLIGHT:
		return;

	case DISCARD_DONE:
		ja->discard_idx = (ja->discard_idx + 1) %
			ca->sb.njournal_buckets;

		atomic_set(&ja->discard_in_flight, DISCARD_READY);
		/* fallthrough */

	case DISCARD_READY:
		if (ja->discard_idx == ja->last_idx)
			return;

		atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);

		bio_init(bio);
		bio->bi_iter.bi_sector	= bucket_to_sector(ca->set,
						ca->sb.d[ja->discard_idx]);
		bio->bi_bdev		= ca->bdev;
		bio->bi_rw		= REQ_WRITE|REQ_DISCARD;
		bio->bi_max_vecs	= 1;
		bio->bi_io_vec		= bio->bi_inline_vecs;
		bio->bi_iter.bi_size	= bucket_bytes(ca);
		bio->bi_end_io		= journal_discard_endio;

		closure_get(&ca->set->cl);
		INIT_WORK(&ja->discard_work, journal_discard_work);
		schedule_work(&ja->discard_work);
	}
}

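/*
 * Free up journal space: pop pins whose refcount has hit zero, advance each
 * device's last_idx past entries that are no longer needed, kick off
 * discards, and allocate the next journal bucket once the current one is
 * used up.
 */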
static void journal_reclaim(struct cache_set *c)
{
	struct bkey *k = &c->journal.key;
	struct cache *ca;
	uint64_t last_seq;
	unsigned iter, n = 0;
	atomic_t p;

	while (!atomic_read(&fifo_front(&c->journal.pin)))
		fifo_pop(&c->journal.pin, p);

	last_seq = last_seq(&c->journal);

	/* Update last_idx */

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;

		while (ja->last_idx != ja->cur_idx &&
		       ja->seq[ja->last_idx] < last_seq)
			ja->last_idx = (ja->last_idx + 1) %
				ca->sb.njournal_buckets;
	}

	for_each_cache(ca, c, iter)
		do_journal_discard(ca);

	if (c->journal.blocks_free)
		goto out;

	/*
	 * Allocate:
	 * XXX: Sort by free journal space
	 */

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;
		unsigned next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;

		/* No space available on this device */
		if (next == ja->discard_idx)
			continue;

		ja->cur_idx = next;
		k->ptr[n++] = PTR(0,
				  bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
				  ca->sb.nr_this_dev);
	}

	bkey_init(k);
	SET_KEY_PTRS(k, n);

	if (n)
		c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
out:
	if (!journal_full(&c->journal))
		__closure_wake_up(&c->journal.wait);
}

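/*
 * Switch to the other in-memory journal write buffer, give it the next
 * sequence number, and push a fresh pin for it.
 */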
void bch_journal_next(struct journal *j)
{
	atomic_t p = { 1 };

	j->cur = (j->cur == j->w)
		? &j->w[1]
		: &j->w[0];

	/*
	 * The fifo_push() needs to happen at the same time as j->seq is
	 * incremented for last_seq() to be calculated correctly
	 */
	BUG_ON(!fifo_push(&j->pin, p));
	atomic_set(&fifo_back(&j->pin), 1);

	j->cur->data->seq	= ++j->seq;
	j->cur->dirty		= false;
	j->cur->need_write	= false;
	j->cur->data->keys	= 0;

	if (fifo_full(&j->pin))
		pr_debug("journal_pin full (%zu)", fifo_used(&j->pin));
}

static void journal_write_endio(struct bio *bio, int error)
{
	struct journal_write *w = bio->bi_private;

	cache_set_err_on(error, w->c, "journal io error");
	closure_put(&w->c->journal.io);
}

static void journal_write(struct closure *);

static void journal_write_done(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct journal_write *w = (j->cur == j->w)
		? &j->w[1]
		: &j->w[0];

	__closure_wake_up(&w->wait);
	continue_at_nobarrier(cl, journal_write, system_wq);
}

static void journal_write_unlock(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);

	c->journal.io_in_flight = 0;
	spin_unlock(&c->journal.lock);
}

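/*
 * Fill in the current journal entry's header (btree root, prio buckets,
 * last_seq, checksum) and submit it to every journal bucket pointed at by
 * c->journal.key.  Called with the journal lock held; the lock is dropped
 * before the IO is submitted.
 */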
static void journal_write_unlocked(struct closure *cl)
	__releases(c->journal.lock)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);
	struct cache *ca;
	struct journal_write *w = c->journal.cur;
	struct bkey *k = &c->journal.key;
	unsigned i, sectors = set_blocks(w->data, block_bytes(c)) *
		c->sb.block_size;

	struct bio *bio;
	struct bio_list list;

	bio_list_init(&list);

	if (!w->need_write) {
		closure_return_with_destructor(cl, journal_write_unlock);
	} else if (journal_full(&c->journal)) {
		journal_reclaim(c);
		spin_unlock(&c->journal.lock);

		btree_flush_write(c);
		continue_at(cl, journal_write, system_wq);
	}

	c->journal.blocks_free -= set_blocks(w->data, block_bytes(c));

	w->data->btree_level = c->root->level;

	bkey_copy(&w->data->btree_root, &c->root->key);
	bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);

	for_each_cache(ca, c, i)
		w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];

	w->data->magic		= jset_magic(&c->sb);
	w->data->version	= BCACHE_JSET_VERSION;
	w->data->last_seq	= last_seq(&c->journal);
	w->data->csum		= csum_set(w->data);

	for (i = 0; i < KEY_PTRS(k); i++) {
		ca = PTR_CACHE(c, k, i);
		bio = &ca->journal.bio;

		atomic_long_add(sectors, &ca->meta_sectors_written);

		bio_reset(bio);
		bio->bi_iter.bi_sector	= PTR_OFFSET(k, i);
		bio->bi_bdev	= ca->bdev;
		bio->bi_rw	= REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA;
		bio->bi_iter.bi_size = sectors << 9;

		bio->bi_end_io	= journal_write_endio;
		bio->bi_private = w;
		bch_bio_map(bio, w->data);

		trace_bcache_journal_write(bio);
		bio_list_add(&list, bio);

		SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + sectors);

		ca->journal.seq[ca->journal.cur_idx] = w->data->seq;
	}

	atomic_dec_bug(&fifo_back(&c->journal.pin));
	bch_journal_next(&c->journal);
	journal_reclaim(c);

	spin_unlock(&c->journal.lock);

	while ((bio = bio_list_pop(&list)))
		closure_bio_submit(bio, cl, c->cache[0]);

	continue_at(cl, journal_write_done, NULL);
}

static void journal_write(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);

	spin_lock(&c->journal.lock);
	journal_write_unlocked(cl);
}

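/*
 * Start a write of the current journal entry unless one is already in
 * flight.  Called with the journal lock held; drops it either way.
 */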
static void journal_try_write(struct cache_set *c)
	__releases(c->journal.lock)
{
	struct closure *cl = &c->journal.io;
	struct journal_write *w = c->journal.cur;

	w->need_write = true;

	if (!c->journal.io_in_flight) {
		c->journal.io_in_flight = 1;
		closure_call(cl, journal_write_unlocked, NULL, &c->cl);
	} else {
		spin_unlock(&c->journal.lock);
	}
}

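/*
 * Wait until the current journal entry has room for nkeys more keys,
 * writing it out and/or reclaiming journal space as needed.  Returns with
 * the journal lock held.
 */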
static struct journal_write *journal_wait_for_write(struct cache_set *c,
						    unsigned nkeys)
{
	size_t sectors;
	struct closure cl;
	bool wait = false;

	closure_init_stack(&cl);

	spin_lock(&c->journal.lock);

	while (1) {
		struct journal_write *w = c->journal.cur;

		sectors = __set_blocks(w->data, w->data->keys + nkeys,
				       block_bytes(c)) * c->sb.block_size;

		if (sectors <= min_t(size_t,
				     c->journal.blocks_free * c->sb.block_size,
				     PAGE_SECTORS << JSET_BITS))
			return w;

		if (wait)
			closure_wait(&c->journal.wait, &cl);

		if (!journal_full(&c->journal)) {
			if (wait)
				trace_bcache_journal_entry_full(c);

			/*
			 * XXX: If we were inserting so many keys that they
			 * won't fit in an _empty_ journal write, we'll
			 * deadlock. For now, handle this in
			 * bch_keylist_realloc() - but something to think about.
			 */
			BUG_ON(!w->data->keys);

			journal_try_write(c); /* unlocks */
		} else {
			if (wait)
				trace_bcache_journal_full(c);

			journal_reclaim(c);
			spin_unlock(&c->journal.lock);

			btree_flush_write(c);
		}

		closure_sync(&cl);
		spin_lock(&c->journal.lock);
		wait = true;
	}
}

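/*
 * Delayed work callback: write out the current journal entry if it's still
 * dirty when the journal_delay_ms timer fires.
 */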
static void journal_write_work(struct work_struct *work)
{
	struct cache_set *c = container_of(to_delayed_work(work),
					   struct cache_set,
					   journal.work);
	spin_lock(&c->journal.lock);
	if (c->journal.cur->dirty)
		journal_try_write(c);
	else
		spin_unlock(&c->journal.lock);
}

/*
 * Entry point to the journalling code - the data insert path
 * (bch_data_insert_keys()) passes bch_journal() a list of keys to be
 * journalled, and then hands those same keys off to bch_btree_insert()
 */

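/*
 * Returns a reference on the journal entry's pin, which the caller drops
 * once the keys have made it into the btree, or NULL if the cache set
 * isn't running in synchronous mode.
 */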
atomic_t *bch_journal(struct cache_set *c,
		      struct keylist *keys,
		      struct closure *parent)
{
	struct journal_write *w;
	atomic_t *ret;

	if (!CACHE_SYNC(&c->sb))
		return NULL;

	w = journal_wait_for_write(c, bch_keylist_nkeys(keys));

	memcpy(bset_bkey_last(w->data), keys->keys, bch_keylist_bytes(keys));
	w->data->keys += bch_keylist_nkeys(keys);

	ret = &fifo_back(&c->journal.pin);
	atomic_inc(ret);

	if (parent) {
		closure_wait(&w->wait, parent);
		journal_try_write(c);
	} else if (!w->dirty) {
		w->dirty = true;
		schedule_delayed_work(&c->journal.work,
				      msecs_to_jiffies(c->journal_delay_ms));
		spin_unlock(&c->journal.lock);
	} else {
		spin_unlock(&c->journal.lock);
	}

	return ret;
}

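/*
 * Journal an empty keylist, which (when @cl is non-NULL) forces the current
 * journal entry - and with it the latest btree root - to be written out.
 */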
void bch_journal_meta(struct cache_set *c, struct closure *cl)
{
	struct keylist keys;
	atomic_t *ref;

	bch_keylist_init(&keys);

	ref = bch_journal(c, &keys, cl);
	if (ref)
		atomic_dec_bug(ref);
}

void bch_journal_free(struct cache_set *c)
{
	free_pages((unsigned long) c->journal.w[1].data, JSET_BITS);
	free_pages((unsigned long) c->journal.w[0].data, JSET_BITS);
	free_fifo(&c->journal.pin);
}

int bch_journal_alloc(struct cache_set *c)
{
	struct journal *j = &c->journal;

	spin_lock_init(&j->lock);
	INIT_DELAYED_WORK(&j->work, journal_write_work);

	c->journal_delay_ms = 100;

	j->w[0].c = c;
	j->w[1].c = c;

	if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
	    !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)) ||
	    !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)))
		return -ENOMEM;

	return 0;
}