/* xref: /openbmc/linux/drivers/md/bcache/journal.c (revision f677b30b487ca3763c3de3f1b4d8c976c2961cd1) */
/*
 * bcache journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"

#include <trace/events/bcache.h>

/*
 * Journal replay/recovery:
 *
 * This code is all driven from run_cache_set(); we first read the journal
 * entries, do some other stuff, then we mark all the keys in the journal
 * entries (same as garbage collection would), then we replay them - reinserting
 * them into the cache in precisely the same order as they appear in the
 * journal.
 *
 * We only journal keys that go in leaf nodes, which simplifies things quite a
 * bit.
 */

static void journal_read_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	closure_put(cl);
}

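/*
 * Read a single journal bucket from @ca and add any valid journal entries
 * found in it to @list, keeping the list sorted by sequence number.  The
 * bucket is read in chunks of up to PAGE_SECTORS * 8 sectors into the
 * journal's own write buffer, and each chunk is parsed as a series of jsets.
 *
 * Returns a negative error on allocation failure, 1 if at least one entry
 * was added to the list, and 0 otherwise.
 */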
static int journal_read_bucket(struct cache *ca, struct list_head *list,
			       unsigned bucket_index)
{
	struct journal_device *ja = &ca->journal;
	struct bio *bio = &ja->bio;

	struct journal_replay *i;
	struct jset *j, *data = ca->set->journal.w[0].data;
	struct closure cl;
	unsigned len, left, offset = 0;
	int ret = 0;
	sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);

	closure_init_stack(&cl);

	pr_debug("reading %llu", (uint64_t) bucket);

	while (offset < ca->sb.bucket_size) {
reread:		left = ca->sb.bucket_size - offset;
		len = min_t(unsigned, left, PAGE_SECTORS * 8);

		bio_reset(bio);
		bio->bi_sector	= bucket + offset;
		bio->bi_bdev	= ca->bdev;
		bio->bi_rw	= READ;
		bio->bi_size	= len << 9;

		bio->bi_end_io	= journal_read_endio;
		bio->bi_private = &cl;
		bch_bio_map(bio, data);

		closure_bio_submit(bio, &cl, ca);
		closure_sync(&cl);

		/*
		 * This function could be simpler now since we no longer write
		 * journal entries that overlap bucket boundaries; this means
		 * the start of a bucket will always have a valid journal entry
		 * if it has any journal entries at all.
		 */

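		/*
		 * Walk the jsets in the chunk we just read: give up on this
		 * bucket on a bad magic number, an entry that can't fit in
		 * the rest of the bucket, or a bad checksum; if an entry is
		 * valid so far but extends past what we've read, go back and
		 * reread from the current offset.
		 */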
		j = data;
		while (len) {
			struct list_head *where;
			size_t blocks, bytes = set_bytes(j);

			if (j->magic != jset_magic(&ca->sb))
				return ret;

			if (bytes > left << 9)
				return ret;

			if (bytes > len << 9)
				goto reread;

			if (j->csum != csum_set(j))
				return ret;

			blocks = set_blocks(j, ca->set);

			while (!list_empty(list)) {
				i = list_first_entry(list,
					struct journal_replay, list);
				if (i->j.seq >= j->last_seq)
					break;
				list_del(&i->list);
				kfree(i);
			}

			list_for_each_entry_reverse(i, list, list) {
				if (j->seq == i->j.seq)
					goto next_set;

				if (j->seq < i->j.last_seq)
					goto next_set;

				if (j->seq > i->j.seq) {
					where = &i->list;
					goto add;
				}
			}

			where = list;
add:
			i = kmalloc(offsetof(struct journal_replay, j) +
				    bytes, GFP_KERNEL);
			if (!i)
				return -ENOMEM;
			memcpy(&i->j, j, bytes);
			list_add(&i->list, where);
			ret = 1;

			ja->seq[bucket_index] = j->seq;
next_set:
			offset	+= blocks * ca->sb.block_size;
			len	-= blocks * ca->sb.block_size;
			j = ((void *) j) + blocks * block_bytes(ca);
		}
	}

	return ret;
}

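/*
 * Find all the journal entries on every cache device.  Buckets are first
 * probed in a pseudo-random order (multiplying by 2654435769, the golden
 * ratio hashing constant) until one containing journal entries is found,
 * falling back to a linear scan of the buckets not yet tried; a binary
 * search and a backwards walk then locate the rest of the run of buckets
 * holding live journal entries.
 */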
int bch_journal_read(struct cache_set *c, struct list_head *list)
{
#define read_bucket(b)							\
	({								\
		int ret = journal_read_bucket(ca, list, b);		\
		__set_bit(b, bitmap);					\
		if (ret < 0)						\
			return ret;					\
		ret;							\
	})

	struct cache *ca;
	unsigned iter;

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;
		unsigned long bitmap[SB_JOURNAL_BUCKETS / BITS_PER_LONG];
		unsigned i, l, r, m;
		uint64_t seq;

		bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
		pr_debug("%u journal buckets", ca->sb.njournal_buckets);

		/*
		 * Read journal buckets ordered by golden ratio hash to quickly
		 * find a sequence of buckets with valid journal entries
		 */
		for (i = 0; i < ca->sb.njournal_buckets; i++) {
			l = (i * 2654435769U) % ca->sb.njournal_buckets;

			if (test_bit(l, bitmap))
				break;

			if (read_bucket(l))
				goto bsearch;
		}

		/*
		 * If that fails, check all the buckets we haven't checked
		 * already
		 */
		pr_debug("falling back to linear search");

		for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets);
		     l < ca->sb.njournal_buckets;
		     l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets, l + 1))
			if (read_bucket(l))
				goto bsearch;

		if (list_empty(list))
			continue;
bsearch:
		/*
		 * Binary search between bucket l (known to contain journal
		 * entries) and bucket r (already read) for the end of the
		 * region of buckets holding newer journal entries; reading
		 * a bucket moves l forward iff it added a newer entry to
		 * the tail of the (sorted) list.
		 */
		m = r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
		pr_debug("starting binary search, l %u r %u", l, r);

		while (l + 1 < r) {
			seq = list_entry(list->prev, struct journal_replay,
					 list)->j.seq;

			m = (l + r) >> 1;
			read_bucket(m);

			if (seq != list_entry(list->prev, struct journal_replay,
					      list)->j.seq)
				l = m;
			else
				r = m;
		}

		/*
		 * Read buckets in reverse order until we stop finding more
		 * journal entries
		 */
		pr_debug("finishing up: m %u njournal_buckets %u",
			 m, ca->sb.njournal_buckets);
		l = m;

		while (1) {
			if (!l--)
				l = ca->sb.njournal_buckets - 1;

			if (l == m)
				break;

			if (test_bit(l, bitmap))
				continue;

			if (!read_bucket(l))
				break;
		}

		seq = 0;

		for (i = 0; i < ca->sb.njournal_buckets; i++)
			if (ja->seq[i] > seq) {
				seq = ja->seq[i];
				ja->cur_idx = ja->discard_idx =
					ja->last_idx = i;

			}
	}

	if (!list_empty(list))
		c->journal.seq = list_entry(list->prev,
					    struct journal_replay,
					    list)->j.seq;

	return 0;
#undef read_bucket
}

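/*
 * Mark the buckets pointed to by keys in the journal entries we're about to
 * replay, the same way garbage collection would, so that they aren't reused
 * before the keys have been reinserted into the btree.
 */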
void bch_journal_mark(struct cache_set *c, struct list_head *list)
{
	atomic_t p = { 0 };
	struct bkey *k;
	struct journal_replay *i;
	struct journal *j = &c->journal;
	uint64_t last = j->seq;

	/*
	 * journal.pin should never fill up - we never write a journal
	 * entry when it would fill up. But if for some reason it does, we
	 * iterate over the list in reverse order so that we can just skip that
	 * refcount instead of bugging.
	 */

	list_for_each_entry_reverse(i, list, list) {
		BUG_ON(last < i->j.seq);
		i->pin = NULL;

		while (last-- != i->j.seq)
			if (fifo_free(&j->pin) > 1) {
				fifo_push_front(&j->pin, p);
				atomic_set(&fifo_front(&j->pin), 0);
			}

		if (fifo_free(&j->pin) > 1) {
			fifo_push_front(&j->pin, p);
			i->pin = &fifo_front(&j->pin);
			atomic_set(i->pin, 1);
		}

		for (k = i->j.start;
		     k < end(&i->j);
		     k = bkey_next(k)) {
			unsigned j;

			for (j = 0; j < KEY_PTRS(k); j++) {
				struct bucket *g = PTR_BUCKET(c, k, j);
				atomic_inc(&g->pin);

				if (g->prio == BTREE_PRIO &&
				    !ptr_stale(c, k, j))
					g->prio = INITIAL_PRIO;
			}

			__bch_btree_mark_key(c, 0, k);
		}
	}
}

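/*
 * Reinsert the keys from every journal entry on @list into the btree, in the
 * order they appear in the journal, dropping each entry's journal pin once
 * its keys have been inserted.  The replay list is freed before returning,
 * whether or not replay succeeded.
 */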
int bch_journal_replay(struct cache_set *s, struct list_head *list)
{
	int ret = 0, keys = 0, entries = 0;
	struct bkey *k;
	struct journal_replay *i =
		list_entry(list->prev, struct journal_replay, list);

	uint64_t start = i->j.last_seq, end = i->j.seq, n = start;
	struct keylist keylist;

	bch_keylist_init(&keylist);

	list_for_each_entry(i, list, list) {
		BUG_ON(i->pin && atomic_read(i->pin) != 1);

		cache_set_err_on(n != i->j.seq, s,
"bcache: journal entries %llu-%llu missing! (replaying %llu-%llu)",
				 n, i->j.seq - 1, start, end);

		for (k = i->j.start;
		     k < end(&i->j);
		     k = bkey_next(k)) {
			trace_bcache_journal_replay_key(k);

			bkey_copy(keylist.top, k);
			bch_keylist_push(&keylist);

			ret = bch_btree_insert(s, &keylist, i->pin, NULL);
			if (ret)
				goto err;

			BUG_ON(!bch_keylist_empty(&keylist));
			keys++;

			cond_resched();
		}

		if (i->pin)
			atomic_dec(i->pin);
		n = i->j.seq + 1;
		entries++;
	}

	pr_info("journal replay done, %i keys in %i entries, seq %llu",
		keys, entries, end);
err:
	while (!list_empty(list)) {
		i = list_first_entry(list, struct journal_replay, list);
		list_del(&i->list);
		kfree(i);
	}

	return ret;
}

/* Journalling */

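/*
 * Write out the dirty btree node that references the oldest journal entry,
 * so that the journal entry it is pinning can be reclaimed.  Called when the
 * journal is full and we need to free up journal space.
 */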
static void btree_flush_write(struct cache_set *c)
{
	/*
	 * Try to find the btree node that references the oldest journal
	 * entry; best is our current candidate, which we lock and write
	 * out once the scan is done:
	 */
	struct btree *b, *best;
	unsigned i;
retry:
	best = NULL;

	for_each_cached_btree(b, c, i)
		if (btree_current_write(b)->journal) {
			if (!best)
				best = b;
			else if (journal_pin_cmp(c,
					btree_current_write(best)->journal,
					btree_current_write(b)->journal)) {
				best = b;
			}
		}

	b = best;
	if (b) {
		rw_lock(true, b, b->level);

		if (!btree_current_write(b)->journal) {
			rw_unlock(true, b);
			/* We raced */
			goto retry;
		}

		bch_btree_node_write(b, NULL);
		rw_unlock(true, b);
	}
}

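/*
 * last_seq() is the sequence number of the oldest journal entry that is
 * still pinned: j->seq is the newest entry, and the pin fifo holds one
 * refcount per journal entry that hasn't been reclaimed yet.
 */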
#define last_seq(j)	((j)->seq - fifo_used(&(j)->pin) + 1)

static void journal_discard_endio(struct bio *bio, int error)
{
	struct journal_device *ja =
		container_of(bio, struct journal_device, discard_bio);
	struct cache *ca = container_of(ja, struct cache, journal);

	atomic_set(&ja->discard_in_flight, DISCARD_DONE);

	closure_wake_up(&ca->set->journal.wait);
	closure_put(&ca->set->cl);
}

static void journal_discard_work(struct work_struct *work)
{
	struct journal_device *ja =
		container_of(work, struct journal_device, discard_work);

	submit_bio(0, &ja->discard_bio);
}

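/*
 * Issue a discard for the next journal bucket that's no longer needed, if
 * discards are enabled for the cache device.  A small state machine
 * (DISCARD_READY -> DISCARD_IN_FLIGHT -> DISCARD_DONE) ensures only one
 * discard per device is in flight at a time; journal_reclaim() keeps calling
 * back in until discard_idx catches up with last_idx.
 */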
static void do_journal_discard(struct cache *ca)
{
	struct journal_device *ja = &ca->journal;
	struct bio *bio = &ja->discard_bio;

	if (!ca->discard) {
		ja->discard_idx = ja->last_idx;
		return;
	}

	switch (atomic_read(&ja->discard_in_flight)) {
	case DISCARD_IN_FLIGHT:
		return;

	case DISCARD_DONE:
		ja->discard_idx = (ja->discard_idx + 1) %
			ca->sb.njournal_buckets;

		atomic_set(&ja->discard_in_flight, DISCARD_READY);
		/* fallthrough */

	case DISCARD_READY:
		if (ja->discard_idx == ja->last_idx)
			return;

		atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);

		bio_init(bio);
		bio->bi_sector		= bucket_to_sector(ca->set,
						ca->sb.d[ja->discard_idx]);
		bio->bi_bdev		= ca->bdev;
		bio->bi_rw		= REQ_WRITE|REQ_DISCARD;
		bio->bi_max_vecs	= 1;
		bio->bi_io_vec		= bio->bi_inline_vecs;
		bio->bi_size		= bucket_bytes(ca);
		bio->bi_end_io		= journal_discard_endio;

		closure_get(&ca->set->cl);
		INIT_WORK(&ja->discard_work, journal_discard_work);
		schedule_work(&ja->discard_work);
	}
}

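/*
 * Free up journal space: drop pins for journal entries nothing refers to any
 * more, advance each device's last_idx past buckets whose entries are all
 * older than the oldest pinned sequence number, kick off discards, and if
 * the current bucket is exhausted pick the next bucket on each device for
 * the journal key.  Wakes up anyone waiting for journal space when the
 * journal is no longer full.
 */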
static void journal_reclaim(struct cache_set *c)
{
	struct bkey *k = &c->journal.key;
	struct cache *ca;
	uint64_t last_seq;
	unsigned iter, n = 0;
	atomic_t p;

	while (!atomic_read(&fifo_front(&c->journal.pin)))
		fifo_pop(&c->journal.pin, p);

	last_seq = last_seq(&c->journal);

	/* Update last_idx */

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;

		while (ja->last_idx != ja->cur_idx &&
		       ja->seq[ja->last_idx] < last_seq)
			ja->last_idx = (ja->last_idx + 1) %
				ca->sb.njournal_buckets;
	}

	for_each_cache(ca, c, iter)
		do_journal_discard(ca);

	if (c->journal.blocks_free)
		goto out;

	/*
	 * Allocate:
	 * XXX: Sort by free journal space
	 */

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;
		unsigned next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;

		/* No space available on this device */
		if (next == ja->discard_idx)
			continue;

		ja->cur_idx = next;
		k->ptr[n++] = PTR(0,
				  bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
				  ca->sb.nr_this_dev);
	}

	bkey_init(k);
	SET_KEY_PTRS(k, n);

	if (n)
		c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
out:
	if (!journal_full(&c->journal))
		__closure_wake_up(&c->journal.wait);
}

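/*
 * Switch to the other of the two in-memory journal write buffers and open a
 * new journal entry with the next sequence number, pushing a fresh pin for
 * it onto the fifo.
 */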
void bch_journal_next(struct journal *j)
{
	atomic_t p = { 1 };

	j->cur = (j->cur == j->w)
		? &j->w[1]
		: &j->w[0];

	/*
	 * The fifo_push() needs to happen at the same time as j->seq is
	 * incremented for last_seq() to be calculated correctly
	 */
	BUG_ON(!fifo_push(&j->pin, p));
	atomic_set(&fifo_back(&j->pin), 1);

	j->cur->data->seq	= ++j->seq;
	j->cur->need_write	= false;
	j->cur->data->keys	= 0;

	if (fifo_full(&j->pin))
		pr_debug("journal_pin full (%zu)", fifo_used(&j->pin));
}

static void journal_write_endio(struct bio *bio, int error)
{
	struct journal_write *w = bio->bi_private;

	cache_set_err_on(error, w->c, "journal io error");
	closure_put(&w->c->journal.io);
}

static void journal_write(struct closure *);

static void journal_write_done(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct journal_write *w = (j->cur == j->w)
		? &j->w[1]
		: &j->w[0];

	__closure_wake_up(&w->wait);
	continue_at_nobarrier(cl, journal_write, system_wq);
}

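/*
 * Actually write out the current journal entry: fill in the jset header
 * (btree root, uuid bucket, prio bucket pointers, last_seq, checksum) and
 * submit one bio per pointer in the journal key, i.e. one per cache device
 * the entry is being written to.  Called with c->journal.lock held; the
 * lock is released before the IO is submitted.
 */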
static void journal_write_unlocked(struct closure *cl)
	__releases(c->journal.lock)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);
	struct cache *ca;
	struct journal_write *w = c->journal.cur;
	struct bkey *k = &c->journal.key;
	unsigned i, sectors = set_blocks(w->data, c) * c->sb.block_size;

	struct bio *bio;
	struct bio_list list;
	bio_list_init(&list);

	if (!w->need_write) {
		/*
		 * XXX: have to unlock closure before we unlock journal lock,
		 * else we race with bch_journal(). But this way we race
		 * against cache set unregister. Doh.
		 */
		set_closure_fn(cl, NULL, NULL);
		closure_sub(cl, CLOSURE_RUNNING + 1);
		spin_unlock(&c->journal.lock);
		return;
	} else if (journal_full(&c->journal)) {
		journal_reclaim(c);
		spin_unlock(&c->journal.lock);

		btree_flush_write(c);
		continue_at(cl, journal_write, system_wq);
	}

	c->journal.blocks_free -= set_blocks(w->data, c);

	w->data->btree_level = c->root->level;

	bkey_copy(&w->data->btree_root, &c->root->key);
	bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);

	for_each_cache(ca, c, i)
		w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];

	w->data->magic		= jset_magic(&c->sb);
	w->data->version	= BCACHE_JSET_VERSION;
	w->data->last_seq	= last_seq(&c->journal);
	w->data->csum		= csum_set(w->data);

	for (i = 0; i < KEY_PTRS(k); i++) {
		ca = PTR_CACHE(c, k, i);
		bio = &ca->journal.bio;

		atomic_long_add(sectors, &ca->meta_sectors_written);

		bio_reset(bio);
		bio->bi_sector	= PTR_OFFSET(k, i);
		bio->bi_bdev	= ca->bdev;
		bio->bi_rw	= REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA;
		bio->bi_size	= sectors << 9;

		bio->bi_end_io	= journal_write_endio;
		bio->bi_private = w;
		bch_bio_map(bio, w->data);

		trace_bcache_journal_write(bio);
		bio_list_add(&list, bio);

		SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + sectors);

		ca->journal.seq[ca->journal.cur_idx] = w->data->seq;
	}

	atomic_dec_bug(&fifo_back(&c->journal.pin));
	bch_journal_next(&c->journal);
	journal_reclaim(c);

	spin_unlock(&c->journal.lock);

	while ((bio = bio_list_pop(&list)))
		closure_bio_submit(bio, cl, c->cache[0]);

	continue_at(cl, journal_write_done, NULL);
}

static void journal_write(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);

	spin_lock(&c->journal.lock);
	journal_write_unlocked(cl);
}

static void journal_try_write(struct cache_set *c)
	__releases(c->journal.lock)
{
	struct closure *cl = &c->journal.io;
	struct journal_write *w = c->journal.cur;

	w->need_write = true;

	if (closure_trylock(cl, &c->cl))
		journal_write_unlocked(cl);
	else
		spin_unlock(&c->journal.lock);
}

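/*
 * Find a journal entry with room for @nkeys more keys, waiting and/or
 * writing out the current entry if necessary.  Returns with c->journal.lock
 * held and the current journal_write guaranteed to have enough space.
 */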
static struct journal_write *journal_wait_for_write(struct cache_set *c,
						    unsigned nkeys)
{
	size_t sectors;
	struct closure cl;

	closure_init_stack(&cl);

	spin_lock(&c->journal.lock);

	while (1) {
		struct journal_write *w = c->journal.cur;

		sectors = __set_blocks(w->data, w->data->keys + nkeys,
				       c) * c->sb.block_size;

		if (sectors <= min_t(size_t,
				     c->journal.blocks_free * c->sb.block_size,
				     PAGE_SECTORS << JSET_BITS))
			return w;

		/* XXX: tracepoint */
		if (!journal_full(&c->journal)) {
			trace_bcache_journal_entry_full(c);

			/*
			 * XXX: If we were inserting so many keys that they
			 * won't fit in an _empty_ journal write, we'll
			 * deadlock. For now, handle this in
			 * bch_keylist_realloc() - but something to think about.
			 */
			BUG_ON(!w->data->keys);

			closure_wait(&w->wait, &cl);
			journal_try_write(c); /* unlocks */
		} else {
			trace_bcache_journal_full(c);

			closure_wait(&c->journal.wait, &cl);
			journal_reclaim(c);
			spin_unlock(&c->journal.lock);

			btree_flush_write(c);
		}

		closure_sync(&cl);
		spin_lock(&c->journal.lock);
	}
}

static void journal_write_work(struct work_struct *work)
{
	struct cache_set *c = container_of(to_delayed_work(work),
					   struct cache_set,
					   journal.work);
	spin_lock(&c->journal.lock);
	journal_try_write(c);
}

/*
 * Entry point to the journalling code - callers pass bch_journal() a list
 * of keys to be journalled; the keys are copied into the current journal
 * entry and a journal write is started (or scheduled) as needed.
 */

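/*
 * The atomic_t returned is the journal pin for the entry the keys were
 * added to: it has already been incremented on the caller's behalf, and the
 * caller must drop that reference (see bch_journal_meta()) once the keys
 * have made it into the btree, so the journal entry can eventually be
 * reclaimed.
 */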
atomic_t *bch_journal(struct cache_set *c,
		      struct keylist *keys,
		      struct closure *parent)
{
	struct journal_write *w;
	atomic_t *ret;

	if (!CACHE_SYNC(&c->sb))
		return NULL;

	w = journal_wait_for_write(c, bch_keylist_nkeys(keys));

	memcpy(end(w->data), keys->keys, bch_keylist_bytes(keys));
	w->data->keys += bch_keylist_nkeys(keys);

	ret = &fifo_back(&c->journal.pin);
	atomic_inc(ret);

	if (parent) {
		closure_wait(&w->wait, parent);
		journal_try_write(c);
	} else if (!w->need_write) {
		schedule_delayed_work(&c->journal.work,
				      msecs_to_jiffies(c->journal_delay_ms));
		spin_unlock(&c->journal.lock);
	} else {
		spin_unlock(&c->journal.lock);
	}

	return ret;
}

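/*
 * Force a journal write without adding any keys; if @cl is non NULL it is
 * woken up once that journal write has completed.
 */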
void bch_journal_meta(struct cache_set *c, struct closure *cl)
{
	struct keylist keys;
	atomic_t *ref;

	bch_keylist_init(&keys);

	ref = bch_journal(c, &keys, cl);
	if (ref)
		atomic_dec_bug(ref);
}

void bch_journal_free(struct cache_set *c)
{
	free_pages((unsigned long) c->journal.w[1].data, JSET_BITS);
	free_pages((unsigned long) c->journal.w[0].data, JSET_BITS);
	free_fifo(&c->journal.pin);
}

int bch_journal_alloc(struct cache_set *c)
{
	struct journal *j = &c->journal;

	closure_init_unlocked(&j->io);
	spin_lock_init(&j->lock);
	INIT_DELAYED_WORK(&j->work, journal_write_work);

	c->journal_delay_ms = 100;

	j->w[0].c = c;
	j->w[1].c = c;

	if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
	    !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)) ||
	    !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)))
		return -ENOMEM;

	return 0;
}
799