xref: /openbmc/linux/drivers/md/bcache/request.c (revision 089a49b6)
/*
 * Main bcache entry point - handle a read or a write request and decide what to
 * do with it; the make_request functions are called by the block layer.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"
#include "writeback.h"

#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/hash.h>
#include <linux/random.h>
#include "blk-cgroup.h"

#include <trace/events/bcache.h>

#define CUTOFF_CACHE_ADD	95
#define CUTOFF_CACHE_READA	90
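/*
 * Both cutoffs are thresholds on how full the cache is (gc_stats.in_use, in
 * percent): past CUTOFF_CACHE_ADD we stop adding data to the cache entirely,
 * and past CUTOFF_CACHE_READA we stop doing readahead into it. See
 * check_should_skip() and cached_dev_cache_miss() below.
 */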

struct kmem_cache *bch_search_cache;

static void check_should_skip(struct cached_dev *, struct search *);

/* Cgroup interface */

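/*
 * Per-cgroup cache policy: a cgroup can override the cache mode and the
 * verify flag, and gets its own hit/miss counters via the files below
 * (presumably surfaced by the cgroup core as "bcache.cache_mode" and
 * friends, depending on how the hierarchy is mounted).
 */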
#ifdef CONFIG_CGROUP_BCACHE
static struct bch_cgroup bcache_default_cgroup = { .cache_mode = -1 };

static struct bch_cgroup *cgroup_to_bcache(struct cgroup *cgroup)
{
	struct cgroup_subsys_state *css;
	return cgroup &&
		(css = cgroup_subsys_state(cgroup, bcache_subsys_id))
		? container_of(css, struct bch_cgroup, css)
		: &bcache_default_cgroup;
}

struct bch_cgroup *bch_bio_to_cgroup(struct bio *bio)
{
	struct cgroup_subsys_state *css = bio->bi_css
		? cgroup_subsys_state(bio->bi_css->cgroup, bcache_subsys_id)
		: task_subsys_state(current, bcache_subsys_id);

	return css
		? container_of(css, struct bch_cgroup, css)
		: &bcache_default_cgroup;
}

static ssize_t cache_mode_read(struct cgroup *cgrp, struct cftype *cft,
			struct file *file,
			char __user *buf, size_t nbytes, loff_t *ppos)
{
	char tmp[1024];
	int len = bch_snprint_string_list(tmp, sizeof(tmp), bch_cache_modes,
					  cgroup_to_bcache(cgrp)->cache_mode + 1);

	if (len < 0)
		return len;

	return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
}

static int cache_mode_write(struct cgroup *cgrp, struct cftype *cft,
			    const char *buf)
{
	int v = bch_read_string_list(buf, bch_cache_modes);
	if (v < 0)
		return v;

	cgroup_to_bcache(cgrp)->cache_mode = v - 1;
	return 0;
}

static u64 bch_verify_read(struct cgroup *cgrp, struct cftype *cft)
{
	return cgroup_to_bcache(cgrp)->verify;
}

static int bch_verify_write(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	cgroup_to_bcache(cgrp)->verify = val;
	return 0;
}

static u64 bch_cache_hits_read(struct cgroup *cgrp, struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
	return atomic_read(&bcachecg->stats.cache_hits);
}

static u64 bch_cache_misses_read(struct cgroup *cgrp, struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
	return atomic_read(&bcachecg->stats.cache_misses);
}

static u64 bch_cache_bypass_hits_read(struct cgroup *cgrp,
					 struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
	return atomic_read(&bcachecg->stats.cache_bypass_hits);
}

static u64 bch_cache_bypass_misses_read(struct cgroup *cgrp,
					   struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
	return atomic_read(&bcachecg->stats.cache_bypass_misses);
}

static struct cftype bch_files[] = {
	{
		.name		= "cache_mode",
		.read		= cache_mode_read,
		.write_string	= cache_mode_write,
	},
	{
		.name		= "verify",
		.read_u64	= bch_verify_read,
		.write_u64	= bch_verify_write,
	},
	{
		.name		= "cache_hits",
		.read_u64	= bch_cache_hits_read,
	},
	{
		.name		= "cache_misses",
		.read_u64	= bch_cache_misses_read,
	},
	{
		.name		= "cache_bypass_hits",
		.read_u64	= bch_cache_bypass_hits_read,
	},
	{
		.name		= "cache_bypass_misses",
		.read_u64	= bch_cache_bypass_misses_read,
	},
	{ }	/* terminate */
};

static void init_bch_cgroup(struct bch_cgroup *cg)
{
	cg->cache_mode = -1;
}

static struct cgroup_subsys_state *bcachecg_create(struct cgroup *cgroup)
{
	struct bch_cgroup *cg;

	cg = kzalloc(sizeof(*cg), GFP_KERNEL);
	if (!cg)
		return ERR_PTR(-ENOMEM);
	init_bch_cgroup(cg);
	return &cg->css;
}

static void bcachecg_destroy(struct cgroup *cgroup)
{
	struct bch_cgroup *cg = cgroup_to_bcache(cgroup);
	free_css_id(&bcache_subsys, &cg->css);
	kfree(cg);
}

struct cgroup_subsys bcache_subsys = {
	.create		= bcachecg_create,
	.destroy	= bcachecg_destroy,
	.subsys_id	= bcache_subsys_id,
	.name		= "bcache",
	.module		= THIS_MODULE,
};
EXPORT_SYMBOL_GPL(bcache_subsys);
#endif

static unsigned cache_mode(struct cached_dev *dc, struct bio *bio)
{
#ifdef CONFIG_CGROUP_BCACHE
	int r = bch_bio_to_cgroup(bio)->cache_mode;
	if (r >= 0)
		return r;
#endif
	return BDEV_CACHE_MODE(&dc->sb);
}

static bool verify(struct cached_dev *dc, struct bio *bio)
{
#ifdef CONFIG_CGROUP_BCACHE
	if (bch_bio_to_cgroup(bio)->verify)
		return true;
#endif
	return dc->verify;
}

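/*
 * Checksum the bio's data: a 64-bit CRC over every segment, stored in the
 * u64 slot just past the key's last pointer. The top bit is masked off,
 * presumably because the on-disk key format reserves it.
 */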
static void bio_csum(struct bio *bio, struct bkey *k)
{
	struct bio_vec *bv;
	uint64_t csum = 0;
	int i;

	bio_for_each_segment(bv, bio, i) {
		void *d = kmap(bv->bv_page) + bv->bv_offset;
		csum = bch_crc64_update(csum, d, bv->bv_len);
		kunmap(bv->bv_page);
	}

	k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
}

/* Insert data into cache */

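/*
 * Invalidate the region the bio covers instead of caching it: emit keys with
 * no pointers, which invalidate whatever they overlap once inserted. Each key
 * covers at most 1 << 14 sectors, so a big bio may take several.
 */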
static void bio_invalidate(struct closure *cl)
{
	struct btree_op *op = container_of(cl, struct btree_op, cl);
	struct bio *bio = op->cache_bio;

	pr_debug("invalidating %i sectors from %llu",
		 bio_sectors(bio), (uint64_t) bio->bi_sector);

	while (bio_sectors(bio)) {
		unsigned len = min(bio_sectors(bio), 1U << 14);

		if (bch_keylist_realloc(&op->keys, 0, op->c))
			goto out;

		bio->bi_sector	+= len;
		bio->bi_size	-= len << 9;

		bch_keylist_add(&op->keys,
				&KEY(op->inode, bio->bi_sector, len));
	}

	op->insert_data_done = true;
	bio_put(bio);
out:
	continue_at(cl, bch_journal, bcache_wq);
}

struct open_bucket {
	struct list_head	list;
	struct task_struct	*last;
	unsigned		sectors_free;
	BKEY_PADDED(key);
};

void bch_open_buckets_free(struct cache_set *c)
{
	struct open_bucket *b;

	while (!list_empty(&c->data_buckets)) {
		b = list_first_entry(&c->data_buckets,
				     struct open_bucket, list);
		list_del(&b->list);
		kfree(b);
	}
}

int bch_open_buckets_alloc(struct cache_set *c)
{
	int i;

	spin_lock_init(&c->data_bucket_lock);

	for (i = 0; i < 6; i++) {
		struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);
		if (!b)
			return -ENOMEM;

		list_add(&b->list, &c->data_buckets);
	}

	return 0;
}

/*
 * We keep multiple buckets open for writes, and try to segregate different
 * write streams for better cache utilization: first we look for a bucket where
 * the last write to it was sequential with the current write, and failing that
 * we look for a bucket that was last used by the same task.
 *
 * The idea is that if you've got multiple tasks pulling data into the cache
 * at the same time, you'll get better cache utilization if you try to
 * segregate their data and preserve locality.
 *
 * For example, say you've started Firefox at the same time you're copying a
 * bunch of files. Firefox will likely end up being fairly hot and stay in the
 * cache a while, but the data you copied might not be; if you wrote all that
 * data to the same buckets it'd get invalidated at the same time.
 *
 * Both of those tasks will be doing fairly random IO so we can't rely on
 * detecting sequential IO to segregate their data, but going off of the task
 * should be a sane heuristic.
 */
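/*
 * Note that a bkey's offset is the end of the extent it points to, so a
 * bucket whose key compares equal to @search - the start of the current
 * write - was last written exactly where this write begins: the stream is
 * sequential.
 */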
static struct open_bucket *pick_data_bucket(struct cache_set *c,
					    const struct bkey *search,
					    struct task_struct *task,
					    struct bkey *alloc)
{
	struct open_bucket *ret, *ret_task = NULL;

	list_for_each_entry_reverse(ret, &c->data_buckets, list)
		if (!bkey_cmp(&ret->key, search))
			goto found;
		else if (ret->last == task)
			ret_task = ret;

	ret = ret_task ?: list_first_entry(&c->data_buckets,
					   struct open_bucket, list);
found:
	if (!ret->sectors_free && KEY_PTRS(alloc)) {
		ret->sectors_free = c->sb.bucket_size;
		bkey_copy(&ret->key, alloc);
		bkey_init(alloc);
	}

	if (!ret->sectors_free)
		ret = NULL;

	return ret;
}

/*
 * Allocates some space in the cache to write to, sets k to point to the newly
 * allocated space, and updates KEY_SIZE(k) and KEY_OFFSET(k) (to point to the
 * end of the newly allocated space).
 *
 * May allocate fewer sectors than @sectors, KEY_SIZE(k) indicates how many
 * sectors were actually allocated.
 *
 * If s->writeback is true, will not fail.
 */
static bool bch_alloc_sectors(struct bkey *k, unsigned sectors,
			      struct search *s)
{
	struct cache_set *c = s->op.c;
	struct open_bucket *b;
	BKEY_PADDED(key) alloc;
	struct closure cl, *w = NULL;
	unsigned i;

	if (s->writeback) {
		closure_init_stack(&cl);
		w = &cl;
	}

	/*
	 * We might have to allocate a new bucket, which we can't do with a
	 * spinlock held. So if we have to allocate, we drop the lock, allocate
	 * and then retry. KEY_PTRS() indicates whether alloc points to
	 * allocated bucket(s).
	 */

	bkey_init(&alloc.key);
	spin_lock(&c->data_bucket_lock);

	while (!(b = pick_data_bucket(c, k, s->task, &alloc.key))) {
		unsigned watermark = s->op.write_prio
			? WATERMARK_MOVINGGC
			: WATERMARK_NONE;

		spin_unlock(&c->data_bucket_lock);

		if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, w))
			return false;

		spin_lock(&c->data_bucket_lock);
	}

	/*
	 * If we had to allocate, we might race and not need to allocate the
	 * second time we call pick_data_bucket(). If we allocated a bucket but
	 * didn't use it, drop the refcount bch_bucket_alloc_set() took:
	 */
	if (KEY_PTRS(&alloc.key))
		__bkey_put(c, &alloc.key);

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		EBUG_ON(ptr_stale(c, &b->key, i));

	/* Set up the pointer to the space we're allocating: */

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		k->ptr[i] = b->key.ptr[i];

	sectors = min(sectors, b->sectors_free);

	SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors);
	SET_KEY_SIZE(k, sectors);
	SET_KEY_PTRS(k, KEY_PTRS(&b->key));

	/*
	 * Move b to the end of the lru, and keep track of what this bucket was
	 * last used for:
	 */
	list_move_tail(&b->list, &c->data_buckets);
	bkey_copy_key(&b->key, k);
	b->last = s->task;

	b->sectors_free	-= sectors;

	for (i = 0; i < KEY_PTRS(&b->key); i++) {
		SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);

		atomic_long_add(sectors,
				&PTR_CACHE(c, &b->key, i)->sectors_written);
	}

	if (b->sectors_free < c->sb.block_size)
		b->sectors_free = 0;

	/*
	 * k takes refcounts on the buckets it points to until it's inserted
	 * into the btree, but if we're done with this bucket we just transfer
	 * the refcount the allocation took.
	 */
	if (b->sectors_free)
		for (i = 0; i < KEY_PTRS(&b->key); i++)
			atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin);

	spin_unlock(&c->data_bucket_lock);
	return true;
}

static void bch_insert_data_error(struct closure *cl)
{
	struct btree_op *op = container_of(cl, struct btree_op, cl);

	/*
	 * Our data write just errored, which means we've got a bunch of keys to
	 * insert that point to data that wasn't successfully written.
	 *
	 * We don't have to insert those keys but we still have to invalidate
	 * that region of the cache - so, if we just strip off all the pointers
	 * from the keys we'll accomplish just that.
	 */

	struct bkey *src = op->keys.bottom, *dst = op->keys.bottom;

	while (src != op->keys.top) {
		struct bkey *n = bkey_next(src);

		SET_KEY_PTRS(src, 0);
		bkey_copy(dst, src);

		dst = bkey_next(dst);
		src = n;
	}

	op->keys.top = dst;

	bch_journal(cl);
}

static void bch_insert_data_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	struct btree_op *op = container_of(cl, struct btree_op, cl);
	struct search *s = container_of(op, struct search, op);

	if (error) {
		/* TODO: We could try to recover from this. */
		if (s->writeback)
			s->error = error;
		else if (s->write)
			set_closure_fn(cl, bch_insert_data_error, bcache_wq);
		else
			set_closure_fn(cl, NULL, NULL);
	}

	bch_bbio_endio(op->c, bio, error, "writing data to cache");
}

static void bch_insert_data_loop(struct closure *cl)
{
	struct btree_op *op = container_of(cl, struct btree_op, cl);
	struct search *s = container_of(op, struct search, op);
	struct bio *bio = op->cache_bio, *n;

	if (op->skip)
		return bio_invalidate(cl);

	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
		set_gc_sectors(op->c);
		bch_queue_gc(op->c);
	}

	/*
	 * Journal writes are marked REQ_FLUSH; if the original write was a
	 * flush, it'll wait on the journal write.
	 */
	bio->bi_rw &= ~(REQ_FLUSH|REQ_FUA);

	do {
		unsigned i;
		struct bkey *k;
		struct bio_set *split = s->d
			? s->d->bio_split : op->c->bio_split;

		/* 1 for the device pointer and 1 for the checksum */
		if (bch_keylist_realloc(&op->keys,
					1 + (op->csum ? 1 : 0),
					op->c))
			continue_at(cl, bch_journal, bcache_wq);

		k = op->keys.top;
		bkey_init(k);
		SET_KEY_INODE(k, op->inode);
		SET_KEY_OFFSET(k, bio->bi_sector);

		if (!bch_alloc_sectors(k, bio_sectors(bio), s))
			goto err;

		n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split);

		n->bi_end_io	= bch_insert_data_endio;
		n->bi_private	= cl;

		if (s->writeback) {
			SET_KEY_DIRTY(k, true);

			for (i = 0; i < KEY_PTRS(k); i++)
				SET_GC_MARK(PTR_BUCKET(op->c, k, i),
					    GC_MARK_DIRTY);
		}

		SET_KEY_CSUM(k, op->csum);
		if (KEY_CSUM(k))
			bio_csum(n, k);

		trace_bcache_cache_insert(k);
		bch_keylist_push(&op->keys);

		n->bi_rw |= REQ_WRITE;
		bch_submit_bbio(n, op->c, k, 0);
	} while (n != bio);

	op->insert_data_done = true;
	continue_at(cl, bch_journal, bcache_wq);
err:
	/* bch_alloc_sectors() blocks if s->writeback = true */
	BUG_ON(s->writeback);

	/*
	 * But if it's not a writeback write we'd rather just bail out if
	 * there aren't any buckets ready to write to - it might take a while
	 * and we might be starving btree writes for gc or something.
	 */

	if (s->write) {
		/*
		 * Writethrough write: We can't complete the write until we've
		 * updated the index. But we don't want to delay the write while
		 * we wait for buckets to be freed up, so just invalidate the
		 * rest of the write.
		 */
		op->skip = true;
		return bio_invalidate(cl);
	} else {
		/*
		 * From a cache miss, we can just insert the keys for the data
		 * we have written or bail out if we didn't do anything.
		 */
		op->insert_data_done = true;
		bio_put(bio);

		if (!bch_keylist_empty(&op->keys))
			continue_at(cl, bch_journal, bcache_wq);
		else
			closure_return(cl);
	}
}

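/*
 * A rough sketch of the insert path's closure flow (bch_journal() lives
 * elsewhere; its hand-off back to bch_btree_insert_async() is assumed from
 * how it's used here):
 *
 *   bch_insert_data()
 *     -> bch_insert_data_loop()   writes the data, builds op->keys
 *     -> bch_journal()            adds the keys to the next journal write
 *     -> bch_btree_insert_async() inserts the keys into the btree, looping
 *                                 back to bch_insert_data_loop() until
 *                                 op->insert_data_done
 */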
/**
 * bch_insert_data - stick some data in the cache
 *
 * This is the starting point for any data to end up in a cache device; it could
 * be from a normal write, or a writeback write, or a write to a flash only
 * volume - it's also used by the moving garbage collector to compact data in
 * mostly empty buckets.
 *
 * It first writes the data to the cache, creating a list of keys to be inserted
 * (if the data had to be fragmented there will be multiple keys); after the
 * data is written it calls bch_journal, and after the keys have been added to
 * the next journal write they're inserted into the btree.
 *
 * It inserts the data in op->cache_bio; bi_sector is used for the key offset,
 * and op->inode is used for the key inode.
 *
 * If op->skip is true, instead of inserting the data it invalidates the region
 * of the cache represented by op->cache_bio and op->inode.
 */
void bch_insert_data(struct closure *cl)
{
	struct btree_op *op = container_of(cl, struct btree_op, cl);

	bch_keylist_init(&op->keys);
	bio_get(op->cache_bio);
	bch_insert_data_loop(cl);
}

void bch_btree_insert_async(struct closure *cl)
{
	struct btree_op *op = container_of(cl, struct btree_op, cl);
	struct search *s = container_of(op, struct search, op);

	if (bch_btree_insert(op, op->c)) {
		s->error		= -ENOMEM;
		op->insert_data_done	= true;
	}

	if (op->insert_data_done) {
		bch_keylist_free(&op->keys);
		closure_return(cl);
	} else
		continue_at(cl, bch_insert_data_loop, bcache_wq);
}

/* Common code for the make_request functions */

static void request_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;

	if (error) {
		struct search *s = container_of(cl, struct search, cl);
		s->error = error;
		/* Only cache read errors are recoverable */
		s->recoverable = false;
	}

	bio_put(bio);
	closure_put(cl);
}

void bch_cache_read_endio(struct bio *bio, int error)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct closure *cl = bio->bi_private;
	struct search *s = container_of(cl, struct search, cl);

	/*
	 * If the bucket was reused while our bio was in flight, we might have
	 * read the wrong data. Set s->error but not error so it doesn't get
	 * counted against the cache device, but we'll still reread the data
	 * from the backing device.
	 */

	if (error)
		s->error = error;
	else if (ptr_stale(s->op.c, &b->key, 0)) {
		atomic_long_inc(&s->op.c->cache_read_races);
		s->error = -EINTR;
	}

	bch_bbio_endio(s->op.c, bio, error, "reading from cache");
}

static void bio_complete(struct search *s)
{
	if (s->orig_bio) {
		int cpu, rw = bio_data_dir(s->orig_bio);
		unsigned long duration = jiffies - s->start_time;

		cpu = part_stat_lock();
		part_round_stats(cpu, &s->d->disk->part0);
		part_stat_add(cpu, &s->d->disk->part0, ticks[rw], duration);
		part_stat_unlock();

		trace_bcache_request_end(s, s->orig_bio);
		bio_endio(s->orig_bio, s->error);
		s->orig_bio = NULL;
	}
}

static void do_bio_hook(struct search *s)
{
	struct bio *bio = &s->bio.bio;
	memcpy(bio, s->orig_bio, sizeof(struct bio));

	bio->bi_end_io		= request_endio;
	bio->bi_private		= &s->cl;
	atomic_set(&bio->bi_cnt, 3);
}

static void search_free(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	bio_complete(s);

	if (s->op.cache_bio)
		bio_put(s->op.cache_bio);

	if (s->unaligned_bvec)
		mempool_free(s->bio.bio.bi_io_vec, s->d->unaligned_bvec);

	closure_debug_destroy(cl);
	mempool_free(s, s->d->c->search);
}

static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
{
	struct bio_vec *bv;
	struct search *s = mempool_alloc(d->c->search, GFP_NOIO);
	memset(s, 0, offsetof(struct search, op.keys));

	__closure_init(&s->cl, NULL);

	s->op.inode		= d->id;
	s->op.c			= d->c;
	s->d			= d;
	s->op.lock		= -1;
	s->task			= current;
	s->orig_bio		= bio;
	s->write		= (bio->bi_rw & REQ_WRITE) != 0;
	s->op.flush_journal	= (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
	s->op.skip		= (bio->bi_rw & REQ_DISCARD) != 0;
	s->recoverable		= 1;
	s->start_time		= jiffies;
	do_bio_hook(s);

	if (bio->bi_size != bio_segments(bio) * PAGE_SIZE) {
		bv = mempool_alloc(d->unaligned_bvec, GFP_NOIO);
		memcpy(bv, bio_iovec(bio),
		       sizeof(struct bio_vec) * bio_segments(bio));

		s->bio.bio.bi_io_vec	= bv;
		s->unaligned_bvec	= 1;
	}

	return s;
}

static void btree_read_async(struct closure *cl)
{
	struct btree_op *op = container_of(cl, struct btree_op, cl);

	int ret = btree_root(search_recurse, op->c, op);

	if (ret == -EAGAIN)
		continue_at(cl, btree_read_async, bcache_wq);

	closure_return(cl);
}

/* Cached devices */

static void cached_dev_bio_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	search_free(cl);
	cached_dev_put(dc);
}

/* Process reads */

static void cached_dev_read_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->op.insert_collision)
		bch_mark_cache_miss_collision(s);

	if (s->op.cache_bio) {
		int i;
		struct bio_vec *bv;

		__bio_for_each_segment(bv, s->op.cache_bio, i, 0)
			__free_page(bv->bv_page);
	}

	cached_dev_bio_complete(cl);
}

static void request_read_error(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio_vec *bv;
	int i;

	if (s->recoverable) {
		/* Retry from the backing device: */
		trace_bcache_read_retry(s->orig_bio);

		s->error = 0;
		bv = s->bio.bio.bi_io_vec;
		do_bio_hook(s);
		s->bio.bio.bi_io_vec = bv;

		if (!s->unaligned_bvec)
			bio_for_each_segment(bv, s->orig_bio, i)
				bv->bv_offset = 0, bv->bv_len = PAGE_SIZE;
		else
			memcpy(s->bio.bio.bi_io_vec,
			       bio_iovec(s->orig_bio),
			       sizeof(struct bio_vec) *
			       bio_segments(s->orig_bio));

		/* XXX: invalidate cache */

		closure_bio_submit(&s->bio.bio, &s->cl, s->d);
	}

	continue_at(cl, cached_dev_read_complete, NULL);
}

static void request_read_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	/*
	 * s->op.cache_bio != NULL implies that we had a cache miss; cache_bio
	 * now contains data ready to be inserted into the cache.
	 *
	 * First, we copy the data we just read from cache_bio's bounce buffers
	 * to the buffers the original bio pointed to:
	 */

	if (s->op.cache_bio) {
		bio_reset(s->op.cache_bio);
		s->op.cache_bio->bi_sector	= s->cache_miss->bi_sector;
		s->op.cache_bio->bi_bdev	= s->cache_miss->bi_bdev;
		s->op.cache_bio->bi_size	= s->cache_bio_sectors << 9;
		bch_bio_map(s->op.cache_bio, NULL);

		bio_copy_data(s->cache_miss, s->op.cache_bio);

		bio_put(s->cache_miss);
		s->cache_miss = NULL;
	}

	if (verify(dc, &s->bio.bio) && s->recoverable)
		bch_data_verify(s);

	bio_complete(s);

	if (s->op.cache_bio &&
	    !test_bit(CACHE_SET_STOPPING, &s->op.c->flags)) {
		s->op.type = BTREE_REPLACE;
		closure_call(&s->op.cl, bch_insert_data, NULL, cl);
	}

	continue_at(cl, cached_dev_read_complete, NULL);
}

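/*
 * The "_bh" (bottom half, presumably) variant runs straight from bio
 * completion, so it only picks what to do next and punts the real work -
 * retrying from the backing device, or copying and inserting the missed
 * data - to bcache_wq.
 */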
static void request_read_done_bh(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	bch_mark_cache_accounting(s, !s->cache_miss, s->op.skip);
	trace_bcache_read(s->orig_bio, !s->cache_miss, s->op.skip);

	if (s->error)
		continue_at_nobarrier(cl, request_read_error, bcache_wq);
	else if (s->op.cache_bio || verify(dc, &s->bio.bio))
		continue_at_nobarrier(cl, request_read_done, bcache_wq);
	else
		continue_at_nobarrier(cl, cached_dev_read_complete, NULL);
}

static int cached_dev_cache_miss(struct btree *b, struct search *s,
				 struct bio *bio, unsigned sectors)
{
	int ret = 0;
	unsigned reada;
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
	struct bio *miss;

	miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
	if (miss == bio)
		s->op.lookup_done = true;

	miss->bi_end_io		= request_endio;
	miss->bi_private	= &s->cl;

	if (s->cache_miss || s->op.skip)
		goto out_submit;

	if (miss != bio ||
	    (bio->bi_rw & REQ_RAHEAD) ||
	    (bio->bi_rw & REQ_META) ||
	    s->op.c->gc_stats.in_use >= CUTOFF_CACHE_READA)
		reada = 0;
	else {
		reada = min(dc->readahead >> 9,
			    sectors - bio_sectors(miss));

		if (bio_end_sector(miss) + reada > bdev_sectors(miss->bi_bdev))
			reada = bdev_sectors(miss->bi_bdev) -
				bio_end_sector(miss);
	}

	s->cache_bio_sectors = bio_sectors(miss) + reada;
	s->op.cache_bio = bio_alloc_bioset(GFP_NOWAIT,
			DIV_ROUND_UP(s->cache_bio_sectors, PAGE_SECTORS),
			dc->disk.bio_split);

	if (!s->op.cache_bio)
		goto out_submit;

	s->op.cache_bio->bi_sector	= miss->bi_sector;
	s->op.cache_bio->bi_bdev	= miss->bi_bdev;
	s->op.cache_bio->bi_size	= s->cache_bio_sectors << 9;

	s->op.cache_bio->bi_end_io	= request_endio;
	s->op.cache_bio->bi_private	= &s->cl;

	/* btree_search_recurse()'s btree iterator is no good anymore */
	ret = -EINTR;
	if (!bch_btree_insert_check_key(b, &s->op, s->op.cache_bio))
		goto out_put;

	bch_bio_map(s->op.cache_bio, NULL);
	if (bio_alloc_pages(s->op.cache_bio, __GFP_NOWARN|GFP_NOIO))
		goto out_put;

	s->cache_miss = miss;
	bio_get(s->op.cache_bio);

	closure_bio_submit(s->op.cache_bio, &s->cl, s->d);

	return ret;
out_put:
	bio_put(s->op.cache_bio);
	s->op.cache_bio = NULL;
out_submit:
	closure_bio_submit(miss, &s->cl, s->d);
	return ret;
}

static void request_read(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;

	check_should_skip(dc, s);
	closure_call(&s->op.cl, btree_read_async, NULL, cl);

	continue_at(cl, request_read_done_bh, NULL);
}

/* Process writes */

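/*
 * Write policy, roughly in the order request_write() checks it:
 *
 *  - REQ_DISCARD: invalidate the region in the cache, and pass the discard
 *    down only if the backing device supports it.
 *  - Overlapping a key writeback is flushing: force writeback, so we can't
 *    race with the in-flight writeback I/O.
 *  - should_writeback() says yes: writeback (write to the cache only, dirty).
 *  - Bypass (op.skip): invalidate, and write straight to the backing device.
 *  - Otherwise writethrough: write the backing device, and insert a clone of
 *    the bio into the cache.
 */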
static void cached_dev_write_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	up_read_non_owner(&dc->writeback_lock);
	cached_dev_bio_complete(cl);
}

static void request_write(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;
	struct bio *bio = &s->bio.bio;
	struct bkey start, end;
	start = KEY(dc->disk.id, bio->bi_sector, 0);
	end = KEY(dc->disk.id, bio_end_sector(bio), 0);

	bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys, &start, &end);

	check_should_skip(dc, s);
	down_read_non_owner(&dc->writeback_lock);

	if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
		s->op.skip	= false;
		s->writeback	= true;
	}

	if (bio->bi_rw & REQ_DISCARD)
		goto skip;

	if (should_writeback(dc, s->orig_bio,
			     cache_mode(dc, bio),
			     s->op.skip)) {
		s->op.skip = false;
		s->writeback = true;
	}

	if (s->op.skip)
		goto skip;

	trace_bcache_write(s->orig_bio, s->writeback, s->op.skip);

	if (!s->writeback) {
		s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO,
						   dc->disk.bio_split);

		closure_bio_submit(bio, cl, s->d);
	} else {
		bch_writeback_add(dc);

		if (bio->bi_rw & REQ_FLUSH) {
			/* Also need to send a flush to the backing device */
			struct bio *flush = bio_alloc_bioset(0, GFP_NOIO,
							     dc->disk.bio_split);

			flush->bi_rw	= WRITE_FLUSH;
			flush->bi_bdev	= bio->bi_bdev;
			flush->bi_end_io = request_endio;
			flush->bi_private = cl;

			closure_bio_submit(flush, cl, s->d);
		} else {
			s->op.cache_bio = bio;
		}
	}
out:
	closure_call(&s->op.cl, bch_insert_data, NULL, cl);
	continue_at(cl, cached_dev_write_complete, NULL);
skip:
	s->op.skip = true;
	s->op.cache_bio = s->orig_bio;
	bio_get(s->op.cache_bio);

	if ((bio->bi_rw & REQ_DISCARD) &&
	    !blk_queue_discard(bdev_get_queue(dc->bdev)))
		goto out;

	closure_bio_submit(bio, cl, s->d);
	goto out;
}

static void request_nodata(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;
	struct bio *bio = &s->bio.bio;

	if (bio->bi_rw & REQ_DISCARD) {
		request_write(dc, s);
		return;
	}

	if (s->op.flush_journal)
		bch_journal_meta(s->op.c, cl);

	closure_bio_submit(bio, cl, s->d);

	continue_at(cl, cached_dev_bio_complete, NULL);
}

/* Cached devices - read & write stuff */

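/*
 * Returns 0 when the cache isn't congested; otherwise a cutoff, in sectors,
 * on the task's recent sequential I/O - check_should_skip() bypasses the
 * cache for I/Os at or above it. Heavier congestion yields a smaller cutoff,
 * and the random bitmap_weight() term dithers the result.
 */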
unsigned bch_get_congested(struct cache_set *c)
{
	int i;
	long rand;

	if (!c->congested_read_threshold_us &&
	    !c->congested_write_threshold_us)
		return 0;

	i = (local_clock_us() - c->congested_last_us) / 1024;
	if (i < 0)
		return 0;

	i += atomic_read(&c->congested);
	if (i >= 0)
		return 0;

	i += CONGESTED_MAX;

	if (i > 0)
		i = fract_exp_two(i, 6);

	rand = get_random_int();
	i -= bitmap_weight(&rand, BITS_PER_LONG);

	return i > 0 ? i : 1;
}

static void add_sequential(struct task_struct *t)
{
	ewma_add(t->sequential_io_avg,
		 t->sequential_io, 8, 0);

	t->sequential_io = 0;
}

static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
{
	return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
}

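/*
 * Decide whether this request should bypass the cache entirely. Hard skips
 * first (device detaching, cache fuller than CUTOFF_CACHE_ADD, discards,
 * cache mode none, writes in writearound mode, unaligned I/O), then the
 * heuristics: bypass when the task's sequential I/O exceeds
 * dc->sequential_cutoff, or exceeds the congestion cutoff while the cache
 * is congested.
 */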
static void check_should_skip(struct cached_dev *dc, struct search *s)
{
	struct cache_set *c = s->op.c;
	struct bio *bio = &s->bio.bio;
	unsigned mode = cache_mode(dc, bio);
	unsigned sectors, congested = bch_get_congested(c);

	if (atomic_read(&dc->disk.detaching) ||
	    c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
	    (bio->bi_rw & REQ_DISCARD))
		goto skip;

	if (mode == CACHE_MODE_NONE ||
	    (mode == CACHE_MODE_WRITEAROUND &&
	     (bio->bi_rw & REQ_WRITE)))
		goto skip;

	if (bio->bi_sector   & (c->sb.block_size - 1) ||
	    bio_sectors(bio) & (c->sb.block_size - 1)) {
		pr_debug("skipping unaligned io");
		goto skip;
	}

	if (!congested && !dc->sequential_cutoff)
		goto rescale;

	if (!congested &&
	    mode == CACHE_MODE_WRITEBACK &&
	    (bio->bi_rw & REQ_WRITE) &&
	    (bio->bi_rw & REQ_SYNC))
		goto rescale;

	if (dc->sequential_merge) {
		struct io *i;

		spin_lock(&dc->io_lock);

		hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash)
			if (i->last == bio->bi_sector &&
			    time_before(jiffies, i->jiffies))
				goto found;

		i = list_first_entry(&dc->io_lru, struct io, lru);

		add_sequential(s->task);
		i->sequential = 0;
found:
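		/* Saturating add: don't let the sequential counter wrap */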
		if (i->sequential + bio->bi_size > i->sequential)
			i->sequential	+= bio->bi_size;

		i->last			 = bio_end_sector(bio);
		i->jiffies		 = jiffies + msecs_to_jiffies(5000);
		s->task->sequential_io	 = i->sequential;

		hlist_del(&i->hash);
		hlist_add_head(&i->hash, iohash(dc, i->last));
		list_move_tail(&i->lru, &dc->io_lru);

		spin_unlock(&dc->io_lock);
	} else {
		s->task->sequential_io = bio->bi_size;

		add_sequential(s->task);
	}

	sectors = max(s->task->sequential_io,
		      s->task->sequential_io_avg) >> 9;

	if (dc->sequential_cutoff &&
	    sectors >= dc->sequential_cutoff >> 9) {
		trace_bcache_bypass_sequential(s->orig_bio);
		goto skip;
	}

	if (congested && sectors >= congested) {
		trace_bcache_bypass_congested(s->orig_bio);
		goto skip;
	}

rescale:
	bch_rescale_priorities(c, bio_sectors(bio));
	return;
skip:
	bch_mark_sectors_bypassed(s, bio_sectors(bio));
	s->op.skip = true;
}

static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
{
	struct search *s;
	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	int cpu, rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &d->disk->part0, ios[rw]);
	part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	bio->bi_bdev = dc->bdev;
	bio->bi_sector += dc->sb.data_offset;

	if (cached_dev_get(dc)) {
		s = search_alloc(bio, d);
		trace_bcache_request_start(s, bio);

		if (!bio_has_data(bio))
			request_nodata(dc, s);
		else if (rw)
			request_write(dc, s);
		else
			request_read(dc, s);
	} else {
		if ((bio->bi_rw & REQ_DISCARD) &&
		    !blk_queue_discard(bdev_get_queue(dc->bdev)))
			bio_endio(bio, 0);
		else
			bch_generic_make_request(bio, &d->bio_split_hook);
	}
}

static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
			    unsigned int cmd, unsigned long arg)
{
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
}

static int cached_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	struct request_queue *q = bdev_get_queue(dc->bdev);
	int ret = 0;

	if (bdi_congested(&q->backing_dev_info, bits))
		return 1;

	if (cached_dev_get(dc)) {
		unsigned i;
		struct cache *ca;

		for_each_cache(ca, d->c, i) {
			q = bdev_get_queue(ca->bdev);
			ret |= bdi_congested(&q->backing_dev_info, bits);
		}

		cached_dev_put(dc);
	}

	return ret;
}

void bch_cached_dev_request_init(struct cached_dev *dc)
{
	struct gendisk *g = dc->disk.disk;

	g->queue->make_request_fn		= cached_dev_make_request;
	g->queue->backing_dev_info.congested_fn = cached_dev_congested;
	dc->disk.cache_miss			= cached_dev_cache_miss;
	dc->disk.ioctl				= cached_dev_ioctl;
}

/* Flash backed devices */

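/*
 * Flash-only volumes have no backing device to fall back on, so a cache miss
 * just means a hole in the data: zero-fill the unread part of the bio.
 */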
static int flash_dev_cache_miss(struct btree *b, struct search *s,
				struct bio *bio, unsigned sectors)
{
	struct bio_vec *bv;
	int i;

	/* Zero fill bio */

	bio_for_each_segment(bv, bio, i) {
		unsigned j = min(bv->bv_len >> 9, sectors);

		void *p = kmap(bv->bv_page);
		memset(p + bv->bv_offset, 0, j << 9);
		kunmap(bv->bv_page);

		sectors	-= j;
	}

	bio_advance(bio, min(sectors << 9, bio->bi_size));

	if (!bio->bi_size)
		s->op.lookup_done = true;

	return 0;
}

static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
{
	struct search *s;
	struct closure *cl;
	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
	int cpu, rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &d->disk->part0, ios[rw]);
	part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	s = search_alloc(bio, d);
	cl = &s->cl;
	bio = &s->bio.bio;

	trace_bcache_request_start(s, bio);

	if (bio_has_data(bio) && !rw) {
		closure_call(&s->op.cl, btree_read_async, NULL, cl);
	} else if (bio_has_data(bio) || s->op.skip) {
		bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys,
					&KEY(d->id, bio->bi_sector, 0),
					&KEY(d->id, bio_end_sector(bio), 0));

		s->writeback	= true;
		s->op.cache_bio	= bio;

		closure_call(&s->op.cl, bch_insert_data, NULL, cl);
	} else {
		/* No data - probably a cache flush */
		if (s->op.flush_journal)
			bch_journal_meta(s->op.c, cl);
	}

	continue_at(cl, search_free, NULL);
}

static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}

static int flash_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct request_queue *q;
	struct cache *ca;
	unsigned i;
	int ret = 0;

	for_each_cache(ca, d->c, i) {
		q = bdev_get_queue(ca->bdev);
		ret |= bdi_congested(&q->backing_dev_info, bits);
	}

	return ret;
}

void bch_flash_dev_request_init(struct bcache_device *d)
{
	struct gendisk *g = d->disk;

	g->queue->make_request_fn		= flash_dev_make_request;
	g->queue->backing_dev_info.congested_fn = flash_dev_congested;
	d->cache_miss				= flash_dev_cache_miss;
	d->ioctl				= flash_dev_ioctl;
}

void bch_request_exit(void)
{
#ifdef CONFIG_CGROUP_BCACHE
	cgroup_unload_subsys(&bcache_subsys);
#endif
	if (bch_search_cache)
		kmem_cache_destroy(bch_search_cache);
}

int __init bch_request_init(void)
{
	bch_search_cache = KMEM_CACHE(search, 0);
	if (!bch_search_cache)
		return -ENOMEM;

#ifdef CONFIG_CGROUP_BCACHE
	cgroup_load_subsys(&bcache_subsys);
	init_bch_cgroup(&bcache_default_cgroup);

	cgroup_add_cftypes(&bcache_subsys, bch_files);
#endif
	return 0;
}