xref: /openbmc/linux/drivers/md/bcache/request.c (revision 9dca4168)
1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
2cafe5635SKent Overstreet /*
3cafe5635SKent Overstreet  * Main bcache entry point - handle a read or a write request and decide what to
4cafe5635SKent Overstreet  * do with it; the make_request functions are called by the block layer.
5cafe5635SKent Overstreet  *
6cafe5635SKent Overstreet  * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
7cafe5635SKent Overstreet  * Copyright 2012 Google, Inc.
8cafe5635SKent Overstreet  */
9cafe5635SKent Overstreet 
10cafe5635SKent Overstreet #include "bcache.h"
11cafe5635SKent Overstreet #include "btree.h"
12cafe5635SKent Overstreet #include "debug.h"
13cafe5635SKent Overstreet #include "request.h"
14279afbadSKent Overstreet #include "writeback.h"
15cafe5635SKent Overstreet 
16cafe5635SKent Overstreet #include <linux/module.h>
17cafe5635SKent Overstreet #include <linux/hash.h>
18cafe5635SKent Overstreet #include <linux/random.h>
1966114cadSTejun Heo #include <linux/backing-dev.h>
20cafe5635SKent Overstreet 
21cafe5635SKent Overstreet #include <trace/events/bcache.h>
22cafe5635SKent Overstreet 
23cafe5635SKent Overstreet #define CUTOFF_CACHE_ADD	95
24cafe5635SKent Overstreet #define CUTOFF_CACHE_READA	90
25cafe5635SKent Overstreet 
26cafe5635SKent Overstreet struct kmem_cache *bch_search_cache;
27cafe5635SKent Overstreet 
28fc2d5988SColy Li static void bch_data_insert_start(struct closure *cl);
29a34a8bfdSKent Overstreet 
306f10f7d1SColy Li static unsigned int cache_mode(struct cached_dev *dc)
31cafe5635SKent Overstreet {
32cafe5635SKent Overstreet 	return BDEV_CACHE_MODE(&dc->sb);
33cafe5635SKent Overstreet }
34cafe5635SKent Overstreet 
3523850102SYijing Wang static bool verify(struct cached_dev *dc)
36cafe5635SKent Overstreet {
37cafe5635SKent Overstreet 	return dc->verify;
38cafe5635SKent Overstreet }
39cafe5635SKent Overstreet 
40cafe5635SKent Overstreet static void bio_csum(struct bio *bio, struct bkey *k)
41cafe5635SKent Overstreet {
427988613bSKent Overstreet 	struct bio_vec bv;
437988613bSKent Overstreet 	struct bvec_iter iter;
44cafe5635SKent Overstreet 	uint64_t csum = 0;
45cafe5635SKent Overstreet 
467988613bSKent Overstreet 	bio_for_each_segment(bv, bio, iter) {
4707fee7abSChristoph Hellwig 		void *d = bvec_kmap_local(&bv);
481fae7cf0SColy Li 
4939fa7a95SChristoph Hellwig 		csum = crc64_be(csum, d, bv.bv_len);
5007fee7abSChristoph Hellwig 		kunmap_local(d);
51cafe5635SKent Overstreet 	}
52cafe5635SKent Overstreet 
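	/*
	 * Store the checksum in the key's spare pointer slot, ptr[KEY_PTRS(k)];
	 * the top bit is masked off, presumably reserved by the bkey layout.
	 */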
53cafe5635SKent Overstreet 	k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
54cafe5635SKent Overstreet }
55cafe5635SKent Overstreet 
56cafe5635SKent Overstreet /* Insert data into cache */
57cafe5635SKent Overstreet 
58a34a8bfdSKent Overstreet static void bch_data_insert_keys(struct closure *cl)
59cafe5635SKent Overstreet {
60220bb38cSKent Overstreet 	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
61c18536a7SKent Overstreet 	atomic_t *journal_ref = NULL;
62220bb38cSKent Overstreet 	struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
636054c6d4SKent Overstreet 	int ret;
64cafe5635SKent Overstreet 
65220bb38cSKent Overstreet 	if (!op->replace)
66220bb38cSKent Overstreet 		journal_ref = bch_journal(op->c, &op->insert_keys,
67220bb38cSKent Overstreet 					  op->flush_journal ? cl : NULL);
68cafe5635SKent Overstreet 
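	/*
	 * bch_btree_insert() returns -ESRCH when a replace key (from a
	 * cache-miss promotion) no longer matches the btree, i.e. a write
	 * raced with us; that is recorded as a collision, not an error.
	 */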
69220bb38cSKent Overstreet 	ret = bch_btree_insert(op->c, &op->insert_keys,
706054c6d4SKent Overstreet 			       journal_ref, replace_key);
716054c6d4SKent Overstreet 	if (ret == -ESRCH) {
72220bb38cSKent Overstreet 		op->replace_collision = true;
736054c6d4SKent Overstreet 	} else if (ret) {
744e4cbee9SChristoph Hellwig 		op->status		= BLK_STS_RESOURCE;
75220bb38cSKent Overstreet 		op->insert_data_done	= true;
76cafe5635SKent Overstreet 	}
77cafe5635SKent Overstreet 
78c18536a7SKent Overstreet 	if (journal_ref)
79c18536a7SKent Overstreet 		atomic_dec_bug(journal_ref);
80a34a8bfdSKent Overstreet 
8177b5a084SJens Axboe 	if (!op->insert_data_done) {
82da415a09SNicholas Swenson 		continue_at(cl, bch_data_insert_start, op->wq);
8377b5a084SJens Axboe 		return;
8477b5a084SJens Axboe 	}
85a34a8bfdSKent Overstreet 
86220bb38cSKent Overstreet 	bch_keylist_free(&op->insert_keys);
87a34a8bfdSKent Overstreet 	closure_return(cl);
88cafe5635SKent Overstreet }
89cafe5635SKent Overstreet 
906f10f7d1SColy Li static int bch_keylist_realloc(struct keylist *l, unsigned int u64s,
91085d2a3dSKent Overstreet 			       struct cache_set *c)
92085d2a3dSKent Overstreet {
93085d2a3dSKent Overstreet 	size_t oldsize = bch_keylist_nkeys(l);
94085d2a3dSKent Overstreet 	size_t newsize = oldsize + u64s;
95085d2a3dSKent Overstreet 
96085d2a3dSKent Overstreet 	/*
97085d2a3dSKent Overstreet 	 * The journalling code doesn't handle the case where the keys to insert
98085d2a3dSKent Overstreet 	 * are bigger than an empty write: if we just return -ENOMEM here,
990cba2e71SColy Li 	 * bch_data_insert_keys() will insert the keys created so far
100085d2a3dSKent Overstreet 	 * and finish the rest when the keylist is empty.
101085d2a3dSKent Overstreet 	 */
1024e1ebae3SColy Li 	if (newsize * sizeof(uint64_t) > block_bytes(c->cache) - sizeof(struct jset))
103085d2a3dSKent Overstreet 		return -ENOMEM;
104085d2a3dSKent Overstreet 
105085d2a3dSKent Overstreet 	return __bch_keylist_realloc(l, u64s);
106085d2a3dSKent Overstreet }
107085d2a3dSKent Overstreet 
108a34a8bfdSKent Overstreet static void bch_data_invalidate(struct closure *cl)
109a34a8bfdSKent Overstreet {
110220bb38cSKent Overstreet 	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
111220bb38cSKent Overstreet 	struct bio *bio = op->bio;
112a34a8bfdSKent Overstreet 
11346f5aa88SJoe Perches 	pr_debug("invalidating %i sectors from %llu\n",
1144f024f37SKent Overstreet 		 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);
115a34a8bfdSKent Overstreet 
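	/*
	 * Invalidate the range by inserting keys with no pointers; on
	 * insertion they overwrite whatever the cache held for those sectors.
	 */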
116a34a8bfdSKent Overstreet 	while (bio_sectors(bio)) {
1176f10f7d1SColy Li 		unsigned int sectors = min(bio_sectors(bio),
11881ab4190SKent Overstreet 				       1U << (KEY_SIZE_BITS - 1));
119a34a8bfdSKent Overstreet 
120085d2a3dSKent Overstreet 		if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
121a34a8bfdSKent Overstreet 			goto out;
122a34a8bfdSKent Overstreet 
1234f024f37SKent Overstreet 		bio->bi_iter.bi_sector	+= sectors;
1244f024f37SKent Overstreet 		bio->bi_iter.bi_size	-= sectors << 9;
125a34a8bfdSKent Overstreet 
126220bb38cSKent Overstreet 		bch_keylist_add(&op->insert_keys,
127b0d30981SColy Li 				&KEY(op->inode,
128b0d30981SColy Li 				     bio->bi_iter.bi_sector,
129b0d30981SColy Li 				     sectors));
130a34a8bfdSKent Overstreet 	}
131a34a8bfdSKent Overstreet 
132220bb38cSKent Overstreet 	op->insert_data_done = true;
13327a40ab9SColy Li 	/* drop the reference taken by bio_get() in bch_data_insert() */
134a34a8bfdSKent Overstreet 	bio_put(bio);
135a34a8bfdSKent Overstreet out:
136da415a09SNicholas Swenson 	continue_at(cl, bch_data_insert_keys, op->wq);
137a34a8bfdSKent Overstreet }
138a34a8bfdSKent Overstreet 
139a34a8bfdSKent Overstreet static void bch_data_insert_error(struct closure *cl)
140cafe5635SKent Overstreet {
141220bb38cSKent Overstreet 	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
142cafe5635SKent Overstreet 
143cafe5635SKent Overstreet 	/*
144cafe5635SKent Overstreet 	 * Our data write just errored, which means we've got a bunch of keys to
1452b1edd23SColy Li 	 * insert that point to data that wasn't successfully written.
146cafe5635SKent Overstreet 	 *
147cafe5635SKent Overstreet 	 * We don't have to insert those keys but we still have to invalidate
148cafe5635SKent Overstreet 	 * that region of the cache - so, if we just strip off all the pointers
149cafe5635SKent Overstreet 	 * from the keys we'll accomplish just that.
150cafe5635SKent Overstreet 	 */
151cafe5635SKent Overstreet 
152220bb38cSKent Overstreet 	struct bkey *src = op->insert_keys.keys, *dst = op->insert_keys.keys;
153cafe5635SKent Overstreet 
154220bb38cSKent Overstreet 	while (src != op->insert_keys.top) {
155cafe5635SKent Overstreet 		struct bkey *n = bkey_next(src);
156cafe5635SKent Overstreet 
157cafe5635SKent Overstreet 		SET_KEY_PTRS(src, 0);
158c2f95ae2SKent Overstreet 		memmove(dst, src, bkey_bytes(src));
159cafe5635SKent Overstreet 
160cafe5635SKent Overstreet 		dst = bkey_next(dst);
161cafe5635SKent Overstreet 		src = n;
162cafe5635SKent Overstreet 	}
163cafe5635SKent Overstreet 
164220bb38cSKent Overstreet 	op->insert_keys.top = dst;
165cafe5635SKent Overstreet 
166a34a8bfdSKent Overstreet 	bch_data_insert_keys(cl);
167cafe5635SKent Overstreet }
168cafe5635SKent Overstreet 
1694246a0b6SChristoph Hellwig static void bch_data_insert_endio(struct bio *bio)
170cafe5635SKent Overstreet {
171cafe5635SKent Overstreet 	struct closure *cl = bio->bi_private;
172220bb38cSKent Overstreet 	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
173cafe5635SKent Overstreet 
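	/*
	 * On error there are three cases: writeback writes must surface the
	 * error (the data exists only in the cache), ordinary writes fall
	 * through to bch_data_insert_error() to invalidate the region, and
	 * replace (cache-miss) inserts are simply abandoned.
	 */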
1744e4cbee9SChristoph Hellwig 	if (bio->bi_status) {
175cafe5635SKent Overstreet 		/* TODO: We could try to recover from this. */
176220bb38cSKent Overstreet 		if (op->writeback)
1774e4cbee9SChristoph Hellwig 			op->status = bio->bi_status;
178220bb38cSKent Overstreet 		else if (!op->replace)
179da415a09SNicholas Swenson 			set_closure_fn(cl, bch_data_insert_error, op->wq);
180cafe5635SKent Overstreet 		else
181cafe5635SKent Overstreet 			set_closure_fn(cl, NULL, NULL);
182cafe5635SKent Overstreet 	}
183cafe5635SKent Overstreet 
1844e4cbee9SChristoph Hellwig 	bch_bbio_endio(op->c, bio, bio->bi_status, "writing data to cache");
185cafe5635SKent Overstreet }
186cafe5635SKent Overstreet 
187a34a8bfdSKent Overstreet static void bch_data_insert_start(struct closure *cl)
188cafe5635SKent Overstreet {
189220bb38cSKent Overstreet 	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
190220bb38cSKent Overstreet 	struct bio *bio = op->bio, *n;
191cafe5635SKent Overstreet 
192e3b4825bSNicholas Swenson 	if (op->bypass)
193e3b4825bSNicholas Swenson 		return bch_data_invalidate(cl);
194e3b4825bSNicholas Swenson 
19569daf03aSTang Junhui 	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
19669daf03aSTang Junhui 		wake_up_gc(op->c);
19769daf03aSTang Junhui 
19854d12f2bSKent Overstreet 	/*
19928a8f0d3SMike Christie 	 * Journal writes are marked REQ_PREFLUSH; if the original write was a
20054d12f2bSKent Overstreet 	 * flush, it'll wait on the journal write.
20154d12f2bSKent Overstreet 	 */
2021eff9d32SJens Axboe 	bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);
20354d12f2bSKent Overstreet 
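	/*
	 * Each pass below allocates sectors in a bucket, splits off a matching
	 * chunk of the bio, submits it, and queues a key describing where the
	 * chunk landed, until the whole bio has been consumed.
	 */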
204cafe5635SKent Overstreet 	do {
2056f10f7d1SColy Li 		unsigned int i;
206cafe5635SKent Overstreet 		struct bkey *k;
207d19936a2SKent Overstreet 		struct bio_set *split = &op->c->bio_split;
208cafe5635SKent Overstreet 
209cafe5635SKent Overstreet 		/* 2 for the key header, 1 for the device pointer and 1 for the checksum */
210220bb38cSKent Overstreet 		if (bch_keylist_realloc(&op->insert_keys,
211085d2a3dSKent Overstreet 					3 + (op->csum ? 1 : 0),
21277b5a084SJens Axboe 					op->c)) {
213da415a09SNicholas Swenson 			continue_at(cl, bch_data_insert_keys, op->wq);
21477b5a084SJens Axboe 			return;
21577b5a084SJens Axboe 		}
216cafe5635SKent Overstreet 
217220bb38cSKent Overstreet 		k = op->insert_keys.top;
218cafe5635SKent Overstreet 		bkey_init(k);
219220bb38cSKent Overstreet 		SET_KEY_INODE(k, op->inode);
2204f024f37SKent Overstreet 		SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);
221cafe5635SKent Overstreet 
2222599b53bSKent Overstreet 		if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
2232599b53bSKent Overstreet 				       op->write_point, op->write_prio,
2242599b53bSKent Overstreet 				       op->writeback))
225cafe5635SKent Overstreet 			goto err;
226cafe5635SKent Overstreet 
22720d0189bSKent Overstreet 		n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);
228cafe5635SKent Overstreet 
229a34a8bfdSKent Overstreet 		n->bi_end_io	= bch_data_insert_endio;
230cafe5635SKent Overstreet 		n->bi_private	= cl;
231cafe5635SKent Overstreet 
232220bb38cSKent Overstreet 		if (op->writeback) {
233cafe5635SKent Overstreet 			SET_KEY_DIRTY(k, true);
234cafe5635SKent Overstreet 
235cafe5635SKent Overstreet 			for (i = 0; i < KEY_PTRS(k); i++)
236220bb38cSKent Overstreet 				SET_GC_MARK(PTR_BUCKET(op->c, k, i),
237cafe5635SKent Overstreet 					    GC_MARK_DIRTY);
238cafe5635SKent Overstreet 		}
239cafe5635SKent Overstreet 
240220bb38cSKent Overstreet 		SET_KEY_CSUM(k, op->csum);
241cafe5635SKent Overstreet 		if (KEY_CSUM(k))
242cafe5635SKent Overstreet 			bio_csum(n, k);
243cafe5635SKent Overstreet 
244c37511b8SKent Overstreet 		trace_bcache_cache_insert(k);
245220bb38cSKent Overstreet 		bch_keylist_push(&op->insert_keys);
246cafe5635SKent Overstreet 
247ad0d9e76SMike Christie 		bio_set_op_attrs(n, REQ_OP_WRITE, 0);
248220bb38cSKent Overstreet 		bch_submit_bbio(n, op->c, k, 0);
249cafe5635SKent Overstreet 	} while (n != bio);
250cafe5635SKent Overstreet 
251220bb38cSKent Overstreet 	op->insert_data_done = true;
252da415a09SNicholas Swenson 	continue_at(cl, bch_data_insert_keys, op->wq);
25377b5a084SJens Axboe 	return;
254cafe5635SKent Overstreet err:
255cafe5635SKent Overstreet 	/* bch_alloc_sectors() blocks if s->writeback = true */
256220bb38cSKent Overstreet 	BUG_ON(op->writeback);
257cafe5635SKent Overstreet 
258cafe5635SKent Overstreet 	/*
259cafe5635SKent Overstreet 	 * But if it's not a writeback write we'd rather just bail out if
260cafe5635SKent Overstreet 	 * there aren't any buckets ready to write to - it might take a while and
261cafe5635SKent Overstreet 	 * we might be starving btree writes for gc or something.
262cafe5635SKent Overstreet 	 */
263cafe5635SKent Overstreet 
264220bb38cSKent Overstreet 	if (!op->replace) {
265cafe5635SKent Overstreet 		/*
266cafe5635SKent Overstreet 		 * Writethrough write: We can't complete the write until we've
267cafe5635SKent Overstreet 		 * updated the index. But we don't want to delay the write while
268cafe5635SKent Overstreet 		 * we wait for buckets to be freed up, so just invalidate the
269cafe5635SKent Overstreet 		 * rest of the write.
270cafe5635SKent Overstreet 		 */
271220bb38cSKent Overstreet 		op->bypass = true;
272a34a8bfdSKent Overstreet 		return bch_data_invalidate(cl);
273cafe5635SKent Overstreet 	} else {
274cafe5635SKent Overstreet 		/*
275cafe5635SKent Overstreet 		 * From a cache miss, we can just insert the keys for the data
276cafe5635SKent Overstreet 		 * we have written or bail out if we didn't do anything.
277cafe5635SKent Overstreet 		 */
278220bb38cSKent Overstreet 		op->insert_data_done = true;
279cafe5635SKent Overstreet 		bio_put(bio);
280cafe5635SKent Overstreet 
281220bb38cSKent Overstreet 		if (!bch_keylist_empty(&op->insert_keys))
282da415a09SNicholas Swenson 			continue_at(cl, bch_data_insert_keys, op->wq);
283cafe5635SKent Overstreet 		else
284cafe5635SKent Overstreet 			closure_return(cl);
285cafe5635SKent Overstreet 	}
286cafe5635SKent Overstreet }
287cafe5635SKent Overstreet 
288cafe5635SKent Overstreet /**
289a34a8bfdSKent Overstreet  * bch_data_insert - stick some data in the cache
29047344e33SBart Van Assche  * @cl: closure pointer.
291cafe5635SKent Overstreet  *
292cafe5635SKent Overstreet  * This is the starting point for any data to end up in a cache device; it could
293cafe5635SKent Overstreet  * be from a normal write, or a writeback write, or a write to a flash only
294cafe5635SKent Overstreet  * volume - it's also used by the moving garbage collector to compact data in
295cafe5635SKent Overstreet  * mostly empty buckets.
296cafe5635SKent Overstreet  *
297cafe5635SKent Overstreet  * It first writes the data to the cache, creating a list of keys to be inserted
298cafe5635SKent Overstreet  * (if the data had to be fragmented there will be multiple keys); after the
299cafe5635SKent Overstreet  * data is written it calls bch_journal, and after the keys have been added to
300cafe5635SKent Overstreet  * the next journal write they're inserted into the btree.
301cafe5635SKent Overstreet  *
3023db4d078SShenghui Wang  * It inserts the data in op->bio; bi_sector is used for the key offset,
303cafe5635SKent Overstreet  * and op->inode is used for the key inode.
304cafe5635SKent Overstreet  *
3053db4d078SShenghui Wang  * If op->bypass is true, instead of inserting the data it invalidates the
3063db4d078SShenghui Wang  * region of the cache represented by op->bio and op->inode.
307cafe5635SKent Overstreet  */
308a34a8bfdSKent Overstreet void bch_data_insert(struct closure *cl)
309cafe5635SKent Overstreet {
310220bb38cSKent Overstreet 	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
311cafe5635SKent Overstreet 
31260ae81eeSSlava Pestov 	trace_bcache_write(op->c, op->inode, op->bio,
31360ae81eeSSlava Pestov 			   op->writeback, op->bypass);
314220bb38cSKent Overstreet 
315220bb38cSKent Overstreet 	bch_keylist_init(&op->insert_keys);
316220bb38cSKent Overstreet 	bio_get(op->bio);
317a34a8bfdSKent Overstreet 	bch_data_insert_start(cl);
318cafe5635SKent Overstreet }
319cafe5635SKent Overstreet 
3203a394727SGeorge Spelvin /*
3213a394727SGeorge Spelvin  * Congested?  Return 0 (not congested) or the limit (in sectors)
3223a394727SGeorge Spelvin  * beyond which we should bypass the cache due to congestion.
3233a394727SGeorge Spelvin  */
3243a394727SGeorge Spelvin unsigned int bch_get_congested(const struct cache_set *c)
32584f0db03SKent Overstreet {
32684f0db03SKent Overstreet 	int i;
32784f0db03SKent Overstreet 
32884f0db03SKent Overstreet 	if (!c->congested_read_threshold_us &&
32984f0db03SKent Overstreet 	    !c->congested_write_threshold_us)
33084f0db03SKent Overstreet 		return 0;
33184f0db03SKent Overstreet 
33284f0db03SKent Overstreet 	i = (local_clock_us() - c->congested_last_us) / 1024;
33384f0db03SKent Overstreet 	if (i < 0)
33484f0db03SKent Overstreet 		return 0;
33584f0db03SKent Overstreet 
33684f0db03SKent Overstreet 	i += atomic_read(&c->congested);
33784f0db03SKent Overstreet 	if (i >= 0)
33884f0db03SKent Overstreet 		return 0;
33984f0db03SKent Overstreet 
34084f0db03SKent Overstreet 	i += CONGESTED_MAX;
34184f0db03SKent Overstreet 
34284f0db03SKent Overstreet 	if (i > 0)
34384f0db03SKent Overstreet 		i = fract_exp_two(i, 6);
34484f0db03SKent Overstreet 
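	/*
	 * Subtract a little random jitter (the popcount of a random 32-bit
	 * word, 16 on average) so decisions near the threshold are not
	 * deterministic.
	 */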
3453a394727SGeorge Spelvin 	i -= hweight32(get_random_u32());
34684f0db03SKent Overstreet 
34784f0db03SKent Overstreet 	return i > 0 ? i : 1;
34884f0db03SKent Overstreet }
34984f0db03SKent Overstreet 
35084f0db03SKent Overstreet static void add_sequential(struct task_struct *t)
35184f0db03SKent Overstreet {
35284f0db03SKent Overstreet 	ewma_add(t->sequential_io_avg,
35384f0db03SKent Overstreet 		 t->sequential_io, 8, 0);
35484f0db03SKent Overstreet 
35584f0db03SKent Overstreet 	t->sequential_io = 0;
35684f0db03SKent Overstreet }
35784f0db03SKent Overstreet 
35884f0db03SKent Overstreet static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
35984f0db03SKent Overstreet {
36084f0db03SKent Overstreet 	return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
36184f0db03SKent Overstreet }
36284f0db03SKent Overstreet 
363220bb38cSKent Overstreet static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
36484f0db03SKent Overstreet {
365220bb38cSKent Overstreet 	struct cache_set *c = dc->disk.c;
3666f10f7d1SColy Li 	unsigned int mode = cache_mode(dc);
3673a394727SGeorge Spelvin 	unsigned int sectors, congested;
368220bb38cSKent Overstreet 	struct task_struct *task = current;
3698aee1220SKent Overstreet 	struct io *i;
37084f0db03SKent Overstreet 
371c4d951ddSKent Overstreet 	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
37284f0db03SKent Overstreet 	    c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
373ad0d9e76SMike Christie 	    (bio_op(bio) == REQ_OP_DISCARD))
37484f0db03SKent Overstreet 		goto skip;
37584f0db03SKent Overstreet 
37684f0db03SKent Overstreet 	if (mode == CACHE_MODE_NONE ||
37784f0db03SKent Overstreet 	    (mode == CACHE_MODE_WRITEAROUND &&
378c8d93247SMike Christie 	     op_is_write(bio_op(bio))))
37984f0db03SKent Overstreet 		goto skip;
38084f0db03SKent Overstreet 
381b41c9b02SEric Wheeler 	/*
382038ba8ccSColy Li 	 * If the bio is for read-ahead or background IO, whether to bypass
383038ba8ccSColy Li 	 * it depends on the following situations:
384038ba8ccSColy Li 	 * - If the IO is for metadata, always cache it, no bypass
385038ba8ccSColy Li 	 * - If the IO is not metadata, check dc->cache_readahead_policy:
386038ba8ccSColy Li 	 *      BCH_CACHE_READA_ALL: cache it, do not bypass
387038ba8ccSColy Li 	 *      BCH_CACHE_READA_META_ONLY: do not cache it, bypass
388038ba8ccSColy Li 	 * That is, read-ahead requests for metadata always get cached
389dc7292a5SColy Li 	 * (e.g. for gfs2 or xfs).
390b41c9b02SEric Wheeler 	 */
391038ba8ccSColy Li 	if ((bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND))) {
392038ba8ccSColy Li 		if (!(bio->bi_opf & (REQ_META|REQ_PRIO)) &&
393038ba8ccSColy Li 		    (dc->cache_readahead_policy != BCH_CACHE_READA_ALL))
394b41c9b02SEric Wheeler 			goto skip;
395038ba8ccSColy Li 	}
396b41c9b02SEric Wheeler 
3974a784266SColy Li 	if (bio->bi_iter.bi_sector & (c->cache->sb.block_size - 1) ||
3984a784266SColy Li 	    bio_sectors(bio) & (c->cache->sb.block_size - 1)) {
39946f5aa88SJoe Perches 		pr_debug("skipping unaligned io\n");
40084f0db03SKent Overstreet 		goto skip;
40184f0db03SKent Overstreet 	}
40284f0db03SKent Overstreet 
4035ceaaad7SKent Overstreet 	if (bypass_torture_test(dc)) {
4045ceaaad7SKent Overstreet 		if ((get_random_int() & 3) == 3)
4055ceaaad7SKent Overstreet 			goto skip;
4065ceaaad7SKent Overstreet 		else
4075ceaaad7SKent Overstreet 			goto rescale;
4085ceaaad7SKent Overstreet 	}
4095ceaaad7SKent Overstreet 
4103a394727SGeorge Spelvin 	congested = bch_get_congested(c);
41184f0db03SKent Overstreet 	if (!congested && !dc->sequential_cutoff)
41284f0db03SKent Overstreet 		goto rescale;
41384f0db03SKent Overstreet 
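	/*
	 * Sequential I/O detection: recent I/Os are kept in a small hash keyed
	 * by their ending sector, so a request starting where a previous one
	 * ended is treated as a continuation of that stream.
	 */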
41484f0db03SKent Overstreet 	spin_lock(&dc->io_lock);
41584f0db03SKent Overstreet 
4164f024f37SKent Overstreet 	hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
4174f024f37SKent Overstreet 		if (i->last == bio->bi_iter.bi_sector &&
41884f0db03SKent Overstreet 		    time_before(jiffies, i->jiffies))
41984f0db03SKent Overstreet 			goto found;
42084f0db03SKent Overstreet 
42184f0db03SKent Overstreet 	i = list_first_entry(&dc->io_lru, struct io, lru);
42284f0db03SKent Overstreet 
423220bb38cSKent Overstreet 	add_sequential(task);
42484f0db03SKent Overstreet 	i->sequential = 0;
42584f0db03SKent Overstreet found:
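	/* accumulate only if it won't overflow the unsigned counter */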
4264f024f37SKent Overstreet 	if (i->sequential + bio->bi_iter.bi_size > i->sequential)
4274f024f37SKent Overstreet 		i->sequential	+= bio->bi_iter.bi_size;
42884f0db03SKent Overstreet 
42984f0db03SKent Overstreet 	i->last			 = bio_end_sector(bio);
43084f0db03SKent Overstreet 	i->jiffies		 = jiffies + msecs_to_jiffies(5000);
431220bb38cSKent Overstreet 	task->sequential_io	 = i->sequential;
43284f0db03SKent Overstreet 
43384f0db03SKent Overstreet 	hlist_del(&i->hash);
43484f0db03SKent Overstreet 	hlist_add_head(&i->hash, iohash(dc, i->last));
43584f0db03SKent Overstreet 	list_move_tail(&i->lru, &dc->io_lru);
43684f0db03SKent Overstreet 
43784f0db03SKent Overstreet 	spin_unlock(&dc->io_lock);
43884f0db03SKent Overstreet 
439220bb38cSKent Overstreet 	sectors = max(task->sequential_io,
440220bb38cSKent Overstreet 		      task->sequential_io_avg) >> 9;
44184f0db03SKent Overstreet 
44284f0db03SKent Overstreet 	if (dc->sequential_cutoff &&
44384f0db03SKent Overstreet 	    sectors >= dc->sequential_cutoff >> 9) {
444220bb38cSKent Overstreet 		trace_bcache_bypass_sequential(bio);
44584f0db03SKent Overstreet 		goto skip;
44684f0db03SKent Overstreet 	}
44784f0db03SKent Overstreet 
44884f0db03SKent Overstreet 	if (congested && sectors >= congested) {
449220bb38cSKent Overstreet 		trace_bcache_bypass_congested(bio);
45084f0db03SKent Overstreet 		goto skip;
45184f0db03SKent Overstreet 	}
45284f0db03SKent Overstreet 
45384f0db03SKent Overstreet rescale:
45484f0db03SKent Overstreet 	bch_rescale_priorities(c, bio_sectors(bio));
45584f0db03SKent Overstreet 	return false;
45684f0db03SKent Overstreet skip:
457220bb38cSKent Overstreet 	bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
45884f0db03SKent Overstreet 	return true;
45984f0db03SKent Overstreet }
46084f0db03SKent Overstreet 
461220bb38cSKent Overstreet /* Cache lookup */
462220bb38cSKent Overstreet 
463220bb38cSKent Overstreet struct search {
464220bb38cSKent Overstreet 	/* Stack frame for bio_complete */
465220bb38cSKent Overstreet 	struct closure		cl;
466220bb38cSKent Overstreet 
467220bb38cSKent Overstreet 	struct bbio		bio;
468220bb38cSKent Overstreet 	struct bio		*orig_bio;
469220bb38cSKent Overstreet 	struct bio		*cache_miss;
470a5ae4300SKent Overstreet 	struct bcache_device	*d;
471220bb38cSKent Overstreet 
4726f10f7d1SColy Li 	unsigned int		insert_bio_sectors;
4736f10f7d1SColy Li 	unsigned int		recoverable:1;
4746f10f7d1SColy Li 	unsigned int		write:1;
4756f10f7d1SColy Li 	unsigned int		read_dirty_data:1;
4766f10f7d1SColy Li 	unsigned int		cache_missed:1;
477220bb38cSKent Overstreet 
47899dfc43eSChristoph Hellwig 	struct block_device	*orig_bdev;
479220bb38cSKent Overstreet 	unsigned long		start_time;
480220bb38cSKent Overstreet 
481220bb38cSKent Overstreet 	struct btree_op		op;
482220bb38cSKent Overstreet 	struct data_insert_op	iop;
483220bb38cSKent Overstreet };
484220bb38cSKent Overstreet 
4854246a0b6SChristoph Hellwig static void bch_cache_read_endio(struct bio *bio)
486220bb38cSKent Overstreet {
487220bb38cSKent Overstreet 	struct bbio *b = container_of(bio, struct bbio, bio);
488220bb38cSKent Overstreet 	struct closure *cl = bio->bi_private;
489220bb38cSKent Overstreet 	struct search *s = container_of(cl, struct search, cl);
490220bb38cSKent Overstreet 
491220bb38cSKent Overstreet 	/*
492220bb38cSKent Overstreet 	 * If the bucket was reused while our bio was in flight, we might have
493220bb38cSKent Overstreet 	 * read the wrong data. Set s->iop.status but not bio->bi_status, so the
494220bb38cSKent Overstreet 	 * error isn't counted against the cache device, but we'll still reread the data
495220bb38cSKent Overstreet 	 * from the backing device.
496220bb38cSKent Overstreet 	 */
497220bb38cSKent Overstreet 
4984e4cbee9SChristoph Hellwig 	if (bio->bi_status)
4994e4cbee9SChristoph Hellwig 		s->iop.status = bio->bi_status;
500d56d000aSKent Overstreet 	else if (!KEY_DIRTY(&b->key) &&
501d56d000aSKent Overstreet 		 ptr_stale(s->iop.c, &b->key, 0)) {
502220bb38cSKent Overstreet 		atomic_long_inc(&s->iop.c->cache_read_races);
5034e4cbee9SChristoph Hellwig 		s->iop.status = BLK_STS_IOERR;
504220bb38cSKent Overstreet 	}
505220bb38cSKent Overstreet 
5064e4cbee9SChristoph Hellwig 	bch_bbio_endio(s->iop.c, bio, bio->bi_status, "reading from cache");
507220bb38cSKent Overstreet }
508220bb38cSKent Overstreet 
509220bb38cSKent Overstreet /*
510220bb38cSKent Overstreet  * Read from a single key, handling the initial cache miss if the key starts in
511220bb38cSKent Overstreet  * the middle of the bio
512220bb38cSKent Overstreet  */
513220bb38cSKent Overstreet static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
514220bb38cSKent Overstreet {
515220bb38cSKent Overstreet 	struct search *s = container_of(op, struct search, op);
516220bb38cSKent Overstreet 	struct bio *n, *bio = &s->bio.bio;
517220bb38cSKent Overstreet 	struct bkey *bio_key;
5186f10f7d1SColy Li 	unsigned int ptr;
519220bb38cSKent Overstreet 
5204f024f37SKent Overstreet 	if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
521220bb38cSKent Overstreet 		return MAP_CONTINUE;
522220bb38cSKent Overstreet 
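	/*
	 * The key is for another inode, or starts beyond the current sector:
	 * the gap up to KEY_START(k) is a cache miss, read from the backing
	 * device via cache_miss().
	 */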
523220bb38cSKent Overstreet 	if (KEY_INODE(k) != s->iop.inode ||
5244f024f37SKent Overstreet 	    KEY_START(k) > bio->bi_iter.bi_sector) {
5256f10f7d1SColy Li 		unsigned int bio_sectors = bio_sectors(bio);
5266f10f7d1SColy Li 		unsigned int sectors = KEY_INODE(k) == s->iop.inode
527220bb38cSKent Overstreet 			? min_t(uint64_t, INT_MAX,
5284f024f37SKent Overstreet 				KEY_START(k) - bio->bi_iter.bi_sector)
529220bb38cSKent Overstreet 			: INT_MAX;
530220bb38cSKent Overstreet 		int ret = s->d->cache_miss(b, s, bio, sectors);
5311fae7cf0SColy Li 
532220bb38cSKent Overstreet 		if (ret != MAP_CONTINUE)
533220bb38cSKent Overstreet 			return ret;
534220bb38cSKent Overstreet 
535220bb38cSKent Overstreet 		/* if this was a complete miss we shouldn't get here */
536220bb38cSKent Overstreet 		BUG_ON(bio_sectors <= sectors);
537220bb38cSKent Overstreet 	}
538220bb38cSKent Overstreet 
539220bb38cSKent Overstreet 	if (!KEY_SIZE(k))
540220bb38cSKent Overstreet 		return MAP_CONTINUE;
541220bb38cSKent Overstreet 
542220bb38cSKent Overstreet 	/* XXX: figure out best pointer - for multiple cache devices */
543220bb38cSKent Overstreet 	ptr = 0;
544220bb38cSKent Overstreet 
545220bb38cSKent Overstreet 	PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;
546220bb38cSKent Overstreet 
5475ceaaad7SKent Overstreet 	if (KEY_DIRTY(k))
5485ceaaad7SKent Overstreet 		s->read_dirty_data = true;
5495ceaaad7SKent Overstreet 
55020d0189bSKent Overstreet 	n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
5514f024f37SKent Overstreet 				      KEY_OFFSET(k) - bio->bi_iter.bi_sector),
552d19936a2SKent Overstreet 			   GFP_NOIO, &s->d->bio_split);
553220bb38cSKent Overstreet 
554220bb38cSKent Overstreet 	bio_key = &container_of(n, struct bbio, bio)->key;
555220bb38cSKent Overstreet 	bch_bkey_copy_single_ptr(bio_key, k, ptr);
556220bb38cSKent Overstreet 
5574f024f37SKent Overstreet 	bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
558220bb38cSKent Overstreet 	bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);
559220bb38cSKent Overstreet 
560220bb38cSKent Overstreet 	n->bi_end_io	= bch_cache_read_endio;
561220bb38cSKent Overstreet 	n->bi_private	= &s->cl;
562220bb38cSKent Overstreet 
563220bb38cSKent Overstreet 	/*
564220bb38cSKent Overstreet 	 * The bucket we're reading from might be reused while our bio
565220bb38cSKent Overstreet 	 * is in flight, and we could then end up reading the wrong
566220bb38cSKent Overstreet 	 * data.
567220bb38cSKent Overstreet 	 *
568220bb38cSKent Overstreet 	 * We guard against this by checking (in cache_read_endio()) if
569220bb38cSKent Overstreet 	 * the pointer is stale again; if so, we treat it as an error
570220bb38cSKent Overstreet 	 * and reread from the backing device (but we don't pass that
571220bb38cSKent Overstreet 	 * error up anywhere).
572220bb38cSKent Overstreet 	 */
573220bb38cSKent Overstreet 
574220bb38cSKent Overstreet 	__bch_submit_bbio(n, b->c);
575220bb38cSKent Overstreet 	return n == bio ? MAP_DONE : MAP_CONTINUE;
576220bb38cSKent Overstreet }
577220bb38cSKent Overstreet 
578220bb38cSKent Overstreet static void cache_lookup(struct closure *cl)
579220bb38cSKent Overstreet {
580220bb38cSKent Overstreet 	struct search *s = container_of(cl, struct search, iop.cl);
581220bb38cSKent Overstreet 	struct bio *bio = &s->bio.bio;
582b221fc13SRui Hua 	struct cached_dev *dc;
583a5ae4300SKent Overstreet 	int ret;
584220bb38cSKent Overstreet 
585a5ae4300SKent Overstreet 	bch_btree_op_init(&s->op, -1);
586a5ae4300SKent Overstreet 
587a5ae4300SKent Overstreet 	ret = bch_btree_map_keys(&s->op, s->iop.c,
5884f024f37SKent Overstreet 				 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
589220bb38cSKent Overstreet 				 cache_lookup_fn, MAP_END_KEY);
59077b5a084SJens Axboe 	if (ret == -EAGAIN) {
591220bb38cSKent Overstreet 		continue_at(cl, cache_lookup, bcache_wq);
59277b5a084SJens Axboe 		return;
59377b5a084SJens Axboe 	}
594220bb38cSKent Overstreet 
595b221fc13SRui Hua 	/*
596b221fc13SRui Hua 	 * We might hit an error when searching the btree; if that happens we
597b221fc13SRui Hua 	 * get a negative ret. In this scenario we should not recover data from
598b221fc13SRui Hua 	 * the backing device (when the cache device is dirty) because we don't
599b221fc13SRui Hua 	 * know whether the bkeys the read request covered are all clean.
600b221fc13SRui Hua 	 *
601b221fc13SRui Hua 	 * And after that happens, s->iop.status still has its initial value
602b221fc13SRui Hua 	 * from before we submitted s->bio.bio
603b221fc13SRui Hua 	 */
604b221fc13SRui Hua 	if (ret < 0) {
605b221fc13SRui Hua 		BUG_ON(ret == -EINTR);
606b221fc13SRui Hua 		if (s->d && s->d->c &&
607b221fc13SRui Hua 				!UUID_FLASH_ONLY(&s->d->c->uuids[s->d->id])) {
608b221fc13SRui Hua 			dc = container_of(s->d, struct cached_dev, disk);
609b221fc13SRui Hua 			if (dc && atomic_read(&dc->has_dirty))
610b221fc13SRui Hua 				s->recoverable = false;
611b221fc13SRui Hua 		}
612b221fc13SRui Hua 		if (!s->iop.status)
613b221fc13SRui Hua 			s->iop.status = BLK_STS_IOERR;
614b221fc13SRui Hua 	}
615b221fc13SRui Hua 
616220bb38cSKent Overstreet 	closure_return(cl);
617220bb38cSKent Overstreet }
618220bb38cSKent Overstreet 
619220bb38cSKent Overstreet /* Common code for the make_request functions */
620220bb38cSKent Overstreet 
6214246a0b6SChristoph Hellwig static void request_endio(struct bio *bio)
622220bb38cSKent Overstreet {
623220bb38cSKent Overstreet 	struct closure *cl = bio->bi_private;
624220bb38cSKent Overstreet 
6254e4cbee9SChristoph Hellwig 	if (bio->bi_status) {
626220bb38cSKent Overstreet 		struct search *s = container_of(cl, struct search, cl);
6271fae7cf0SColy Li 
6284e4cbee9SChristoph Hellwig 		s->iop.status = bio->bi_status;
629220bb38cSKent Overstreet 		/* Only cache read errors are recoverable */
630220bb38cSKent Overstreet 		s->recoverable = false;
631220bb38cSKent Overstreet 	}
632220bb38cSKent Overstreet 
633220bb38cSKent Overstreet 	bio_put(bio);
634220bb38cSKent Overstreet 	closure_put(cl);
635220bb38cSKent Overstreet }
636220bb38cSKent Overstreet 
63727a40ab9SColy Li static void backing_request_endio(struct bio *bio)
63827a40ab9SColy Li {
63927a40ab9SColy Li 	struct closure *cl = bio->bi_private;
64027a40ab9SColy Li 
64127a40ab9SColy Li 	if (bio->bi_status) {
64227a40ab9SColy Li 		struct search *s = container_of(cl, struct search, cl);
643c7b7bd07SColy Li 		struct cached_dev *dc = container_of(s->d,
644c7b7bd07SColy Li 						     struct cached_dev, disk);
64527a40ab9SColy Li 		/*
64627a40ab9SColy Li 		 * If a bio has REQ_PREFLUSH for writeback mode, it is
64727a40ab9SColy Li 		 * specially assembled in cached_dev_write() for a non-zero
64827a40ab9SColy Li 		 * write request which has REQ_PREFLUSH. We don't set
64927a40ab9SColy Li 		 * s->iop.status on this failure; the status will be decided
65027a40ab9SColy Li 		 * by the result of the bch_data_insert() operation.
65127a40ab9SColy Li 		 */
65227a40ab9SColy Li 		if (unlikely(s->iop.writeback &&
65327a40ab9SColy Li 			     bio->bi_opf & REQ_PREFLUSH)) {
6540f5cd781SChristoph Hellwig 			pr_err("Can't flush %pg: returned bi_status %i\n",
6550f5cd781SChristoph Hellwig 				dc->bdev, bio->bi_status);
65627a40ab9SColy Li 		} else {
65727a40ab9SColy Li 			/* set to orig_bio->bi_status in bio_complete() */
65827a40ab9SColy Li 			s->iop.status = bio->bi_status;
65927a40ab9SColy Li 		}
66027a40ab9SColy Li 		s->recoverable = false;
66127a40ab9SColy Li 		/* should count I/O error for backing device here */
662c7b7bd07SColy Li 		bch_count_backing_io_errors(dc, bio);
66327a40ab9SColy Li 	}
66427a40ab9SColy Li 
66527a40ab9SColy Li 	bio_put(bio);
66627a40ab9SColy Li 	closure_put(cl);
66727a40ab9SColy Li }
66827a40ab9SColy Li 
669220bb38cSKent Overstreet static void bio_complete(struct search *s)
670220bb38cSKent Overstreet {
671220bb38cSKent Overstreet 	if (s->orig_bio) {
672c5be1f2cSColy Li 		/* Count on bcache device */
67399dfc43eSChristoph Hellwig 		bio_end_io_acct_remapped(s->orig_bio, s->start_time,
67499dfc43eSChristoph Hellwig 					 s->orig_bdev);
675220bb38cSKent Overstreet 		trace_bcache_request_end(s->d, s->orig_bio);
6764e4cbee9SChristoph Hellwig 		s->orig_bio->bi_status = s->iop.status;
6774246a0b6SChristoph Hellwig 		bio_endio(s->orig_bio);
678220bb38cSKent Overstreet 		s->orig_bio = NULL;
679220bb38cSKent Overstreet 	}
680220bb38cSKent Overstreet }
681220bb38cSKent Overstreet 
68227a40ab9SColy Li static void do_bio_hook(struct search *s,
68327a40ab9SColy Li 			struct bio *orig_bio,
68427a40ab9SColy Li 			bio_end_io_t *end_io_fn)
685220bb38cSKent Overstreet {
686220bb38cSKent Overstreet 	struct bio *bio = &s->bio.bio;
687220bb38cSKent Overstreet 
688*9dca4168SColy Li 	bio_init_clone(orig_bio->bi_bdev, bio, orig_bio, GFP_NOIO);
68927a40ab9SColy Li 	/*
69027a40ab9SColy Li 	 * bi_end_io can be set separately somewhere else, e.g. the
69127a40ab9SColy Li 	 * variants in,
69227a40ab9SColy Li 	 * - cache_bio->bi_end_io from cached_dev_cache_miss()
69327a40ab9SColy Li 	 * - n->bi_end_io from cache_lookup_fn()
69427a40ab9SColy Li 	 */
69527a40ab9SColy Li 	bio->bi_end_io		= end_io_fn;
696220bb38cSKent Overstreet 	bio->bi_private		= &s->cl;
697ed9c47beSKent Overstreet 
698dac56212SJens Axboe 	bio_cnt_set(bio, 3);
699220bb38cSKent Overstreet }
700220bb38cSKent Overstreet 
701220bb38cSKent Overstreet static void search_free(struct closure *cl)
702220bb38cSKent Overstreet {
703220bb38cSKent Overstreet 	struct search *s = container_of(cl, struct search, cl);
704220bb38cSKent Overstreet 
7051568ee7eSGuoju Fang 	atomic_dec(&s->iop.c->search_inflight);
7065c25c4fcSTang Junhui 
707220bb38cSKent Overstreet 	if (s->iop.bio)
708220bb38cSKent Overstreet 		bio_put(s->iop.bio);
709220bb38cSKent Overstreet 
71060eb34ecSTang Junhui 	bio_complete(s);
711220bb38cSKent Overstreet 	closure_debug_destroy(cl);
7121568ee7eSGuoju Fang 	mempool_free(s, &s->iop.c->search);
713220bb38cSKent Overstreet }
714220bb38cSKent Overstreet 
715a5ae4300SKent Overstreet static inline struct search *search_alloc(struct bio *bio,
71699dfc43eSChristoph Hellwig 		struct bcache_device *d, struct block_device *orig_bdev,
71799dfc43eSChristoph Hellwig 		unsigned long start_time)
718220bb38cSKent Overstreet {
719220bb38cSKent Overstreet 	struct search *s;
720220bb38cSKent Overstreet 
721d19936a2SKent Overstreet 	s = mempool_alloc(&d->c->search, GFP_NOIO);
722220bb38cSKent Overstreet 
723a5ae4300SKent Overstreet 	closure_init(&s->cl, NULL);
72427a40ab9SColy Li 	do_bio_hook(s, bio, request_endio);
7255c25c4fcSTang Junhui 	atomic_inc(&d->c->search_inflight);
726220bb38cSKent Overstreet 
727220bb38cSKent Overstreet 	s->orig_bio		= bio;
728a5ae4300SKent Overstreet 	s->cache_miss		= NULL;
729c1573137Stang.junhui 	s->cache_missed		= 0;
730a5ae4300SKent Overstreet 	s->d			= d;
731220bb38cSKent Overstreet 	s->recoverable		= 1;
732c8d93247SMike Christie 	s->write		= op_is_write(bio_op(bio));
733a5ae4300SKent Overstreet 	s->read_dirty_data	= 0;
734c5be1f2cSColy Li 	/* Count on the bcache device */
73599dfc43eSChristoph Hellwig 	s->orig_bdev		= orig_bdev;
73699dfc43eSChristoph Hellwig 	s->start_time		= start_time;
737a5ae4300SKent Overstreet 	s->iop.c		= d->c;
738a5ae4300SKent Overstreet 	s->iop.bio		= NULL;
739a5ae4300SKent Overstreet 	s->iop.inode		= d->id;
740a5ae4300SKent Overstreet 	s->iop.write_point	= hash_long((unsigned long) current, 16);
741a5ae4300SKent Overstreet 	s->iop.write_prio	= 0;
7424e4cbee9SChristoph Hellwig 	s->iop.status		= 0;
743a5ae4300SKent Overstreet 	s->iop.flags		= 0;
744f73f44ebSChristoph Hellwig 	s->iop.flush_journal	= op_is_flush(bio->bi_opf);
745da415a09SNicholas Swenson 	s->iop.wq		= bcache_wq;
746220bb38cSKent Overstreet 
747220bb38cSKent Overstreet 	return s;
748220bb38cSKent Overstreet }
749220bb38cSKent Overstreet 
750220bb38cSKent Overstreet /* Cached devices */
751220bb38cSKent Overstreet 
752220bb38cSKent Overstreet static void cached_dev_bio_complete(struct closure *cl)
753220bb38cSKent Overstreet {
754220bb38cSKent Overstreet 	struct search *s = container_of(cl, struct search, cl);
755220bb38cSKent Overstreet 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
756220bb38cSKent Overstreet 
757220bb38cSKent Overstreet 	cached_dev_put(dc);
7581568ee7eSGuoju Fang 	search_free(cl);
759220bb38cSKent Overstreet }
760220bb38cSKent Overstreet 
761cafe5635SKent Overstreet /* Process reads */
762cafe5635SKent Overstreet 
7631568ee7eSGuoju Fang static void cached_dev_read_error_done(struct closure *cl)
764cafe5635SKent Overstreet {
765cafe5635SKent Overstreet 	struct search *s = container_of(cl, struct search, cl);
766cafe5635SKent Overstreet 
767220bb38cSKent Overstreet 	if (s->iop.replace_collision)
768220bb38cSKent Overstreet 		bch_mark_cache_miss_collision(s->iop.c, s->d);
769cafe5635SKent Overstreet 
770491221f8SGuoqing Jiang 	if (s->iop.bio)
771491221f8SGuoqing Jiang 		bio_free_pages(s->iop.bio);
772cafe5635SKent Overstreet 
773cafe5635SKent Overstreet 	cached_dev_bio_complete(cl);
774cafe5635SKent Overstreet }
775cafe5635SKent Overstreet 
776cdd972b1SKent Overstreet static void cached_dev_read_error(struct closure *cl)
777cafe5635SKent Overstreet {
778cafe5635SKent Overstreet 	struct search *s = container_of(cl, struct search, cl);
779cdd972b1SKent Overstreet 	struct bio *bio = &s->bio.bio;
780cafe5635SKent Overstreet 
781d59b2379SColy Li 	/*
782e393aa24SRui Hua 	 * If the read request hit dirty data (s->read_dirty_data is true),
783e393aa24SRui Hua 	 * then recovering a failed read request from the cached device may
784e393aa24SRui Hua 	 * return stale data. So read failure recovery is only permitted when
785e393aa24SRui Hua 	 * the read request hit clean data in the cache device, or when a
786e393aa24SRui Hua 	 * cache read race happened.
787d59b2379SColy Li 	 */
788e393aa24SRui Hua 	if (s->recoverable && !s->read_dirty_data) {
789c37511b8SKent Overstreet 		/* Retry from the backing device: */
790c37511b8SKent Overstreet 		trace_bcache_read_retry(s->orig_bio);
791cafe5635SKent Overstreet 
7924e4cbee9SChristoph Hellwig 		s->iop.status = 0;
79327a40ab9SColy Li 		do_bio_hook(s, s->orig_bio, backing_request_endio);
794cafe5635SKent Overstreet 
795cafe5635SKent Overstreet 		/* XXX: invalidate cache */
796cafe5635SKent Overstreet 
79727a40ab9SColy Li 		/* I/O request sent to backing device */
798771f393eSColy Li 		closure_bio_submit(s->iop.c, bio, cl);
799cafe5635SKent Overstreet 	}
800cafe5635SKent Overstreet 
8011568ee7eSGuoju Fang 	continue_at(cl, cached_dev_read_error_done, NULL);
8021568ee7eSGuoju Fang }
8031568ee7eSGuoju Fang 
8041568ee7eSGuoju Fang static void cached_dev_cache_miss_done(struct closure *cl)
8051568ee7eSGuoju Fang {
8061568ee7eSGuoju Fang 	struct search *s = container_of(cl, struct search, cl);
8071568ee7eSGuoju Fang 	struct bcache_device *d = s->d;
8081568ee7eSGuoju Fang 
8091568ee7eSGuoju Fang 	if (s->iop.replace_collision)
8101568ee7eSGuoju Fang 		bch_mark_cache_miss_collision(s->iop.c, s->d);
8111568ee7eSGuoju Fang 
8121568ee7eSGuoju Fang 	if (s->iop.bio)
8131568ee7eSGuoju Fang 		bio_free_pages(s->iop.bio);
8141568ee7eSGuoju Fang 
8151568ee7eSGuoju Fang 	cached_dev_bio_complete(cl);
8161568ee7eSGuoju Fang 	closure_put(&d->cl);
817cafe5635SKent Overstreet }
818cafe5635SKent Overstreet 
819cdd972b1SKent Overstreet static void cached_dev_read_done(struct closure *cl)
820cafe5635SKent Overstreet {
821cafe5635SKent Overstreet 	struct search *s = container_of(cl, struct search, cl);
822cafe5635SKent Overstreet 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
823cafe5635SKent Overstreet 
824cafe5635SKent Overstreet 	/*
825cdd972b1SKent Overstreet 	 * We had a cache miss; cache_bio now contains data ready to be inserted
826cdd972b1SKent Overstreet 	 * into the cache.
827cafe5635SKent Overstreet 	 *
828cafe5635SKent Overstreet 	 * First, we copy the data we just read from cache_bio's bounce buffers
829cafe5635SKent Overstreet 	 * to the buffers the original bio pointed to:
830cafe5635SKent Overstreet 	 */
831cafe5635SKent Overstreet 
832220bb38cSKent Overstreet 	if (s->iop.bio) {
833a7c50c94SChristoph Hellwig 		bio_reset(s->iop.bio, s->cache_miss->bi_bdev, REQ_OP_READ);
834b0d30981SColy Li 		s->iop.bio->bi_iter.bi_sector =
835b0d30981SColy Li 			s->cache_miss->bi_iter.bi_sector;
8364f024f37SKent Overstreet 		s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
837a7c50c94SChristoph Hellwig 		bio_clone_blkg_association(s->iop.bio, s->cache_miss);
838220bb38cSKent Overstreet 		bch_bio_map(s->iop.bio, NULL);
839cafe5635SKent Overstreet 
840220bb38cSKent Overstreet 		bio_copy_data(s->cache_miss, s->iop.bio);
841cafe5635SKent Overstreet 
842cafe5635SKent Overstreet 		bio_put(s->cache_miss);
843cafe5635SKent Overstreet 		s->cache_miss = NULL;
844cafe5635SKent Overstreet 	}
845cafe5635SKent Overstreet 
84623850102SYijing Wang 	if (verify(dc) && s->recoverable && !s->read_dirty_data)
847220bb38cSKent Overstreet 		bch_data_verify(dc, s->orig_bio);
848cafe5635SKent Overstreet 
8491568ee7eSGuoju Fang 	closure_get(&dc->disk.cl);
850cafe5635SKent Overstreet 	bio_complete(s);
851cafe5635SKent Overstreet 
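	/*
	 * If we read from the backing device, insert the data into the cache;
	 * iop.replace makes the insert a no-op if a write hit this range while
	 * the miss was in flight.
	 */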
852220bb38cSKent Overstreet 	if (s->iop.bio &&
853220bb38cSKent Overstreet 	    !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) {
854220bb38cSKent Overstreet 		BUG_ON(!s->iop.replace);
855220bb38cSKent Overstreet 		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
856cafe5635SKent Overstreet 	}
857cafe5635SKent Overstreet 
858cdd972b1SKent Overstreet 	continue_at(cl, cached_dev_cache_miss_done, NULL);
859cafe5635SKent Overstreet }
860cafe5635SKent Overstreet 
861cdd972b1SKent Overstreet static void cached_dev_read_done_bh(struct closure *cl)
862cafe5635SKent Overstreet {
863cafe5635SKent Overstreet 	struct search *s = container_of(cl, struct search, cl);
864cafe5635SKent Overstreet 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
865cafe5635SKent Overstreet 
866220bb38cSKent Overstreet 	bch_mark_cache_accounting(s->iop.c, s->d,
867c1573137Stang.junhui 				  !s->cache_missed, s->iop.bypass);
868502b2915STang Junhui 	trace_bcache_read(s->orig_bio, !s->cache_missed, s->iop.bypass);
869cafe5635SKent Overstreet 
8704e4cbee9SChristoph Hellwig 	if (s->iop.status)
871cdd972b1SKent Overstreet 		continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
87223850102SYijing Wang 	else if (s->iop.bio || verify(dc))
873cdd972b1SKent Overstreet 		continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
874cafe5635SKent Overstreet 	else
875cdd972b1SKent Overstreet 		continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
876cafe5635SKent Overstreet }
877cafe5635SKent Overstreet 
878cafe5635SKent Overstreet static int cached_dev_cache_miss(struct btree *b, struct search *s,
8796f10f7d1SColy Li 				 struct bio *bio, unsigned int sectors)
880cafe5635SKent Overstreet {
8812c1953e2SKent Overstreet 	int ret = MAP_CONTINUE;
882cafe5635SKent Overstreet 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
883cdd972b1SKent Overstreet 	struct bio *miss, *cache_bio;
88441fe8d08SColy Li 	unsigned int size_limit;
885cafe5635SKent Overstreet 
886c1573137Stang.junhui 	s->cache_missed = 1;
887c1573137Stang.junhui 
888220bb38cSKent Overstreet 	if (s->cache_miss || s->iop.bypass) {
889d19936a2SKent Overstreet 		miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);
8902c1953e2SKent Overstreet 		ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
891e7c590ebSKent Overstreet 		goto out_submit;
892e7c590ebSKent Overstreet 	}
893e7c590ebSKent Overstreet 
89441fe8d08SColy Li 	/* Cap by the max valid replace key size and cache_bio bvec count */
89541fe8d08SColy Li 	size_limit = min_t(unsigned int, BIO_MAX_VECS * PAGE_SECTORS,
89641fe8d08SColy Li 			   (1 << KEY_SIZE_BITS) - 1);
89741fe8d08SColy Li 	s->insert_bio_sectors = min3(size_limit, sectors, bio_sectors(bio));
898e7c590ebSKent Overstreet 
899220bb38cSKent Overstreet 	s->iop.replace_key = KEY(s->iop.inode,
9004f024f37SKent Overstreet 				 bio->bi_iter.bi_sector + s->insert_bio_sectors,
901220bb38cSKent Overstreet 				 s->insert_bio_sectors);
902e7c590ebSKent Overstreet 
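	/*
	 * Reserve the range with a check key before reading, so that if a
	 * foreground write lands here first the replace insert later fails
	 * instead of clobbering newer data.
	 */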
903220bb38cSKent Overstreet 	ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
904e7c590ebSKent Overstreet 	if (ret)
905e7c590ebSKent Overstreet 		return ret;
906e7c590ebSKent Overstreet 
907220bb38cSKent Overstreet 	s->iop.replace = true;
9081b207d80SKent Overstreet 
90941fe8d08SColy Li 	miss = bio_next_split(bio, s->insert_bio_sectors, GFP_NOIO,
91041fe8d08SColy Li 			      &s->d->bio_split);
9112c1953e2SKent Overstreet 
912e7c590ebSKent Overstreet 	/* btree_search_recurse()'s btree iterator is no good anymore */
9132c1953e2SKent Overstreet 	ret = miss == bio ? MAP_DONE : -EINTR;
914cafe5635SKent Overstreet 
915609be106SChristoph Hellwig 	cache_bio = bio_alloc_bioset(miss->bi_bdev,
916220bb38cSKent Overstreet 			DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
917609be106SChristoph Hellwig 			0, GFP_NOWAIT, &dc->disk.bio_split);
918cdd972b1SKent Overstreet 	if (!cache_bio)
919cafe5635SKent Overstreet 		goto out_submit;
920cafe5635SKent Overstreet 
9214f024f37SKent Overstreet 	cache_bio->bi_iter.bi_sector	= miss->bi_iter.bi_sector;
9224f024f37SKent Overstreet 	cache_bio->bi_iter.bi_size	= s->insert_bio_sectors << 9;
923cafe5635SKent Overstreet 
92427a40ab9SColy Li 	cache_bio->bi_end_io	= backing_request_endio;
925cdd972b1SKent Overstreet 	cache_bio->bi_private	= &s->cl;
926cafe5635SKent Overstreet 
927cdd972b1SKent Overstreet 	bch_bio_map(cache_bio, NULL);
92825d8be77SMing Lei 	if (bch_bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
929cafe5635SKent Overstreet 		goto out_put;
930cafe5635SKent Overstreet 
931cafe5635SKent Overstreet 	s->cache_miss	= miss;
932220bb38cSKent Overstreet 	s->iop.bio	= cache_bio;
933cdd972b1SKent Overstreet 	bio_get(cache_bio);
93427a40ab9SColy Li 	/* I/O request sent to backing device */
935771f393eSColy Li 	closure_bio_submit(s->iop.c, cache_bio, &s->cl);
936cafe5635SKent Overstreet 
937cafe5635SKent Overstreet 	return ret;
938cafe5635SKent Overstreet out_put:
939cdd972b1SKent Overstreet 	bio_put(cache_bio);
940cafe5635SKent Overstreet out_submit:
94127a40ab9SColy Li 	miss->bi_end_io		= backing_request_endio;
942e7c590ebSKent Overstreet 	miss->bi_private	= &s->cl;
94327a40ab9SColy Li 	/* I/O request sent to backing device */
944771f393eSColy Li 	closure_bio_submit(s->iop.c, miss, &s->cl);
945cafe5635SKent Overstreet 	return ret;
946cafe5635SKent Overstreet }
947cafe5635SKent Overstreet 
948cdd972b1SKent Overstreet static void cached_dev_read(struct cached_dev *dc, struct search *s)
949cafe5635SKent Overstreet {
950cafe5635SKent Overstreet 	struct closure *cl = &s->cl;
951cafe5635SKent Overstreet 
952220bb38cSKent Overstreet 	closure_call(&s->iop.cl, cache_lookup, NULL, cl);
953cdd972b1SKent Overstreet 	continue_at(cl, cached_dev_read_done_bh, NULL);
954cafe5635SKent Overstreet }
955cafe5635SKent Overstreet 
956cafe5635SKent Overstreet /* Process writes */
957cafe5635SKent Overstreet 
958cafe5635SKent Overstreet static void cached_dev_write_complete(struct closure *cl)
959cafe5635SKent Overstreet {
960cafe5635SKent Overstreet 	struct search *s = container_of(cl, struct search, cl);
961cafe5635SKent Overstreet 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
962cafe5635SKent Overstreet 
963cafe5635SKent Overstreet 	up_read_non_owner(&dc->writeback_lock);
964cafe5635SKent Overstreet 	cached_dev_bio_complete(cl);
965cafe5635SKent Overstreet }
966cafe5635SKent Overstreet 
967cdd972b1SKent Overstreet static void cached_dev_write(struct cached_dev *dc, struct search *s)
968cafe5635SKent Overstreet {
969cafe5635SKent Overstreet 	struct closure *cl = &s->cl;
970cafe5635SKent Overstreet 	struct bio *bio = &s->bio.bio;
9714f024f37SKent Overstreet 	struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
97284f0db03SKent Overstreet 	struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);
973cafe5635SKent Overstreet 
974220bb38cSKent Overstreet 	bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);
975cafe5635SKent Overstreet 
976cafe5635SKent Overstreet 	down_read_non_owner(&dc->writeback_lock);
977cafe5635SKent Overstreet 	if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
97884f0db03SKent Overstreet 		/*
97984f0db03SKent Overstreet 		 * We overlap with some dirty data undergoing background
98084f0db03SKent Overstreet 		 * writeback, force this write to writeback
98184f0db03SKent Overstreet 		 */
982220bb38cSKent Overstreet 		s->iop.bypass = false;
983220bb38cSKent Overstreet 		s->iop.writeback = true;
984cafe5635SKent Overstreet 	}
985cafe5635SKent Overstreet 
98684f0db03SKent Overstreet 	/*
98784f0db03SKent Overstreet 	 * Discards aren't _required_ to do anything, so skipping if
98884f0db03SKent Overstreet 	 * check_overlapping returned true is ok
98984f0db03SKent Overstreet 	 *
99084f0db03SKent Overstreet 	 * But check_overlapping drops dirty keys for which io hasn't started,
99184f0db03SKent Overstreet 	 * so we still want to call it.
99284f0db03SKent Overstreet 	 */
993ad0d9e76SMike Christie 	if (bio_op(bio) == REQ_OP_DISCARD)
994220bb38cSKent Overstreet 		s->iop.bypass = true;
995cafe5635SKent Overstreet 
99672c27061SKent Overstreet 	if (should_writeback(dc, s->orig_bio,
99723850102SYijing Wang 			     cache_mode(dc),
998220bb38cSKent Overstreet 			     s->iop.bypass)) {
999220bb38cSKent Overstreet 		s->iop.bypass = false;
1000220bb38cSKent Overstreet 		s->iop.writeback = true;
100172c27061SKent Overstreet 	}
100272c27061SKent Overstreet 
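	/*
	 * Three ways forward: bypass (the write goes straight to the backing
	 * device and the cached range is invalidated), writeback (the write
	 * goes to the cache only), or writethrough (the bio is cloned so the
	 * data reaches both the cache and the backing device).
	 */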
1003220bb38cSKent Overstreet 	if (s->iop.bypass) {
1004220bb38cSKent Overstreet 		s->iop.bio = s->orig_bio;
1005220bb38cSKent Overstreet 		bio_get(s->iop.bio);
1006c37511b8SKent Overstreet 
100727a40ab9SColy Li 		if (bio_op(bio) == REQ_OP_DISCARD &&
100827a40ab9SColy Li 		    !blk_queue_discard(bdev_get_queue(dc->bdev)))
100927a40ab9SColy Li 			goto insert_data;
101027a40ab9SColy Li 
101127a40ab9SColy Li 		/* I/O request sent to backing device */
101227a40ab9SColy Li 		bio->bi_end_io = backing_request_endio;
1013771f393eSColy Li 		closure_bio_submit(s->iop.c, bio, cl);
101427a40ab9SColy Li 
1015220bb38cSKent Overstreet 	} else if (s->iop.writeback) {
1016279afbadSKent Overstreet 		bch_writeback_add(dc);
1017220bb38cSKent Overstreet 		s->iop.bio = bio;
1018e49c7c37SKent Overstreet 
10191eff9d32SJens Axboe 		if (bio->bi_opf & REQ_PREFLUSH) {
102027a40ab9SColy Li 			/*
102127a40ab9SColy Li 			 * Also need to send a flush to the backing
102227a40ab9SColy Li 			 * device.
102327a40ab9SColy Li 			 */
102427a40ab9SColy Li 			struct bio *flush;
1025e49c7c37SKent Overstreet 
1026609be106SChristoph Hellwig 			flush = bio_alloc_bioset(bio->bi_bdev, 0,
1027609be106SChristoph Hellwig 						 REQ_OP_WRITE | REQ_PREFLUSH,
1028609be106SChristoph Hellwig 						 GFP_NOIO, &dc->disk.bio_split);
102927a40ab9SColy Li 			if (!flush) {
103027a40ab9SColy Li 				s->iop.status = BLK_STS_RESOURCE;
103127a40ab9SColy Li 				goto insert_data;
103227a40ab9SColy Li 			}
103327a40ab9SColy Li 			flush->bi_end_io = backing_request_endio;
1034c0f04d88SKent Overstreet 			flush->bi_private = cl;
103527a40ab9SColy Li 			/* I/O request sent to backing device */
1036771f393eSColy Li 			closure_bio_submit(s->iop.c, flush, cl);
1037e49c7c37SKent Overstreet 		}
103884f0db03SKent Overstreet 	} else {
1039abfc426dSChristoph Hellwig 		s->iop.bio = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO,
1040abfc426dSChristoph Hellwig 					     &dc->disk.bio_split);
104127a40ab9SColy Li 		/* I/O request sent to backing device */
104227a40ab9SColy Li 		bio->bi_end_io = backing_request_endio;
1043771f393eSColy Li 		closure_bio_submit(s->iop.c, bio, cl);
104484f0db03SKent Overstreet 	}
104584f0db03SKent Overstreet 
104627a40ab9SColy Li insert_data:
1047220bb38cSKent Overstreet 	closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
104884f0db03SKent Overstreet 	continue_at(cl, cached_dev_write_complete, NULL);
1049cafe5635SKent Overstreet }
1050cafe5635SKent Overstreet 
1051a34a8bfdSKent Overstreet static void cached_dev_nodata(struct closure *cl)
1052cafe5635SKent Overstreet {
1053a34a8bfdSKent Overstreet 	struct search *s = container_of(cl, struct search, cl);
1054cafe5635SKent Overstreet 	struct bio *bio = &s->bio.bio;
1055cafe5635SKent Overstreet 
1056220bb38cSKent Overstreet 	if (s->iop.flush_journal)
1057220bb38cSKent Overstreet 		bch_journal_meta(s->iop.c, cl);
1058cafe5635SKent Overstreet 
105984f0db03SKent Overstreet 	/* If it's a flush, we send the flush to the backing device too */
106027a40ab9SColy Li 	bio->bi_end_io = backing_request_endio;
1061771f393eSColy Li 	closure_bio_submit(s->iop.c, bio, cl);
1062cafe5635SKent Overstreet 
1063cafe5635SKent Overstreet 	continue_at(cl, cached_dev_bio_complete, NULL);
1064cafe5635SKent Overstreet }
1065cafe5635SKent Overstreet 
1066bc082a55STang Junhui struct detached_dev_io_private {
1067bc082a55STang Junhui 	struct bcache_device	*d;
1068bc082a55STang Junhui 	unsigned long		start_time;
1069bc082a55STang Junhui 	bio_end_io_t		*bi_end_io;
1070bc082a55STang Junhui 	void			*bi_private;
107199dfc43eSChristoph Hellwig 	struct block_device	*orig_bdev;
1072bc082a55STang Junhui };
1073bc082a55STang Junhui 
1074bc082a55STang Junhui static void detached_dev_end_io(struct bio *bio)
1075bc082a55STang Junhui {
1076bc082a55STang Junhui 	struct detached_dev_io_private *ddip;
1077bc082a55STang Junhui 
1078bc082a55STang Junhui 	ddip = bio->bi_private;
1079bc082a55STang Junhui 	bio->bi_end_io = ddip->bi_end_io;
1080bc082a55STang Junhui 	bio->bi_private = ddip->bi_private;
1081bc082a55STang Junhui 
1082c5be1f2cSColy Li 	/* Account the I/O against the bcache device */
108399dfc43eSChristoph Hellwig 	bio_end_io_acct_remapped(bio, ddip->start_time, ddip->orig_bdev);
1084bc082a55STang Junhui 
1085c7b7bd07SColy Li 	if (bio->bi_status) {
1086c7b7bd07SColy Li 		struct cached_dev *dc = container_of(ddip->d,
1087c7b7bd07SColy Li 						     struct cached_dev, disk);
1088c7b7bd07SColy Li 		/* Count the I/O error against the backing device */
1089c7b7bd07SColy Li 		bch_count_backing_io_errors(dc, bio);
1090c7b7bd07SColy Li 	}
1091bc082a55STang Junhui 
1092c7b7bd07SColy Li 	kfree(ddip);
1093bc082a55STang Junhui 	bio->bi_end_io(bio);
1094bc082a55STang Junhui }
1095bc082a55STang Junhui 
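/*
 * Send a bio straight to the backing device, hijacking its completion
 * so detached_dev_end_io() can restore the original context and do the
 * accounting. Discards are completed immediately if the backing queue
 * does not support them.
 */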
109699dfc43eSChristoph Hellwig static void detached_dev_do_request(struct bcache_device *d, struct bio *bio,
109799dfc43eSChristoph Hellwig 		struct block_device *orig_bdev, unsigned long start_time)
1098bc082a55STang Junhui {
1099bc082a55STang Junhui 	struct detached_dev_io_private *ddip;
1100bc082a55STang Junhui 	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
1101bc082a55STang Junhui 
1102bc082a55STang Junhui 	/*
1103bc082a55STang Junhui 	 * No need to call closure_get(&dc->disk.cl) here, because the
1104bc082a55STang Junhui 	 * upper layer has already opened the bcache device, which
1105bc082a55STang Junhui 	 * called closure_get(&dc->disk.cl).
1106bc082a55STang Junhui 	 */
1107bc082a55STang Junhui 	ddip = kzalloc(sizeof(struct detached_dev_io_private), GFP_NOIO);
	if (!ddip) {
		/*
		 * kzalloc() can fail under memory pressure even with
		 * GFP_NOIO; fail the bio rather than dereference NULL.
		 */
		bio->bi_status = BLK_STS_RESOURCE;
		bio->bi_end_io(bio);
		return;
	}
1108bc082a55STang Junhui 	ddip->d = d;
1109c5be1f2cSColy Li 	/* Accounting is done against the bcache device */
111099dfc43eSChristoph Hellwig 	ddip->orig_bdev = orig_bdev;
111199dfc43eSChristoph Hellwig 	ddip->start_time = start_time;
1112bc082a55STang Junhui 	ddip->bi_end_io = bio->bi_end_io;
1113bc082a55STang Junhui 	ddip->bi_private = bio->bi_private;
1114bc082a55STang Junhui 	bio->bi_end_io = detached_dev_end_io;
1115bc082a55STang Junhui 	bio->bi_private = ddip;
1116bc082a55STang Junhui 
1117bc082a55STang Junhui 	if ((bio_op(bio) == REQ_OP_DISCARD) &&
1118bc082a55STang Junhui 	    !blk_queue_discard(bdev_get_queue(dc->bdev)))
1119bc082a55STang Junhui 		bio->bi_end_io(bio);
1120bc082a55STang Junhui 	else
1121ed00aabdSChristoph Hellwig 		submit_bio_noacct(bio);
1122bc082a55STang Junhui }
1123bc082a55STang Junhui 
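/*
 * Called from cached_dev_submit_bio() when new I/O arrives while the
 * cache set is writing back at maximum rate: drop every attached
 * cached device back to the minimum writeback rate so that front-end
 * request latency is not hurt by background writeback.
 */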
1124ea8c5356SColy Li static void quit_max_writeback_rate(struct cache_set *c,
1125ea8c5356SColy Li 				    struct cached_dev *this_dc)
1126ea8c5356SColy Li {
1127ea8c5356SColy Li 	int i;
1128ea8c5356SColy Li 	struct bcache_device *d;
1129ea8c5356SColy Li 	struct cached_dev *dc;
1130ea8c5356SColy Li 
1131ea8c5356SColy Li 	/*
1132ea8c5356SColy Li 	 * The mutex bch_register_lock may be contended by other parallel
1133ea8c5356SColy Li 	 * requesters, or by attach/detach operations on other backing
1134ea8c5356SColy Li 	 * devices. Waiting for the mutex may add seconds or more to I/O
1135ea8c5356SColy Li 	 * request latency. To avoid that, if mutex_trylock() fails, only the
1136ea8c5356SColy Li 	 * writeback rate of the current cached device is set to 1, and
1137ea8c5356SColy Li 	 * __update_writeback_rate() will decide the writeback rate of the
1138ea8c5356SColy Li 	 * other cached devices (remember c->idle_counter is already 0).
1139ea8c5356SColy Li 	 */
1140ea8c5356SColy Li 	if (mutex_trylock(&bch_register_lock)) {
1141ea8c5356SColy Li 		for (i = 0; i < c->devices_max_used; i++) {
1142ea8c5356SColy Li 			if (!c->devices[i])
1143ea8c5356SColy Li 				continue;
1144ea8c5356SColy Li 
1145ea8c5356SColy Li 			if (UUID_FLASH_ONLY(&c->uuids[i]))
1146ea8c5356SColy Li 				continue;
1147ea8c5356SColy Li 
1148ea8c5356SColy Li 			d = c->devices[i];
1149ea8c5356SColy Li 			dc = container_of(d, struct cached_dev, disk);
1150ea8c5356SColy Li 			/*
1151ea8c5356SColy Li 			 * Set the writeback rate to the default minimum
1152ea8c5356SColy Li 			 * value, then let update_writeback_rate() decide
1153ea8c5356SColy Li 			 * the upcoming rate.
1154ea8c5356SColy Li 			 */
1155ea8c5356SColy Li 			atomic_long_set(&dc->writeback_rate.rate, 1);
1156ea8c5356SColy Li 		}
1157ea8c5356SColy Li 		mutex_unlock(&bch_register_lock);
1158ea8c5356SColy Li 	} else
1159ea8c5356SColy Li 		atomic_long_set(&this_dc->writeback_rate.rate, 1);
1160ea8c5356SColy Li }
1161ea8c5356SColy Li 
1162cafe5635SKent Overstreet /* Cached devices - read & write stuff */
1163cafe5635SKent Overstreet 
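/*
 * Entry point for I/O submitted to a bcache device that has a backing
 * device: remap the bio onto the backing device, then either run it
 * through the cache (read/write/nodata paths) or, if the device is not
 * attached to a cache set, send it down the detached path.
 */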
11643e08773cSChristoph Hellwig void cached_dev_submit_bio(struct bio *bio)
1165cafe5635SKent Overstreet {
1166cafe5635SKent Overstreet 	struct search *s;
116799dfc43eSChristoph Hellwig 	struct block_device *orig_bdev = bio->bi_bdev;
116899dfc43eSChristoph Hellwig 	struct bcache_device *d = orig_bdev->bd_disk->private_data;
1169cafe5635SKent Overstreet 	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
117099dfc43eSChristoph Hellwig 	unsigned long start_time;
1171aae4933dSGu Zheng 	int rw = bio_data_dir(bio);
1172cafe5635SKent Overstreet 
1173c7b7bd07SColy Li 	if (unlikely((d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags)) ||
1174c7b7bd07SColy Li 		     dc->io_disable)) {
1175771f393eSColy Li 		bio->bi_status = BLK_STS_IOERR;
1176771f393eSColy Li 		bio_endio(bio);
11773e08773cSChristoph Hellwig 		return;
1178771f393eSColy Li 	}
1179771f393eSColy Li 
1180ea8c5356SColy Li 	if (likely(d->c)) {
1181ea8c5356SColy Li 		if (atomic_read(&d->c->idle_counter))
1182ea8c5356SColy Li 			atomic_set(&d->c->idle_counter, 0);
1183ea8c5356SColy Li 		/*
1184ea8c5356SColy Li 		 * If at_max_writeback_rate of cache set is true and new I/O
1185ea8c5356SColy Li 		 * comes, quit max writeback rate of all cached devices
1186ea8c5356SColy Li 		 * attached to this cache set, and set at_max_writeback_rate
1187ea8c5356SColy Li 		 * to false.
1188ea8c5356SColy Li 		 */
1189ea8c5356SColy Li 		if (unlikely(atomic_read(&d->c->at_max_writeback_rate) == 1)) {
1190ea8c5356SColy Li 			atomic_set(&d->c->at_max_writeback_rate, 0);
1191ea8c5356SColy Li 			quit_max_writeback_rate(d->c, dc);
1192ea8c5356SColy Li 		}
1193ea8c5356SColy Li 	}
1194ea8c5356SColy Li 
119599dfc43eSChristoph Hellwig 	start_time = bio_start_io_acct(bio);
119699dfc43eSChristoph Hellwig 
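	/*
	 * Remap the bio from the bcache device to the backing device:
	 * user-visible sector 0 of the bcache device sits at
	 * dc->sb.data_offset on the backing device (normally
	 * BDEV_DATA_START_DEFAULT, 16 sectors, which keeps the bcache
	 * superblock out of the data area).
	 */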
119774d46992SChristoph Hellwig 	bio_set_dev(bio, dc->bdev);
11984f024f37SKent Overstreet 	bio->bi_iter.bi_sector += dc->sb.data_offset;
1199cafe5635SKent Overstreet 
1200cafe5635SKent Overstreet 	if (cached_dev_get(dc)) {
120199dfc43eSChristoph Hellwig 		s = search_alloc(bio, d, orig_bdev, start_time);
1202220bb38cSKent Overstreet 		trace_bcache_request_start(s->d, bio);
1203cafe5635SKent Overstreet 
12044f024f37SKent Overstreet 		if (!bio->bi_iter.bi_size) {
1205a34a8bfdSKent Overstreet 			/*
1206a34a8bfdSKent Overstreet 			 * can't call bch_journal_meta from under
1207ed00aabdSChristoph Hellwig 			 * submit_bio_noacct
1208a34a8bfdSKent Overstreet 			 */
1209a34a8bfdSKent Overstreet 			continue_at_nobarrier(&s->cl,
1210a34a8bfdSKent Overstreet 					      cached_dev_nodata,
1211a34a8bfdSKent Overstreet 					      bcache_wq);
1212a34a8bfdSKent Overstreet 		} else {
1213220bb38cSKent Overstreet 			s->iop.bypass = check_should_bypass(dc, bio);
121484f0db03SKent Overstreet 
121584f0db03SKent Overstreet 			if (rw)
1216cdd972b1SKent Overstreet 				cached_dev_write(dc, s);
1217cafe5635SKent Overstreet 			else
1218cdd972b1SKent Overstreet 				cached_dev_read(dc, s);
121984f0db03SKent Overstreet 		}
1220bc082a55STang Junhui 	} else
122127a40ab9SColy Li 		/* I/O request sent to backing device */
122299dfc43eSChristoph Hellwig 		detached_dev_do_request(d, bio, orig_bdev, start_time);
1223cafe5635SKent Overstreet }
1224cafe5635SKent Overstreet 
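/*
 * ioctls are passed through to the backing device's ioctl method,
 * unless I/O to this device has been disabled after too many errors.
 */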
1225cafe5635SKent Overstreet static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
1226cafe5635SKent Overstreet 			    unsigned int cmd, unsigned long arg)
1227cafe5635SKent Overstreet {
1228cafe5635SKent Overstreet 	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
12291fae7cf0SColy Li 
1230dd0c9179STang Junhui 	if (dc->io_disable)
1231dd0c9179STang Junhui 		return -EIO;
1232a7cb3d2fSChristoph Hellwig 	if (!dc->bdev->bd_disk->fops->ioctl)
1233a7cb3d2fSChristoph Hellwig 		return -ENOTTY;
1234a7cb3d2fSChristoph Hellwig 	return dc->bdev->bd_disk->fops->ioctl(dc->bdev, mode, cmd, arg);
1235cafe5635SKent Overstreet }
1236cafe5635SKent Overstreet 
1237cafe5635SKent Overstreet void bch_cached_dev_request_init(struct cached_dev *dc)
1238cafe5635SKent Overstreet {
1239cafe5635SKent Overstreet 	dc->disk.cache_miss			= cached_dev_cache_miss;
1240cafe5635SKent Overstreet 	dc->disk.ioctl				= cached_dev_ioctl;
1241cafe5635SKent Overstreet }
1242cafe5635SKent Overstreet 
1243cafe5635SKent Overstreet /* Flash backed devices */
1244cafe5635SKent Overstreet 
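/*
 * Flash-only volumes have no backing device, so a "cache miss" simply
 * means the range was never written: zero-fill the missing sectors.
 */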
1245cafe5635SKent Overstreet static int flash_dev_cache_miss(struct btree *b, struct search *s,
12466f10f7d1SColy Li 				struct bio *bio, unsigned int sectors)
1247cafe5635SKent Overstreet {
12486f10f7d1SColy Li 	unsigned int bytes = min(sectors, bio_sectors(bio)) << 9;
12498e51e414SKent Overstreet 
12501b4eaf3dSKent Overstreet 	swap(bio->bi_iter.bi_size, bytes);
12511b4eaf3dSKent Overstreet 	zero_fill_bio(bio);
12521b4eaf3dSKent Overstreet 	swap(bio->bi_iter.bi_size, bytes);
1253cafe5635SKent Overstreet 
12541b4eaf3dSKent Overstreet 	bio_advance(bio, bytes);
12558e51e414SKent Overstreet 
12564f024f37SKent Overstreet 	if (!bio->bi_iter.bi_size)
12572c1953e2SKent Overstreet 		return MAP_DONE;
1258cafe5635SKent Overstreet 
12592c1953e2SKent Overstreet 	return MAP_CONTINUE;
1260cafe5635SKent Overstreet }
1261cafe5635SKent Overstreet 
1262a34a8bfdSKent Overstreet static void flash_dev_nodata(struct closure *cl)
1263a34a8bfdSKent Overstreet {
1264a34a8bfdSKent Overstreet 	struct search *s = container_of(cl, struct search, cl);
1265a34a8bfdSKent Overstreet 
1266220bb38cSKent Overstreet 	if (s->iop.flush_journal)
1267220bb38cSKent Overstreet 		bch_journal_meta(s->iop.c, cl);
1268a34a8bfdSKent Overstreet 
1269a34a8bfdSKent Overstreet 	continue_at(cl, search_free, NULL);
1270a34a8bfdSKent Overstreet }
1271a34a8bfdSKent Overstreet 
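/*
 * Entry point for I/O to a flash-only volume: writes first knock any
 * overlapping keys out of the moving garbage collector's keybuf, then
 * go straight into the btree (discards are handled as bypassed inserts
 * that only invalidate keys); reads are served by cache_lookup(); and
 * zero-size flushes just force a journal write.
 */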
12723e08773cSChristoph Hellwig void flash_dev_submit_bio(struct bio *bio)
1273cafe5635SKent Overstreet {
1274cafe5635SKent Overstreet 	struct search *s;
1275cafe5635SKent Overstreet 	struct closure *cl;
1276309dca30SChristoph Hellwig 	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
1277cafe5635SKent Overstreet 
1278771f393eSColy Li 	if (unlikely(d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags))) {
1279771f393eSColy Li 		bio->bi_status = BLK_STS_IOERR;
1280771f393eSColy Li 		bio_endio(bio);
12813e08773cSChristoph Hellwig 		return;
1282771f393eSColy Li 	}
1283771f393eSColy Li 
128499dfc43eSChristoph Hellwig 	s = search_alloc(bio, d, bio->bi_bdev, bio_start_io_acct(bio));
1285cafe5635SKent Overstreet 	cl = &s->cl;
1286cafe5635SKent Overstreet 	bio = &s->bio.bio;
1287cafe5635SKent Overstreet 
1288220bb38cSKent Overstreet 	trace_bcache_request_start(s->d, bio);
1289cafe5635SKent Overstreet 
12904f024f37SKent Overstreet 	if (!bio->bi_iter.bi_size) {
1291a34a8bfdSKent Overstreet 		/*
1292ed00aabdSChristoph Hellwig 		 * can't call bch_journal_meta from under submit_bio_noacct
1293a34a8bfdSKent Overstreet 		 */
1294a34a8bfdSKent Overstreet 		continue_at_nobarrier(&s->cl,
1295a34a8bfdSKent Overstreet 				      flash_dev_nodata,
1296a34a8bfdSKent Overstreet 				      bcache_wq);
12973e08773cSChristoph Hellwig 		return;
1298ddcf35d3SMichael Callahan 	} else if (bio_data_dir(bio)) {
1299220bb38cSKent Overstreet 		bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
13004f024f37SKent Overstreet 					&KEY(d->id, bio->bi_iter.bi_sector, 0),
13018e51e414SKent Overstreet 					&KEY(d->id, bio_end_sector(bio), 0));
1302cafe5635SKent Overstreet 
1303ad0d9e76SMike Christie 		s->iop.bypass		= (bio_op(bio) == REQ_OP_DISCARD) != 0;
1304220bb38cSKent Overstreet 		s->iop.writeback	= true;
1305220bb38cSKent Overstreet 		s->iop.bio		= bio;
1306cafe5635SKent Overstreet 
1307220bb38cSKent Overstreet 		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
1308cafe5635SKent Overstreet 	} else {
1309220bb38cSKent Overstreet 		closure_call(&s->iop.cl, cache_lookup, NULL, cl);
1310cafe5635SKent Overstreet 	}
1311cafe5635SKent Overstreet 
1312cafe5635SKent Overstreet 	continue_at(cl, search_free, NULL);
1313cafe5635SKent Overstreet }
1314cafe5635SKent Overstreet 
1315cafe5635SKent Overstreet static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
1316cafe5635SKent Overstreet 			   unsigned int cmd, unsigned long arg)
1317cafe5635SKent Overstreet {
1318cafe5635SKent Overstreet 	return -ENOTTY;
1319cafe5635SKent Overstreet }
1320cafe5635SKent Overstreet 
1321cafe5635SKent Overstreet void bch_flash_dev_request_init(struct bcache_device *d)
1322cafe5635SKent Overstreet {
1323cafe5635SKent Overstreet 	d->cache_miss				= flash_dev_cache_miss;
1324cafe5635SKent Overstreet 	d->ioctl				= flash_dev_ioctl;
1325cafe5635SKent Overstreet }
1326cafe5635SKent Overstreet 
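/*
 * Module init/exit: bch_search_cache backs the allocation of the
 * per-request struct search used throughout this file.
 */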
1327cafe5635SKent Overstreet void bch_request_exit(void)
1328cafe5635SKent Overstreet {
1329cafe5635SKent Overstreet 	kmem_cache_destroy(bch_search_cache);
1330cafe5635SKent Overstreet }
1331cafe5635SKent Overstreet 
1332cafe5635SKent Overstreet int __init bch_request_init(void)
1333cafe5635SKent Overstreet {
1334cafe5635SKent Overstreet 	bch_search_cache = KMEM_CACHE(search, 0);
1335cafe5635SKent Overstreet 	if (!bch_search_cache)
1336cafe5635SKent Overstreet 		return -ENOMEM;
1337cafe5635SKent Overstreet 
1338cafe5635SKent Overstreet 	return 0;
1339cafe5635SKent Overstreet }
1340