xref: /openbmc/linux/drivers/md/bcache/request.c (revision ddcf35d397976421a4ec1d0d00fbcc027a8cb034)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Main bcache entry point - handle a read or a write request and decide what to
4  * do with it; the make_request functions are called by the block layer.
5  *
6  * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
7  * Copyright 2012 Google, Inc.
8  */
9 
10 #include "bcache.h"
11 #include "btree.h"
12 #include "debug.h"
13 #include "request.h"
14 #include "writeback.h"
15 
16 #include <linux/module.h>
17 #include <linux/hash.h>
18 #include <linux/random.h>
19 #include <linux/backing-dev.h>
20 
21 #include <trace/events/bcache.h>
22 
23 #define CUTOFF_CACHE_ADD	95
24 #define CUTOFF_CACHE_READA	90
25 
26 struct kmem_cache *bch_search_cache;
27 
28 static void bch_data_insert_start(struct closure *);
29 
30 static unsigned cache_mode(struct cached_dev *dc)
31 {
32 	return BDEV_CACHE_MODE(&dc->sb);
33 }
34 
35 static bool verify(struct cached_dev *dc)
36 {
37 	return dc->verify;
38 }
39 
40 static void bio_csum(struct bio *bio, struct bkey *k)
41 {
42 	struct bio_vec bv;
43 	struct bvec_iter iter;
44 	uint64_t csum = 0;
45 
46 	bio_for_each_segment(bv, bio, iter) {
47 		void *d = kmap(bv.bv_page) + bv.bv_offset;
48 		csum = bch_crc64_update(csum, d, bv.bv_len);
49 		kunmap(bv.bv_page);
50 	}
51 
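	/*
	 * The checksum is stored in the u64 slot just past the key's last
	 * pointer, masked to 63 bits so the stored value never sets the
	 * top bit.
	 */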
52 	k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
53 }
54 
55 /* Insert data into cache */
56 
57 static void bch_data_insert_keys(struct closure *cl)
58 {
59 	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
60 	atomic_t *journal_ref = NULL;
61 	struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
62 	int ret;
63 
64 	/*
65 	 * If we're looping, we might already be waiting on
66 	 * another journal write - we can't wait on more than one journal
67 	 * write at a time.
68 	 *
69 	 * XXX: this looks wrong
70 	 */
71 #if 0
72 	while (atomic_read(&s->cl.remaining) & CLOSURE_WAITING)
73 		closure_sync(&s->cl);
74 #endif
75 
76 	if (!op->replace)
77 		journal_ref = bch_journal(op->c, &op->insert_keys,
78 					  op->flush_journal ? cl : NULL);
79 
80 	ret = bch_btree_insert(op->c, &op->insert_keys,
81 			       journal_ref, replace_key);
82 	if (ret == -ESRCH) {
83 		op->replace_collision = true;
84 	} else if (ret) {
85 		op->status		= BLK_STS_RESOURCE;
86 		op->insert_data_done	= true;
87 	}
88 
89 	if (journal_ref)
90 		atomic_dec_bug(journal_ref);
91 
92 	if (!op->insert_data_done) {
93 		continue_at(cl, bch_data_insert_start, op->wq);
94 		return;
95 	}
96 
97 	bch_keylist_free(&op->insert_keys);
98 	closure_return(cl);
99 }
100 
101 static int bch_keylist_realloc(struct keylist *l, unsigned u64s,
102 			       struct cache_set *c)
103 {
104 	size_t oldsize = bch_keylist_nkeys(l);
105 	size_t newsize = oldsize + u64s;
106 
107 	/*
108 	 * The journalling code doesn't handle the case where the set of keys
109 	 * to insert is bigger than an empty write: if we just return -ENOMEM
110 	 * here, bch_data_insert_start() and bch_data_invalidate() will insert
111 	 * the keys created so far and finish the rest when the keylist is empty.
112 	 */
113 	if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
114 		return -ENOMEM;
115 
116 	return __bch_keylist_realloc(l, u64s);
117 }
118 
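/*
 * Worked example (the block size is an assumption for illustration): with a
 * 4 KiB btree block, a keylist may grow until its u64s no longer fit in
 * 4096 - sizeof(struct jset) bytes, i.e. a bit under 512 u64s; past that
 * point the caller inserts what it has accumulated so far and comes back for
 * the rest.
 */
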
119 static void bch_data_invalidate(struct closure *cl)
120 {
121 	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
122 	struct bio *bio = op->bio;
123 
124 	pr_debug("invalidating %u sectors from %llu",
125 		 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);
126 
127 	while (bio_sectors(bio)) {
128 		unsigned sectors = min(bio_sectors(bio),
129 				       1U << (KEY_SIZE_BITS - 1));
130 
131 		if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
132 			goto out;
133 
134 		bio->bi_iter.bi_sector	+= sectors;
135 		bio->bi_iter.bi_size	-= sectors << 9;
136 
137 		bch_keylist_add(&op->insert_keys,
138 				&KEY(op->inode, bio->bi_iter.bi_sector, sectors));
139 	}
140 
141 	op->insert_data_done = true;
142 	/* matches the bio_get() taken in bch_data_insert() */
143 	bio_put(bio);
144 out:
145 	continue_at(cl, bch_data_insert_keys, op->wq);
146 }
147 
148 static void bch_data_insert_error(struct closure *cl)
149 {
150 	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
151 
152 	/*
153 	 * Our data write just errored, which means we've got a bunch of keys to
154 	 * insert that point to data that wasn't successfully written.
155 	 *
156 	 * We don't have to insert those keys but we still have to invalidate
157 	 * that region of the cache - so, if we just strip off all the pointers
158 	 * from the keys we'll accomplish just that.
159 	 */
160 
161 	struct bkey *src = op->insert_keys.keys, *dst = op->insert_keys.keys;
162 
163 	while (src != op->insert_keys.top) {
164 		struct bkey *n = bkey_next(src);
165 
166 		SET_KEY_PTRS(src, 0);
167 		memmove(dst, src, bkey_bytes(src));
168 
169 		dst = bkey_next(dst);
170 		src = n;
171 	}
172 
173 	op->insert_keys.top = dst;
174 
175 	bch_data_insert_keys(cl);
176 }
177 
178 static void bch_data_insert_endio(struct bio *bio)
179 {
180 	struct closure *cl = bio->bi_private;
181 	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
182 
183 	if (bio->bi_status) {
184 		/* TODO: We could try to recover from this. */
185 		if (op->writeback)
186 			op->status = bio->bi_status;
187 		else if (!op->replace)
188 			set_closure_fn(cl, bch_data_insert_error, op->wq);
189 		else
190 			set_closure_fn(cl, NULL, NULL);
191 	}
192 
193 	bch_bbio_endio(op->c, bio, bio->bi_status, "writing data to cache");
194 }
195 
196 static void bch_data_insert_start(struct closure *cl)
197 {
198 	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
199 	struct bio *bio = op->bio, *n;
200 
201 	if (op->bypass)
202 		return bch_data_invalidate(cl);
203 
204 	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
205 		wake_up_gc(op->c);
206 
207 	/*
208 	 * Journal writes are marked REQ_PREFLUSH; if the original write was a
209 	 * flush, it'll wait on the journal write.
210 	 */
211 	bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);
212 
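	/*
	 * Each pass around this loop allocates some sectors in the cache,
	 * splits that much off the front of the bio, and writes it out; a
	 * single incoming write may therefore fragment into several keys.
	 */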
213 	do {
214 		unsigned i;
215 		struct bkey *k;
216 		struct bio_set *split = &op->c->bio_split;
217 
218 		/* 2 u64s for the key header, 1 for the pointer, 1 for the checksum */
219 		if (bch_keylist_realloc(&op->insert_keys,
220 					3 + (op->csum ? 1 : 0),
221 					op->c)) {
222 			continue_at(cl, bch_data_insert_keys, op->wq);
223 			return;
224 		}
225 
226 		k = op->insert_keys.top;
227 		bkey_init(k);
228 		SET_KEY_INODE(k, op->inode);
229 		SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);
230 
231 		if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
232 				       op->write_point, op->write_prio,
233 				       op->writeback))
234 			goto err;
235 
236 		n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);
237 
238 		n->bi_end_io	= bch_data_insert_endio;
239 		n->bi_private	= cl;
240 
241 		if (op->writeback) {
242 			SET_KEY_DIRTY(k, true);
243 
244 			for (i = 0; i < KEY_PTRS(k); i++)
245 				SET_GC_MARK(PTR_BUCKET(op->c, k, i),
246 					    GC_MARK_DIRTY);
247 		}
248 
249 		SET_KEY_CSUM(k, op->csum);
250 		if (KEY_CSUM(k))
251 			bio_csum(n, k);
252 
253 		trace_bcache_cache_insert(k);
254 		bch_keylist_push(&op->insert_keys);
255 
256 		bio_set_op_attrs(n, REQ_OP_WRITE, 0);
257 		bch_submit_bbio(n, op->c, k, 0);
258 	} while (n != bio);
259 
260 	op->insert_data_done = true;
261 	continue_at(cl, bch_data_insert_keys, op->wq);
262 	return;
263 err:
264 	/* bch_alloc_sectors() blocks if op->writeback is true */
265 	BUG_ON(op->writeback);
266 
267 	/*
268 	 * But if it's not a writeback write we'd rather just bail out if
269 	 * there aren't any buckets ready to write to - it might take a while and
270 	 * we might be starving btree writes for gc or something.
271 	 */
272 
273 	if (!op->replace) {
274 		/*
275 		 * Writethrough write: We can't complete the write until we've
276 		 * updated the index. But we don't want to delay the write while
277 		 * we wait for buckets to be freed up, so just invalidate the
278 		 * rest of the write.
279 		 */
280 		op->bypass = true;
281 		return bch_data_invalidate(cl);
282 	} else {
283 		/*
284 		 * From a cache miss, we can just insert the keys for the data
285 		 * we have written or bail out if we didn't do anything.
286 		 */
287 		op->insert_data_done = true;
288 		bio_put(bio);
289 
290 		if (!bch_keylist_empty(&op->insert_keys))
291 			continue_at(cl, bch_data_insert_keys, op->wq);
292 		else
293 			closure_return(cl);
294 	}
295 }
296 
297 /**
298  * bch_data_insert - stick some data in the cache
299  * @cl: closure embedded in the struct data_insert_op being run.
300  *
301  * This is the starting point for any data to end up in a cache device; it could
302  * be from a normal write, or a writeback write, or a write to a flash only
303  * volume - it's also used by the moving garbage collector to compact data in
304  * mostly empty buckets.
305  *
306  * It first writes the data to the cache, creating a list of keys to be inserted
307  * (if the data had to be fragmented there will be multiple keys); after the
308  * data is written it calls bch_journal, and after the keys have been added to
309  * the next journal write they're inserted into the btree.
310  *
311  * It inserts the data in op->bio; bi_sector is used for the key offset,
312  * and op->inode is used for the key inode.
313  *
314  * If op->bypass is true, instead of inserting the data it invalidates the
315  * region of the cache represented by op->bio and op->inode.
316  */
317 void bch_data_insert(struct closure *cl)
318 {
319 	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
320 
321 	trace_bcache_write(op->c, op->inode, op->bio,
322 			   op->writeback, op->bypass);
323 
324 	bch_keylist_init(&op->insert_keys);
325 	bio_get(op->bio);
326 	bch_data_insert_start(cl);
327 }
328 
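/*
 * A minimal usage sketch (illustrative only; the fields shown are the ones
 * search_alloc() and cached_dev_write() below actually set up): callers never
 * invoke bch_data_insert() directly, they fire it through the closure
 * machinery and resume once the keys are in the btree.
 */
#if 0
static void example_insert(struct search *s, struct closure *parent)
{
	s->iop.bio	 = s->orig_bio;	/* the data to be cached */
	s->iop.writeback = true;	/* insert the keys as dirty */

	closure_call(&s->iop.cl, bch_data_insert, NULL, parent);
	/* parent continues after the insert completes */
}
#endif
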
329 /* Congested? */
330 
331 unsigned bch_get_congested(struct cache_set *c)
332 {
333 	int i;
334 	long rand;
335 
336 	if (!c->congested_read_threshold_us &&
337 	    !c->congested_write_threshold_us)
338 		return 0;
339 
340 	i = (local_clock_us() - c->congested_last_us) / 1024;
341 	if (i < 0)
342 		return 0;
343 
344 	i += atomic_read(&c->congested);
345 	if (i >= 0)
346 		return 0;
347 
348 	i += CONGESTED_MAX;
349 
350 	if (i > 0)
351 		i = fract_exp_two(i, 6);
352 
353 	rand = get_random_int();
354 	i -= bitmap_weight(&rand, BITS_PER_LONG);
355 
356 	return i > 0 ? i : 1;
357 }
358 
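/*
 * Reading of the arithmetic above (an interpretation, not a documented
 * contract): a return of 0 means "not congested".  Otherwise the result is
 * at least 1 and shrinks the deeper c->congested has been driven negative:
 * fract_exp_two(i, 6) maps the remaining headroom onto an exponential curve,
 * and subtracting bitmap_weight() of a random long (BITS_PER_LONG / 2 set
 * bits on average) adds jitter so concurrent submitters don't all flip to
 * bypass at once.  check_should_bypass() bypasses the cache once a task's
 * sequential I/O reaches this many sectors, so a lower value means more
 * aggressive bypassing.
 */
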
359 static void add_sequential(struct task_struct *t)
360 {
361 	ewma_add(t->sequential_io_avg,
362 		 t->sequential_io, 8, 0);
363 
364 	t->sequential_io = 0;
365 }
366 
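/*
 * With weight 8 and factor 0, ewma_add() (defined in util.h) works out to
 * roughly:
 *
 *	sequential_io_avg = (7 * sequential_io_avg + sequential_io) / 8
 *
 * so a burst of sequential I/O decays out of the average over several calls
 * instead of vanishing immediately.
 */
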
367 static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
368 {
369 	return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
370 }
371 
372 static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
373 {
374 	struct cache_set *c = dc->disk.c;
375 	unsigned mode = cache_mode(dc);
376 	unsigned sectors, congested = bch_get_congested(c);
377 	struct task_struct *task = current;
378 	struct io *i;
379 
380 	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
381 	    c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
382 	    (bio_op(bio) == REQ_OP_DISCARD))
383 		goto skip;
384 
385 	if (mode == CACHE_MODE_NONE ||
386 	    (mode == CACHE_MODE_WRITEAROUND &&
387 	     op_is_write(bio_op(bio))))
388 		goto skip;
389 
390 	/*
391 	 * Flag for bypass if the IO is for read-ahead or background,
392 	 * unless the read-ahead request is for metadata (e.g., for gfs2).
393 	 */
394 	if (bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND) &&
395 	    !(bio->bi_opf & REQ_META))
396 		goto skip;
397 
398 	if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
399 	    bio_sectors(bio) & (c->sb.block_size - 1)) {
400 		pr_debug("skipping unaligned io");
401 		goto skip;
402 	}
403 
404 	if (bypass_torture_test(dc)) {
405 		if ((get_random_int() & 3) == 3)
406 			goto skip;
407 		else
408 			goto rescale;
409 	}
410 
411 	if (!congested && !dc->sequential_cutoff)
412 		goto rescale;
413 
414 	spin_lock(&dc->io_lock);
415 
416 	hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
417 		if (i->last == bio->bi_iter.bi_sector &&
418 		    time_before(jiffies, i->jiffies))
419 			goto found;
420 
421 	i = list_first_entry(&dc->io_lru, struct io, lru);
422 
423 	add_sequential(task);
424 	i->sequential = 0;
425 found:
426 	if (i->sequential + bio->bi_iter.bi_size > i->sequential)
427 		i->sequential	+= bio->bi_iter.bi_size;
428 
429 	i->last			 = bio_end_sector(bio);
430 	i->jiffies		 = jiffies + msecs_to_jiffies(5000);
431 	task->sequential_io	 = i->sequential;
432 
433 	hlist_del(&i->hash);
434 	hlist_add_head(&i->hash, iohash(dc, i->last));
435 	list_move_tail(&i->lru, &dc->io_lru);
436 
437 	spin_unlock(&dc->io_lock);
438 
439 	sectors = max(task->sequential_io,
440 		      task->sequential_io_avg) >> 9;
441 
442 	if (dc->sequential_cutoff &&
443 	    sectors >= dc->sequential_cutoff >> 9) {
444 		trace_bcache_bypass_sequential(bio);
445 		goto skip;
446 	}
447 
448 	if (congested && sectors >= congested) {
449 		trace_bcache_bypass_congested(bio);
450 		goto skip;
451 	}
452 
453 rescale:
454 	bch_rescale_priorities(c, bio_sectors(bio));
455 	return false;
456 skip:
457 	bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
458 	return true;
459 }
460 
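/*
 * Worked example (the cutoff value is illustrative): with sequential_cutoff
 * set to 4 MiB, 4 MiB >> 9 = 8192 sectors, so once a task's tracked
 * sequential stream reaches 8192 sectors its I/O bypasses the cache; under
 * congestion, a stream is likewise bypassed once its sector count reaches
 * the value returned by bch_get_congested().
 */
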
461 /* Cache lookup */
462 
463 struct search {
464 	/* Stack frame for bio_complete */
465 	struct closure		cl;
466 
467 	struct bbio		bio;
468 	struct bio		*orig_bio;
469 	struct bio		*cache_miss;
470 	struct bcache_device	*d;
471 
472 	unsigned		insert_bio_sectors;
473 	unsigned		recoverable:1;
474 	unsigned		write:1;
475 	unsigned		read_dirty_data:1;
476 	unsigned		cache_missed:1;
477 
478 	unsigned long		start_time;
479 
480 	struct btree_op		op;
481 	struct data_insert_op	iop;
482 };
483 
484 static void bch_cache_read_endio(struct bio *bio)
485 {
486 	struct bbio *b = container_of(bio, struct bbio, bio);
487 	struct closure *cl = bio->bi_private;
488 	struct search *s = container_of(cl, struct search, cl);
489 
490 	/*
491 	 * If the bucket was reused while our bio was in flight, we might have
492 	 * read the wrong data. Set s->iop.status but leave the bio's own status
493 	 * alone, so the error doesn't get counted against the cache device, but
494 	 * we'll still reread the data from the backing device.
495 	 */
496 
497 	if (bio->bi_status)
498 		s->iop.status = bio->bi_status;
499 	else if (!KEY_DIRTY(&b->key) &&
500 		 ptr_stale(s->iop.c, &b->key, 0)) {
501 		atomic_long_inc(&s->iop.c->cache_read_races);
502 		s->iop.status = BLK_STS_IOERR;
503 	}
504 
505 	bch_bbio_endio(s->iop.c, bio, bio->bi_status, "reading from cache");
506 }
507 
508 /*
509  * Read from a single key, handling the initial cache miss if the key starts in
510  * the middle of the bio
511  */
512 static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
513 {
514 	struct search *s = container_of(op, struct search, op);
515 	struct bio *n, *bio = &s->bio.bio;
516 	struct bkey *bio_key;
517 	unsigned ptr;
518 
519 	if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
520 		return MAP_CONTINUE;
521 
522 	if (KEY_INODE(k) != s->iop.inode ||
523 	    KEY_START(k) > bio->bi_iter.bi_sector) {
524 		unsigned bio_sectors = bio_sectors(bio);
525 		unsigned sectors = KEY_INODE(k) == s->iop.inode
526 			? min_t(uint64_t, INT_MAX,
527 				KEY_START(k) - bio->bi_iter.bi_sector)
528 			: INT_MAX;
529 
530 		int ret = s->d->cache_miss(b, s, bio, sectors);
531 		if (ret != MAP_CONTINUE)
532 			return ret;
533 
534 		/* if this was a complete miss we shouldn't get here */
535 		BUG_ON(bio_sectors <= sectors);
536 	}
537 
538 	if (!KEY_SIZE(k))
539 		return MAP_CONTINUE;
540 
541 	/* XXX: figure out best pointer - for multiple cache devices */
542 	ptr = 0;
543 
544 	PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;
545 
546 	if (KEY_DIRTY(k))
547 		s->read_dirty_data = true;
548 
549 	n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
550 				      KEY_OFFSET(k) - bio->bi_iter.bi_sector),
551 			   GFP_NOIO, &s->d->bio_split);
552 
553 	bio_key = &container_of(n, struct bbio, bio)->key;
554 	bch_bkey_copy_single_ptr(bio_key, k, ptr);
555 
556 	bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
557 	bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);
558 
559 	n->bi_end_io	= bch_cache_read_endio;
560 	n->bi_private	= &s->cl;
561 
562 	/*
563 	 * The bucket we're reading from might be reused while our bio
564 	 * is in flight, and we could then end up reading the wrong
565 	 * data.
566 	 *
567 	 * We guard against this by checking (in cache_read_endio()) if
568 	 * the pointer is stale again; if so, we treat it as an error
569 	 * and reread from the backing device (but we don't pass that
570 	 * error up anywhere).
571 	 */
572 
573 	__bch_submit_bbio(n, b->c);
574 	return n == bio ? MAP_DONE : MAP_CONTINUE;
575 }
576 
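/*
 * Iteration contract as used here: bch_btree_map_keys() keeps calling
 * cache_lookup_fn() with successive keys while it returns MAP_CONTINUE and
 * stops on MAP_DONE, so a bio spanning several extents is peeled off one
 * split at a time; only the split that consumes the tail of the bio
 * (n == bio) ends the walk.
 */
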
577 static void cache_lookup(struct closure *cl)
578 {
579 	struct search *s = container_of(cl, struct search, iop.cl);
580 	struct bio *bio = &s->bio.bio;
581 	struct cached_dev *dc;
582 	int ret;
583 
584 	bch_btree_op_init(&s->op, -1);
585 
586 	ret = bch_btree_map_keys(&s->op, s->iop.c,
587 				 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
588 				 cache_lookup_fn, MAP_END_KEY);
589 	if (ret == -EAGAIN) {
590 		continue_at(cl, cache_lookup, bcache_wq);
591 		return;
592 	}
593 
594 	/*
595 	 * We might hit an error when searching the btree; if that happens we
596 	 * get a negative ret. In this scenario we should not recover data from
597 	 * the backing device (when the cache device is dirty) because we don't
598 	 * know whether the bkeys the read request covered are all clean.
599 	 *
600 	 * When that happens, s->iop.status still has its initial value from
601 	 * before we submitted s->bio.bio.
602 	 */
603 	if (ret < 0) {
604 		BUG_ON(ret == -EINTR);
605 		if (s->d && s->d->c &&
606 				!UUID_FLASH_ONLY(&s->d->c->uuids[s->d->id])) {
607 			dc = container_of(s->d, struct cached_dev, disk);
608 			if (dc && atomic_read(&dc->has_dirty))
609 				s->recoverable = false;
610 		}
611 		if (!s->iop.status)
612 			s->iop.status = BLK_STS_IOERR;
613 	}
614 
615 	closure_return(cl);
616 }
617 
618 /* Common code for the make_request functions */
619 
620 static void request_endio(struct bio *bio)
621 {
622 	struct closure *cl = bio->bi_private;
623 
624 	if (bio->bi_status) {
625 		struct search *s = container_of(cl, struct search, cl);
626 		s->iop.status = bio->bi_status;
627 		/* Only cache read errors are recoverable */
628 		s->recoverable = false;
629 	}
630 
631 	bio_put(bio);
632 	closure_put(cl);
633 }
634 
635 static void backing_request_endio(struct bio *bio)
636 {
637 	struct closure *cl = bio->bi_private;
638 
639 	if (bio->bi_status) {
640 		struct search *s = container_of(cl, struct search, cl);
641 		struct cached_dev *dc = container_of(s->d,
642 						     struct cached_dev, disk);
643 		/*
644 		 * If a bio has REQ_PREFLUSH for writeback mode, it was
645 		 * specially assembled in cached_dev_write() for a non-empty
646 		 * write request that had REQ_PREFLUSH. We don't set
647 		 * s->iop.status on this failure; the status will be decided
648 		 * by the result of the bch_data_insert() operation.
649 		 */
650 		if (unlikely(s->iop.writeback &&
651 			     bio->bi_opf & REQ_PREFLUSH)) {
652 			pr_err("Can't flush %s: returned bi_status %i",
653 				dc->backing_dev_name, bio->bi_status);
654 		} else {
655 			/* set to orig_bio->bi_status in bio_complete() */
656 			s->iop.status = bio->bi_status;
657 		}
658 		s->recoverable = false;
659 		/* should count I/O error for backing device here */
660 		bch_count_backing_io_errors(dc, bio);
661 	}
662 
663 	bio_put(bio);
664 	closure_put(cl);
665 }
666 
667 static void bio_complete(struct search *s)
668 {
669 	if (s->orig_bio) {
670 		generic_end_io_acct(s->d->disk->queue, bio_op(s->orig_bio),
671 				    &s->d->disk->part0, s->start_time);
672 
673 		trace_bcache_request_end(s->d, s->orig_bio);
674 		s->orig_bio->bi_status = s->iop.status;
675 		bio_endio(s->orig_bio);
676 		s->orig_bio = NULL;
677 	}
678 }
679 
680 static void do_bio_hook(struct search *s,
681 			struct bio *orig_bio,
682 			bio_end_io_t *end_io_fn)
683 {
684 	struct bio *bio = &s->bio.bio;
685 
686 	bio_init(bio, NULL, 0);
687 	__bio_clone_fast(bio, orig_bio);
688 	/*
689 	 * bi_end_io can be set separately somewhere else, e.g. by the
690 	 * following variants:
691 	 * - cache_bio->bi_end_io from cached_dev_cache_miss()
692 	 * - n->bi_end_io from cache_lookup_fn()
693 	 */
694 	bio->bi_end_io		= end_io_fn;
695 	bio->bi_private		= &s->cl;
696 
697 	bio_cnt_set(bio, 3);
698 }
699 
700 static void search_free(struct closure *cl)
701 {
702 	struct search *s = container_of(cl, struct search, cl);
703 
704 	if (s->iop.bio)
705 		bio_put(s->iop.bio);
706 
707 	bio_complete(s);
708 	closure_debug_destroy(cl);
709 	mempool_free(s, &s->d->c->search);
710 }
711 
712 static inline struct search *search_alloc(struct bio *bio,
713 					  struct bcache_device *d)
714 {
715 	struct search *s;
716 
717 	s = mempool_alloc(&d->c->search, GFP_NOIO);
718 
719 	closure_init(&s->cl, NULL);
720 	do_bio_hook(s, bio, request_endio);
721 
722 	s->orig_bio		= bio;
723 	s->cache_miss		= NULL;
724 	s->cache_missed		= 0;
725 	s->d			= d;
726 	s->recoverable		= 1;
727 	s->write		= op_is_write(bio_op(bio));
728 	s->read_dirty_data	= 0;
729 	s->start_time		= jiffies;
730 
731 	s->iop.c		= d->c;
732 	s->iop.bio		= NULL;
733 	s->iop.inode		= d->id;
734 	s->iop.write_point	= hash_long((unsigned long) current, 16);
735 	s->iop.write_prio	= 0;
736 	s->iop.status		= 0;
737 	s->iop.flags		= 0;
738 	s->iop.flush_journal	= op_is_flush(bio->bi_opf);
739 	s->iop.wq		= bcache_wq;
740 
741 	return s;
742 }
743 
744 /* Cached devices */
745 
746 static void cached_dev_bio_complete(struct closure *cl)
747 {
748 	struct search *s = container_of(cl, struct search, cl);
749 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
750 
751 	search_free(cl);
752 	cached_dev_put(dc);
753 }
754 
755 /* Process reads */
756 
757 static void cached_dev_cache_miss_done(struct closure *cl)
758 {
759 	struct search *s = container_of(cl, struct search, cl);
760 
761 	if (s->iop.replace_collision)
762 		bch_mark_cache_miss_collision(s->iop.c, s->d);
763 
764 	if (s->iop.bio)
765 		bio_free_pages(s->iop.bio);
766 
767 	cached_dev_bio_complete(cl);
768 }
769 
770 static void cached_dev_read_error(struct closure *cl)
771 {
772 	struct search *s = container_of(cl, struct search, cl);
773 	struct bio *bio = &s->bio.bio;
774 
775 	/*
776 	 * If the read request hit dirty data (s->read_dirty_data is true),
777 	 * then retrying a failed read from the cache device may return
778 	 * stale data. So read failure recovery is only permitted when the
779 	 * read request hit clean data in the cache device, or when a cache
780 	 * read race happened.
781 	 */
782 	if (s->recoverable && !s->read_dirty_data) {
783 		/* Retry from the backing device: */
784 		trace_bcache_read_retry(s->orig_bio);
785 
786 		s->iop.status = 0;
787 		do_bio_hook(s, s->orig_bio, backing_request_endio);
788 
789 		/* XXX: invalidate cache */
790 
791 		/* I/O request sent to backing device */
792 		closure_bio_submit(s->iop.c, bio, cl);
793 	}
794 
795 	continue_at(cl, cached_dev_cache_miss_done, NULL);
796 }
797 
798 static void cached_dev_read_done(struct closure *cl)
799 {
800 	struct search *s = container_of(cl, struct search, cl);
801 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
802 
803 	/*
804 	 * We had a cache miss; s->iop.bio (the cache_bio allocated in
805 	 * cached_dev_cache_miss()) now contains the data we just read, ready
806 	 * to be inserted into the cache.
807 	 *
808 	 * First, we copy that data from its bounce buffers to the buffers the
809 	 * original bio pointed to:
809 	 */
810 
811 	if (s->iop.bio) {
812 		bio_reset(s->iop.bio);
813 		s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector;
814 		bio_copy_dev(s->iop.bio, s->cache_miss);
815 		s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
816 		bch_bio_map(s->iop.bio, NULL);
817 
818 		bio_copy_data(s->cache_miss, s->iop.bio);
819 
820 		bio_put(s->cache_miss);
821 		s->cache_miss = NULL;
822 	}
823 
824 	if (verify(dc) && s->recoverable && !s->read_dirty_data)
825 		bch_data_verify(dc, s->orig_bio);
826 
827 	bio_complete(s);
828 
829 	if (s->iop.bio &&
830 	    !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) {
831 		BUG_ON(!s->iop.replace);
832 		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
833 	}
834 
835 	continue_at(cl, cached_dev_cache_miss_done, NULL);
836 }
837 
838 static void cached_dev_read_done_bh(struct closure *cl)
839 {
840 	struct search *s = container_of(cl, struct search, cl);
841 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
842 
843 	bch_mark_cache_accounting(s->iop.c, s->d,
844 				  !s->cache_missed, s->iop.bypass);
845 	trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);
846 
847 	if (s->iop.status)
848 		continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
849 	else if (s->iop.bio || verify(dc))
850 		continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
851 	else
852 		continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
853 }
854 
855 static int cached_dev_cache_miss(struct btree *b, struct search *s,
856 				 struct bio *bio, unsigned sectors)
857 {
858 	int ret = MAP_CONTINUE;
859 	unsigned reada = 0;
860 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
861 	struct bio *miss, *cache_bio;
862 
863 	s->cache_missed = 1;
864 
865 	if (s->cache_miss || s->iop.bypass) {
866 		miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);
867 		ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
868 		goto out_submit;
869 	}
870 
871 	if (!(bio->bi_opf & REQ_RAHEAD) &&
872 	    !(bio->bi_opf & REQ_META) &&
873 	    s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
874 		reada = min_t(sector_t, dc->readahead >> 9,
875 			      get_capacity(bio->bi_disk) - bio_end_sector(bio));
876 
877 	s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
878 
879 	s->iop.replace_key = KEY(s->iop.inode,
880 				 bio->bi_iter.bi_sector + s->insert_bio_sectors,
881 				 s->insert_bio_sectors);
882 
883 	ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
884 	if (ret)
885 		return ret;
886 
887 	s->iop.replace = true;
888 
889 	miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);
890 
891 	/* btree_search_recurse()'s btree iterator is no good anymore */
892 	ret = miss == bio ? MAP_DONE : -EINTR;
893 
894 	cache_bio = bio_alloc_bioset(GFP_NOWAIT,
895 			DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
896 			&dc->disk.bio_split);
897 	if (!cache_bio)
898 		goto out_submit;
899 
900 	cache_bio->bi_iter.bi_sector	= miss->bi_iter.bi_sector;
901 	bio_copy_dev(cache_bio, miss);
902 	cache_bio->bi_iter.bi_size	= s->insert_bio_sectors << 9;
903 
904 	cache_bio->bi_end_io	= backing_request_endio;
905 	cache_bio->bi_private	= &s->cl;
906 
907 	bch_bio_map(cache_bio, NULL);
908 	if (bch_bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
909 		goto out_put;
910 
911 	if (reada)
912 		bch_mark_cache_readahead(s->iop.c, s->d);
913 
914 	s->cache_miss	= miss;
915 	s->iop.bio	= cache_bio;
916 	bio_get(cache_bio);
917 	/* I/O request sent to backing device */
918 	closure_bio_submit(s->iop.c, cache_bio, &s->cl);
919 
920 	return ret;
921 out_put:
922 	bio_put(cache_bio);
923 out_submit:
924 	miss->bi_end_io		= backing_request_endio;
925 	miss->bi_private	= &s->cl;
926 	/* I/O request sent to backing device */
927 	closure_bio_submit(s->iop.c, miss, &s->cl);
928 	return ret;
929 }
930 
931 static void cached_dev_read(struct cached_dev *dc, struct search *s)
932 {
933 	struct closure *cl = &s->cl;
934 
935 	closure_call(&s->iop.cl, cache_lookup, NULL, cl);
936 	continue_at(cl, cached_dev_read_done_bh, NULL);
937 }
938 
939 /* Process writes */
940 
941 static void cached_dev_write_complete(struct closure *cl)
942 {
943 	struct search *s = container_of(cl, struct search, cl);
944 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
945 
946 	up_read_non_owner(&dc->writeback_lock);
947 	cached_dev_bio_complete(cl);
948 }
949 
950 static void cached_dev_write(struct cached_dev *dc, struct search *s)
951 {
952 	struct closure *cl = &s->cl;
953 	struct bio *bio = &s->bio.bio;
954 	struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
955 	struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);
956 
957 	bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);
958 
959 	down_read_non_owner(&dc->writeback_lock);
960 	if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
961 		/*
962 		 * We overlap with some dirty data undergoing background
963 		 * writeback; force this write to writeback mode.
964 		 */
965 		s->iop.bypass = false;
966 		s->iop.writeback = true;
967 	}
968 
969 	/*
970 	 * Discards aren't _required_ to do anything, so skipping if
971 	 * check_overlapping returned true is OK.
972 	 *
973 	 * But check_overlapping drops dirty keys for which I/O hasn't started,
974 	 * so we still want to call it.
975 	 */
976 	if (bio_op(bio) == REQ_OP_DISCARD)
977 		s->iop.bypass = true;
978 
979 	if (should_writeback(dc, s->orig_bio,
980 			     cache_mode(dc),
981 			     s->iop.bypass)) {
982 		s->iop.bypass = false;
983 		s->iop.writeback = true;
984 	}
985 
986 	if (s->iop.bypass) {
987 		s->iop.bio = s->orig_bio;
988 		bio_get(s->iop.bio);
989 
990 		if (bio_op(bio) == REQ_OP_DISCARD &&
991 		    !blk_queue_discard(bdev_get_queue(dc->bdev)))
992 			goto insert_data;
993 
994 		/* I/O request sent to backing device */
995 		bio->bi_end_io = backing_request_endio;
996 		closure_bio_submit(s->iop.c, bio, cl);
997 
998 	} else if (s->iop.writeback) {
999 		bch_writeback_add(dc);
1000 		s->iop.bio = bio;
1001 
1002 		if (bio->bi_opf & REQ_PREFLUSH) {
1003 			/*
1004 			 * Also need to send a flush to the backing
1005 			 * device.
1006 			 */
1007 			struct bio *flush;
1008 
1009 			flush = bio_alloc_bioset(GFP_NOIO, 0,
1010 						 &dc->disk.bio_split);
1011 			if (!flush) {
1012 				s->iop.status = BLK_STS_RESOURCE;
1013 				goto insert_data;
1014 			}
1015 			bio_copy_dev(flush, bio);
1016 			flush->bi_end_io = backing_request_endio;
1017 			flush->bi_private = cl;
1018 			flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
1019 			/* I/O request sent to backing device */
1020 			closure_bio_submit(s->iop.c, flush, cl);
1021 		}
1022 	} else {
1023 		s->iop.bio = bio_clone_fast(bio, GFP_NOIO, &dc->disk.bio_split);
1024 		/* I/O request sent to backing device */
1025 		bio->bi_end_io = backing_request_endio;
1026 		closure_bio_submit(s->iop.c, bio, cl);
1027 	}
1028 
1029 insert_data:
1030 	closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
1031 	continue_at(cl, cached_dev_write_complete, NULL);
1032 }
1033 
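/*
 * Summary of the three write paths above:
 *
 *	bypass:		the write goes to the backing device only, and the
 *			matching cache range is invalidated via
 *			bch_data_insert().
 *	writeback:	the write goes to the cache only, marked dirty, and
 *			is flushed to the backing device later.
 *	writethrough:	the write goes to the backing device while a clone
 *			is inserted into the cache.
 */
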
1034 static void cached_dev_nodata(struct closure *cl)
1035 {
1036 	struct search *s = container_of(cl, struct search, cl);
1037 	struct bio *bio = &s->bio.bio;
1038 
1039 	if (s->iop.flush_journal)
1040 		bch_journal_meta(s->iop.c, cl);
1041 
1042 	/* If it's a flush, we send the flush to the backing device too */
1043 	bio->bi_end_io = backing_request_endio;
1044 	closure_bio_submit(s->iop.c, bio, cl);
1045 
1046 	continue_at(cl, cached_dev_bio_complete, NULL);
1047 }
1048 
1049 struct detached_dev_io_private {
1050 	struct bcache_device	*d;
1051 	unsigned long		start_time;
1052 	bio_end_io_t		*bi_end_io;
1053 	void			*bi_private;
1054 };
1055 
1056 static void detached_dev_end_io(struct bio *bio)
1057 {
1058 	struct detached_dev_io_private *ddip;
1059 
1060 	ddip = bio->bi_private;
1061 	bio->bi_end_io = ddip->bi_end_io;
1062 	bio->bi_private = ddip->bi_private;
1063 
1064 	generic_end_io_acct(ddip->d->disk->queue, bio_op(bio),
1065 			    &ddip->d->disk->part0, ddip->start_time);
1066 
1067 	if (bio->bi_status) {
1068 		struct cached_dev *dc = container_of(ddip->d,
1069 						     struct cached_dev, disk);
1070 		/* should count I/O error for backing device here */
1071 		bch_count_backing_io_errors(dc, bio);
1072 	}
1073 
1074 	kfree(ddip);
1075 	bio->bi_end_io(bio);
1076 }
1077 
1078 static void detached_dev_do_request(struct bcache_device *d, struct bio *bio)
1079 {
1080 	struct detached_dev_io_private *ddip;
1081 	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
1082 
1083 	/*
1084 	 * No need to call closure_get(&dc->disk.cl) here, because the
1085 	 * upper layer has already opened the bcache device, which
1086 	 * already called closure_get(&dc->disk.cl).
1087 	 */
1088 	ddip = kzalloc(sizeof(struct detached_dev_io_private), GFP_NOIO);
	if (!ddip) {
		/* kzalloc() can fail under GFP_NOIO; error the bio instead of dereferencing NULL */
		bio->bi_status = BLK_STS_RESOURCE;
		bio_endio(bio);
		return;
	}
1089 	ddip->d = d;
1090 	ddip->start_time = jiffies;
1091 	ddip->bi_end_io = bio->bi_end_io;
1092 	ddip->bi_private = bio->bi_private;
1093 	bio->bi_end_io = detached_dev_end_io;
1094 	bio->bi_private = ddip;
1095 
1096 	if ((bio_op(bio) == REQ_OP_DISCARD) &&
1097 	    !blk_queue_discard(bdev_get_queue(dc->bdev)))
1098 		bio->bi_end_io(bio);
1099 	else
1100 		generic_make_request(bio);
1101 }
1102 
1103 /* Cached devices - read & write stuff */
1104 
1105 static blk_qc_t cached_dev_make_request(struct request_queue *q,
1106 					struct bio *bio)
1107 {
1108 	struct search *s;
1109 	struct bcache_device *d = bio->bi_disk->private_data;
1110 	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
1111 	int rw = bio_data_dir(bio);
1112 
1113 	if (unlikely((d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags)) ||
1114 		     dc->io_disable)) {
1115 		bio->bi_status = BLK_STS_IOERR;
1116 		bio_endio(bio);
1117 		return BLK_QC_T_NONE;
1118 	}
1119 
1120 	atomic_set(&dc->backing_idle, 0);
1121 	generic_start_io_acct(q, bio_op(bio), bio_sectors(bio), &d->disk->part0);
1122 
1123 	bio_set_dev(bio, dc->bdev);
1124 	bio->bi_iter.bi_sector += dc->sb.data_offset;
1125 
1126 	if (cached_dev_get(dc)) {
1127 		s = search_alloc(bio, d);
1128 		trace_bcache_request_start(s->d, bio);
1129 
1130 		if (!bio->bi_iter.bi_size) {
1131 			/*
1132 			 * can't call bch_journal_meta from under
1133 			 * generic_make_request
1134 			 */
1135 			continue_at_nobarrier(&s->cl,
1136 					      cached_dev_nodata,
1137 					      bcache_wq);
1138 		} else {
1139 			s->iop.bypass = check_should_bypass(dc, bio);
1140 
1141 			if (rw)
1142 				cached_dev_write(dc, s);
1143 			else
1144 				cached_dev_read(dc, s);
1145 		}
1146 	} else
1147 		/* I/O request sent to backing device */
1148 		detached_dev_do_request(d, bio);
1149 
1150 	return BLK_QC_T_NONE;
1151 }
1152 
1153 static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
1154 			    unsigned int cmd, unsigned long arg)
1155 {
1156 	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
1157 	return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
1158 }
1159 
1160 static int cached_dev_congested(void *data, int bits)
1161 {
1162 	struct bcache_device *d = data;
1163 	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
1164 	struct request_queue *q = bdev_get_queue(dc->bdev);
1165 	int ret = 0;
1166 
1167 	if (bdi_congested(q->backing_dev_info, bits))
1168 		return 1;
1169 
1170 	if (cached_dev_get(dc)) {
1171 		unsigned i;
1172 		struct cache *ca;
1173 
1174 		for_each_cache(ca, d->c, i) {
1175 			q = bdev_get_queue(ca->bdev);
1176 			ret |= bdi_congested(q->backing_dev_info, bits);
1177 		}
1178 
1179 		cached_dev_put(dc);
1180 	}
1181 
1182 	return ret;
1183 }
1184 
1185 void bch_cached_dev_request_init(struct cached_dev *dc)
1186 {
1187 	struct gendisk *g = dc->disk.disk;
1188 
1189 	g->queue->make_request_fn		= cached_dev_make_request;
1190 	g->queue->backing_dev_info->congested_fn = cached_dev_congested;
1191 	dc->disk.cache_miss			= cached_dev_cache_miss;
1192 	dc->disk.ioctl				= cached_dev_ioctl;
1193 }
1194 
1195 /* Flash backed devices */
1196 
1197 static int flash_dev_cache_miss(struct btree *b, struct search *s,
1198 				struct bio *bio, unsigned sectors)
1199 {
1200 	unsigned bytes = min(sectors, bio_sectors(bio)) << 9;
1201 
1202 	swap(bio->bi_iter.bi_size, bytes);
1203 	zero_fill_bio(bio);
1204 	swap(bio->bi_iter.bi_size, bytes);
1205 
1206 	bio_advance(bio, bytes);
1207 
1208 	if (!bio->bi_iter.bi_size)
1209 		return MAP_DONE;
1210 
1211 	return MAP_CONTINUE;
1212 }
1213 
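/*
 * For flash-only volumes a cache miss is simply a hole: there is no backing
 * device to fall through to, so the uncached range is zero-filled in place
 * and the bio is advanced past it.
 */
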
1214 static void flash_dev_nodata(struct closure *cl)
1215 {
1216 	struct search *s = container_of(cl, struct search, cl);
1217 
1218 	if (s->iop.flush_journal)
1219 		bch_journal_meta(s->iop.c, cl);
1220 
1221 	continue_at(cl, search_free, NULL);
1222 }
1223 
1224 static blk_qc_t flash_dev_make_request(struct request_queue *q,
1225 					     struct bio *bio)
1226 {
1227 	struct search *s;
1228 	struct closure *cl;
1229 	struct bcache_device *d = bio->bi_disk->private_data;
1230 
1231 	if (unlikely(d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags))) {
1232 		bio->bi_status = BLK_STS_IOERR;
1233 		bio_endio(bio);
1234 		return BLK_QC_T_NONE;
1235 	}
1236 
1237 	generic_start_io_acct(q, bio_op(bio), bio_sectors(bio), &d->disk->part0);
1238 
1239 	s = search_alloc(bio, d);
1240 	cl = &s->cl;
1241 	bio = &s->bio.bio;
1242 
1243 	trace_bcache_request_start(s->d, bio);
1244 
1245 	if (!bio->bi_iter.bi_size) {
1246 		/*
1247 		 * can't call bch_journal_meta from under
1248 		 * generic_make_request
1249 		 */
1250 		continue_at_nobarrier(&s->cl,
1251 				      flash_dev_nodata,
1252 				      bcache_wq);
1253 		return BLK_QC_T_NONE;
1254 	} else if (bio_data_dir(bio)) {
1255 		bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
1256 					&KEY(d->id, bio->bi_iter.bi_sector, 0),
1257 					&KEY(d->id, bio_end_sector(bio), 0));
1258 
1259 		s->iop.bypass		= (bio_op(bio) == REQ_OP_DISCARD) != 0;
1260 		s->iop.writeback	= true;
1261 		s->iop.bio		= bio;
1262 
1263 		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
1264 	} else {
1265 		closure_call(&s->iop.cl, cache_lookup, NULL, cl);
1266 	}
1267 
1268 	continue_at(cl, search_free, NULL);
1269 	return BLK_QC_T_NONE;
1270 }
1271 
1272 static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
1273 			   unsigned int cmd, unsigned long arg)
1274 {
1275 	return -ENOTTY;
1276 }
1277 
1278 static int flash_dev_congested(void *data, int bits)
1279 {
1280 	struct bcache_device *d = data;
1281 	struct request_queue *q;
1282 	struct cache *ca;
1283 	unsigned i;
1284 	int ret = 0;
1285 
1286 	for_each_cache(ca, d->c, i) {
1287 		q = bdev_get_queue(ca->bdev);
1288 		ret |= bdi_congested(q->backing_dev_info, bits);
1289 	}
1290 
1291 	return ret;
1292 }
1293 
1294 void bch_flash_dev_request_init(struct bcache_device *d)
1295 {
1296 	struct gendisk *g = d->disk;
1297 
1298 	g->queue->make_request_fn		= flash_dev_make_request;
1299 	g->queue->backing_dev_info->congested_fn = flash_dev_congested;
1300 	d->cache_miss				= flash_dev_cache_miss;
1301 	d->ioctl				= flash_dev_ioctl;
1302 }
1303 
1304 void bch_request_exit(void)
1305 {
1306 	if (bch_search_cache)
1307 		kmem_cache_destroy(bch_search_cache);
1308 }
1309 
1310 int __init bch_request_init(void)
1311 {
1312 	bch_search_cache = KMEM_CACHE(search, 0);
1313 	if (!bch_search_cache)
1314 		return -ENOMEM;
1315 
1316 	return 0;
1317 }
1318