// SPDX-License-Identifier: GPL-2.0
/*
 * Some low level IO code, and hacks for various block layer limitations
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "bset.h"
#include "debug.h"

#include <linux/blkdev.h>

/* Bios with headers */

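/*
 * Return a bbio (and the bio embedded in it) to the cache set's
 * metadata bio mempool. Only bios obtained from bch_bbio_alloc()
 * may be freed here.
 */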
void bch_bbio_free(struct bio *bio, struct cache_set *c)
{
	struct bbio *b = container_of(bio, struct bbio, bio);

	mempool_free(b, &c->bio_meta);
}

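/*
 * Allocate a bbio from the cache set's mempool and initialize the
 * embedded bio with enough inline bvecs to cover a bucket. GFP_NOIO is
 * used because this can be called from the IO path and must not recurse
 * into reclaim-driven IO.
 */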
struct bio *bch_bbio_alloc(struct cache_set *c)
{
	struct bbio *b = mempool_alloc(&c->bio_meta, GFP_NOIO);
	struct bio *bio = &b->bio;

	bio_init(bio, bio->bi_inline_vecs, bucket_pages(c));

	return bio;
}

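/*
 * Submit a bbio whose key has already been filled in: the first pointer
 * in the key selects both the target cache device and the sector
 * offset. The submit time is recorded so the endio path can measure
 * latency for congestion accounting.
 */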
void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
{
	struct bbio *b = container_of(bio, struct bbio, bio);

	bio->bi_iter.bi_sector	= PTR_OFFSET(&b->key, 0);
	bio_set_dev(bio, PTR_CACHE(c, &b->key, 0)->bdev);

	b->submit_time_us = local_clock_us();
	closure_bio_submit(c, bio, bio->bi_private);
}

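/*
 * Submit a bbio against pointer 'ptr' of key 'k': the pointer is copied
 * into the bbio's own key, then the bio goes out via
 * __bch_submit_bbio().
 *
 * Illustrative call pattern (a sketch only; 'my_endio' and 'cl' are
 * hypothetical names, not part of this file):
 *
 *	struct bio *bio = bch_bbio_alloc(c);
 *
 *	bio->bi_opf	= REQ_OP_READ | REQ_META;
 *	bio->bi_end_io	= my_endio;
 *	bio->bi_private	= cl;
 *	bch_submit_bbio(bio, c, k, 0);
 */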
void bch_submit_bbio(struct bio *bio, struct cache_set *c,
		     struct bkey *k, unsigned ptr)
{
	struct bbio *b = container_of(bio, struct bbio, bio);

	bch_bkey_copy_single_ptr(&b->key, k, ptr);
	__bch_submit_bbio(bio, c);
}

/* IO errors */

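/*
 * Count an IO error against the backing device. Unlike errors on cache
 * devices, these are never decayed: once dc->error_limit is reached,
 * the whole cached_dev is failed via bch_cached_dev_error().
 */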
void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio)
{
	unsigned errors;

	WARN_ONCE(!dc, "NULL pointer to struct cached_dev");

	errors = atomic_add_return(1, &dc->io_errors);
	if (errors < dc->error_limit)
		pr_err("%s: IO error on backing device, unrecoverable",
			dc->backing_dev_name);
	else
		bch_cached_dev_error(dc);
}

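/*
 * Count (and lazily decay) IO errors against a cache device. 'm'
 * describes the IO for the error message; 'is_read' only selects the
 * wording, since a failed read may still be recoverable elsewhere.
 */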
void bch_count_io_errors(struct cache *ca,
			 blk_status_t error,
			 int is_read,
			 const char *m)
{
	/*
	 * Each time io_count advances past error_decay ("refresh"), the
	 * error count is rescaled by 127/128. The halflife of an error
	 * is thus the n satisfying (127/128)^n = 1/2:
	 * log2(1/2)/log2(127/128) * refresh ~= 88 * refresh
	 */

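	/*
	 * Worked example (illustrative numbers): with error_decay set
	 * to 1000 IOs, an error's weight would fall to half after about
	 * 88,000 IOs, since (127/128)^88 ~= 0.5.
	 */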
	if (ca->set->error_decay) {
		unsigned count = atomic_inc_return(&ca->io_count);

		while (count > ca->set->error_decay) {
			unsigned errors;
			unsigned old = count;
			unsigned new = count - ca->set->error_decay;

			/*
			 * First we subtract refresh from count; each time we
			 * successfully do so, we rescale the errors once:
			 */

			count = atomic_cmpxchg(&ca->io_count, old, new);

			if (count == old) {
				count = new;

				errors = atomic_read(&ca->io_errors);
				do {
					old = errors;
					new = ((uint64_t) errors * 127) / 128;
					errors = atomic_cmpxchg(&ca->io_errors,
								old, new);
				} while (old != errors);
			}
		}
	}

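	/*
	 * Error counts are stored scaled up by 2^IO_ERROR_SHIFT
	 * (presumably so the 127/128 decay above can shrink small
	 * counts gradually instead of rounding them straight to zero);
	 * only the shifted-down value is compared against error_limit.
	 */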
	if (error) {
		unsigned errors = atomic_add_return(1 << IO_ERROR_SHIFT,
						    &ca->io_errors);
		errors >>= IO_ERROR_SHIFT;

		if (errors < ca->set->error_limit)
			pr_err("%s: IO error on %s%s",
			       ca->cache_dev_name, m,
			       is_read ? ", recovering." : ".");
		else
			bch_cache_set_error(ca->set,
					    "%s: too many IO errors %s",
					    ca->cache_dev_name, m);
	}
}

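/*
 * Endio-side accounting for a bbio. If the IO exceeded the congestion
 * threshold for its direction, c->congested is pushed negative in
 * proportion to the excess latency (bounded by CONGESTED_MAX); IOs that
 * complete under the threshold slowly pull it back toward zero. Any
 * error is then charged to the cache device the bbio was sent to.
 */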
void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
			      blk_status_t error, const char *m)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct cache *ca = PTR_CACHE(c, &b->key, 0);
	int is_read = (bio_data_dir(bio) == READ ? 1 : 0);

	unsigned threshold = op_is_write(bio_op(bio))
		? c->congested_write_threshold_us
		: c->congested_read_threshold_us;

	if (threshold) {
		unsigned t = local_clock_us();

		int us = t - b->submit_time_us;
		int congested = atomic_read(&c->congested);

		if (us > (int) threshold) {
			int ms = us / 1024;

			c->congested_last_us = t;

			ms = min(ms, CONGESTED_MAX + congested);
			atomic_sub(ms, &c->congested);
		} else if (congested < 0)
			atomic_inc(&c->congested);
	}

	bch_count_io_errors(ca, error, is_read, m);
}

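/*
 * Common completion path for bbios: account latency and errors, then
 * drop the bio reference and put the closure that was waiting on the
 * IO.
 */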
void bch_bbio_endio(struct cache_set *c, struct bio *bio,
		    blk_status_t error, const char *m)
{
	struct closure *cl = bio->bi_private;

	bch_bbio_count_io_errors(c, bio, error, m);
	bio_put(bio);
	closure_put(cl);
}