/*
 * Some low level IO code, and hacks for various block layer limitations
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "bset.h"
#include "debug.h"

#include <linux/blkdev.h>

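/*
 * How many sectors of @bio can go to its device as a single bio: walk
 * the segments until we hit the queue's segment limit or until
 * ->merge_bvec_fn() refuses another bvec, then clamp to
 * queue_max_sectors(). Discards are limited only by
 * max_discard_sectors. We always return at least the first segment's
 * worth of sectors, so the caller can make forward progress.
 */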
static unsigned bch_bio_max_sectors(struct bio *bio)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned ret = 0, seg = 0;

	if (bio->bi_rw & REQ_DISCARD)
		return min(bio_sectors(bio), q->limits.max_discard_sectors);

	bio_for_each_segment(bv, bio, iter) {
		struct bvec_merge_data bvm = {
			.bi_bdev	= bio->bi_bdev,
			.bi_sector	= bio->bi_iter.bi_sector,
			.bi_size	= ret << 9,
			.bi_rw		= bio->bi_rw,
		};

		if (seg == min_t(unsigned, BIO_MAX_PAGES,
				 queue_max_segments(q)))
			break;

		if (q->merge_bvec_fn &&
		    q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len)
			break;

		seg++;
		ret += bv.bv_len >> 9;
	}

	ret = min(ret, queue_max_sectors(q));

	WARN_ON(!ret);
	ret = max_t(int, ret, bio_iovec(bio).bv_len >> 9);

	return ret;
}

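/*
 * Runs once every split of the original bio has completed: restore the
 * saved bi_end_io/bi_private, complete the original bio, and return
 * the hook to its mempool.
 */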
static void bch_bio_submit_split_done(struct closure *cl)
{
	struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl);

	s->bio->bi_end_io = s->bi_end_io;
	s->bio->bi_private = s->bi_private;
	bio_endio_nodec(s->bio, 0);

	closure_debug_destroy(&s->cl);
	mempool_free(s, s->p->bio_split_hook);
}

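/*
 * Completion for each split: on error, mark the original bio not
 * uptodate, then drop this split's ref on the closure. The last ref to
 * go away triggers bch_bio_submit_split_done().
 */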
static void bch_bio_submit_split_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl);

	if (error)
		clear_bit(BIO_UPTODATE, &s->bio->bi_flags);

	bio_put(bio);
	closure_put(cl);
}

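/*
 * Submit @bio, splitting it as needed so that each piece fits what the
 * device can accept (see bch_bio_max_sectors()). Bios with no data
 * (e.g. flushes) and bios that already fit are passed straight
 * through; otherwise a bio_split_hook tracks the pieces via a closure
 * and completes the original bio once they have all finished.
 */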
void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
{
	struct bio_split_hook *s;
	struct bio *n;

	if (!bio_has_data(bio) && !(bio->bi_rw & REQ_DISCARD))
		goto submit;

	if (bio_sectors(bio) <= bch_bio_max_sectors(bio))
		goto submit;

	s = mempool_alloc(p->bio_split_hook, GFP_NOIO);
	closure_init(&s->cl, NULL);

	s->bio		= bio;
	s->p		= p;
	s->bi_end_io	= bio->bi_end_io;
	s->bi_private	= bio->bi_private;
	bio_get(bio);

	do {
		n = bio_next_split(bio, bch_bio_max_sectors(bio),
				   GFP_NOIO, s->p->bio_split);

		n->bi_end_io	= bch_bio_submit_split_endio;
		n->bi_private	= &s->cl;

		closure_get(&s->cl);
		generic_make_request(n);
	} while (n != bio);

	continue_at(&s->cl, bch_bio_submit_split_done, NULL);
submit:
	generic_make_request(bio);
}

/* Bios with headers */

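/* Return a bio allocated by bch_bbio_alloc() to the cache set's mempool. */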
void bch_bbio_free(struct bio *bio, struct cache_set *c)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	mempool_free(b, c->bio_meta);
}

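/*
 * Allocate a bio with a struct bbio header from the cache set's
 * mempool, with inline bvecs for up to a bucket's worth of pages. The
 * embedded key is filled in at submit time by bch_submit_bbio().
 */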
struct bio *bch_bbio_alloc(struct cache_set *c)
{
	struct bbio *b = mempool_alloc(c->bio_meta, GFP_NOIO);
	struct bio *bio = &b->bio;

	bio_init(bio);
	bio->bi_flags		|= BIO_POOL_NONE << BIO_POOL_OFFSET;
	bio->bi_max_vecs	 = bucket_pages(c);
	bio->bi_io_vec		 = bio->bi_inline_vecs;

	return bio;
}

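/*
 * Submit a bbio whose embedded key is already set: aim it at the
 * device and sector the key's first pointer refers to, record the
 * submit time for congestion accounting, and send it down.
 */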
void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
{
	struct bbio *b = container_of(bio, struct bbio, bio);

	bio->bi_iter.bi_sector	= PTR_OFFSET(&b->key, 0);
	bio->bi_bdev		= PTR_CACHE(c, &b->key, 0)->bdev;

	b->submit_time_us = local_clock_us();
	closure_bio_submit(bio, bio->bi_private, PTR_CACHE(c, &b->key, 0));
}

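/* Copy pointer @ptr of @k into the bbio's embedded key, then submit. */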
void bch_submit_bbio(struct bio *bio, struct cache_set *c,
		     struct bkey *k, unsigned ptr)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	bch_bkey_copy_single_ptr(&b->key, k, ptr);
	__bch_submit_bbio(bio, c);
}

/* IO errors */

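/*
 * Count an IO error against @ca, with exponential decay: every
 * error_decay IOs, the accumulated error count is rescaled by 127/128.
 * If the decayed count still reaches error_limit, the error is
 * escalated to bch_cache_set_error().
 */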
void bch_count_io_errors(struct cache *ca, int error, const char *m)
{
	/*
	 * The halflife of an error is:
	 * log2(1/2)/log2(127/128) * refresh ~= 88 * refresh
	 */

	if (ca->set->error_decay) {
		unsigned count = atomic_inc_return(&ca->io_count);

		while (count > ca->set->error_decay) {
			unsigned errors;
			unsigned old = count;
			unsigned new = count - ca->set->error_decay;

			/*
			 * First we subtract refresh from count; each time we
			 * successfully do so, we rescale the errors once:
			 */

			count = atomic_cmpxchg(&ca->io_count, old, new);

			if (count == old) {
				count = new;

				errors = atomic_read(&ca->io_errors);
				do {
					old = errors;
					new = ((uint64_t) errors * 127) / 128;
					errors = atomic_cmpxchg(&ca->io_errors,
								old, new);
				} while (old != errors);
			}
		}
	}

	if (error) {
		char buf[BDEVNAME_SIZE];
		unsigned errors = atomic_add_return(1 << IO_ERROR_SHIFT,
						    &ca->io_errors);
		errors >>= IO_ERROR_SHIFT;

		if (errors < ca->set->error_limit)
			pr_err("%s: IO error on %s, recovering",
			       bdevname(ca->bdev, buf), m);
		else
			bch_cache_set_error(ca->set,
					    "%s: too many IO errors %s",
					    bdevname(ca->bdev, buf), m);
	}
}

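/*
 * Error and latency accounting for a completed bbio. If the IO took
 * longer than the congested threshold for its direction, subtract its
 * latency (approximated as us / 1024 milliseconds) from c->congested,
 * driving it negative; IOs faster than the threshold step it back
 * towards zero. Then count any IO error against the cache device.
 */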
void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
			      int error, const char *m)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct cache *ca = PTR_CACHE(c, &b->key, 0);

	unsigned threshold = bio->bi_rw & REQ_WRITE
		? c->congested_write_threshold_us
		: c->congested_read_threshold_us;

	if (threshold) {
		unsigned t = local_clock_us();

		int us = t - b->submit_time_us;
		int congested = atomic_read(&c->congested);

		if (us > (int) threshold) {
			int ms = us / 1024;
			c->congested_last_us = t;

			ms = min(ms, CONGESTED_MAX + congested);
			atomic_sub(ms, &c->congested);
		} else if (congested < 0)
			atomic_inc(&c->congested);
	}

	bch_count_io_errors(ca, error, m);
}

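/*
 * Standard bbio completion: do error/latency accounting, then drop the
 * submitter's references on the bio and on the closure it was
 * submitted under.
 */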
void bch_bbio_endio(struct cache_set *c, struct bio *bio,
		    int error, const char *m)
{
	struct closure *cl = bio->bi_private;

	bch_bbio_count_io_errors(c, bio, error, m);
	bio_put(bio);
	closure_put(cl);
}