xref: /openbmc/linux/drivers/md/bcache/io.c (revision 95db3b25)
/*
 * Some low level IO code, and hacks for various block layer limitations
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "bset.h"
#include "debug.h"

#include <linux/blkdev.h>

/* Bios with headers */
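
/*
 * As used in this file, a bbio wraps a struct bio together with the bkey it
 * was issued for and a submission timestamp: the key supplies the target
 * cache device and starting sector in __bch_submit_bbio(), and the timestamp
 * feeds the latency/congestion accounting in bch_bbio_count_io_errors().
 * Allocations come from the per-cache-set bio_meta mempool.
 */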

void bch_bbio_free(struct bio *bio, struct cache_set *c)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	mempool_free(b, c->bio_meta);
}

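/*
 * bch_bbio_alloc() sizes the bio to cover a full bucket's worth of pages and
 * points bi_io_vec at the inline vecs, which the bio_meta mempool element is
 * presumably allocated large enough to hold. GFP_NOIO keeps the allocation
 * from recursing back into the IO path under memory pressure.
 */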
struct bio *bch_bbio_alloc(struct cache_set *c)
{
	struct bbio *b = mempool_alloc(c->bio_meta, GFP_NOIO);
	struct bio *bio = &b->bio;

	bio_init(bio);
	bio->bi_flags		|= BIO_POOL_NONE << BIO_POOL_OFFSET;
	bio->bi_max_vecs	 = bucket_pages(c);
	bio->bi_io_vec		 = bio->bi_inline_vecs;

	return bio;
}

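/*
 * __bch_submit_bbio() expects b->key to already hold a single pointer: that
 * pointer's offset becomes the starting sector and its cache's bdev becomes
 * the target device. The submit time is recorded so the completion path can
 * account for latency.
 */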
void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
{
	struct bbio *b = container_of(bio, struct bbio, bio);

	bio->bi_iter.bi_sector	= PTR_OFFSET(&b->key, 0);
	bio->bi_bdev		= PTR_CACHE(c, &b->key, 0)->bdev;

	b->submit_time_us = local_clock_us();
	closure_bio_submit(bio, bio->bi_private);
}

void bch_submit_bbio(struct bio *bio, struct cache_set *c,
		     struct bkey *k, unsigned ptr)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	bch_bkey_copy_single_ptr(&b->key, k, ptr);
	__bch_submit_bbio(bio, c);
}

/* IO errors */

void bch_count_io_errors(struct cache *ca, int error, const char *m)
{
	/*
	 * The halflife of an error is:
	 * log2(1/2)/log2(127/128) * refresh ~= 88 * refresh
	 */
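	/*
	 * Worked out: each decay step below scales the error count by
	 * 127/128, so after n steps a count c becomes c * (127/128)^n.
	 * Solving (127/128)^n = 1/2 gives n = log(1/2)/log(127/128) ~= 88.4,
	 * i.e. roughly 88 decay intervals (of error_decay IOs each) to halve
	 * the error count, which is where the figure above comes from.
	 */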

	if (ca->set->error_decay) {
		unsigned count = atomic_inc_return(&ca->io_count);

		while (count > ca->set->error_decay) {
			unsigned errors;
			unsigned old = count;
			unsigned new = count - ca->set->error_decay;

			/*
			 * First we subtract refresh from count; each time we
			 * successfully do so, we rescale the errors once:
			 */

			count = atomic_cmpxchg(&ca->io_count, old, new);

			if (count == old) {
				count = new;

				errors = atomic_read(&ca->io_errors);
				do {
					old = errors;
					new = ((uint64_t) errors * 127) / 128;
					errors = atomic_cmpxchg(&ca->io_errors,
								old, new);
				} while (old != errors);
			}
		}
	}

	if (error) {
		char buf[BDEVNAME_SIZE];
		unsigned errors = atomic_add_return(1 << IO_ERROR_SHIFT,
						    &ca->io_errors);
		errors >>= IO_ERROR_SHIFT;

		if (errors < ca->set->error_limit)
			pr_err("%s: IO error on %s, recovering",
			       bdevname(ca->bdev, buf), m);
		else
			bch_cache_set_error(ca->set,
					    "%s: too many IO errors %s",
					    bdevname(ca->bdev, buf), m);
	}
}

void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
			      int error, const char *m)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct cache *ca = PTR_CACHE(c, &b->key, 0);

	unsigned threshold = bio->bi_rw & REQ_WRITE
		? c->congested_write_threshold_us
		: c->congested_read_threshold_us;

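	/*
	 * Congestion accounting (a rough reading of the code below): us/1024
	 * approximates the IO latency in milliseconds with a cheap shift. An
	 * IO slower than the threshold drives c->congested further negative,
	 * clamped so it stays within about CONGESTED_MAX of zero, while an IO
	 * that completes in time nudges a negative counter back toward zero.
	 */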
	if (threshold) {
		unsigned t = local_clock_us();

		int us = t - b->submit_time_us;
		int congested = atomic_read(&c->congested);

		if (us > (int) threshold) {
			int ms = us / 1024;
			c->congested_last_us = t;

			ms = min(ms, CONGESTED_MAX + congested);
			atomic_sub(ms, &c->congested);
		} else if (congested < 0)
			atomic_inc(&c->congested);
	}

	bch_count_io_errors(ca, error, m);
}

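/*
 * bch_bbio_endio() is the common completion path: it feeds the error and
 * latency into the accounting above, releases the bio's reference, and drops
 * the closure reference the submitter stashed in bi_private.
 */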
void bch_bbio_endio(struct cache_set *c, struct bio *bio,
		    int error, const char *m)
{
	struct closure *cl = bio->bi_private;

	bch_bbio_count_io_errors(c, bio, error, m);
	bio_put(bio);
	closure_put(cl);
}