xref: /openbmc/linux/drivers/md/bcache/movinggc.c (revision 2d96b44f)
/*
 * Moving/copying garbage collector
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"

#include <trace/events/bcache.h>

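/*
 * State for copying one key's worth of data: the closure ties the read and
 * rewrite together, @w is the key taken from moving_gc_keys, @op reinserts
 * the data, and @bio (with inline bvecs) carries the payload.
 */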
struct moving_io {
	struct closure		cl;
	struct keybuf_key	*w;
	struct data_insert_op	op;
	struct bbio		bio;
};

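/*
 * Keybuf predicate: a key is a candidate for moving GC if any of its
 * pointers lands in a bucket that bch_moving_gc() tagged GC_MOVE.
 */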
static bool moving_pred(struct keybuf *buf, struct bkey *k)
{
	struct cache_set *c = container_of(buf, struct cache_set,
					   moving_gc_keys);
	unsigned i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i) &&
		    GC_MOVE(PTR_BUCKET(c, k, i)))
			return true;

	return false;
}

/* Moving GC - IO loop */

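/* Last step of the closure chain: all I/O is finished, free the moving_io. */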
static void moving_io_destructor(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);
	kfree(io);
}

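/*
 * Runs after the rewrite completes: free the bounce pages, note a replace
 * collision (the data was rewritten while we were moving it), drop the key
 * from the keybuf and release our in-flight slot.
 */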
static void write_moving_finish(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);
	struct bio *bio = &io->bio.bio;

	bio_free_pages(bio);

	if (io->op.replace_collision)
		trace_bcache_gc_copy_collision(&io->w->key);

	bch_keybuf_del(&io->op.c->moving_gc_keys, io->w);

	up(&io->op.c->moving_in_flight);

	closure_return_with_destructor(cl, moving_io_destructor);
}

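/*
 * Completion for the read of the old copy: record any error, and treat a
 * clean key whose pointer has gone stale as -EINTR so write_moving() skips
 * the reinsert.
 */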
static void read_moving_endio(struct bio *bio)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct moving_io *io = container_of(bio->bi_private,
					    struct moving_io, cl);

	if (bio->bi_error)
		io->op.error = bio->bi_error;
	else if (!KEY_DIRTY(&b->key) &&
		 ptr_stale(io->op.c, &b->key, 0)) {
		io->op.error = -EINTR;
	}

	bch_bbio_endio(io->op.c, bio, bio->bi_error, "reading data to move");
}

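/*
 * (Re)initialize the embedded bio to cover the key's data: idle I/O
 * priority, size taken from KEY_SIZE(), inline bvecs, and bi_private
 * pointing back at io->cl for the completion path.
 */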
static void moving_init(struct moving_io *io)
{
	struct bio *bio = &io->bio.bio;

	bio_init(bio);
	bio_get(bio);
	bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

	bio->bi_iter.bi_size	= KEY_SIZE(&io->w->key) << 9;
	bio->bi_max_vecs	= DIV_ROUND_UP(KEY_SIZE(&io->w->key),
					       PAGE_SECTORS);
	bio->bi_private		= &io->cl;
	bio->bi_io_vec		= bio->bi_inline_vecs;
	bch_bio_map(bio, NULL);
}

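/*
 * If the read succeeded, reinsert the data at the same logical position.
 * replace_key makes the insert a compare-and-swap: it fails harmlessly if
 * the original key changed while we were copying.
 */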
static void write_moving(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);
	struct data_insert_op *op = &io->op;

	if (!op->error) {
		moving_init(io);

		io->bio.bio.bi_iter.bi_sector = KEY_START(&io->w->key);
		op->write_prio		= 1;
		op->bio			= &io->bio.bio;

		op->writeback		= KEY_DIRTY(&io->w->key);
		op->csum		= KEY_CSUM(&io->w->key);

		bkey_copy(&op->replace_key, &io->w->key);
		op->replace		= true;

		closure_call(&op->cl, bch_data_insert, NULL, cl);
	}

	continue_at(cl, write_moving_finish, op->wq);
}

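/* Read the existing copy; write_moving() runs once the read completes. */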
static void read_moving_submit(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);
	struct bio *bio = &io->bio.bio;

	bch_submit_bbio(bio, io->op.c, &io->w->key, 0);

	continue_at(cl, write_moving, io->op.wq);
}

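/*
 * Main moving-GC loop: scan moving_gc_keys for keys that live in GC_MOVE
 * buckets and copy each one. The moving_in_flight semaphore bounds the
 * number of concurrent moves; the on-stack closure waits for them all.
 */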
static void read_moving(struct cache_set *c)
{
	struct keybuf_key *w;
	struct moving_io *io;
	struct bio *bio;
	struct closure cl;

	closure_init_stack(&cl);

	/* XXX: if we error, background writeback could stall indefinitely */

	while (!test_bit(CACHE_SET_STOPPING, &c->flags)) {
		w = bch_keybuf_next_rescan(c, &c->moving_gc_keys,
					   &MAX_KEY, moving_pred);
		if (!w)
			break;

		if (ptr_stale(c, &w->key, 0)) {
			bch_keybuf_del(&c->moving_gc_keys, w);
			continue;
		}

		io = kzalloc(sizeof(struct moving_io) + sizeof(struct bio_vec)
			     * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
			     GFP_KERNEL);
		if (!io)
			goto err;

		w->private	= io;
		io->w		= w;
		io->op.inode	= KEY_INODE(&w->key);
		io->op.c	= c;
		io->op.wq	= c->moving_gc_wq;

		moving_init(io);
		bio = &io->bio.bio;

		bio_set_op_attrs(bio, REQ_OP_READ, 0);
		bio->bi_end_io	= read_moving_endio;

		if (bio_alloc_pages(bio, GFP_KERNEL))
			goto err;

		trace_bcache_gc_copy(&w->key);

		down(&c->moving_in_flight);
		closure_call(&io->cl, read_moving_submit, NULL, &cl);
	}

	if (0) {
err:		if (!IS_ERR_OR_NULL(w->private))
			kfree(w->private);

		bch_keybuf_del(&c->moving_gc_keys, w);
	}

	closure_sync(&cl);
}

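/*
 * Heap comparator: rank candidate buckets by live sectors so that the
 * fullest (most expensive to move) buckets sit at the top of the heap and
 * get displaced or popped first.
 */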
static bool bucket_cmp(struct bucket *l, struct bucket *r)
{
	return GC_SECTORS_USED(l) < GC_SECTORS_USED(r);
}

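/* Live sectors in the bucket at the top of the heap, or 0 if it's empty. */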
static unsigned bucket_heap_top(struct cache *ca)
{
	struct bucket *b;
	return (b = heap_peek(&ca->heap)) ? GC_SECTORS_USED(b) : 0;
}

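/*
 * Pick the buckets to compact: for each cache, build a heap of partially
 * used data buckets, drop candidates until their live data fits in the
 * movinggc reserve, tag the rest GC_MOVE, then copy their contents out via
 * read_moving() so the buckets can be reused.
 */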
void bch_moving_gc(struct cache_set *c)
{
	struct cache *ca;
	struct bucket *b;
	unsigned i;

	if (!c->copy_gc_enabled)
		return;

	mutex_lock(&c->bucket_lock);

	for_each_cache(ca, c, i) {
		unsigned sectors_to_move = 0;
		unsigned reserve_sectors = ca->sb.bucket_size *
			fifo_used(&ca->free[RESERVE_MOVINGGC]);

		ca->heap.used = 0;

		for_each_bucket(b, ca) {
			if (GC_MARK(b) == GC_MARK_METADATA ||
			    !GC_SECTORS_USED(b) ||
			    GC_SECTORS_USED(b) == ca->sb.bucket_size ||
			    atomic_read(&b->pin))
				continue;

			if (!heap_full(&ca->heap)) {
				sectors_to_move += GC_SECTORS_USED(b);
				heap_add(&ca->heap, b, bucket_cmp);
			} else if (bucket_cmp(b, heap_peek(&ca->heap))) {
				sectors_to_move -= bucket_heap_top(ca);
				sectors_to_move += GC_SECTORS_USED(b);

				ca->heap.data[0] = b;
				heap_sift(&ca->heap, 0, bucket_cmp);
			}
		}

		while (sectors_to_move > reserve_sectors) {
			heap_pop(&ca->heap, b, bucket_cmp);
			sectors_to_move -= GC_SECTORS_USED(b);
		}

		while (heap_pop(&ca->heap, b, bucket_cmp))
			SET_GC_MOVE(b, 1);
	}

	mutex_unlock(&c->bucket_lock);

	c->moving_gc_keys.last_scanned = ZERO_KEY;

	read_moving(c);
}

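/* Set up the moving-GC keybuf and allow up to 64 copies in flight at once. */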
void bch_moving_init_cache_set(struct cache_set *c)
{
	bch_keybuf_init(&c->moving_gc_keys);
	sema_init(&c->moving_in_flight, 64);
}