xref: /openbmc/linux/drivers/md/bcache/writeback.c (revision 84fbfc33)
/*
 * background writeback - scan btree for dirty data and write it to the backing
 * device
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "writeback.h"

#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/sched/clock.h>
#include <trace/events/bcache.h>

/* Rate limiting */

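/*
 * __update_writeback_rate() implements a proportional-derivative (PD)
 * controller: the target is writeback_percent of the cache's usable
 * sectors, scaled by this backing device's share of cached_dev_sectors.
 * The proportional term tracks how far the dirty count is from that
 * target, the derivative term (smoothed with an EWMA) tracks how fast it
 * is changing; both are scaled to sectors per second and the resulting
 * rate is clamped to [1, NSEC_PER_MSEC].
 */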
static void __update_writeback_rate(struct cached_dev *dc)
{
	struct cache_set *c = dc->disk.c;
	uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size -
				bcache_flash_devs_sectors_dirty(c);
	uint64_t cache_dirty_target =
		div_u64(cache_sectors * dc->writeback_percent, 100);

	int64_t target = div64_u64(cache_dirty_target * bdev_sectors(dc->bdev),
				   c->cached_dev_sectors);

	/* PD controller */

	int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
	int64_t derivative = dirty - dc->disk.sectors_dirty_last;
	int64_t proportional = dirty - target;
	int64_t change;

	dc->disk.sectors_dirty_last = dirty;

	/* Scale to sectors per second */

	proportional *= dc->writeback_rate_update_seconds;
	proportional = div_s64(proportional, dc->writeback_rate_p_term_inverse);

	derivative = div_s64(derivative, dc->writeback_rate_update_seconds);

	derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative,
			      (dc->writeback_rate_d_term /
			       dc->writeback_rate_update_seconds) ?: 1, 0);

	derivative *= dc->writeback_rate_d_term;
	derivative = div_s64(derivative, dc->writeback_rate_p_term_inverse);

	change = proportional + derivative;

	/* Don't increase writeback rate if the device isn't keeping up */
	if (change > 0 &&
	    time_after64(local_clock(),
			 dc->writeback_rate.next + NSEC_PER_MSEC))
		change = 0;

	dc->writeback_rate.rate =
		clamp_t(int64_t, (int64_t) dc->writeback_rate.rate + change,
			1, NSEC_PER_MSEC);

	dc->writeback_rate_proportional = proportional;
	dc->writeback_rate_derivative = derivative;
	dc->writeback_rate_change = change;
	dc->writeback_rate_target = target;
}

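/*
 * Delayed-work callback: with writeback_lock held for read, recompute the
 * rate if the device has dirty data and writeback_percent is nonzero, then
 * re-arm itself to run again writeback_rate_update_seconds from now.
 */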
static void update_writeback_rate(struct work_struct *work)
{
	struct cached_dev *dc = container_of(to_delayed_work(work),
					     struct cached_dev,
					     writeback_rate_update);

	down_read(&dc->writeback_lock);

	if (atomic_read(&dc->has_dirty) &&
	    dc->writeback_percent)
		__update_writeback_rate(dc);

	up_read(&dc->writeback_lock);

	schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);
}

static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
{
	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    !dc->writeback_percent)
		return 0;

	return bch_next_delay(&dc->writeback_rate, sectors);
}

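/*
 * One in-flight writeback of a single dirty extent: a closure for
 * completion chaining plus a bio whose inline bvecs are sized for the
 * key when the struct is allocated in read_dirty().
 */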
struct dirty_io {
	struct closure		cl;
	struct cached_dev	*dc;
	struct bio		bio;
};

static void dirty_init(struct keybuf_key *w)
{
	struct dirty_io *io = w->private;
	struct bio *bio = &io->bio;

	bio_init(bio, bio->bi_inline_vecs,
		 DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS));
	if (!io->dc->writeback_percent)
		bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

	bio->bi_iter.bi_size	= KEY_SIZE(&w->key) << 9;
	bio->bi_private		= w;
	bch_bio_map(bio, NULL);
}

static void dirty_io_destructor(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	kfree(io);
}

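/*
 * Final stage of the writeback pipeline: free the data pages and, if the
 * key is still dirty (i.e. no write error occurred), insert a clean copy
 * of it back into the btree.  The bucket pins taken here keep the
 * pointed-to buckets from being reused while the insert is in flight; an
 * insert failure means the key was overwritten in the meantime and is
 * counted as a writeback collision.
 */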
static void write_dirty_finish(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;
	struct cached_dev *dc = io->dc;

	bio_free_pages(&io->bio);

	/* This is kind of a dumb way of signalling errors. */
	if (KEY_DIRTY(&w->key)) {
		int ret;
		unsigned i;
		struct keylist keys;

		bch_keylist_init(&keys);

		bkey_copy(keys.top, &w->key);
		SET_KEY_DIRTY(keys.top, false);
		bch_keylist_push(&keys);

		for (i = 0; i < KEY_PTRS(&w->key); i++)
			atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);

		ret = bch_btree_insert(dc->disk.c, &keys, NULL, &w->key);

		if (ret)
			trace_bcache_writeback_collision(&w->key);

		atomic_long_inc(ret
				? &dc->disk.c->writeback_keys_failed
				: &dc->disk.c->writeback_keys_done);
	}

	bch_keybuf_del(&dc->writeback_keys, w);
	up(&dc->in_flight);

	closure_return_with_destructor(cl, dirty_io_destructor);
}

static void dirty_endio(struct bio *bio)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	if (bio->bi_status)
		SET_KEY_DIRTY(&w->key, false);

	closure_put(&io->cl);
}

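/*
 * Second stage of the pipeline: the dirty data has been read from the
 * cache device, so reinitialize the same bio and write it out to the
 * backing device, then continue to write_dirty_finish() on the writeback
 * workqueue.
 */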
static void write_dirty(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;

	dirty_init(w);
	bio_set_op_attrs(&io->bio, REQ_OP_WRITE, 0);
	io->bio.bi_iter.bi_sector = KEY_START(&w->key);
	bio_set_dev(&io->bio, io->dc->bdev);
	io->bio.bi_end_io	= dirty_endio;

	closure_bio_submit(&io->bio, cl);

	continue_at(cl, write_dirty_finish, io->dc->writeback_write_wq);
}

static void read_dirty_endio(struct bio *bio)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
			    bio->bi_status, "reading dirty data from cache");

	dirty_endio(bio);
}

static void read_dirty_submit(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);

	closure_bio_submit(&io->bio, cl);

	continue_at(cl, write_dirty, io->dc->writeback_write_wq);
}

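/*
 * First stage of the pipeline: walk the keys queued up in
 * dc->writeback_keys, allocate a dirty_io for each one and read the dirty
 * data from the cache device.  Progress is throttled according to
 * dc->writeback_rate via writeback_delay(), and dc->in_flight (a counting
 * semaphore) bounds the number of extents in flight at once.
 */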
static void read_dirty(struct cached_dev *dc)
{
	unsigned delay = 0;
	struct keybuf_key *w;
	struct dirty_io *io;
	struct closure cl;

	closure_init_stack(&cl);

	/*
	 * XXX: if we error, background writeback just spins. Should use some
	 * mempools.
	 */

	while (!kthread_should_stop()) {

		w = bch_keybuf_next(&dc->writeback_keys);
		if (!w)
			break;

		BUG_ON(ptr_stale(dc->disk.c, &w->key, 0));

		if (KEY_START(&w->key) != dc->last_read ||
		    jiffies_to_msecs(delay) > 50)
			while (!kthread_should_stop() && delay)
				delay = schedule_timeout_interruptible(delay);

		dc->last_read	= KEY_OFFSET(&w->key);

		io = kzalloc(sizeof(struct dirty_io) + sizeof(struct bio_vec)
			     * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
			     GFP_KERNEL);
		if (!io)
			goto err;

		w->private	= io;
		io->dc		= dc;

		dirty_init(w);
		bio_set_op_attrs(&io->bio, REQ_OP_READ, 0);
		io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
		bio_set_dev(&io->bio, PTR_CACHE(dc->disk.c, &w->key, 0)->bdev);
		io->bio.bi_end_io	= read_dirty_endio;

		if (bio_alloc_pages(&io->bio, GFP_KERNEL))
			goto err_free;

		trace_bcache_writeback(&w->key);

		down(&dc->in_flight);
		closure_call(&io->cl, read_dirty_submit, NULL, &cl);

		delay = writeback_delay(dc, KEY_SIZE(&w->key));
	}

	if (0) {
err_free:
		kfree(w->private);
err:
		bch_keybuf_del(&dc->writeback_keys, w);
	}

	/*
	 * Wait for outstanding writeback IOs to finish (and keybuf slots to be
	 * freed) before refilling again
	 */
	closure_sync(&cl);
}

/* Scan for dirty data */

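/*
 * Per-stripe dirty sector accounting: each bcache_device divides its
 * address space into fixed-size stripes, and this updates the dirty count
 * for every stripe a key overlaps (nr_sectors may be negative when
 * sectors go clean).  Stripes that become completely dirty are flagged in
 * d->full_dirty_stripes so writeback can prefer them.
 */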
void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
				  uint64_t offset, int nr_sectors)
{
	struct bcache_device *d = c->devices[inode];
	unsigned stripe_offset, stripe, sectors_dirty;

	if (!d)
		return;

	stripe = offset_to_stripe(d, offset);
	stripe_offset = offset & (d->stripe_size - 1);

	while (nr_sectors) {
		int s = min_t(unsigned, abs(nr_sectors),
			      d->stripe_size - stripe_offset);

		if (nr_sectors < 0)
			s = -s;

		if (stripe >= d->nr_stripes)
			return;

		sectors_dirty = atomic_add_return(s,
					d->stripe_sectors_dirty + stripe);
		if (sectors_dirty == d->stripe_size)
			set_bit(stripe, d->full_dirty_stripes);
		else
			clear_bit(stripe, d->full_dirty_stripes);

		nr_sectors -= s;
		stripe_offset = 0;
		stripe++;
	}
}

static bool dirty_pred(struct keybuf *buf, struct bkey *k)
{
	struct cached_dev *dc = container_of(buf, struct cached_dev, writeback_keys);

	BUG_ON(KEY_INODE(k) != dc->disk.id);

	return KEY_DIRTY(k);
}

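/*
 * When partial stripe writes are expensive on the backing device (for
 * example parity RAID), only refill the keybuf from stripes that are
 * completely dirty, scanning the full_dirty_stripes bitmap circularly
 * starting from the last scanned position.
 */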
static void refill_full_stripes(struct cached_dev *dc)
{
	struct keybuf *buf = &dc->writeback_keys;
	unsigned start_stripe, stripe, next_stripe;
	bool wrapped = false;

	stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned));

	if (stripe >= dc->disk.nr_stripes)
		stripe = 0;

	start_stripe = stripe;

	while (1) {
		stripe = find_next_bit(dc->disk.full_dirty_stripes,
				       dc->disk.nr_stripes, stripe);

		if (stripe == dc->disk.nr_stripes)
			goto next;

		next_stripe = find_next_zero_bit(dc->disk.full_dirty_stripes,
						 dc->disk.nr_stripes, stripe);

		buf->last_scanned = KEY(dc->disk.id,
					stripe * dc->disk.stripe_size, 0);

		bch_refill_keybuf(dc->disk.c, buf,
				  &KEY(dc->disk.id,
				       next_stripe * dc->disk.stripe_size, 0),
				  dirty_pred);

		if (array_freelist_empty(&buf->freelist))
			return;

		stripe = next_stripe;
next:
		if (wrapped && stripe > start_stripe)
			return;

		if (stripe == dc->disk.nr_stripes) {
			stripe = 0;
			wrapped = true;
		}
	}
}

/*
 * Returns true if we scanned the entire disk
 */
static bool refill_dirty(struct cached_dev *dc)
{
	struct keybuf *buf = &dc->writeback_keys;
	struct bkey start = KEY(dc->disk.id, 0, 0);
	struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
	struct bkey start_pos;

	/*
	 * Make sure the keybuf position is inside the range for this disk:
	 * at bringup we might not be attached yet, so this disk's inode nr
	 * may not be initialized at that point.
	 */
	if (bkey_cmp(&buf->last_scanned, &start) < 0 ||
	    bkey_cmp(&buf->last_scanned, &end) > 0)
		buf->last_scanned = start;

	if (dc->partial_stripes_expensive) {
		refill_full_stripes(dc);
		if (array_freelist_empty(&buf->freelist))
			return false;
	}

	start_pos = buf->last_scanned;
	bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);

	if (bkey_cmp(&buf->last_scanned, &end) < 0)
		return false;

	/*
	 * If we get to the end, start scanning again from the beginning, and
	 * only scan up to where we initially started scanning from:
	 */
	buf->last_scanned = start;
	bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred);

	return bkey_cmp(&buf->last_scanned, &start_pos) >= 0;
}

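/*
 * The per-device writeback thread: sleeps while there is nothing dirty
 * (or writeback is disabled and we are not detaching), otherwise refills
 * the keybuf and writes the dirty data back via read_dirty().  Once a
 * full index scan finds no dirty keys left, the backing device is marked
 * clean in its superblock.
 */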
static int bch_writeback_thread(void *arg)
{
	struct cached_dev *dc = arg;
	bool searched_full_index;

	while (!kthread_should_stop()) {
		down_write(&dc->writeback_lock);
		if (!atomic_read(&dc->has_dirty) ||
		    (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
		     !dc->writeback_running)) {
			up_write(&dc->writeback_lock);
			set_current_state(TASK_INTERRUPTIBLE);

			if (kthread_should_stop())
				return 0;

			schedule();
			continue;
		}

		searched_full_index = refill_dirty(dc);

		if (searched_full_index &&
		    RB_EMPTY_ROOT(&dc->writeback_keys.keys)) {
			atomic_set(&dc->has_dirty, 0);
			cached_dev_put(dc);
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
			bch_write_bdev_super(dc, NULL);
		}

		up_write(&dc->writeback_lock);

		bch_ratelimit_reset(&dc->writeback_rate);
		read_dirty(dc);

		if (searched_full_index) {
			unsigned delay = dc->writeback_delay * HZ;

			while (delay &&
			       !kthread_should_stop() &&
			       !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
				delay = schedule_timeout_interruptible(delay);
		}
	}

	return 0;
}

/* Init */

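/*
 * The sectors_dirty_init machinery walks the btree once for this device's
 * inode at register time and rebuilds the in-memory per-stripe dirty
 * counts from the dirty keys found on disk.
 */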
struct sectors_dirty_init {
	struct btree_op	op;
	unsigned	inode;
};

static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
				 struct bkey *k)
{
	struct sectors_dirty_init *op = container_of(_op,
						struct sectors_dirty_init, op);
	if (KEY_INODE(k) > op->inode)
		return MAP_DONE;

	if (KEY_DIRTY(k))
		bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
					     KEY_START(k), KEY_SIZE(k));

	return MAP_CONTINUE;
}

void bch_sectors_dirty_init(struct bcache_device *d)
{
	struct sectors_dirty_init op;

	bch_btree_op_init(&op.op, -1);
	op.inode = d->id;

	bch_btree_map_keys(&op.op, d->c, &KEY(op.inode, 0, 0),
			   sectors_dirty_init_fn, 0);

	d->sectors_dirty_last = bcache_dev_sectors_dirty(d);
}

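/*
 * Set up writeback state and default tunables for a cached_dev: at most
 * 64 extents in flight, a 10% dirty target, a 30 second idle delay after
 * a full scan, and a rate update every 5 seconds.  The writeback thread
 * and workqueue themselves are created later by
 * bch_cached_dev_writeback_start().
 */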
void bch_cached_dev_writeback_init(struct cached_dev *dc)
{
	sema_init(&dc->in_flight, 64);
	init_rwsem(&dc->writeback_lock);
	bch_keybuf_init(&dc->writeback_keys);

	dc->writeback_metadata		= true;
	dc->writeback_running		= true;
	dc->writeback_percent		= 10;
	dc->writeback_delay		= 30;
	dc->writeback_rate.rate		= 1024;

	dc->writeback_rate_update_seconds = 5;
	dc->writeback_rate_d_term	= 30;
	dc->writeback_rate_p_term_inverse = 6000;

	INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
}

int bch_cached_dev_writeback_start(struct cached_dev *dc)
{
	dc->writeback_write_wq = alloc_workqueue("bcache_writeback_wq",
						WQ_MEM_RECLAIM, 0);
	if (!dc->writeback_write_wq)
		return -ENOMEM;

	dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
					      "bcache_writeback");
	if (IS_ERR(dc->writeback_thread))
		return PTR_ERR(dc->writeback_thread);

	schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);

	bch_writeback_queue(dc);

	return 0;
}