// SPDX-License-Identifier: GPL-2.0
/*
 * background writeback - scan btree for dirty data and write it to the backing
 * device
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "writeback.h"

#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/sched/clock.h>
#include <trace/events/bcache.h>

/* Rate limiting */
static uint64_t __calc_target_rate(struct cached_dev *dc)
{
	struct cache_set *c = dc->disk.c;

	/*
	 * This is the size of the cache, minus the amount used for
	 * flash-only devices
	 */
	uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size -
				bcache_flash_devs_sectors_dirty(c);

	/*
	 * Unfortunately there is no single control for the total amount
	 * of dirty data.  If the user states that they want 10% dirty
	 * data in the cache, and has, e.g., 5 backing volumes of equal
	 * size, we try to ensure each backing volume uses about 2% of
	 * the cache for dirty data.
	 */
	uint32_t bdev_share =
		div64_u64(bdev_sectors(dc->bdev) << WRITEBACK_SHARE_SHIFT,
				c->cached_dev_sectors);

	uint64_t cache_dirty_target =
		div_u64(cache_sectors * dc->writeback_percent, 100);

	/* Ensure each backing dev gets at least one dirty share */
	if (bdev_share < 1)
		bdev_share = 1;

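	/*
	 * Illustrative example (assumed numbers, not defaults): with
	 * cache_sectors = 2000000, writeback_percent = 10 and two equally
	 * sized backing devices, cache_dirty_target is 200000 sectors and
	 * each device's bdev_share is (1 << WRITEBACK_SHARE_SHIFT) / 2, so
	 * the value returned below allots each device about 100000 dirty
	 * sectors.
	 */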
	return (cache_dirty_target * bdev_share) >> WRITEBACK_SHARE_SHIFT;
}

static void __update_writeback_rate(struct cached_dev *dc)
{
	/*
	 * PI controller:
	 * Figures out the amount that should be written per second.
	 *
	 * First, the error (number of sectors that are dirty beyond our
	 * target) is calculated.  The error is accumulated (numerically
	 * integrated).
	 *
	 * Then, the proportional value and integral value are scaled
	 * based on configured values.  These are stored as inverses to
	 * avoid fixed point math and to make configuration easy-- e.g.
	 * the default value of 40 for writeback_rate_p_term_inverse
	 * attempts to write at a rate that would retire all the dirty
	 * blocks in 40 seconds.
	 *
	 * The writeback_rate_i_term_inverse value of 10000 means that
	 * 1/10000th of the error is accumulated in the integral term per
	 * second.  This acts as a slow, long-term average that is not
	 * subject to variations in usage like the p term.
	 */
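	/*
	 * Worked example with the defaults above: if we are 40000 sectors
	 * over target, the proportional term alone contributes
	 * 40000 / 40 = 1000 sectors/sec (retiring the excess in 40
	 * seconds), while the scaled integral term grows by
	 * 40000 / 10000 = 4 sectors/sec for every second the error
	 * persists.
	 */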
	int64_t target = __calc_target_rate(dc);
	int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
	int64_t error = dirty - target;
	int64_t proportional_scaled =
		div_s64(error, dc->writeback_rate_p_term_inverse);
	int64_t integral_scaled;
	uint32_t new_rate;

	if ((error < 0 && dc->writeback_rate_integral > 0) ||
	    (error > 0 && time_before64(local_clock(),
			 dc->writeback_rate.next + NSEC_PER_MSEC))) {
		/*
		 * Only decrease the integral term if it's more than
		 * zero.  Only increase the integral term if the device
		 * is keeping up.  (Don't wind up the integral
		 * ineffectively in either case).
		 *
		 * It's necessary to scale this by
		 * writeback_rate_update_seconds to keep the integral
		 * term dimensioned properly.
		 */
		dc->writeback_rate_integral += error *
			dc->writeback_rate_update_seconds;
	}

	integral_scaled = div_s64(dc->writeback_rate_integral,
			dc->writeback_rate_i_term_inverse);

	new_rate = clamp_t(int32_t, (proportional_scaled + integral_scaled),
			dc->writeback_rate_minimum, NSEC_PER_SEC);

	dc->writeback_rate_proportional = proportional_scaled;
	dc->writeback_rate_integral_scaled = integral_scaled;
	dc->writeback_rate_change = new_rate - dc->writeback_rate.rate;
	dc->writeback_rate.rate = new_rate;
	dc->writeback_rate_target = target;
}

static void update_writeback_rate(struct work_struct *work)
{
	struct cached_dev *dc = container_of(to_delayed_work(work),
					     struct cached_dev,
					     writeback_rate_update);
	struct cache_set *c = dc->disk.c;

	/*
	 * Callers should check BCACHE_DEV_RATE_DW_RUNNING before calling
	 * cancel_delayed_work_sync(); set it while this work item runs.
	 */
	set_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
	/* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
	smp_mb();

	/*
	 * CACHE_SET_IO_DISABLE might be set via the sysfs interface,
	 * so check it here too.
	 */
	if (!test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags) ||
	    test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
		clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
		/* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
		smp_mb();
		return;
	}

	down_read(&dc->writeback_lock);

	if (atomic_read(&dc->has_dirty) &&
	    dc->writeback_percent)
		__update_writeback_rate(dc);

	up_read(&dc->writeback_lock);

	/*
	 * CACHE_SET_IO_DISABLE might be set via the sysfs interface,
	 * so check it here too.
	 */
	if (test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags) &&
	    !test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
		schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);
	}

	/*
	 * Callers should check BCACHE_DEV_RATE_DW_RUNNING before calling
	 * cancel_delayed_work_sync(); clear it now that this run is done.
	 */
	clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
	/* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
	smp_mb();
}

static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
{
	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    !dc->writeback_percent)
		return 0;

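	/*
	 * bch_next_delay() turns the number of sectors just issued, at
	 * the current target rate, into how long to sleep before issuing
	 * more writeback.
	 */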
	return bch_next_delay(&dc->writeback_rate, sectors);
}

struct dirty_io {
	struct closure		cl;
	struct cached_dev	*dc;
	uint16_t		sequence;
	struct bio		bio;
};

static void dirty_init(struct keybuf_key *w)
{
	struct dirty_io *io = w->private;
	struct bio *bio = &io->bio;

	bio_init(bio, bio->bi_inline_vecs,
		 DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS));
	if (!io->dc->writeback_percent)
		bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

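	/* KEY_SIZE() is in 512-byte sectors; << 9 converts it to bytes */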
	bio->bi_iter.bi_size	= KEY_SIZE(&w->key) << 9;
	bio->bi_private		= w;
	bch_bio_map(bio, NULL);
}

static void dirty_io_destructor(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	kfree(io);
}

static void write_dirty_finish(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;
	struct cached_dev *dc = io->dc;

	bio_free_pages(&io->bio);

	/* This is kind of a dumb way of signalling errors. */
	if (KEY_DIRTY(&w->key)) {
		int ret;
		unsigned i;
		struct keylist keys;

		bch_keylist_init(&keys);

		bkey_copy(keys.top, &w->key);
		SET_KEY_DIRTY(keys.top, false);
		bch_keylist_push(&keys);

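		/*
		 * Pin the buckets the key points into so they can't be
		 * invalidated and reused while the insert below compares
		 * against the original key.
		 */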
		for (i = 0; i < KEY_PTRS(&w->key); i++)
			atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);

		ret = bch_btree_insert(dc->disk.c, &keys, NULL, &w->key);

		if (ret)
			trace_bcache_writeback_collision(&w->key);

		atomic_long_inc(ret
				? &dc->disk.c->writeback_keys_failed
				: &dc->disk.c->writeback_keys_done);
	}

	bch_keybuf_del(&dc->writeback_keys, w);
	up(&dc->in_flight);

	closure_return_with_destructor(cl, dirty_io_destructor);
}

static void dirty_endio(struct bio *bio)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	if (bio->bi_status) {
		SET_KEY_DIRTY(&w->key, false);
		bch_count_backing_io_errors(io->dc, bio);
	}

	closure_put(&io->cl);
}

static void write_dirty(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;
	struct cached_dev *dc = io->dc;

	uint16_t next_sequence;

	if (atomic_read(&dc->writeback_sequence_next) != io->sequence) {
		/* Not our turn to write; wait for a write to complete */
		closure_wait(&dc->writeback_ordering_wait, cl);

		if (atomic_read(&dc->writeback_sequence_next) == io->sequence) {
			/*
			 * Edge case: our turn arrived in indeterminate
			 * order relative to when we were added to the
			 * wait list, so wake the waiters ourselves.
			 */
			closure_wake_up(&dc->writeback_ordering_wait);
		}

		continue_at(cl, write_dirty, io->dc->writeback_write_wq);
		return;
	}

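	/*
	 * It's our turn: issue the write, then advance the sequence so
	 * writes reach the backing device in the order the keys were
	 * gathered.
	 */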
	next_sequence = io->sequence + 1;

	/*
	 * IO errors are signalled using the dirty bit on the key.
	 * If we failed to read, we should not attempt to write to the
	 * backing device.  Instead, immediately go to write_dirty_finish
	 * to clean up.
	 */
	if (KEY_DIRTY(&w->key)) {
		dirty_init(w);
		bio_set_op_attrs(&io->bio, REQ_OP_WRITE, 0);
		io->bio.bi_iter.bi_sector = KEY_START(&w->key);
		bio_set_dev(&io->bio, io->dc->bdev);
		io->bio.bi_end_io	= dirty_endio;

		/* I/O request sent to backing device */
		closure_bio_submit(io->dc->disk.c, &io->bio, cl);
	}

	atomic_set(&dc->writeback_sequence_next, next_sequence);
	closure_wake_up(&dc->writeback_ordering_wait);

	continue_at(cl, write_dirty_finish, io->dc->writeback_write_wq);
}

static void read_dirty_endio(struct bio *bio)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	/* is_read = 1 */
	bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
			    bio->bi_status, 1,
			    "reading dirty data from cache");

	dirty_endio(bio);
}

static void read_dirty_submit(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);

	closure_bio_submit(io->dc->disk.c, &io->bio, cl);

	continue_at(cl, write_dirty, io->dc->writeback_write_wq);
}

static void read_dirty(struct cached_dev *dc)
{
	unsigned delay = 0;
	struct keybuf_key *next, *keys[MAX_WRITEBACKS_IN_PASS], *w;
	size_t size;
	int nk, i;
	struct dirty_io *io;
	struct closure cl;
	uint16_t sequence = 0;

	BUG_ON(!llist_empty(&dc->writeback_ordering_wait.list));
	atomic_set(&dc->writeback_sequence_next, sequence);
	closure_init_stack(&cl);

	/*
	 * XXX: if we error, background writeback just spins. Should use some
	 * mempools.
	 */

	next = bch_keybuf_next(&dc->writeback_keys);

	while (!kthread_should_stop() &&
	       !test_bit(CACHE_SET_IO_DISABLE, &dc->disk.c->flags) &&
	       next) {
		size = 0;
		nk = 0;

		do {
			BUG_ON(ptr_stale(dc->disk.c, &next->key, 0));

			/*
			 * Don't combine too many operations, even if they
			 * are all small.
			 */
			if (nk >= MAX_WRITEBACKS_IN_PASS)
				break;

			/*
			 * If the current operation is very large, don't
			 * further combine operations.
			 */
			if (size >= MAX_WRITESIZE_IN_PASS)
				break;

			/*
			 * Operations are only eligible to be combined
			 * if they are contiguous.
			 *
			 * TODO: add a heuristic willing to fire a
			 * certain amount of non-contiguous IO per pass,
			 * so that we can benefit from backing device
			 * command queueing.
			 */
			if ((nk != 0) && bkey_cmp(&keys[nk-1]->key,
						&START_KEY(&next->key)))
				break;

			size += KEY_SIZE(&next->key);
			keys[nk++] = next;
		} while ((next = bch_keybuf_next(&dc->writeback_keys)));

		/* Now we have gathered a set of 1..5 keys to write back. */
		for (i = 0; i < nk; i++) {
			w = keys[i];

			io = kzalloc(sizeof(struct dirty_io) +
				     sizeof(struct bio_vec) *
				     DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
				     GFP_KERNEL);
			if (!io)
				goto err;

			w->private	= io;
			io->dc		= dc;
			io->sequence    = sequence++;

			dirty_init(w);
			bio_set_op_attrs(&io->bio, REQ_OP_READ, 0);
			io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
			bio_set_dev(&io->bio,
				    PTR_CACHE(dc->disk.c, &w->key, 0)->bdev);
			io->bio.bi_end_io	= read_dirty_endio;

			if (bch_bio_alloc_pages(&io->bio, GFP_KERNEL))
				goto err_free;

			trace_bcache_writeback(&w->key);

			down(&dc->in_flight);

			/*
			 * We've acquired a semaphore for the maximum
			 * simultaneous number of writebacks; from here
			 * everything happens asynchronously.
			 */
			closure_call(&io->cl, read_dirty_submit, NULL, &cl);
		}

		delay = writeback_delay(dc, size);

		/*
		 * If the control system would wait for at least half a
		 * second, and there have been no requests hitting the
		 * backing disk for a while: use an alternate mode where
		 * we have at most one contiguous set of writebacks in
		 * flight at a time.  If someone wants to do IO it will
		 * be quick, as it will only have to contend with one
		 * operation in flight, and we'll be round-tripping data
		 * to the backing disk as quickly as it can accept it.
		 */
		if (delay >= HZ / 2) {
			/*
			 * 3 means at least 1.5 seconds, up to 7.5 if we
			 * have slowed way down.
			 */
			if (atomic_inc_return(&dc->backing_idle) >= 3) {
				/* Wait for current I/Os to finish */
				closure_sync(&cl);
				/* And immediately launch a new set. */
				delay = 0;
			}
		}

		while (!kthread_should_stop() &&
		       !test_bit(CACHE_SET_IO_DISABLE, &dc->disk.c->flags) &&
		       delay) {
			schedule_timeout_interruptible(delay);
			delay = writeback_delay(dc, 0);
		}
	}

	if (0) {
err_free:
		kfree(w->private);
err:
		bch_keybuf_del(&dc->writeback_keys, w);
	}

	/*
	 * Wait for outstanding writeback IOs to finish (and keybuf slots to
	 * be freed) before refilling again
	 */
	closure_sync(&cl);
}

/* Scan for dirty data */

void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
				  uint64_t offset, int nr_sectors)
{
	struct bcache_device *d = c->devices[inode];
	unsigned stripe_offset, stripe, sectors_dirty;

	if (!d)
		return;

	stripe = offset_to_stripe(d, offset);
	stripe_offset = offset & (d->stripe_size - 1);

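	/*
	 * Worked example (assuming stripe_size = 2048 sectors): offset =
	 * 5000, nr_sectors = 3000 starts in stripe 2 at stripe_offset =
	 * 904; the first pass below adds 1144 sectors to stripe 2, the
	 * second adds the remaining 1856 to stripe 3.
	 */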
	while (nr_sectors) {
		int s = min_t(unsigned, abs(nr_sectors),
			      d->stripe_size - stripe_offset);

		if (nr_sectors < 0)
			s = -s;

		if (stripe >= d->nr_stripes)
			return;

		sectors_dirty = atomic_add_return(s,
					d->stripe_sectors_dirty + stripe);
		if (sectors_dirty == d->stripe_size)
			set_bit(stripe, d->full_dirty_stripes);
		else
			clear_bit(stripe, d->full_dirty_stripes);

		nr_sectors -= s;
		stripe_offset = 0;
		stripe++;
	}
}

static bool dirty_pred(struct keybuf *buf, struct bkey *k)
{
	struct cached_dev *dc = container_of(buf, struct cached_dev,
					     writeback_keys);

	BUG_ON(KEY_INODE(k) != dc->disk.id);

	return KEY_DIRTY(k);
}

static void refill_full_stripes(struct cached_dev *dc)
{
	struct keybuf *buf = &dc->writeback_keys;
	unsigned start_stripe, stripe, next_stripe;
	bool wrapped = false;

	stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned));

	if (stripe >= dc->disk.nr_stripes)
		stripe = 0;

	start_stripe = stripe;

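	/*
	 * Walk the full_dirty_stripes bitmap from where the last scan left
	 * off, queueing keys for each run of completely dirty stripes and
	 * wrapping around once so the whole bitmap is covered.
	 */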
	while (1) {
		stripe = find_next_bit(dc->disk.full_dirty_stripes,
				       dc->disk.nr_stripes, stripe);

		if (stripe == dc->disk.nr_stripes)
			goto next;

		next_stripe = find_next_zero_bit(dc->disk.full_dirty_stripes,
						 dc->disk.nr_stripes, stripe);

		buf->last_scanned = KEY(dc->disk.id,
					stripe * dc->disk.stripe_size, 0);

		bch_refill_keybuf(dc->disk.c, buf,
				  &KEY(dc->disk.id,
				       next_stripe * dc->disk.stripe_size, 0),
				  dirty_pred);

		if (array_freelist_empty(&buf->freelist))
			return;

		stripe = next_stripe;
next:
		if (wrapped && stripe > start_stripe)
			return;

		if (stripe == dc->disk.nr_stripes) {
			stripe = 0;
			wrapped = true;
		}
	}
}

/*
 * Returns true if we scanned the entire disk
 */
static bool refill_dirty(struct cached_dev *dc)
{
	struct keybuf *buf = &dc->writeback_keys;
	struct bkey start = KEY(dc->disk.id, 0, 0);
	struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
	struct bkey start_pos;

	/*
	 * Make sure the keybuf position is inside the range for this disk:
	 * at bringup we might not be attached yet, so this disk's inode nr
	 * isn't initialized at that point.
	 */
	if (bkey_cmp(&buf->last_scanned, &start) < 0 ||
	    bkey_cmp(&buf->last_scanned, &end) > 0)
		buf->last_scanned = start;

	if (dc->partial_stripes_expensive) {
		refill_full_stripes(dc);
		if (array_freelist_empty(&buf->freelist))
			return false;
	}

	start_pos = buf->last_scanned;
	bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);

	if (bkey_cmp(&buf->last_scanned, &end) < 0)
		return false;

	/*
	 * If we get to the end, start scanning again from the beginning,
	 * and only scan up to where we initially started scanning from:
	 */
	buf->last_scanned = start;
	bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred);

	return bkey_cmp(&buf->last_scanned, &start_pos) >= 0;
}

static int bch_writeback_thread(void *arg)
{
	struct cached_dev *dc = arg;
	struct cache_set *c = dc->disk.c;
	bool searched_full_index;

	bch_ratelimit_reset(&dc->writeback_rate);

	while (!kthread_should_stop() &&
	       !test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
		down_write(&dc->writeback_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		/*
		 * If the bcache device is detaching, skip here and continue
		 * to perform writeback.  Otherwise, if there is no dirty
		 * data on the cache, or there is dirty data but writeback
		 * is disabled, the writeback thread should sleep here and
		 * wait for others to wake it up.
		 */
		if (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
		    (!atomic_read(&dc->has_dirty) || !dc->writeback_running)) {
			up_write(&dc->writeback_lock);

			if (kthread_should_stop() ||
			    test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
				set_current_state(TASK_RUNNING);
				break;
			}

			schedule();
			continue;
		}
		set_current_state(TASK_RUNNING);

		searched_full_index = refill_dirty(dc);

		if (searched_full_index &&
		    RB_EMPTY_ROOT(&dc->writeback_keys.keys)) {
			atomic_set(&dc->has_dirty, 0);
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
			bch_write_bdev_super(dc, NULL);
			/*
			 * If the bcache device is detaching via the sysfs
			 * interface, the writeback thread should stop once
			 * there is no dirty data on the cache.  The
			 * BCACHE_DEV_DETACHING flag is set in
			 * bch_cached_dev_detach().
			 */
			if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
				break;
		}

		up_write(&dc->writeback_lock);

		read_dirty(dc);

		if (searched_full_index) {
			unsigned delay = dc->writeback_delay * HZ;

			while (delay &&
			       !kthread_should_stop() &&
			       !test_bit(CACHE_SET_IO_DISABLE, &c->flags) &&
			       !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
				delay = schedule_timeout_interruptible(delay);

			bch_ratelimit_reset(&dc->writeback_rate);
		}
	}

	cached_dev_put(dc);
	wait_for_kthread_stop();

	return 0;
}

/* Init */

struct sectors_dirty_init {
	struct btree_op	op;
	unsigned	inode;
};

static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
				 struct bkey *k)
{
	struct sectors_dirty_init *op = container_of(_op,
						struct sectors_dirty_init, op);
	if (KEY_INODE(k) > op->inode)
		return MAP_DONE;

	if (KEY_DIRTY(k))
		bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
					     KEY_START(k), KEY_SIZE(k));

	return MAP_CONTINUE;
}

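/*
 * Walk the btree once at registration time so the per-stripe dirty
 * counters start out consistent with the keys already present.
 */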
void bch_sectors_dirty_init(struct bcache_device *d)
{
	struct sectors_dirty_init op;

	bch_btree_op_init(&op.op, -1);
	op.inode = d->id;

	bch_btree_map_keys(&op.op, d->c, &KEY(op.inode, 0, 0),
			   sectors_dirty_init_fn, 0);
}

void bch_cached_dev_writeback_init(struct cached_dev *dc)
{
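	/* allow at most 64 dirty-data writebacks in flight at once */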
	sema_init(&dc->in_flight, 64);
	init_rwsem(&dc->writeback_lock);
	bch_keybuf_init(&dc->writeback_keys);

	dc->writeback_metadata		= true;
	dc->writeback_running		= true;
	dc->writeback_percent		= 10;
	dc->writeback_delay		= 30;
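	/*
	 * The rate fields below are in 512-byte sectors per second, so
	 * the initial estimate of 1024 corresponds to 512 KiB/s.
	 */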
	dc->writeback_rate.rate		= 1024;
	dc->writeback_rate_minimum	= 8;

	dc->writeback_rate_update_seconds = WRITEBACK_RATE_UPDATE_SECS_DEFAULT;
	dc->writeback_rate_p_term_inverse = 40;
	dc->writeback_rate_i_term_inverse = 10000;

	WARN_ON(test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags));
	INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
}

int bch_cached_dev_writeback_start(struct cached_dev *dc)
{
	dc->writeback_write_wq = alloc_workqueue("bcache_writeback_wq",
						WQ_MEM_RECLAIM, 0);
	if (!dc->writeback_write_wq)
		return -ENOMEM;

	cached_dev_get(dc);
	dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
					      "bcache_writeback");
	if (IS_ERR(dc->writeback_thread)) {
		cached_dev_put(dc);
		return PTR_ERR(dc->writeback_thread);
	}

	WARN_ON(test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags));
	schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);

	bch_writeback_queue(dc);

	return 0;
}