/*
 * background writeback - scan btree for dirty data and write it to the backing
 * device
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "writeback.h"

#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <trace/events/bcache.h>

/* Rate limiting */

static void __update_writeback_rate(struct cached_dev *dc)
{
	struct cache_set *c = dc->disk.c;
	uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size;
	uint64_t cache_dirty_target =
		div_u64(cache_sectors * dc->writeback_percent, 100);

	int64_t target = div64_u64(cache_dirty_target * bdev_sectors(dc->bdev),
				   c->cached_dev_sectors);

	/*
	 * PD controller: the proportional term pushes the amount of dirty
	 * data towards the target computed above, while the derivative term
	 * (an EWMA of how fast the dirty count is changing) damps
	 * oscillation.  The result is a rate adjustment in sectors per
	 * second, clamped below.
	 */

	int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
	int64_t derivative = dirty - dc->disk.sectors_dirty_last;
	int64_t proportional = dirty - target;
	int64_t change;

	dc->disk.sectors_dirty_last = dirty;

	/* Scale to sectors per second */

	proportional *= dc->writeback_rate_update_seconds;
	proportional = div_s64(proportional, dc->writeback_rate_p_term_inverse);

	derivative = div_s64(derivative, dc->writeback_rate_update_seconds);

	derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative,
			      (dc->writeback_rate_d_term /
			       dc->writeback_rate_update_seconds) ?: 1, 0);

	derivative *= dc->writeback_rate_d_term;
	derivative = div_s64(derivative, dc->writeback_rate_p_term_inverse);

	change = proportional + derivative;

	/* Don't increase writeback rate if the device isn't keeping up */
	if (change > 0 &&
	    time_after64(local_clock(),
			 dc->writeback_rate.next + NSEC_PER_MSEC))
		change = 0;

	dc->writeback_rate.rate =
		clamp_t(int64_t, (int64_t) dc->writeback_rate.rate + change,
			1, NSEC_PER_MSEC);

	dc->writeback_rate_proportional = proportional;
	dc->writeback_rate_derivative = derivative;
	dc->writeback_rate_change = change;
	dc->writeback_rate_target = target;
}

static void update_writeback_rate(struct work_struct *work)
{
	struct cached_dev *dc = container_of(to_delayed_work(work),
					     struct cached_dev,
					     writeback_rate_update);

	down_read(&dc->writeback_lock);

	if (atomic_read(&dc->has_dirty) &&
	    dc->writeback_percent)
		__update_writeback_rate(dc);

	up_read(&dc->writeback_lock);

	schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);
}

static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
{
	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    !dc->writeback_percent)
		return 0;

	return bch_next_delay(&dc->writeback_rate, sectors);
}

struct dirty_io {
	struct closure		cl;
	struct cached_dev	*dc;
	struct bio		bio;
};

static void dirty_init(struct keybuf_key *w)
{
	struct dirty_io *io = w->private;
	struct bio *bio = &io->bio;

	bio_init(bio);
	if (!io->dc->writeback_percent)
		bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

	bio->bi_iter.bi_size	= KEY_SIZE(&w->key) << 9;
	bio->bi_max_vecs	= DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS);
	bio->bi_private		= w;
	bio->bi_io_vec		= bio->bi_inline_vecs;
	bch_bio_map(bio, NULL);
}

static void dirty_io_destructor(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	kfree(io);
}
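
/*
 * Writeback completion path: once the data has been written to the backing
 * device, write_dirty_finish() frees the pages backing the bio and, if the
 * key is still marked dirty, inserts a clean copy of the key into the btree.
 * The insert is conditional on the original key (the last argument to
 * bch_btree_insert()), so if a foreground write raced with us the insert
 * fails, the data stays dirty and the collision is counted in
 * writeback_keys_failed.  Finally the keybuf slot and the in_flight
 * semaphore slot are released.
 */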

static void write_dirty_finish(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;
	struct cached_dev *dc = io->dc;
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, &io->bio, i)
		__free_page(bv->bv_page);

	/* This is kind of a dumb way of signalling errors. */
	if (KEY_DIRTY(&w->key)) {
		int ret;
		unsigned i;
		struct keylist keys;

		bch_keylist_init(&keys);

		bkey_copy(keys.top, &w->key);
		SET_KEY_DIRTY(keys.top, false);
		bch_keylist_push(&keys);

		for (i = 0; i < KEY_PTRS(&w->key); i++)
			atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);

		ret = bch_btree_insert(dc->disk.c, &keys, NULL, &w->key);

		if (ret)
			trace_bcache_writeback_collision(&w->key);

		atomic_long_inc(ret
				? &dc->disk.c->writeback_keys_failed
				: &dc->disk.c->writeback_keys_done);
	}

	bch_keybuf_del(&dc->writeback_keys, w);
	up(&dc->in_flight);

	closure_return_with_destructor(cl, dirty_io_destructor);
}

static void dirty_endio(struct bio *bio)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	if (bio->bi_error)
		SET_KEY_DIRTY(&w->key, false);

	closure_put(&io->cl);
}

static void write_dirty(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;

	dirty_init(w);
	io->bio.bi_rw		= WRITE;
	io->bio.bi_iter.bi_sector = KEY_START(&w->key);
	io->bio.bi_bdev		= io->dc->bdev;
	io->bio.bi_end_io	= dirty_endio;

	closure_bio_submit(&io->bio, cl);

	continue_at(cl, write_dirty_finish, system_wq);
}

static void read_dirty_endio(struct bio *bio)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
			    bio->bi_error, "reading dirty data from cache");

	dirty_endio(bio);
}

static void read_dirty_submit(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);

	closure_bio_submit(&io->bio, cl);

	continue_at(cl, write_dirty, system_wq);
}
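
/*
 * read_dirty() is the producer side of writeback: it walks the keys stashed
 * in dc->writeback_keys, allocates a dirty_io for each one, reads the dirty
 * data from the cache device and chains into write_dirty() via the closure
 * to copy it to the backing device.  dc->in_flight bounds the number of
 * outstanding writeback I/Os, and writeback_delay() paces the reads
 * according to the current writeback rate.
 */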

static void read_dirty(struct cached_dev *dc)
{
	unsigned delay = 0;
	struct keybuf_key *w;
	struct dirty_io *io;
	struct closure cl;

	closure_init_stack(&cl);

	/*
	 * XXX: if we error, background writeback just spins. Should use some
	 * mempools.
	 */

	while (!kthread_should_stop()) {
		try_to_freeze();

		w = bch_keybuf_next(&dc->writeback_keys);
		if (!w)
			break;

		BUG_ON(ptr_stale(dc->disk.c, &w->key, 0));

		if (KEY_START(&w->key) != dc->last_read ||
		    jiffies_to_msecs(delay) > 50)
			while (!kthread_should_stop() && delay)
				delay = schedule_timeout_interruptible(delay);

		dc->last_read	= KEY_OFFSET(&w->key);

		io = kzalloc(sizeof(struct dirty_io) + sizeof(struct bio_vec)
			     * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
			     GFP_KERNEL);
		if (!io)
			goto err;

		w->private	= io;
		io->dc		= dc;

		dirty_init(w);
		io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
		io->bio.bi_bdev		= PTR_CACHE(dc->disk.c,
						    &w->key, 0)->bdev;
		io->bio.bi_rw		= READ;
		io->bio.bi_end_io	= read_dirty_endio;

		if (bio_alloc_pages(&io->bio, GFP_KERNEL))
			goto err_free;

		trace_bcache_writeback(&w->key);

		down(&dc->in_flight);
		closure_call(&io->cl, read_dirty_submit, NULL, &cl);

		delay = writeback_delay(dc, KEY_SIZE(&w->key));
	}

	if (0) {
err_free:
		kfree(w->private);
err:
		bch_keybuf_del(&dc->writeback_keys, w);
	}

	/*
	 * Wait for outstanding writeback IOs to finish (and keybuf slots to be
	 * freed) before refilling again
	 */
	closure_sync(&cl);
}

/* Scan for dirty data */

void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
				  uint64_t offset, int nr_sectors)
{
	struct bcache_device *d = c->devices[inode];
	unsigned stripe_offset, stripe, sectors_dirty;

	if (!d)
		return;

	stripe = offset_to_stripe(d, offset);
	stripe_offset = offset & (d->stripe_size - 1);

	while (nr_sectors) {
		int s = min_t(unsigned, abs(nr_sectors),
			      d->stripe_size - stripe_offset);

		if (nr_sectors < 0)
			s = -s;

		if (stripe >= d->nr_stripes)
			return;

		sectors_dirty = atomic_add_return(s,
					d->stripe_sectors_dirty + stripe);
		if (sectors_dirty == d->stripe_size)
			set_bit(stripe, d->full_dirty_stripes);
		else
			clear_bit(stripe, d->full_dirty_stripes);

		nr_sectors -= s;
		stripe_offset = 0;
		stripe++;
	}
}

static bool dirty_pred(struct keybuf *buf, struct bkey *k)
{
	struct cached_dev *dc = container_of(buf, struct cached_dev, writeback_keys);

	BUG_ON(KEY_INODE(k) != dc->disk.id);

	return KEY_DIRTY(k);
}

static void refill_full_stripes(struct cached_dev *dc)
{
	struct keybuf *buf = &dc->writeback_keys;
	unsigned start_stripe, stripe, next_stripe;
	bool wrapped = false;

	stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned));

	if (stripe >= dc->disk.nr_stripes)
		stripe = 0;

	start_stripe = stripe;

	while (1) {
		stripe = find_next_bit(dc->disk.full_dirty_stripes,
				       dc->disk.nr_stripes, stripe);

		if (stripe == dc->disk.nr_stripes)
			goto next;

		next_stripe = find_next_zero_bit(dc->disk.full_dirty_stripes,
						 dc->disk.nr_stripes, stripe);

		buf->last_scanned = KEY(dc->disk.id,
					stripe * dc->disk.stripe_size, 0);

		bch_refill_keybuf(dc->disk.c, buf,
				  &KEY(dc->disk.id,
				       next_stripe * dc->disk.stripe_size, 0),
				  dirty_pred);

		if (array_freelist_empty(&buf->freelist))
			return;

		stripe = next_stripe;
next:
		if (wrapped && stripe > start_stripe)
			return;

		if (stripe == dc->disk.nr_stripes) {
			stripe = 0;
			wrapped = true;
		}
	}
}

/*
 * Returns true if we scanned the entire disk
 */
static bool refill_dirty(struct cached_dev *dc)
{
	struct keybuf *buf = &dc->writeback_keys;
	struct bkey start = KEY(dc->disk.id, 0, 0);
	struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
	struct bkey start_pos;

	/*
	 * make sure keybuf pos is inside the range for this disk - at bringup
	 * we might not be attached yet so this disk's inode nr isn't
	 * initialized then
	 */
	if (bkey_cmp(&buf->last_scanned, &start) < 0 ||
	    bkey_cmp(&buf->last_scanned, &end) > 0)
		buf->last_scanned = start;

	if (dc->partial_stripes_expensive) {
		refill_full_stripes(dc);
		if (array_freelist_empty(&buf->freelist))
			return false;
	}

	start_pos = buf->last_scanned;
	bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);

	if (bkey_cmp(&buf->last_scanned, &end) < 0)
		return false;

	/*
	 * If we get to the end start scanning again from the beginning, and
	 * only scan up to where we initially started scanning from:
	 */
	buf->last_scanned = start;
	bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred);

	return bkey_cmp(&buf->last_scanned, &start_pos) >= 0;
}

static int bch_writeback_thread(void *arg)
{
	struct cached_dev *dc = arg;
	bool searched_full_index;

	while (!kthread_should_stop()) {
		down_write(&dc->writeback_lock);
		if (!atomic_read(&dc->has_dirty) ||
		    (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
		     !dc->writeback_running)) {
			up_write(&dc->writeback_lock);
			set_current_state(TASK_INTERRUPTIBLE);

			if (kthread_should_stop())
				return 0;

			try_to_freeze();
			schedule();
			continue;
		}

		searched_full_index = refill_dirty(dc);

		if (searched_full_index &&
		    RB_EMPTY_ROOT(&dc->writeback_keys.keys)) {
			atomic_set(&dc->has_dirty, 0);
			cached_dev_put(dc);
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
			bch_write_bdev_super(dc, NULL);
		}

		up_write(&dc->writeback_lock);

		bch_ratelimit_reset(&dc->writeback_rate);
		read_dirty(dc);

		if (searched_full_index) {
			unsigned delay = dc->writeback_delay * HZ;

			while (delay &&
			       !kthread_should_stop() &&
			       !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
				delay = schedule_timeout_interruptible(delay);
		}
	}

	return 0;
}

/* Init */

struct sectors_dirty_init {
	struct btree_op	op;
	unsigned	inode;
};

static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
				 struct bkey *k)
{
	struct sectors_dirty_init *op = container_of(_op,
					struct sectors_dirty_init, op);
	if (KEY_INODE(k) > op->inode)
		return MAP_DONE;

	if (KEY_DIRTY(k))
		bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
					     KEY_START(k), KEY_SIZE(k));

	return MAP_CONTINUE;
}

void bch_sectors_dirty_init(struct cached_dev *dc)
{
	struct sectors_dirty_init op;

	bch_btree_op_init(&op.op, -1);
	op.inode = dc->disk.id;

	bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0),
			   sectors_dirty_init_fn, 0);

	dc->disk.sectors_dirty_last = bcache_dev_sectors_dirty(&dc->disk);
}
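
/*
 * Set up the per-device writeback state and default tunables: at most 64
 * writeback I/Os in flight, a dirty data target of 10% of the cache, and a
 * rate controller that runs every 5 seconds.  The writeback thread itself is
 * created later, in bch_cached_dev_writeback_start().
 */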
void bch_cached_dev_writeback_init(struct cached_dev *dc)
{
	sema_init(&dc->in_flight, 64);
	init_rwsem(&dc->writeback_lock);
	bch_keybuf_init(&dc->writeback_keys);

	dc->writeback_metadata		= true;
	dc->writeback_running		= true;
	dc->writeback_percent		= 10;
	dc->writeback_delay		= 30;
	dc->writeback_rate.rate		= 1024;

	dc->writeback_rate_update_seconds = 5;
	dc->writeback_rate_d_term	= 30;
	dc->writeback_rate_p_term_inverse = 6000;

	INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
}

int bch_cached_dev_writeback_start(struct cached_dev *dc)
{
	dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
					      "bcache_writeback");
	if (IS_ERR(dc->writeback_thread))
		return PTR_ERR(dc->writeback_thread);

	schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);

	bch_writeback_queue(dc);

	return 0;
}