// SPDX-License-Identifier: GPL-2.0
/*
 * background writeback - scan btree for dirty data and write it to the backing
 * device
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "writeback.h"

#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/sched/clock.h>
#include <trace/events/bcache.h>

static void update_gc_after_writeback(struct cache_set *c)
{
	if (c->gc_after_writeback != (BCH_ENABLE_AUTO_GC) ||
	    c->gc_stats.in_use < BCH_AUTO_GC_DIRTY_THRESHOLD)
		return;

	c->gc_after_writeback |= BCH_DO_AUTO_GC;
}

/* Rate limiting */
static uint64_t __calc_target_rate(struct cached_dev *dc)
{
	struct cache_set *c = dc->disk.c;

	/*
	 * This is the size of the cache, minus the amount used for
	 * flash-only devices
	 */
	uint64_t cache_sectors = c->nbuckets * c->cache->sb.bucket_size -
			atomic_long_read(&c->flash_dev_dirty_sectors);

	/*
	 * Unfortunately there is no control of global dirty data. If the
	 * user states that they want 10% dirty data in the cache, and has,
	 * e.g., 5 backing volumes of equal size, we try to ensure each
	 * backing volume uses about 2% of the cache for dirty data.
	 */
	uint32_t bdev_share =
		div64_u64(bdev_sectors(dc->bdev) << WRITEBACK_SHARE_SHIFT,
				c->cached_dev_sectors);

	uint64_t cache_dirty_target =
		div_u64(cache_sectors * dc->writeback_percent, 100);

	/* Ensure each backing dev gets at least one dirty share */
	if (bdev_share < 1)
		bdev_share = 1;

	return (cache_dirty_target * bdev_share) >> WRITEBACK_SHARE_SHIFT;
}

static void __update_writeback_rate(struct cached_dev *dc)
{
	/*
	 * PI controller:
	 * Figures out the amount that should be written per second.
	 *
	 * First, the error (number of sectors that are dirty beyond our
	 * target) is calculated. The error is accumulated (numerically
	 * integrated).
	 *
	 * Then, the proportional value and integral value are scaled
	 * based on configured values. These are stored as inverses to
	 * avoid fixed point math and to make configuration easy-- e.g.
	 * the default value of 40 for writeback_rate_p_term_inverse
	 * attempts to write at a rate that would retire all the dirty
	 * blocks in 40 seconds.
	 *
	 * The writeback_rate_i_term_inverse value of 10000 means that
	 * 1/10000th of the error is accumulated in the integral term per
	 * second. This acts as a slow, long-term average that is not
	 * subject to variations in usage like the p term.
	 */
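	/*
	 * As a rough illustration, using the defaults set in
	 * bch_cached_dev_writeback_init() below: an error of 40000 dirty
	 * sectors above target contributes 40000 / 40 = 1000 sectors/sec
	 * through the proportional term, while the same error, if it
	 * persists, grows the integral contribution by roughly
	 * 40000 / 10000 = 4 sectors/sec for every second it lasts.
	 */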
	int64_t target = __calc_target_rate(dc);
	int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
	int64_t error = dirty - target;
	int64_t proportional_scaled =
		div_s64(error, dc->writeback_rate_p_term_inverse);
	int64_t integral_scaled;
	uint32_t new_rate;

	/*
	 * We need to consider the number of dirty buckets as well
	 * when calculating proportional_scaled. Otherwise we might end up
	 * with an unreasonably small writeback rate when the cache is
	 * highly fragmented, i.e. when very few dirty sectors consume a
	 * lot of dirty buckets. In the worst case the number of dirty
	 * buckets reaches cutoff_writeback_sync while the amount of dirty
	 * data has not even reached the writeback percent, so the rate
	 * stays at its minimum value and writes get stuck in
	 * non-writeback mode.
	 */
	struct cache_set *c = dc->disk.c;

	int64_t dirty_buckets = c->nbuckets - c->avail_nbuckets;

	if (dc->writeback_consider_fragment &&
	    c->gc_stats.in_use > BCH_WRITEBACK_FRAGMENT_THRESHOLD_LOW && dirty > 0) {
		int64_t fragment =
			div_s64((dirty_buckets * c->cache->sb.bucket_size), dirty);
		int64_t fp_term;
		int64_t fps;

		if (c->gc_stats.in_use <= BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID) {
			fp_term = (int64_t)dc->writeback_rate_fp_term_low *
			(c->gc_stats.in_use - BCH_WRITEBACK_FRAGMENT_THRESHOLD_LOW);
		} else if (c->gc_stats.in_use <= BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH) {
			fp_term = (int64_t)dc->writeback_rate_fp_term_mid *
			(c->gc_stats.in_use - BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID);
		} else {
			fp_term = (int64_t)dc->writeback_rate_fp_term_high *
			(c->gc_stats.in_use - BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH);
		}
		fps = div_s64(dirty, dirty_buckets) * fp_term;
		if (fragment > 3 && fps > proportional_scaled) {
			/* Only overwrite the p term when fragment > 3 */
			proportional_scaled = fps;
		}
	}

	if ((error < 0 && dc->writeback_rate_integral > 0) ||
	    (error > 0 && time_before64(local_clock(),
			 dc->writeback_rate.next + NSEC_PER_MSEC))) {
		/*
		 * Only decrease the integral term if it's more than
		 * zero. Only increase the integral term if the device
		 * is keeping up. (Don't wind up the integral
		 * ineffectively in either case).
		 *
		 * It's necessary to scale this by
		 * writeback_rate_update_seconds to keep the integral
		 * term dimensioned properly.
		 */
		dc->writeback_rate_integral += error *
			dc->writeback_rate_update_seconds;
	}

	integral_scaled = div_s64(dc->writeback_rate_integral,
			dc->writeback_rate_i_term_inverse);

	new_rate = clamp_t(int32_t, (proportional_scaled + integral_scaled),
			dc->writeback_rate_minimum, NSEC_PER_SEC);

	dc->writeback_rate_proportional = proportional_scaled;
	dc->writeback_rate_integral_scaled = integral_scaled;
	dc->writeback_rate_change = new_rate -
			atomic_long_read(&dc->writeback_rate.rate);
	atomic_long_set(&dc->writeback_rate.rate, new_rate);
	dc->writeback_rate_target = target;
}

static bool set_at_max_writeback_rate(struct cache_set *c,
				      struct cached_dev *dc)
{
	/* Don't set max writeback rate if it is disabled */
	if (!c->idle_max_writeback_rate_enabled)
		return false;

	/* Don't set max writeback rate if gc is running */
	if (!c->gc_mark_valid)
		return false;
	/*
	 * idle_counter is increased every time update_writeback_rate() is
	 * called. If all backing devices attached to the same cache set have
	 * identical dc->writeback_rate_update_seconds values, it takes about
	 * 6 rounds of update_writeback_rate() on each backing device before
	 * c->at_max_writeback_rate is set to 1, and then the maximum
	 * writeback rate is set on each dc->writeback_rate.rate.
	 * In order to avoid the extra locking cost of counting the exact
	 * number of dirty cached devices, c->attached_dev_nr is used to
	 * calculate the idle threshold. It might be bigger if not all cached
	 * devices are in writeback mode, but it still works well with a
	 * limited number of extra rounds of update_writeback_rate().
	 */
	if (atomic_inc_return(&c->idle_counter) <
	    atomic_read(&c->attached_dev_nr) * 6)
		return false;

	if (atomic_read(&c->at_max_writeback_rate) != 1)
		atomic_set(&c->at_max_writeback_rate, 1);

	atomic_long_set(&dc->writeback_rate.rate, INT_MAX);

	/* keep writeback_rate_target as existing value */
	dc->writeback_rate_proportional = 0;
	dc->writeback_rate_integral_scaled = 0;
	dc->writeback_rate_change = 0;

	/*
	 * Check c->idle_counter and c->at_max_writeback_rate again in case
	 * new I/O arrives before set_at_max_writeback_rate() returns. If it
	 * does, return false so that the new writeback rate is decided via
	 * __update_writeback_rate() instead.
	 */
	if ((atomic_read(&c->idle_counter) <
	     atomic_read(&c->attached_dev_nr) * 6) ||
	    !atomic_read(&c->at_max_writeback_rate))
		return false;

	return true;
}

static void update_writeback_rate(struct work_struct *work)
{
	struct cached_dev *dc = container_of(to_delayed_work(work),
					     struct cached_dev,
					     writeback_rate_update);
	struct cache_set *c = dc->disk.c;

	/*
	 * should check BCACHE_DEV_RATE_DW_RUNNING before calling
	 * cancel_delayed_work_sync().
	 */
	set_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
	/* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
	smp_mb__after_atomic();

	/*
	 * CACHE_SET_IO_DISABLE might be set via sysfs interface,
	 * check it here too.
	 */
	if (!test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags) ||
	    test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
		clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
		/* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
		smp_mb__after_atomic();
		return;
	}

	if (atomic_read(&dc->has_dirty) && dc->writeback_percent) {
		/*
		 * If the whole cache set is idle, set_at_max_writeback_rate()
		 * will set the writeback rate to a maximum number. Then it is
		 * unnecessary to update the writeback rate again for an idle
		 * cache set whose rate is already at the maximum.
		 */
		if (!set_at_max_writeback_rate(c, dc)) {
			down_read(&dc->writeback_lock);
			__update_writeback_rate(dc);
			update_gc_after_writeback(c);
			up_read(&dc->writeback_lock);
		}
	}

	/*
	 * CACHE_SET_IO_DISABLE might be set via sysfs interface,
	 * check it here too.
	 */
	if (test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags) &&
	    !test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
		schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);
	}

	/*
	 * should check BCACHE_DEV_RATE_DW_RUNNING before calling
	 * cancel_delayed_work_sync().
	 */
	clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
	/* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
	smp_mb__after_atomic();
}

static unsigned int writeback_delay(struct cached_dev *dc,
				    unsigned int sectors)
{
	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    !dc->writeback_percent)
		return 0;

	return bch_next_delay(&dc->writeback_rate, sectors);
}

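/*
 * State for one in-flight writeback of a single dirty key: the closure
 * tracking the read-then-write, the owning cached_dev, a sequence number
 * used to keep writes to the backing device in key order, and the bio
 * (with inline vecs) that is used first for the cache read and then
 * re-initialised by dirty_init() for the backing-device write.
 */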
struct dirty_io {
	struct closure cl;
	struct cached_dev *dc;
	uint16_t sequence;
	struct bio bio;
};

static void dirty_init(struct keybuf_key *w)
{
	struct dirty_io *io = w->private;
	struct bio *bio = &io->bio;

	bio_init(bio, bio->bi_inline_vecs,
		 DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS));
	if (!io->dc->writeback_percent)
		bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

	bio->bi_iter.bi_size = KEY_SIZE(&w->key) << 9;
	bio->bi_private = w;
	bch_bio_map(bio, NULL);
}

static void dirty_io_destructor(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);

	kfree(io);
}

static void write_dirty_finish(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;
	struct cached_dev *dc = io->dc;

	bio_free_pages(&io->bio);

	/* This is kind of a dumb way of signalling errors. */
	if (KEY_DIRTY(&w->key)) {
		int ret;
		unsigned int i;
		struct keylist keys;

		bch_keylist_init(&keys);

		bkey_copy(keys.top, &w->key);
		SET_KEY_DIRTY(keys.top, false);
		bch_keylist_push(&keys);

		for (i = 0; i < KEY_PTRS(&w->key); i++)
			atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);

		ret = bch_btree_insert(dc->disk.c, &keys, NULL, &w->key);

		if (ret)
			trace_bcache_writeback_collision(&w->key);

		atomic_long_inc(ret
				? &dc->disk.c->writeback_keys_failed
				: &dc->disk.c->writeback_keys_done);
	}

	bch_keybuf_del(&dc->writeback_keys, w);
	up(&dc->in_flight);

	closure_return_with_destructor(cl, dirty_io_destructor);
}

static void dirty_endio(struct bio *bio)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	if (bio->bi_status) {
		SET_KEY_DIRTY(&w->key, false);
		bch_count_backing_io_errors(io->dc, bio);
	}

	closure_put(&io->cl);
}

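/*
 * Writes to the backing device are issued in key (sequence) order: each
 * dirty_io carries the sequence number it was queued with, and a closure
 * whose turn has not yet come parks itself on writeback_ordering_wait
 * until the preceding write bumps writeback_sequence_next and wakes the
 * waiters.
 */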
static void write_dirty(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;
	struct cached_dev *dc = io->dc;

	uint16_t next_sequence;

	if (atomic_read(&dc->writeback_sequence_next) != io->sequence) {
		/* Not our turn to write; wait for a write to complete */
		closure_wait(&dc->writeback_ordering_wait, cl);

		if (atomic_read(&dc->writeback_sequence_next) == io->sequence) {
			/*
			 * Edge case -- it happened in indeterminate order
			 * relative to when we were added to the wait list.
			 */
			closure_wake_up(&dc->writeback_ordering_wait);
		}

		continue_at(cl, write_dirty, io->dc->writeback_write_wq);
		return;
	}

	next_sequence = io->sequence + 1;

	/*
	 * IO errors are signalled using the dirty bit on the key.
	 * If we failed to read, we should not attempt to write to the
	 * backing device. Instead, immediately go to write_dirty_finish
	 * to clean up.
	 */
	if (KEY_DIRTY(&w->key)) {
		dirty_init(w);
		bio_set_op_attrs(&io->bio, REQ_OP_WRITE, 0);
		io->bio.bi_iter.bi_sector = KEY_START(&w->key);
		bio_set_dev(&io->bio, io->dc->bdev);
		io->bio.bi_end_io = dirty_endio;

		/* I/O request sent to backing device */
		closure_bio_submit(io->dc->disk.c, &io->bio, cl);
	}

	atomic_set(&dc->writeback_sequence_next, next_sequence);
	closure_wake_up(&dc->writeback_ordering_wait);

	continue_at(cl, write_dirty_finish, io->dc->writeback_write_wq);
}

static void read_dirty_endio(struct bio *bio)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	/* is_read = 1 */
	bch_count_io_errors(io->dc->disk.c->cache,
			    bio->bi_status, 1,
			    "reading dirty data from cache");

	dirty_endio(bio);
}

static void read_dirty_submit(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);

	closure_bio_submit(io->dc->disk.c, &io->bio, cl);

	continue_at(cl, write_dirty, io->dc->writeback_write_wq);
}

static void read_dirty(struct cached_dev *dc)
{
	unsigned int delay = 0;
	struct keybuf_key *next, *keys[MAX_WRITEBACKS_IN_PASS], *w;
	size_t size;
	int nk, i;
	struct dirty_io *io;
	struct closure cl;
	uint16_t sequence = 0;

	BUG_ON(!llist_empty(&dc->writeback_ordering_wait.list));
	atomic_set(&dc->writeback_sequence_next, sequence);
	closure_init_stack(&cl);

	/*
	 * XXX: if we error, background writeback just spins. Should use some
	 * mempools.
	 */

	next = bch_keybuf_next(&dc->writeback_keys);

	while (!kthread_should_stop() &&
	       !test_bit(CACHE_SET_IO_DISABLE, &dc->disk.c->flags) &&
	       next) {
		size = 0;
		nk = 0;

		do {
			BUG_ON(ptr_stale(dc->disk.c, &next->key, 0));

			/*
			 * Don't combine too many operations, even if they
			 * are all small.
			 */
			if (nk >= MAX_WRITEBACKS_IN_PASS)
				break;

			/*
			 * If the current operation is very large, don't
			 * further combine operations.
			 */
			if (size >= MAX_WRITESIZE_IN_PASS)
				break;

			/*
			 * Operations are only eligible to be combined
			 * if they are contiguous.
			 *
			 * TODO: add a heuristic willing to fire a
			 * certain amount of non-contiguous IO per pass,
			 * so that we can benefit from backing device
			 * command queueing.
			 */
			if ((nk != 0) && bkey_cmp(&keys[nk-1]->key,
						&START_KEY(&next->key)))
				break;

			size += KEY_SIZE(&next->key);
			keys[nk++] = next;
		} while ((next = bch_keybuf_next(&dc->writeback_keys)));

		/* Now we have gathered a set of 1..5 keys to write back. */
		for (i = 0; i < nk; i++) {
			w = keys[i];

			io = kzalloc(struct_size(io, bio.bi_inline_vecs,
						DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS)),
				     GFP_KERNEL);
			if (!io)
				goto err;

			w->private = io;
			io->dc = dc;
			io->sequence = sequence++;

			dirty_init(w);
			bio_set_op_attrs(&io->bio, REQ_OP_READ, 0);
			io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
			bio_set_dev(&io->bio, dc->disk.c->cache->bdev);
			io->bio.bi_end_io = read_dirty_endio;

			if (bch_bio_alloc_pages(&io->bio, GFP_KERNEL))
				goto err_free;

			trace_bcache_writeback(&w->key);

			down(&dc->in_flight);

			/*
			 * We've acquired a semaphore for the maximum
			 * simultaneous number of writebacks; from here
			 * everything happens asynchronously.
			 */
			closure_call(&io->cl, read_dirty_submit, NULL, &cl);
		}

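		/*
		 * Throttle between batches: writeback_delay() converts the
		 * size of the batch just issued into a sleep, based on the
		 * sectors/sec rate maintained by the PI controller in
		 * dc->writeback_rate.
		 */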
		delay = writeback_delay(dc, size);

		while (!kthread_should_stop() &&
		       !test_bit(CACHE_SET_IO_DISABLE, &dc->disk.c->flags) &&
		       delay) {
			schedule_timeout_interruptible(delay);
			delay = writeback_delay(dc, 0);
		}
	}

	if (0) {
err_free:
		kfree(w->private);
err:
		bch_keybuf_del(&dc->writeback_keys, w);
	}

	/*
	 * Wait for outstanding writeback IOs to finish (and keybuf slots to be
	 * freed) before refilling again
	 */
	closure_sync(&cl);
}

/* Scan for dirty data */

void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode,
				  uint64_t offset, int nr_sectors)
{
	struct bcache_device *d = c->devices[inode];
	unsigned int stripe_offset, sectors_dirty;
	int stripe;

	if (!d)
		return;

	stripe = offset_to_stripe(d, offset);
	if (stripe < 0)
		return;

	if (UUID_FLASH_ONLY(&c->uuids[inode]))
		atomic_long_add(nr_sectors, &c->flash_dev_dirty_sectors);

	stripe_offset = offset & (d->stripe_size - 1);

	while (nr_sectors) {
		int s = min_t(unsigned int, abs(nr_sectors),
			      d->stripe_size - stripe_offset);

		if (nr_sectors < 0)
			s = -s;

		if (stripe >= d->nr_stripes)
			return;

		sectors_dirty = atomic_add_return(s,
					d->stripe_sectors_dirty + stripe);
		if (sectors_dirty == d->stripe_size)
			set_bit(stripe, d->full_dirty_stripes);
		else
			clear_bit(stripe, d->full_dirty_stripes);

		nr_sectors -= s;
		stripe_offset = 0;
		stripe++;
	}
}

static bool dirty_pred(struct keybuf *buf, struct bkey *k)
{
	struct cached_dev *dc = container_of(buf,
					     struct cached_dev,
					     writeback_keys);

	BUG_ON(KEY_INODE(k) != dc->disk.id);

	return KEY_DIRTY(k);
}

static void refill_full_stripes(struct cached_dev *dc)
{
	struct keybuf *buf = &dc->writeback_keys;
	unsigned int start_stripe, next_stripe;
	int stripe;
	bool wrapped = false;

	stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned));
	if (stripe < 0)
		stripe = 0;

	start_stripe = stripe;

	while (1) {
		stripe = find_next_bit(dc->disk.full_dirty_stripes,
				       dc->disk.nr_stripes, stripe);

		if (stripe == dc->disk.nr_stripes)
			goto next;

		next_stripe = find_next_zero_bit(dc->disk.full_dirty_stripes,
						 dc->disk.nr_stripes, stripe);

		buf->last_scanned = KEY(dc->disk.id,
					stripe * dc->disk.stripe_size, 0);

		bch_refill_keybuf(dc->disk.c, buf,
				  &KEY(dc->disk.id,
				       next_stripe * dc->disk.stripe_size, 0),
				  dirty_pred);

		if (array_freelist_empty(&buf->freelist))
			return;

		stripe = next_stripe;
next:
		if (wrapped && stripe > start_stripe)
			return;

		if (stripe == dc->disk.nr_stripes) {
			stripe = 0;
			wrapped = true;
		}
	}
}

/*
 * Returns true if we scanned the entire disk
 */
static bool refill_dirty(struct cached_dev *dc)
{
	struct keybuf *buf = &dc->writeback_keys;
	struct bkey start = KEY(dc->disk.id, 0, 0);
	struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
	struct bkey start_pos;

	/*
	 * make sure keybuf pos is inside the range for this disk - at bringup
	 * we might not be attached yet so this disk's inode nr isn't
	 * initialized yet
	 */
	if (bkey_cmp(&buf->last_scanned, &start) < 0 ||
	    bkey_cmp(&buf->last_scanned, &end) > 0)
		buf->last_scanned = start;

	if (dc->partial_stripes_expensive) {
		refill_full_stripes(dc);
		if (array_freelist_empty(&buf->freelist))
			return false;
	}

	start_pos = buf->last_scanned;
	bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);

	if (bkey_cmp(&buf->last_scanned, &end) < 0)
		return false;

	/*
	 * If we get to the end start scanning again from the beginning, and
	 * only scan up to where we initially started scanning from:
	 */
	buf->last_scanned = start;
	bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred);

	return bkey_cmp(&buf->last_scanned, &start_pos) >= 0;
}

static int bch_writeback_thread(void *arg)
{
	struct cached_dev *dc = arg;
	struct cache_set *c = dc->disk.c;
	bool searched_full_index;

	bch_ratelimit_reset(&dc->writeback_rate);

	while (!kthread_should_stop() &&
	       !test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
		down_write(&dc->writeback_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		/*
		 * If the bcache device is detaching, skip here and continue
		 * to perform writeback. Otherwise, if there is no dirty data
		 * on the cache, or there is dirty data but writeback is
		 * disabled, the writeback thread should sleep here and wait
		 * for others to wake it up.
		 */
		if (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
		    (!atomic_read(&dc->has_dirty) || !dc->writeback_running)) {
			up_write(&dc->writeback_lock);

			if (kthread_should_stop() ||
			    test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
				set_current_state(TASK_RUNNING);
				break;
			}

			schedule();
			continue;
		}
		set_current_state(TASK_RUNNING);

		searched_full_index = refill_dirty(dc);

		if (searched_full_index &&
		    RB_EMPTY_ROOT(&dc->writeback_keys.keys)) {
			atomic_set(&dc->has_dirty, 0);
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
			bch_write_bdev_super(dc, NULL);
			/*
			 * If the bcache device is detaching via the sysfs
			 * interface, the writeback thread should stop once
			 * there is no dirty data on the cache. The
			 * BCACHE_DEV_DETACHING flag is set in
			 * bch_cached_dev_detach().
			 */
			if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) {
				struct closure cl;

				closure_init_stack(&cl);
				memset(&dc->sb.set_uuid, 0, 16);
				SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE);

				bch_write_bdev_super(dc, &cl);
				closure_sync(&cl);

				up_write(&dc->writeback_lock);
				break;
			}

			/*
			 * When the dirty data ratio is high (e.g. 50%+),
			 * there might be heavy bucket fragmentation after
			 * writeback finishes, which hurts subsequent write
			 * performance. If users really care about write
			 * performance they may set BCH_ENABLE_AUTO_GC via
			 * sysfs; then, when BCH_DO_AUTO_GC is set, the
			 * garbage collection thread will be woken up here.
			 * After gc has moved data around, the shrunk btree
			 * and the discarded, now-free bucket space on the
			 * SSD may help subsequent write requests.
			 */
			if (c->gc_after_writeback ==
			    (BCH_ENABLE_AUTO_GC|BCH_DO_AUTO_GC)) {
				c->gc_after_writeback &= ~BCH_DO_AUTO_GC;
				force_wake_up_gc(c);
			}
		}

		up_write(&dc->writeback_lock);

		read_dirty(dc);

		if (searched_full_index) {
			unsigned int delay = dc->writeback_delay * HZ;

			while (delay &&
			       !kthread_should_stop() &&
			       !test_bit(CACHE_SET_IO_DISABLE, &c->flags) &&
			       !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
				delay = schedule_timeout_interruptible(delay);

			bch_ratelimit_reset(&dc->writeback_rate);
		}
	}

	if (dc->writeback_write_wq) {
		flush_workqueue(dc->writeback_write_wq);
		destroy_workqueue(dc->writeback_write_wq);
	}
	cached_dev_put(dc);
	wait_for_kthread_stop();

	return 0;
}

/* Init */
#define INIT_KEYS_EACH_TIME	500000
#define INIT_KEYS_SLEEP_MS	100

struct sectors_dirty_init {
	struct btree_op op;
	unsigned int inode;
	size_t count;
	struct bkey start;
};

static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
				 struct bkey *k)
{
	struct sectors_dirty_init *op = container_of(_op,
						struct sectors_dirty_init, op);
	if (KEY_INODE(k) > op->inode)
		return MAP_DONE;

	if (KEY_DIRTY(k))
		bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
					     KEY_START(k), KEY_SIZE(k));

	op->count++;
	if (atomic_read(&b->c->search_inflight) &&
	    !(op->count % INIT_KEYS_EACH_TIME)) {
		bkey_copy_key(&op->start, k);
		return -EAGAIN;
	}

	return MAP_CONTINUE;
}

static int bch_root_node_dirty_init(struct cache_set *c,
				    struct bcache_device *d,
				    struct bkey *k)
{
	struct sectors_dirty_init op;
	int ret;

	bch_btree_op_init(&op.op, -1);
	op.inode = d->id;
	op.count = 0;
	op.start = KEY(op.inode, 0, 0);

	do {
		ret = bcache_btree(map_keys_recurse,
				   k,
				   c->root,
				   &op.op,
				   &op.start,
				   sectors_dirty_init_fn,
				   0);
		if (ret == -EAGAIN)
			schedule_timeout_interruptible(
				msecs_to_jiffies(INIT_KEYS_SLEEP_MS));
		else if (ret < 0) {
			pr_warn("sectors dirty init failed, ret=%d!\n", ret);
			break;
		}
	} while (ret == -EAGAIN);

	return ret;
}

static int bch_dirty_init_thread(void *arg)
{
	struct dirty_init_thrd_info *info = arg;
	struct bch_dirty_init_state *state = info->state;
	struct cache_set *c = state->c;
	struct btree_iter iter;
	struct bkey *k, *p;
	int cur_idx, prev_idx, skip_nr;

	k = p = NULL;
	cur_idx = prev_idx = 0;

	bch_btree_iter_init(&c->root->keys, &iter, NULL);
	k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
	BUG_ON(!k);

	p = k;

	while (k) {
		spin_lock(&state->idx_lock);
		cur_idx = state->key_idx;
		state->key_idx++;
		spin_unlock(&state->idx_lock);

		skip_nr = cur_idx - prev_idx;

		while (skip_nr) {
			k = bch_btree_iter_next_filter(&iter,
						       &c->root->keys,
						       bch_ptr_bad);
			if (k)
				p = k;
			else {
				atomic_set(&state->enough, 1);
				/* Update state->enough earlier */
				smp_mb__after_atomic();
				goto out;
			}
			skip_nr--;
			cond_resched();
		}

		if (p) {
			if (bch_root_node_dirty_init(c, state->d, p) < 0)
				goto out;
		}

		p = NULL;
		prev_idx = cur_idx;
		cond_resched();
	}

out:
	/* In order to wake up state->wait in time */
	smp_mb__before_atomic();
	if (atomic_dec_and_test(&state->started))
		wake_up(&state->wait);

	return 0;
}

static int bch_btre_dirty_init_thread_nr(void)
{
	int n = num_online_cpus()/2;

	if (n == 0)
		n = 1;
	else if (n > BCH_DIRTY_INIT_THRD_MAX)
		n = BCH_DIRTY_INIT_THRD_MAX;

	return n;
}

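/*
 * Recount the dirty sectors for a device by walking the btree. If the root
 * node has no leaf nodes below it, the root keys are counted inline;
 * otherwise up to BCH_DIRTY_INIT_THRD_MAX threads (about half the online
 * CPUs, see bch_btre_dirty_init_thread_nr()) are started, each claiming
 * root-node keys in turn via state->key_idx and counting the dirty sectors
 * in the part of the tree below its claimed key.
 */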
void bch_sectors_dirty_init(struct bcache_device *d)
{
	int i;
	struct bkey *k = NULL;
	struct btree_iter iter;
	struct sectors_dirty_init op;
	struct cache_set *c = d->c;
	struct bch_dirty_init_state *state;
	char name[32];

	/* Just count root keys if no leaf node */
	if (c->root->level == 0) {
		bch_btree_op_init(&op.op, -1);
		op.inode = d->id;
		op.count = 0;
		op.start = KEY(op.inode, 0, 0);

		for_each_key_filter(&c->root->keys,
				    k, &iter, bch_ptr_invalid)
			sectors_dirty_init_fn(&op.op, c->root, k);
		return;
	}

	state = kzalloc(sizeof(struct bch_dirty_init_state), GFP_KERNEL);
	if (!state) {
		pr_warn("sectors dirty init failed: cannot allocate memory\n");
		return;
	}

	state->c = c;
	state->d = d;
	state->total_threads = bch_btre_dirty_init_thread_nr();
	state->key_idx = 0;
	spin_lock_init(&state->idx_lock);
	atomic_set(&state->started, 0);
	atomic_set(&state->enough, 0);
	init_waitqueue_head(&state->wait);

	for (i = 0; i < state->total_threads; i++) {
		/* Fetch latest state->enough earlier */
		smp_mb__before_atomic();
		if (atomic_read(&state->enough))
			break;

		state->infos[i].state = state;
		atomic_inc(&state->started);
		snprintf(name, sizeof(name), "bch_dirty_init[%d]", i);

		state->infos[i].thread =
			kthread_run(bch_dirty_init_thread,
				    &state->infos[i],
				    name);
		if (IS_ERR(state->infos[i].thread)) {
			pr_err("fails to run thread bch_dirty_init[%d]\n", i);
			for (--i; i >= 0; i--)
				kthread_stop(state->infos[i].thread);
			goto out;
		}
	}

	wait_event_interruptible(state->wait,
		 atomic_read(&state->started) == 0 ||
		 test_bit(CACHE_SET_IO_DISABLE, &c->flags));

out:
	kfree(state);
}

void bch_cached_dev_writeback_init(struct cached_dev *dc)
{
	sema_init(&dc->in_flight, 64);
	init_rwsem(&dc->writeback_lock);
	bch_keybuf_init(&dc->writeback_keys);

	dc->writeback_metadata = true;
	dc->writeback_running = false;
	dc->writeback_consider_fragment = true;
	dc->writeback_percent = 10;
	dc->writeback_delay = 30;
	atomic_long_set(&dc->writeback_rate.rate, 1024);
	dc->writeback_rate_minimum = 8;

	dc->writeback_rate_update_seconds = WRITEBACK_RATE_UPDATE_SECS_DEFAULT;
	dc->writeback_rate_p_term_inverse = 40;
	dc->writeback_rate_fp_term_low = 1;
	dc->writeback_rate_fp_term_mid = 10;
	dc->writeback_rate_fp_term_high = 1000;
	dc->writeback_rate_i_term_inverse = 10000;

	WARN_ON(test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags));
	INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
}

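/*
 * The write workqueue must exist before the writeback thread can run,
 * since read_dirty_submit() and write_dirty() are continued on
 * dc->writeback_write_wq; the thread tears the workqueue down itself when
 * it exits (see the end of bch_writeback_thread()).
 */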
int bch_cached_dev_writeback_start(struct cached_dev *dc)
{
	dc->writeback_write_wq = alloc_workqueue("bcache_writeback_wq",
						WQ_MEM_RECLAIM, 0);
	if (!dc->writeback_write_wq)
		return -ENOMEM;

	cached_dev_get(dc);
	dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
					      "bcache_writeback");
	if (IS_ERR(dc->writeback_thread)) {
		cached_dev_put(dc);
		destroy_workqueue(dc->writeback_write_wq);
		return PTR_ERR(dc->writeback_thread);
	}
	dc->writeback_running = true;

	WARN_ON(test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags));
	schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);

	bch_writeback_queue(dc);

	return 0;
}