1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * bcache sysfs interfaces 4 * 5 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com> 6 * Copyright 2012 Google, Inc. 7 */ 8 9 #include "bcache.h" 10 #include "sysfs.h" 11 #include "btree.h" 12 #include "request.h" 13 #include "writeback.h" 14 15 #include <linux/blkdev.h> 16 #include <linux/sort.h> 17 #include <linux/sched/clock.h> 18 19 /* Default is -1; we skip past it for struct cached_dev's cache mode */ 20 static const char * const bch_cache_modes[] = { 21 "writethrough", 22 "writeback", 23 "writearound", 24 "none", 25 NULL 26 }; 27 28 /* Default is -1; we skip past it for stop_when_cache_set_failed */ 29 static const char * const bch_stop_on_failure_modes[] = { 30 "auto", 31 "always", 32 NULL 33 }; 34 35 static const char * const cache_replacement_policies[] = { 36 "lru", 37 "fifo", 38 "random", 39 NULL 40 }; 41 42 static const char * const error_actions[] = { 43 "unregister", 44 "panic", 45 NULL 46 }; 47 48 write_attribute(attach); 49 write_attribute(detach); 50 write_attribute(unregister); 51 write_attribute(stop); 52 write_attribute(clear_stats); 53 write_attribute(trigger_gc); 54 write_attribute(prune_cache); 55 write_attribute(flash_vol_create); 56 57 read_attribute(bucket_size); 58 read_attribute(block_size); 59 read_attribute(nbuckets); 60 read_attribute(tree_depth); 61 read_attribute(root_usage_percent); 62 read_attribute(priority_stats); 63 read_attribute(btree_cache_size); 64 read_attribute(btree_cache_max_chain); 65 read_attribute(cache_available_percent); 66 read_attribute(written); 67 read_attribute(btree_written); 68 read_attribute(metadata_written); 69 read_attribute(active_journal_entries); 70 71 sysfs_time_stats_attribute(btree_gc, sec, ms); 72 sysfs_time_stats_attribute(btree_split, sec, us); 73 sysfs_time_stats_attribute(btree_sort, ms, us); 74 sysfs_time_stats_attribute(btree_read, ms, us); 75 76 read_attribute(btree_nodes); 77 read_attribute(btree_used_percent); 78 read_attribute(average_key_size); 79 read_attribute(dirty_data); 80 read_attribute(bset_tree_stats); 81 82 read_attribute(state); 83 read_attribute(cache_read_races); 84 read_attribute(reclaim); 85 read_attribute(flush_write); 86 read_attribute(retry_flush_write); 87 read_attribute(writeback_keys_done); 88 read_attribute(writeback_keys_failed); 89 read_attribute(io_errors); 90 read_attribute(congested); 91 rw_attribute(congested_read_threshold_us); 92 rw_attribute(congested_write_threshold_us); 93 94 rw_attribute(sequential_cutoff); 95 rw_attribute(data_csum); 96 rw_attribute(cache_mode); 97 rw_attribute(stop_when_cache_set_failed); 98 rw_attribute(writeback_metadata); 99 rw_attribute(writeback_running); 100 rw_attribute(writeback_percent); 101 rw_attribute(writeback_delay); 102 rw_attribute(writeback_rate); 103 104 rw_attribute(writeback_rate_update_seconds); 105 rw_attribute(writeback_rate_i_term_inverse); 106 rw_attribute(writeback_rate_p_term_inverse); 107 rw_attribute(writeback_rate_minimum); 108 read_attribute(writeback_rate_debug); 109 110 read_attribute(stripe_size); 111 read_attribute(partial_stripes_expensive); 112 113 rw_attribute(synchronous); 114 rw_attribute(journal_delay_ms); 115 rw_attribute(io_disable); 116 rw_attribute(discard); 117 rw_attribute(running); 118 rw_attribute(label); 119 rw_attribute(readahead); 120 rw_attribute(errors); 121 rw_attribute(io_error_limit); 122 rw_attribute(io_error_halflife); 123 rw_attribute(verify); 124 rw_attribute(bypass_torture_test); 125 rw_attribute(key_merging_disabled); 126 rw_attribute(gc_always_rewrite); 127 rw_attribute(expensive_debug_checks); 128 rw_attribute(cache_replacement_policy); 129 rw_attribute(btree_shrinker_disabled); 130 rw_attribute(copy_gc_enabled); 131 rw_attribute(size); 132 133 static ssize_t bch_snprint_string_list(char *buf, size_t size, const char * const list[], 134 size_t selected) 135 { 136 char *out = buf; 137 size_t i; 138 139 for (i = 0; list[i]; i++) 140 out += snprintf(out, buf + size - out, 141 i == selected ? "[%s] " : "%s ", list[i]); 142 143 out[-1] = '\n'; 144 return out - buf; 145 } 146 147 SHOW(__bch_cached_dev) 148 { 149 struct cached_dev *dc = container_of(kobj, struct cached_dev, 150 disk.kobj); 151 const char *states[] = { "no cache", "clean", "dirty", "inconsistent" }; 152 153 #define var(stat) (dc->stat) 154 155 if (attr == &sysfs_cache_mode) 156 return bch_snprint_string_list(buf, PAGE_SIZE, 157 bch_cache_modes, 158 BDEV_CACHE_MODE(&dc->sb)); 159 160 if (attr == &sysfs_stop_when_cache_set_failed) 161 return bch_snprint_string_list(buf, PAGE_SIZE, 162 bch_stop_on_failure_modes, 163 dc->stop_when_cache_set_failed); 164 165 166 sysfs_printf(data_csum, "%i", dc->disk.data_csum); 167 var_printf(verify, "%i"); 168 var_printf(bypass_torture_test, "%i"); 169 var_printf(writeback_metadata, "%i"); 170 var_printf(writeback_running, "%i"); 171 var_print(writeback_delay); 172 var_print(writeback_percent); 173 sysfs_hprint(writeback_rate, dc->writeback_rate.rate << 9); 174 sysfs_hprint(io_errors, atomic_read(&dc->io_errors)); 175 sysfs_printf(io_error_limit, "%i", dc->error_limit); 176 sysfs_printf(io_disable, "%i", dc->io_disable); 177 var_print(writeback_rate_update_seconds); 178 var_print(writeback_rate_i_term_inverse); 179 var_print(writeback_rate_p_term_inverse); 180 var_print(writeback_rate_minimum); 181 182 if (attr == &sysfs_writeback_rate_debug) { 183 char rate[20]; 184 char dirty[20]; 185 char target[20]; 186 char proportional[20]; 187 char integral[20]; 188 char change[20]; 189 s64 next_io; 190 191 bch_hprint(rate, dc->writeback_rate.rate << 9); 192 bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9); 193 bch_hprint(target, dc->writeback_rate_target << 9); 194 bch_hprint(proportional,dc->writeback_rate_proportional << 9); 195 bch_hprint(integral, dc->writeback_rate_integral_scaled << 9); 196 bch_hprint(change, dc->writeback_rate_change << 9); 197 198 next_io = div64_s64(dc->writeback_rate.next - local_clock(), 199 NSEC_PER_MSEC); 200 201 return sprintf(buf, 202 "rate:\t\t%s/sec\n" 203 "dirty:\t\t%s\n" 204 "target:\t\t%s\n" 205 "proportional:\t%s\n" 206 "integral:\t%s\n" 207 "change:\t\t%s/sec\n" 208 "next io:\t%llims\n", 209 rate, dirty, target, proportional, 210 integral, change, next_io); 211 } 212 213 sysfs_hprint(dirty_data, 214 bcache_dev_sectors_dirty(&dc->disk) << 9); 215 216 sysfs_hprint(stripe_size, ((uint64_t)dc->disk.stripe_size) << 9); 217 var_printf(partial_stripes_expensive, "%u"); 218 219 var_hprint(sequential_cutoff); 220 var_hprint(readahead); 221 222 sysfs_print(running, atomic_read(&dc->running)); 223 sysfs_print(state, states[BDEV_STATE(&dc->sb)]); 224 225 if (attr == &sysfs_label) { 226 memcpy(buf, dc->sb.label, SB_LABEL_SIZE); 227 buf[SB_LABEL_SIZE + 1] = '\0'; 228 strcat(buf, "\n"); 229 return strlen(buf); 230 } 231 232 #undef var 233 return 0; 234 } 235 SHOW_LOCKED(bch_cached_dev) 236 237 STORE(__cached_dev) 238 { 239 struct cached_dev *dc = container_of(kobj, struct cached_dev, 240 disk.kobj); 241 ssize_t v; 242 struct cache_set *c; 243 struct kobj_uevent_env *env; 244 245 #define d_strtoul(var) sysfs_strtoul(var, dc->var) 246 #define d_strtoul_nonzero(var) sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX) 247 #define d_strtoi_h(var) sysfs_hatoi(var, dc->var) 248 249 sysfs_strtoul(data_csum, dc->disk.data_csum); 250 d_strtoul(verify); 251 d_strtoul(bypass_torture_test); 252 d_strtoul(writeback_metadata); 253 d_strtoul(writeback_running); 254 d_strtoul(writeback_delay); 255 256 sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40); 257 258 sysfs_strtoul_clamp(writeback_rate, 259 dc->writeback_rate.rate, 1, INT_MAX); 260 261 sysfs_strtoul_clamp(writeback_rate_update_seconds, 262 dc->writeback_rate_update_seconds, 263 1, WRITEBACK_RATE_UPDATE_SECS_MAX); 264 d_strtoul(writeback_rate_i_term_inverse); 265 d_strtoul_nonzero(writeback_rate_p_term_inverse); 266 267 sysfs_strtoul_clamp(io_error_limit, dc->error_limit, 0, INT_MAX); 268 269 if (attr == &sysfs_io_disable) { 270 int v = strtoul_or_return(buf); 271 272 dc->io_disable = v ? 1 : 0; 273 } 274 275 d_strtoi_h(sequential_cutoff); 276 d_strtoi_h(readahead); 277 278 if (attr == &sysfs_clear_stats) 279 bch_cache_accounting_clear(&dc->accounting); 280 281 if (attr == &sysfs_running && 282 strtoul_or_return(buf)) 283 bch_cached_dev_run(dc); 284 285 if (attr == &sysfs_cache_mode) { 286 v = __sysfs_match_string(bch_cache_modes, -1, buf); 287 if (v < 0) 288 return v; 289 290 if ((unsigned) v != BDEV_CACHE_MODE(&dc->sb)) { 291 SET_BDEV_CACHE_MODE(&dc->sb, v); 292 bch_write_bdev_super(dc, NULL); 293 } 294 } 295 296 if (attr == &sysfs_stop_when_cache_set_failed) { 297 v = __sysfs_match_string(bch_stop_on_failure_modes, -1, buf); 298 if (v < 0) 299 return v; 300 301 dc->stop_when_cache_set_failed = v; 302 } 303 304 if (attr == &sysfs_label) { 305 if (size > SB_LABEL_SIZE) 306 return -EINVAL; 307 memcpy(dc->sb.label, buf, size); 308 if (size < SB_LABEL_SIZE) 309 dc->sb.label[size] = '\0'; 310 if (size && dc->sb.label[size - 1] == '\n') 311 dc->sb.label[size - 1] = '\0'; 312 bch_write_bdev_super(dc, NULL); 313 if (dc->disk.c) { 314 memcpy(dc->disk.c->uuids[dc->disk.id].label, 315 buf, SB_LABEL_SIZE); 316 bch_uuid_write(dc->disk.c); 317 } 318 env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL); 319 if (!env) 320 return -ENOMEM; 321 add_uevent_var(env, "DRIVER=bcache"); 322 add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid), 323 add_uevent_var(env, "CACHED_LABEL=%s", buf); 324 kobject_uevent_env( 325 &disk_to_dev(dc->disk.disk)->kobj, KOBJ_CHANGE, env->envp); 326 kfree(env); 327 } 328 329 if (attr == &sysfs_attach) { 330 uint8_t set_uuid[16]; 331 332 if (bch_parse_uuid(buf, set_uuid) < 16) 333 return -EINVAL; 334 335 v = -ENOENT; 336 list_for_each_entry(c, &bch_cache_sets, list) { 337 v = bch_cached_dev_attach(dc, c, set_uuid); 338 if (!v) 339 return size; 340 } 341 342 pr_err("Can't attach %s: cache set not found", buf); 343 return v; 344 } 345 346 if (attr == &sysfs_detach && dc->disk.c) 347 bch_cached_dev_detach(dc); 348 349 if (attr == &sysfs_stop) 350 bcache_device_stop(&dc->disk); 351 352 return size; 353 } 354 355 STORE(bch_cached_dev) 356 { 357 struct cached_dev *dc = container_of(kobj, struct cached_dev, 358 disk.kobj); 359 360 mutex_lock(&bch_register_lock); 361 size = __cached_dev_store(kobj, attr, buf, size); 362 363 if (attr == &sysfs_writeback_running) 364 bch_writeback_queue(dc); 365 366 if (attr == &sysfs_writeback_percent) 367 if (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)) 368 schedule_delayed_work(&dc->writeback_rate_update, 369 dc->writeback_rate_update_seconds * HZ); 370 371 mutex_unlock(&bch_register_lock); 372 return size; 373 } 374 375 static struct attribute *bch_cached_dev_files[] = { 376 &sysfs_attach, 377 &sysfs_detach, 378 &sysfs_stop, 379 #if 0 380 &sysfs_data_csum, 381 #endif 382 &sysfs_cache_mode, 383 &sysfs_stop_when_cache_set_failed, 384 &sysfs_writeback_metadata, 385 &sysfs_writeback_running, 386 &sysfs_writeback_delay, 387 &sysfs_writeback_percent, 388 &sysfs_writeback_rate, 389 &sysfs_writeback_rate_update_seconds, 390 &sysfs_writeback_rate_i_term_inverse, 391 &sysfs_writeback_rate_p_term_inverse, 392 &sysfs_writeback_rate_debug, 393 &sysfs_errors, 394 &sysfs_io_error_limit, 395 &sysfs_io_disable, 396 &sysfs_dirty_data, 397 &sysfs_stripe_size, 398 &sysfs_partial_stripes_expensive, 399 &sysfs_sequential_cutoff, 400 &sysfs_clear_stats, 401 &sysfs_running, 402 &sysfs_state, 403 &sysfs_label, 404 &sysfs_readahead, 405 #ifdef CONFIG_BCACHE_DEBUG 406 &sysfs_verify, 407 &sysfs_bypass_torture_test, 408 #endif 409 NULL 410 }; 411 KTYPE(bch_cached_dev); 412 413 SHOW(bch_flash_dev) 414 { 415 struct bcache_device *d = container_of(kobj, struct bcache_device, 416 kobj); 417 struct uuid_entry *u = &d->c->uuids[d->id]; 418 419 sysfs_printf(data_csum, "%i", d->data_csum); 420 sysfs_hprint(size, u->sectors << 9); 421 422 if (attr == &sysfs_label) { 423 memcpy(buf, u->label, SB_LABEL_SIZE); 424 buf[SB_LABEL_SIZE + 1] = '\0'; 425 strcat(buf, "\n"); 426 return strlen(buf); 427 } 428 429 return 0; 430 } 431 432 STORE(__bch_flash_dev) 433 { 434 struct bcache_device *d = container_of(kobj, struct bcache_device, 435 kobj); 436 struct uuid_entry *u = &d->c->uuids[d->id]; 437 438 sysfs_strtoul(data_csum, d->data_csum); 439 440 if (attr == &sysfs_size) { 441 uint64_t v; 442 strtoi_h_or_return(buf, v); 443 444 u->sectors = v >> 9; 445 bch_uuid_write(d->c); 446 set_capacity(d->disk, u->sectors); 447 } 448 449 if (attr == &sysfs_label) { 450 memcpy(u->label, buf, SB_LABEL_SIZE); 451 bch_uuid_write(d->c); 452 } 453 454 if (attr == &sysfs_unregister) { 455 set_bit(BCACHE_DEV_DETACHING, &d->flags); 456 bcache_device_stop(d); 457 } 458 459 return size; 460 } 461 STORE_LOCKED(bch_flash_dev) 462 463 static struct attribute *bch_flash_dev_files[] = { 464 &sysfs_unregister, 465 #if 0 466 &sysfs_data_csum, 467 #endif 468 &sysfs_label, 469 &sysfs_size, 470 NULL 471 }; 472 KTYPE(bch_flash_dev); 473 474 struct bset_stats_op { 475 struct btree_op op; 476 size_t nodes; 477 struct bset_stats stats; 478 }; 479 480 static int bch_btree_bset_stats(struct btree_op *b_op, struct btree *b) 481 { 482 struct bset_stats_op *op = container_of(b_op, struct bset_stats_op, op); 483 484 op->nodes++; 485 bch_btree_keys_stats(&b->keys, &op->stats); 486 487 return MAP_CONTINUE; 488 } 489 490 static int bch_bset_print_stats(struct cache_set *c, char *buf) 491 { 492 struct bset_stats_op op; 493 int ret; 494 495 memset(&op, 0, sizeof(op)); 496 bch_btree_op_init(&op.op, -1); 497 498 ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, bch_btree_bset_stats); 499 if (ret < 0) 500 return ret; 501 502 return snprintf(buf, PAGE_SIZE, 503 "btree nodes: %zu\n" 504 "written sets: %zu\n" 505 "unwritten sets: %zu\n" 506 "written key bytes: %zu\n" 507 "unwritten key bytes: %zu\n" 508 "floats: %zu\n" 509 "failed: %zu\n", 510 op.nodes, 511 op.stats.sets_written, op.stats.sets_unwritten, 512 op.stats.bytes_written, op.stats.bytes_unwritten, 513 op.stats.floats, op.stats.failed); 514 } 515 516 static unsigned bch_root_usage(struct cache_set *c) 517 { 518 unsigned bytes = 0; 519 struct bkey *k; 520 struct btree *b; 521 struct btree_iter iter; 522 523 goto lock_root; 524 525 do { 526 rw_unlock(false, b); 527 lock_root: 528 b = c->root; 529 rw_lock(false, b, b->level); 530 } while (b != c->root); 531 532 for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad) 533 bytes += bkey_bytes(k); 534 535 rw_unlock(false, b); 536 537 return (bytes * 100) / btree_bytes(c); 538 } 539 540 static size_t bch_cache_size(struct cache_set *c) 541 { 542 size_t ret = 0; 543 struct btree *b; 544 545 mutex_lock(&c->bucket_lock); 546 list_for_each_entry(b, &c->btree_cache, list) 547 ret += 1 << (b->keys.page_order + PAGE_SHIFT); 548 549 mutex_unlock(&c->bucket_lock); 550 return ret; 551 } 552 553 static unsigned bch_cache_max_chain(struct cache_set *c) 554 { 555 unsigned ret = 0; 556 struct hlist_head *h; 557 558 mutex_lock(&c->bucket_lock); 559 560 for (h = c->bucket_hash; 561 h < c->bucket_hash + (1 << BUCKET_HASH_BITS); 562 h++) { 563 unsigned i = 0; 564 struct hlist_node *p; 565 566 hlist_for_each(p, h) 567 i++; 568 569 ret = max(ret, i); 570 } 571 572 mutex_unlock(&c->bucket_lock); 573 return ret; 574 } 575 576 static unsigned bch_btree_used(struct cache_set *c) 577 { 578 return div64_u64(c->gc_stats.key_bytes * 100, 579 (c->gc_stats.nodes ?: 1) * btree_bytes(c)); 580 } 581 582 static unsigned bch_average_key_size(struct cache_set *c) 583 { 584 return c->gc_stats.nkeys 585 ? div64_u64(c->gc_stats.data, c->gc_stats.nkeys) 586 : 0; 587 } 588 589 SHOW(__bch_cache_set) 590 { 591 struct cache_set *c = container_of(kobj, struct cache_set, kobj); 592 593 sysfs_print(synchronous, CACHE_SYNC(&c->sb)); 594 sysfs_print(journal_delay_ms, c->journal_delay_ms); 595 sysfs_hprint(bucket_size, bucket_bytes(c)); 596 sysfs_hprint(block_size, block_bytes(c)); 597 sysfs_print(tree_depth, c->root->level); 598 sysfs_print(root_usage_percent, bch_root_usage(c)); 599 600 sysfs_hprint(btree_cache_size, bch_cache_size(c)); 601 sysfs_print(btree_cache_max_chain, bch_cache_max_chain(c)); 602 sysfs_print(cache_available_percent, 100 - c->gc_stats.in_use); 603 604 sysfs_print_time_stats(&c->btree_gc_time, btree_gc, sec, ms); 605 sysfs_print_time_stats(&c->btree_split_time, btree_split, sec, us); 606 sysfs_print_time_stats(&c->sort.time, btree_sort, ms, us); 607 sysfs_print_time_stats(&c->btree_read_time, btree_read, ms, us); 608 609 sysfs_print(btree_used_percent, bch_btree_used(c)); 610 sysfs_print(btree_nodes, c->gc_stats.nodes); 611 sysfs_hprint(average_key_size, bch_average_key_size(c)); 612 613 sysfs_print(cache_read_races, 614 atomic_long_read(&c->cache_read_races)); 615 616 sysfs_print(reclaim, 617 atomic_long_read(&c->reclaim)); 618 619 sysfs_print(flush_write, 620 atomic_long_read(&c->flush_write)); 621 622 sysfs_print(retry_flush_write, 623 atomic_long_read(&c->retry_flush_write)); 624 625 sysfs_print(writeback_keys_done, 626 atomic_long_read(&c->writeback_keys_done)); 627 sysfs_print(writeback_keys_failed, 628 atomic_long_read(&c->writeback_keys_failed)); 629 630 if (attr == &sysfs_errors) 631 return bch_snprint_string_list(buf, PAGE_SIZE, error_actions, 632 c->on_error); 633 634 /* See count_io_errors for why 88 */ 635 sysfs_print(io_error_halflife, c->error_decay * 88); 636 sysfs_print(io_error_limit, c->error_limit); 637 638 sysfs_hprint(congested, 639 ((uint64_t) bch_get_congested(c)) << 9); 640 sysfs_print(congested_read_threshold_us, 641 c->congested_read_threshold_us); 642 sysfs_print(congested_write_threshold_us, 643 c->congested_write_threshold_us); 644 645 sysfs_print(active_journal_entries, fifo_used(&c->journal.pin)); 646 sysfs_printf(verify, "%i", c->verify); 647 sysfs_printf(key_merging_disabled, "%i", c->key_merging_disabled); 648 sysfs_printf(expensive_debug_checks, 649 "%i", c->expensive_debug_checks); 650 sysfs_printf(gc_always_rewrite, "%i", c->gc_always_rewrite); 651 sysfs_printf(btree_shrinker_disabled, "%i", c->shrinker_disabled); 652 sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled); 653 sysfs_printf(io_disable, "%i", 654 test_bit(CACHE_SET_IO_DISABLE, &c->flags)); 655 656 if (attr == &sysfs_bset_tree_stats) 657 return bch_bset_print_stats(c, buf); 658 659 return 0; 660 } 661 SHOW_LOCKED(bch_cache_set) 662 663 STORE(__bch_cache_set) 664 { 665 struct cache_set *c = container_of(kobj, struct cache_set, kobj); 666 ssize_t v; 667 668 if (attr == &sysfs_unregister) 669 bch_cache_set_unregister(c); 670 671 if (attr == &sysfs_stop) 672 bch_cache_set_stop(c); 673 674 if (attr == &sysfs_synchronous) { 675 bool sync = strtoul_or_return(buf); 676 677 if (sync != CACHE_SYNC(&c->sb)) { 678 SET_CACHE_SYNC(&c->sb, sync); 679 bcache_write_super(c); 680 } 681 } 682 683 if (attr == &sysfs_flash_vol_create) { 684 int r; 685 uint64_t v; 686 strtoi_h_or_return(buf, v); 687 688 r = bch_flash_dev_create(c, v); 689 if (r) 690 return r; 691 } 692 693 if (attr == &sysfs_clear_stats) { 694 atomic_long_set(&c->writeback_keys_done, 0); 695 atomic_long_set(&c->writeback_keys_failed, 0); 696 697 memset(&c->gc_stats, 0, sizeof(struct gc_stat)); 698 bch_cache_accounting_clear(&c->accounting); 699 } 700 701 if (attr == &sysfs_trigger_gc) { 702 /* 703 * Garbage collection thread only works when sectors_to_gc < 0, 704 * when users write to sysfs entry trigger_gc, most of time 705 * they want to forcibly triger gargage collection. Here -1 is 706 * set to c->sectors_to_gc, to make gc_should_run() give a 707 * chance to permit gc thread to run. "give a chance" means 708 * before going into gc_should_run(), there is still chance 709 * that c->sectors_to_gc being set to other positive value. So 710 * writing sysfs entry trigger_gc won't always make sure gc 711 * thread takes effect. 712 */ 713 atomic_set(&c->sectors_to_gc, -1); 714 wake_up_gc(c); 715 } 716 717 if (attr == &sysfs_prune_cache) { 718 struct shrink_control sc; 719 sc.gfp_mask = GFP_KERNEL; 720 sc.nr_to_scan = strtoul_or_return(buf); 721 c->shrink.scan_objects(&c->shrink, &sc); 722 } 723 724 sysfs_strtoul(congested_read_threshold_us, 725 c->congested_read_threshold_us); 726 sysfs_strtoul(congested_write_threshold_us, 727 c->congested_write_threshold_us); 728 729 if (attr == &sysfs_errors) { 730 v = __sysfs_match_string(error_actions, -1, buf); 731 if (v < 0) 732 return v; 733 734 c->on_error = v; 735 } 736 737 if (attr == &sysfs_io_error_limit) 738 c->error_limit = strtoul_or_return(buf); 739 740 /* See count_io_errors() for why 88 */ 741 if (attr == &sysfs_io_error_halflife) 742 c->error_decay = strtoul_or_return(buf) / 88; 743 744 if (attr == &sysfs_io_disable) { 745 v = strtoul_or_return(buf); 746 if (v) { 747 if (test_and_set_bit(CACHE_SET_IO_DISABLE, 748 &c->flags)) 749 pr_warn("CACHE_SET_IO_DISABLE already set"); 750 } else { 751 if (!test_and_clear_bit(CACHE_SET_IO_DISABLE, 752 &c->flags)) 753 pr_warn("CACHE_SET_IO_DISABLE already cleared"); 754 } 755 } 756 757 sysfs_strtoul(journal_delay_ms, c->journal_delay_ms); 758 sysfs_strtoul(verify, c->verify); 759 sysfs_strtoul(key_merging_disabled, c->key_merging_disabled); 760 sysfs_strtoul(expensive_debug_checks, c->expensive_debug_checks); 761 sysfs_strtoul(gc_always_rewrite, c->gc_always_rewrite); 762 sysfs_strtoul(btree_shrinker_disabled, c->shrinker_disabled); 763 sysfs_strtoul(copy_gc_enabled, c->copy_gc_enabled); 764 765 return size; 766 } 767 STORE_LOCKED(bch_cache_set) 768 769 SHOW(bch_cache_set_internal) 770 { 771 struct cache_set *c = container_of(kobj, struct cache_set, internal); 772 return bch_cache_set_show(&c->kobj, attr, buf); 773 } 774 775 STORE(bch_cache_set_internal) 776 { 777 struct cache_set *c = container_of(kobj, struct cache_set, internal); 778 return bch_cache_set_store(&c->kobj, attr, buf, size); 779 } 780 781 static void bch_cache_set_internal_release(struct kobject *k) 782 { 783 } 784 785 static struct attribute *bch_cache_set_files[] = { 786 &sysfs_unregister, 787 &sysfs_stop, 788 &sysfs_synchronous, 789 &sysfs_journal_delay_ms, 790 &sysfs_flash_vol_create, 791 792 &sysfs_bucket_size, 793 &sysfs_block_size, 794 &sysfs_tree_depth, 795 &sysfs_root_usage_percent, 796 &sysfs_btree_cache_size, 797 &sysfs_cache_available_percent, 798 799 &sysfs_average_key_size, 800 801 &sysfs_errors, 802 &sysfs_io_error_limit, 803 &sysfs_io_error_halflife, 804 &sysfs_congested, 805 &sysfs_congested_read_threshold_us, 806 &sysfs_congested_write_threshold_us, 807 &sysfs_clear_stats, 808 NULL 809 }; 810 KTYPE(bch_cache_set); 811 812 static struct attribute *bch_cache_set_internal_files[] = { 813 &sysfs_active_journal_entries, 814 815 sysfs_time_stats_attribute_list(btree_gc, sec, ms) 816 sysfs_time_stats_attribute_list(btree_split, sec, us) 817 sysfs_time_stats_attribute_list(btree_sort, ms, us) 818 sysfs_time_stats_attribute_list(btree_read, ms, us) 819 820 &sysfs_btree_nodes, 821 &sysfs_btree_used_percent, 822 &sysfs_btree_cache_max_chain, 823 824 &sysfs_bset_tree_stats, 825 &sysfs_cache_read_races, 826 &sysfs_reclaim, 827 &sysfs_flush_write, 828 &sysfs_retry_flush_write, 829 &sysfs_writeback_keys_done, 830 &sysfs_writeback_keys_failed, 831 832 &sysfs_trigger_gc, 833 &sysfs_prune_cache, 834 #ifdef CONFIG_BCACHE_DEBUG 835 &sysfs_verify, 836 &sysfs_key_merging_disabled, 837 &sysfs_expensive_debug_checks, 838 #endif 839 &sysfs_gc_always_rewrite, 840 &sysfs_btree_shrinker_disabled, 841 &sysfs_copy_gc_enabled, 842 &sysfs_io_disable, 843 NULL 844 }; 845 KTYPE(bch_cache_set_internal); 846 847 static int __bch_cache_cmp(const void *l, const void *r) 848 { 849 return *((uint16_t *)r) - *((uint16_t *)l); 850 } 851 852 SHOW(__bch_cache) 853 { 854 struct cache *ca = container_of(kobj, struct cache, kobj); 855 856 sysfs_hprint(bucket_size, bucket_bytes(ca)); 857 sysfs_hprint(block_size, block_bytes(ca)); 858 sysfs_print(nbuckets, ca->sb.nbuckets); 859 sysfs_print(discard, ca->discard); 860 sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9); 861 sysfs_hprint(btree_written, 862 atomic_long_read(&ca->btree_sectors_written) << 9); 863 sysfs_hprint(metadata_written, 864 (atomic_long_read(&ca->meta_sectors_written) + 865 atomic_long_read(&ca->btree_sectors_written)) << 9); 866 867 sysfs_print(io_errors, 868 atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT); 869 870 if (attr == &sysfs_cache_replacement_policy) 871 return bch_snprint_string_list(buf, PAGE_SIZE, 872 cache_replacement_policies, 873 CACHE_REPLACEMENT(&ca->sb)); 874 875 if (attr == &sysfs_priority_stats) { 876 struct bucket *b; 877 size_t n = ca->sb.nbuckets, i; 878 size_t unused = 0, available = 0, dirty = 0, meta = 0; 879 uint64_t sum = 0; 880 /* Compute 31 quantiles */ 881 uint16_t q[31], *p, *cached; 882 ssize_t ret; 883 884 cached = p = vmalloc(array_size(sizeof(uint16_t), 885 ca->sb.nbuckets)); 886 if (!p) 887 return -ENOMEM; 888 889 mutex_lock(&ca->set->bucket_lock); 890 for_each_bucket(b, ca) { 891 if (!GC_SECTORS_USED(b)) 892 unused++; 893 if (GC_MARK(b) == GC_MARK_RECLAIMABLE) 894 available++; 895 if (GC_MARK(b) == GC_MARK_DIRTY) 896 dirty++; 897 if (GC_MARK(b) == GC_MARK_METADATA) 898 meta++; 899 } 900 901 for (i = ca->sb.first_bucket; i < n; i++) 902 p[i] = ca->buckets[i].prio; 903 mutex_unlock(&ca->set->bucket_lock); 904 905 sort(p, n, sizeof(uint16_t), __bch_cache_cmp, NULL); 906 907 while (n && 908 !cached[n - 1]) 909 --n; 910 911 unused = ca->sb.nbuckets - n; 912 913 while (cached < p + n && 914 *cached == BTREE_PRIO) 915 cached++, n--; 916 917 for (i = 0; i < n; i++) 918 sum += INITIAL_PRIO - cached[i]; 919 920 if (n) 921 do_div(sum, n); 922 923 for (i = 0; i < ARRAY_SIZE(q); i++) 924 q[i] = INITIAL_PRIO - cached[n * (i + 1) / 925 (ARRAY_SIZE(q) + 1)]; 926 927 vfree(p); 928 929 ret = scnprintf(buf, PAGE_SIZE, 930 "Unused: %zu%%\n" 931 "Clean: %zu%%\n" 932 "Dirty: %zu%%\n" 933 "Metadata: %zu%%\n" 934 "Average: %llu\n" 935 "Sectors per Q: %zu\n" 936 "Quantiles: [", 937 unused * 100 / (size_t) ca->sb.nbuckets, 938 available * 100 / (size_t) ca->sb.nbuckets, 939 dirty * 100 / (size_t) ca->sb.nbuckets, 940 meta * 100 / (size_t) ca->sb.nbuckets, sum, 941 n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1)); 942 943 for (i = 0; i < ARRAY_SIZE(q); i++) 944 ret += scnprintf(buf + ret, PAGE_SIZE - ret, 945 "%u ", q[i]); 946 ret--; 947 948 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "]\n"); 949 950 return ret; 951 } 952 953 return 0; 954 } 955 SHOW_LOCKED(bch_cache) 956 957 STORE(__bch_cache) 958 { 959 struct cache *ca = container_of(kobj, struct cache, kobj); 960 ssize_t v; 961 962 if (attr == &sysfs_discard) { 963 bool v = strtoul_or_return(buf); 964 965 if (blk_queue_discard(bdev_get_queue(ca->bdev))) 966 ca->discard = v; 967 968 if (v != CACHE_DISCARD(&ca->sb)) { 969 SET_CACHE_DISCARD(&ca->sb, v); 970 bcache_write_super(ca->set); 971 } 972 } 973 974 if (attr == &sysfs_cache_replacement_policy) { 975 v = __sysfs_match_string(cache_replacement_policies, -1, buf); 976 if (v < 0) 977 return v; 978 979 if ((unsigned) v != CACHE_REPLACEMENT(&ca->sb)) { 980 mutex_lock(&ca->set->bucket_lock); 981 SET_CACHE_REPLACEMENT(&ca->sb, v); 982 mutex_unlock(&ca->set->bucket_lock); 983 984 bcache_write_super(ca->set); 985 } 986 } 987 988 if (attr == &sysfs_clear_stats) { 989 atomic_long_set(&ca->sectors_written, 0); 990 atomic_long_set(&ca->btree_sectors_written, 0); 991 atomic_long_set(&ca->meta_sectors_written, 0); 992 atomic_set(&ca->io_count, 0); 993 atomic_set(&ca->io_errors, 0); 994 } 995 996 return size; 997 } 998 STORE_LOCKED(bch_cache) 999 1000 static struct attribute *bch_cache_files[] = { 1001 &sysfs_bucket_size, 1002 &sysfs_block_size, 1003 &sysfs_nbuckets, 1004 &sysfs_priority_stats, 1005 &sysfs_discard, 1006 &sysfs_written, 1007 &sysfs_btree_written, 1008 &sysfs_metadata_written, 1009 &sysfs_io_errors, 1010 &sysfs_clear_stats, 1011 &sysfs_cache_replacement_policy, 1012 NULL 1013 }; 1014 KTYPE(bch_cache); 1015