// SPDX-License-Identifier: GPL-2.0
/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "sysfs.h"
#include "btree.h"
#include "request.h"
#include "writeback.h"

#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/sched/clock.h>

/* Default is 0 ("writethrough") */
static const char * const bch_cache_modes[] = {
	"writethrough",
	"writeback",
	"writearound",
	"none",
	NULL
};

/* Default is 0 ("auto") */
static const char * const bch_stop_on_failure_modes[] = {
	"auto",
	"always",
	NULL
};

static const char * const cache_replacement_policies[] = {
	"lru",
	"fifo",
	"random",
	NULL
};

static const char * const error_actions[] = {
	"unregister",
	"panic",
	NULL
};

write_attribute(attach);
write_attribute(detach);
write_attribute(unregister);
write_attribute(stop);
write_attribute(clear_stats);
write_attribute(trigger_gc);
write_attribute(prune_cache);
write_attribute(flash_vol_create);

read_attribute(bucket_size);
read_attribute(block_size);
read_attribute(nbuckets);
read_attribute(tree_depth);
read_attribute(root_usage_percent);
read_attribute(priority_stats);
read_attribute(btree_cache_size);
read_attribute(btree_cache_max_chain);
read_attribute(cache_available_percent);
read_attribute(written);
read_attribute(btree_written);
read_attribute(metadata_written);
read_attribute(active_journal_entries);

sysfs_time_stats_attribute(btree_gc, sec, ms);
sysfs_time_stats_attribute(btree_split, sec, us);
sysfs_time_stats_attribute(btree_sort, ms, us);
sysfs_time_stats_attribute(btree_read, ms, us);

read_attribute(btree_nodes);
read_attribute(btree_used_percent);
read_attribute(average_key_size);
read_attribute(dirty_data);
read_attribute(bset_tree_stats);

read_attribute(state);
read_attribute(cache_read_races);
read_attribute(reclaim);
read_attribute(flush_write);
read_attribute(retry_flush_write);
read_attribute(writeback_keys_done);
read_attribute(writeback_keys_failed);
read_attribute(io_errors);
read_attribute(congested);
rw_attribute(congested_read_threshold_us);
rw_attribute(congested_write_threshold_us);

rw_attribute(sequential_cutoff);
rw_attribute(data_csum);
rw_attribute(cache_mode);
rw_attribute(stop_when_cache_set_failed);
rw_attribute(writeback_metadata);
rw_attribute(writeback_running);
rw_attribute(writeback_percent);
rw_attribute(writeback_delay);
rw_attribute(writeback_rate);

rw_attribute(writeback_rate_update_seconds);
rw_attribute(writeback_rate_i_term_inverse);
rw_attribute(writeback_rate_p_term_inverse);
rw_attribute(writeback_rate_minimum);
read_attribute(writeback_rate_debug);

read_attribute(stripe_size);
read_attribute(partial_stripes_expensive);

rw_attribute(synchronous);
rw_attribute(journal_delay_ms);
rw_attribute(io_disable);
rw_attribute(discard);
rw_attribute(running);
rw_attribute(label);
rw_attribute(readahead);
rw_attribute(errors);
rw_attribute(io_error_limit);
rw_attribute(io_error_halflife);
rw_attribute(verify);
rw_attribute(bypass_torture_test);
rw_attribute(key_merging_disabled);
rw_attribute(gc_always_rewrite);
rw_attribute(expensive_debug_checks);
rw_attribute(cache_replacement_policy);
rw_attribute(btree_shrinker_disabled);
rw_attribute(copy_gc_enabled);
rw_attribute(size);
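
/*
 * The attributes declared above surface as regular sysfs files under
 * /sys/block/<bdev>/bcache/ (backing devices), /sys/fs/bcache/<set-uuid>/
 * (cache sets) and /sys/fs/bcache/<set-uuid>/cache<N>/ (cache devices).
 * Illustrative userspace usage (device names are examples):
 *
 *	echo writeback > /sys/block/bcache0/bcache/cache_mode
 *	cat /sys/fs/bcache/<set-uuid>/cache_available_percent
 */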

/*
 * Print a NULL-terminated string list into buf, one entry per iteration,
 * with the currently selected entry wrapped in square brackets.
 */
static ssize_t bch_snprint_string_list(char *buf,
				       size_t size,
				       const char * const list[],
				       size_t selected)
{
	char *out = buf;
	size_t i;

	for (i = 0; list[i]; i++)
		out += snprintf(out, buf + size - out,
				i == selected ? "[%s] " : "%s ", list[i]);

	/* Replace the trailing space with a newline */
	out[-1] = '\n';
	return out - buf;
}
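
/*
 * For example, with bch_cache_modes and selected == 1 the resulting buffer
 * is "writethrough [writeback] writearound none\n"; the bracketed entry is
 * the active one.
 */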

SHOW(__bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	char const *states[] = { "no cache", "clean", "dirty", "inconsistent" };
	int wb = dc->writeback_running;

#define var(stat) (dc->stat)

	if (attr == &sysfs_cache_mode)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_cache_modes,
					       BDEV_CACHE_MODE(&dc->sb));

	if (attr == &sysfs_stop_when_cache_set_failed)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_stop_on_failure_modes,
					       dc->stop_when_cache_set_failed);

	sysfs_printf(data_csum, "%i", dc->disk.data_csum);
	var_printf(verify, "%i");
	var_printf(bypass_torture_test, "%i");
	var_printf(writeback_metadata, "%i");
	var_printf(writeback_running, "%i");
	var_print(writeback_delay);
	var_print(writeback_percent);
	sysfs_hprint(writeback_rate,
		     wb ? atomic_long_read(&dc->writeback_rate.rate) << 9 : 0);
	sysfs_hprint(io_errors, atomic_read(&dc->io_errors));
	sysfs_printf(io_error_limit, "%i", dc->error_limit);
	sysfs_printf(io_disable, "%i", dc->io_disable);
	var_print(writeback_rate_update_seconds);
	var_print(writeback_rate_i_term_inverse);
	var_print(writeback_rate_p_term_inverse);
	var_print(writeback_rate_minimum);

	if (attr == &sysfs_writeback_rate_debug) {
		char rate[20];
		char dirty[20];
		char target[20];
		char proportional[20];
		char integral[20];
		char change[20];
		s64 next_io;

		/*
		 * Except for dirty and target, other values should
		 * be 0 if writeback is not running. Rates are kept in
		 * sectors; << 9 converts them to bytes for display.
		 */
		bch_hprint(rate,
			   wb ? atomic_long_read(&dc->writeback_rate.rate) << 9
			      : 0);
		bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9);
		bch_hprint(target, dc->writeback_rate_target << 9);
		bch_hprint(proportional,
			   wb ? dc->writeback_rate_proportional << 9 : 0);
		bch_hprint(integral,
			   wb ? dc->writeback_rate_integral_scaled << 9 : 0);
		bch_hprint(change, wb ? dc->writeback_rate_change << 9 : 0);
		next_io = wb ? div64_s64(dc->writeback_rate.next - local_clock(),
					 NSEC_PER_MSEC)
			     : 0;

		return sprintf(buf,
			       "rate:\t\t%s/sec\n"
			       "dirty:\t\t%s\n"
			       "target:\t\t%s\n"
			       "proportional:\t%s\n"
			       "integral:\t%s\n"
			       "change:\t\t%s/sec\n"
			       "next io:\t%llims\n",
			       rate, dirty, target, proportional,
			       integral, change, next_io);
	}

	sysfs_hprint(dirty_data,
		     bcache_dev_sectors_dirty(&dc->disk) << 9);

	sysfs_hprint(stripe_size, ((uint64_t)dc->disk.stripe_size) << 9);
	var_printf(partial_stripes_expensive, "%u");

	var_hprint(sequential_cutoff);
	var_hprint(readahead);

	sysfs_print(running, atomic_read(&dc->running));
	sysfs_print(state, states[BDEV_STATE(&dc->sb)]);

	if (attr == &sysfs_label) {
		memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
		/* Terminate right after the label; it may not be NUL padded */
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

#undef var
	return 0;
}
SHOW_LOCKED(bch_cached_dev)
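
/*
 * Writes to the attributes below parse the user buffer and update the
 * in-core (and, where noted, on-disk) state. A minimal sketch from
 * userspace (the bcache0 name is illustrative):
 *
 *	echo 10     > /sys/block/bcache0/bcache/writeback_percent
 *	echo 4M     > /sys/block/bcache0/bcache/sequential_cutoff
 *	echo mydisk > /sys/block/bcache0/bcache/label
 */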

STORE(__cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	ssize_t v;
	struct cache_set *c;
	struct kobj_uevent_env *env;

#define d_strtoul(var) sysfs_strtoul(var, dc->var)
#define d_strtoul_nonzero(var) sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX)
#define d_strtoi_h(var) sysfs_hatoi(var, dc->var)

	sysfs_strtoul(data_csum, dc->disk.data_csum);
	d_strtoul(verify);
	d_strtoul(bypass_torture_test);
	d_strtoul(writeback_metadata);
	d_strtoul(writeback_running);
	d_strtoul(writeback_delay);

	sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40);

	if (attr == &sysfs_writeback_rate) {
		ssize_t ret;
		long int v = atomic_long_read(&dc->writeback_rate.rate);

		ret = strtoul_safe_clamp(buf, v, 1, INT_MAX);

		if (!ret) {
			atomic_long_set(&dc->writeback_rate.rate, v);
			ret = size;
		}

		return ret;
	}

	sysfs_strtoul_clamp(writeback_rate_update_seconds,
			    dc->writeback_rate_update_seconds,
			    1, WRITEBACK_RATE_UPDATE_SECS_MAX);
	d_strtoul(writeback_rate_i_term_inverse);
	d_strtoul_nonzero(writeback_rate_p_term_inverse);
	d_strtoul_nonzero(writeback_rate_minimum);

	sysfs_strtoul_clamp(io_error_limit, dc->error_limit, 0, INT_MAX);

	if (attr == &sysfs_io_disable) {
		int v = strtoul_or_return(buf);

		dc->io_disable = v ? 1 : 0;
	}

	d_strtoi_h(sequential_cutoff);
	d_strtoi_h(readahead);

	if (attr == &sysfs_clear_stats)
		bch_cache_accounting_clear(&dc->accounting);

	if (attr == &sysfs_running &&
	    strtoul_or_return(buf))
		bch_cached_dev_run(dc);

	if (attr == &sysfs_cache_mode) {
		v = __sysfs_match_string(bch_cache_modes, -1, buf);
		if (v < 0)
			return v;

		if ((unsigned int) v != BDEV_CACHE_MODE(&dc->sb)) {
			SET_BDEV_CACHE_MODE(&dc->sb, v);
			bch_write_bdev_super(dc, NULL);
		}
	}

	if (attr == &sysfs_stop_when_cache_set_failed) {
		v = __sysfs_match_string(bch_stop_on_failure_modes, -1, buf);
		if (v < 0)
			return v;

		dc->stop_when_cache_set_failed = v;
	}

	if (attr == &sysfs_label) {
		if (size > SB_LABEL_SIZE)
			return -EINVAL;
		memcpy(dc->sb.label, buf, size);
		if (size < SB_LABEL_SIZE)
			dc->sb.label[size] = '\0';
		if (size && dc->sb.label[size - 1] == '\n')
			dc->sb.label[size - 1] = '\0';
		bch_write_bdev_super(dc, NULL);
		if (dc->disk.c) {
			memcpy(dc->disk.c->uuids[dc->disk.id].label,
			       buf, SB_LABEL_SIZE);
			bch_uuid_write(dc->disk.c);
		}
		env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
		if (!env)
			return -ENOMEM;
		add_uevent_var(env, "DRIVER=bcache");
		add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid);
		add_uevent_var(env, "CACHED_LABEL=%s", buf);
		kobject_uevent_env(&disk_to_dev(dc->disk.disk)->kobj,
				   KOBJ_CHANGE,
				   env->envp);
		kfree(env);
	}

	if (attr == &sysfs_attach) {
		uint8_t set_uuid[16];

		if (bch_parse_uuid(buf, set_uuid) < 16)
			return -EINVAL;

		v = -ENOENT;
		list_for_each_entry(c, &bch_cache_sets, list) {
			v = bch_cached_dev_attach(dc, c, set_uuid);
			if (!v)
				return size;
		}
		if (v == -ENOENT)
			pr_err("Can't attach %s: cache set not found", buf);
		return v;
	}

	if (attr == &sysfs_detach && dc->disk.c)
		bch_cached_dev_detach(dc);

	if (attr == &sysfs_stop)
		bcache_device_stop(&dc->disk);

	return size;
}

STORE(bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);

	mutex_lock(&bch_register_lock);
	size = __cached_dev_store(kobj, attr, buf, size);

	if (attr == &sysfs_writeback_running)
		bch_writeback_queue(dc);

	if (attr == &sysfs_writeback_percent)
		if (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
			schedule_delayed_work(&dc->writeback_rate_update,
					      dc->writeback_rate_update_seconds * HZ);

	mutex_unlock(&bch_register_lock);
	return size;
}

static struct attribute *bch_cached_dev_files[] = {
	&sysfs_attach,
	&sysfs_detach,
	&sysfs_stop,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_cache_mode,
	&sysfs_stop_when_cache_set_failed,
	&sysfs_writeback_metadata,
	&sysfs_writeback_running,
	&sysfs_writeback_delay,
	&sysfs_writeback_percent,
	&sysfs_writeback_rate,
	&sysfs_writeback_rate_update_seconds,
	&sysfs_writeback_rate_i_term_inverse,
	&sysfs_writeback_rate_p_term_inverse,
	&sysfs_writeback_rate_minimum,
	&sysfs_writeback_rate_debug,
	&sysfs_errors,
	&sysfs_io_error_limit,
	&sysfs_io_disable,
	&sysfs_dirty_data,
	&sysfs_stripe_size,
	&sysfs_partial_stripes_expensive,
	&sysfs_sequential_cutoff,
	&sysfs_clear_stats,
	&sysfs_running,
	&sysfs_state,
	&sysfs_label,
	&sysfs_readahead,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_bypass_torture_test,
#endif
	NULL
};
KTYPE(bch_cached_dev);
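
/*
 * Flash-only volumes: thinly provisioned volumes backed entirely by the
 * cache set. A sketch of the lifecycle from userspace (set UUID and
 * device number are illustrative):
 *
 *	echo 128M > /sys/fs/bcache/<set-uuid>/flash_vol_create
 *	echo 256M > /sys/block/bcache<N>/bcache/size	# resize
 *	echo 1    > /sys/block/bcache<N>/bcache/unregister
 */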

SHOW(bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_printf(data_csum, "%i", d->data_csum);
	sysfs_hprint(size, u->sectors << 9);

	if (attr == &sysfs_label) {
		memcpy(buf, u->label, SB_LABEL_SIZE);
		/* Terminate right after the label; it may not be NUL padded */
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

	return 0;
}

STORE(__bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_strtoul(data_csum, d->data_csum);

	if (attr == &sysfs_size) {
		uint64_t v;

		strtoi_h_or_return(buf, v);

		u->sectors = v >> 9;
		bch_uuid_write(d->c);
		set_capacity(d->disk, u->sectors);
	}

	if (attr == &sysfs_label) {
		memcpy(u->label, buf, SB_LABEL_SIZE);
		bch_uuid_write(d->c);
	}

	if (attr == &sysfs_unregister) {
		set_bit(BCACHE_DEV_DETACHING, &d->flags);
		bcache_device_stop(d);
	}

	return size;
}
STORE_LOCKED(bch_flash_dev)

static struct attribute *bch_flash_dev_files[] = {
	&sysfs_unregister,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_label,
	&sysfs_size,
	NULL
};
KTYPE(bch_flash_dev);

struct bset_stats_op {
	struct btree_op op;
	size_t nodes;
	struct bset_stats stats;
};

static int bch_btree_bset_stats(struct btree_op *b_op, struct btree *b)
{
	struct bset_stats_op *op = container_of(b_op, struct bset_stats_op, op);

	op->nodes++;
	bch_btree_keys_stats(&b->keys, &op->stats);

	return MAP_CONTINUE;
}

static int bch_bset_print_stats(struct cache_set *c, char *buf)
{
	struct bset_stats_op op;
	int ret;

	memset(&op, 0, sizeof(op));
	bch_btree_op_init(&op.op, -1);

	ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, bch_btree_bset_stats);
	if (ret < 0)
		return ret;

	return snprintf(buf, PAGE_SIZE,
			"btree nodes: %zu\n"
			"written sets: %zu\n"
			"unwritten sets: %zu\n"
			"written key bytes: %zu\n"
			"unwritten key bytes: %zu\n"
			"floats: %zu\n"
			"failed: %zu\n",
			op.nodes,
			op.stats.sets_written, op.stats.sets_unwritten,
			op.stats.bytes_written, op.stats.bytes_unwritten,
			op.stats.floats, op.stats.failed);
}

static unsigned int bch_root_usage(struct cache_set *c)
{
	unsigned int bytes = 0;
	struct bkey *k;
	struct btree *b;
	struct btree_iter iter;

	/*
	 * The root node may be replaced while we wait for the lock,
	 * so retry until the node we locked is still the root.
	 */
	goto lock_root;

	do {
		rw_unlock(false, b);
lock_root:
		b = c->root;
		rw_lock(false, b, b->level);
	} while (b != c->root);

	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
		bytes += bkey_bytes(k);

	rw_unlock(false, b);

	return (bytes * 100) / btree_bytes(c);
}

/* Total bytes currently held by the btree node cache */
static size_t bch_cache_size(struct cache_set *c)
{
	size_t ret = 0;
	struct btree *b;

	mutex_lock(&c->bucket_lock);
	list_for_each_entry(b, &c->btree_cache, list)
		ret += 1 << (b->keys.page_order + PAGE_SHIFT);

	mutex_unlock(&c->bucket_lock);
	return ret;
}

/* Longest chain in the bucket hash table */
static unsigned int bch_cache_max_chain(struct cache_set *c)
{
	unsigned int ret = 0;
	struct hlist_head *h;

	mutex_lock(&c->bucket_lock);

	for (h = c->bucket_hash;
	     h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
	     h++) {
		unsigned int i = 0;
		struct hlist_node *p;

		hlist_for_each(p, h)
			i++;

		ret = max(ret, i);
	}

	mutex_unlock(&c->bucket_lock);
	return ret;
}

static unsigned int bch_btree_used(struct cache_set *c)
{
	return div64_u64(c->gc_stats.key_bytes * 100,
			 (c->gc_stats.nodes ?: 1) * btree_bytes(c));
}

static unsigned int bch_average_key_size(struct cache_set *c)
{
	return c->gc_stats.nkeys
		? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
		: 0;
}

SHOW(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);

	sysfs_print(synchronous, CACHE_SYNC(&c->sb));
	sysfs_print(journal_delay_ms, c->journal_delay_ms);
	sysfs_hprint(bucket_size, bucket_bytes(c));
	sysfs_hprint(block_size, block_bytes(c));
	sysfs_print(tree_depth, c->root->level);
	sysfs_print(root_usage_percent, bch_root_usage(c));

	sysfs_hprint(btree_cache_size, bch_cache_size(c));
	sysfs_print(btree_cache_max_chain, bch_cache_max_chain(c));
	sysfs_print(cache_available_percent, 100 - c->gc_stats.in_use);

	sysfs_print_time_stats(&c->btree_gc_time, btree_gc, sec, ms);
	sysfs_print_time_stats(&c->btree_split_time, btree_split, sec, us);
	sysfs_print_time_stats(&c->sort.time, btree_sort, ms, us);
	sysfs_print_time_stats(&c->btree_read_time, btree_read, ms, us);

	sysfs_print(btree_used_percent, bch_btree_used(c));
	sysfs_print(btree_nodes, c->gc_stats.nodes);
	sysfs_hprint(average_key_size, bch_average_key_size(c));

	sysfs_print(cache_read_races,
		    atomic_long_read(&c->cache_read_races));

	sysfs_print(reclaim,
		    atomic_long_read(&c->reclaim));

	sysfs_print(flush_write,
		    atomic_long_read(&c->flush_write));

	sysfs_print(retry_flush_write,
		    atomic_long_read(&c->retry_flush_write));

	sysfs_print(writeback_keys_done,
		    atomic_long_read(&c->writeback_keys_done));
	sysfs_print(writeback_keys_failed,
		    atomic_long_read(&c->writeback_keys_failed));

	if (attr == &sysfs_errors)
		return bch_snprint_string_list(buf, PAGE_SIZE, error_actions,
					       c->on_error);

	/* See count_io_errors for why 88 */
	sysfs_print(io_error_halflife, c->error_decay * 88);
	sysfs_print(io_error_limit, c->error_limit);

	sysfs_hprint(congested,
		     ((uint64_t) bch_get_congested(c)) << 9);
	sysfs_print(congested_read_threshold_us,
		    c->congested_read_threshold_us);
	sysfs_print(congested_write_threshold_us,
		    c->congested_write_threshold_us);

	sysfs_print(active_journal_entries, fifo_used(&c->journal.pin));
	sysfs_printf(verify, "%i", c->verify);
	sysfs_printf(key_merging_disabled, "%i", c->key_merging_disabled);
	sysfs_printf(expensive_debug_checks,
		     "%i", c->expensive_debug_checks);
	sysfs_printf(gc_always_rewrite, "%i", c->gc_always_rewrite);
	sysfs_printf(btree_shrinker_disabled, "%i", c->shrinker_disabled);
	sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled);
	sysfs_printf(io_disable, "%i",
		     test_bit(CACHE_SET_IO_DISABLE, &c->flags));

	if (attr == &sysfs_bset_tree_stats)
		return bch_bset_print_stats(c, buf);

	return 0;
}
SHOW_LOCKED(bch_cache_set)
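
/*
 * Cache-set-wide writes; for example (set UUID illustrative):
 *
 *	echo 1 > /sys/fs/bcache/<set-uuid>/internal/trigger_gc
 *	echo 1 > /sys/fs/bcache/<set-uuid>/stop
 */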

STORE(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);
	ssize_t v;

	if (attr == &sysfs_unregister)
		bch_cache_set_unregister(c);

	if (attr == &sysfs_stop)
		bch_cache_set_stop(c);

	if (attr == &sysfs_synchronous) {
		bool sync = strtoul_or_return(buf);

		if (sync != CACHE_SYNC(&c->sb)) {
			SET_CACHE_SYNC(&c->sb, sync);
			bcache_write_super(c);
		}
	}

	if (attr == &sysfs_flash_vol_create) {
		int r;
		uint64_t v;

		strtoi_h_or_return(buf, v);

		r = bch_flash_dev_create(c, v);
		if (r)
			return r;
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&c->writeback_keys_done, 0);
		atomic_long_set(&c->writeback_keys_failed, 0);

		memset(&c->gc_stats, 0, sizeof(struct gc_stat));
		bch_cache_accounting_clear(&c->accounting);
	}

	if (attr == &sysfs_trigger_gc) {
		/*
		 * The garbage collection thread only runs while
		 * sectors_to_gc < 0; when users write to the trigger_gc
		 * sysfs entry, most of the time they want to forcibly
		 * trigger garbage collection. Setting c->sectors_to_gc
		 * to -1 here gives gc_should_run() a chance to let the
		 * gc thread run. "A chance" because before
		 * gc_should_run() is reached, c->sectors_to_gc may
		 * still be set to another positive value, so writing to
		 * trigger_gc does not guarantee that the gc thread
		 * actually runs.
		 */
		atomic_set(&c->sectors_to_gc, -1);
		wake_up_gc(c);
	}

	if (attr == &sysfs_prune_cache) {
		struct shrink_control sc;

		sc.gfp_mask = GFP_KERNEL;
		sc.nr_to_scan = strtoul_or_return(buf);
		c->shrink.scan_objects(&c->shrink, &sc);
	}

	sysfs_strtoul(congested_read_threshold_us,
		      c->congested_read_threshold_us);
	sysfs_strtoul(congested_write_threshold_us,
		      c->congested_write_threshold_us);

	if (attr == &sysfs_errors) {
		v = __sysfs_match_string(error_actions, -1, buf);
		if (v < 0)
			return v;

		c->on_error = v;
	}

	if (attr == &sysfs_io_error_limit)
		c->error_limit = strtoul_or_return(buf);

	/* See count_io_errors() for why 88 */
	if (attr == &sysfs_io_error_halflife)
		c->error_decay = strtoul_or_return(buf) / 88;

	if (attr == &sysfs_io_disable) {
		v = strtoul_or_return(buf);
		if (v) {
			if (test_and_set_bit(CACHE_SET_IO_DISABLE,
					     &c->flags))
				pr_warn("CACHE_SET_IO_DISABLE already set");
		} else {
			if (!test_and_clear_bit(CACHE_SET_IO_DISABLE,
						&c->flags))
				pr_warn("CACHE_SET_IO_DISABLE already cleared");
		}
	}

	sysfs_strtoul(journal_delay_ms, c->journal_delay_ms);
	sysfs_strtoul(verify, c->verify);
	sysfs_strtoul(key_merging_disabled, c->key_merging_disabled);
	sysfs_strtoul(expensive_debug_checks, c->expensive_debug_checks);
	sysfs_strtoul(gc_always_rewrite, c->gc_always_rewrite);
	sysfs_strtoul(btree_shrinker_disabled, c->shrinker_disabled);
	sysfs_strtoul(copy_gc_enabled, c->copy_gc_enabled);

	return size;
}
STORE_LOCKED(bch_cache_set)

SHOW(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);

	return bch_cache_set_show(&c->kobj, attr, buf);
}

STORE(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);

	return bch_cache_set_store(&c->kobj, attr, buf, size);
}

static void bch_cache_set_internal_release(struct kobject *k)
{
	/* Nothing to do; the internal kobject is embedded in cache_set */
}

static struct attribute *bch_cache_set_files[] = {
	&sysfs_unregister,
	&sysfs_stop,
	&sysfs_synchronous,
	&sysfs_journal_delay_ms,
	&sysfs_flash_vol_create,

	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_tree_depth,
	&sysfs_root_usage_percent,
	&sysfs_btree_cache_size,
	&sysfs_cache_available_percent,

	&sysfs_average_key_size,

	&sysfs_errors,
	&sysfs_io_error_limit,
	&sysfs_io_error_halflife,
	&sysfs_congested,
	&sysfs_congested_read_threshold_us,
	&sysfs_congested_write_threshold_us,
	&sysfs_clear_stats,
	NULL
};
KTYPE(bch_cache_set);

static struct attribute *bch_cache_set_internal_files[] = {
	&sysfs_active_journal_entries,

	sysfs_time_stats_attribute_list(btree_gc, sec, ms)
	sysfs_time_stats_attribute_list(btree_split, sec, us)
	sysfs_time_stats_attribute_list(btree_sort, ms, us)
	sysfs_time_stats_attribute_list(btree_read, ms, us)

	&sysfs_btree_nodes,
	&sysfs_btree_used_percent,
	&sysfs_btree_cache_max_chain,

	&sysfs_bset_tree_stats,
	&sysfs_cache_read_races,
	&sysfs_reclaim,
	&sysfs_flush_write,
	&sysfs_retry_flush_write,
	&sysfs_writeback_keys_done,
	&sysfs_writeback_keys_failed,

	&sysfs_trigger_gc,
	&sysfs_prune_cache,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_key_merging_disabled,
	&sysfs_expensive_debug_checks,
#endif
	&sysfs_gc_always_rewrite,
	&sysfs_btree_shrinker_disabled,
	&sysfs_copy_gc_enabled,
	&sysfs_io_disable,
	NULL
};
KTYPE(bch_cache_set_internal);
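
/*
 * Per cache device statistics. priority_stats in particular walks every
 * bucket, so reading it is relatively expensive; e.g. (set UUID
 * illustrative):
 *
 *	cat /sys/fs/bcache/<set-uuid>/cache0/priority_stats
 */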

/* Sort in descending order */
static int __bch_cache_cmp(const void *l, const void *r)
{
	return *((uint16_t *)r) - *((uint16_t *)l);
}

SHOW(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);

	sysfs_hprint(bucket_size, bucket_bytes(ca));
	sysfs_hprint(block_size, block_bytes(ca));
	sysfs_print(nbuckets, ca->sb.nbuckets);
	sysfs_print(discard, ca->discard);
	sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9);
	sysfs_hprint(btree_written,
		     atomic_long_read(&ca->btree_sectors_written) << 9);
	sysfs_hprint(metadata_written,
		     (atomic_long_read(&ca->meta_sectors_written) +
		      atomic_long_read(&ca->btree_sectors_written)) << 9);

	sysfs_print(io_errors,
		    atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);

	if (attr == &sysfs_cache_replacement_policy)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       cache_replacement_policies,
					       CACHE_REPLACEMENT(&ca->sb));

	if (attr == &sysfs_priority_stats) {
		struct bucket *b;
		size_t n = ca->sb.nbuckets, i;
		size_t unused = 0, available = 0, dirty = 0, meta = 0;
		uint64_t sum = 0;
		/* Compute 31 quantiles */
		uint16_t q[31], *p, *cached;
		ssize_t ret;

		cached = p = vmalloc(array_size(sizeof(uint16_t),
						ca->sb.nbuckets));
		if (!p)
			return -ENOMEM;

		mutex_lock(&ca->set->bucket_lock);
		for_each_bucket(b, ca) {
			if (!GC_SECTORS_USED(b))
				unused++;
			if (GC_MARK(b) == GC_MARK_RECLAIMABLE)
				available++;
			if (GC_MARK(b) == GC_MARK_DIRTY)
				dirty++;
			if (GC_MARK(b) == GC_MARK_METADATA)
				meta++;
		}

		for (i = ca->sb.first_bucket; i < n; i++)
			p[i] = ca->buckets[i].prio;
		mutex_unlock(&ca->set->bucket_lock);

		sort(p, n, sizeof(uint16_t), __bch_cache_cmp, NULL);

		/* Drop zero-priority (unused) buckets from the tail */
		while (n &&
		       !cached[n - 1])
			--n;

		unused = ca->sb.nbuckets - n;

		/* Skip buckets holding btree nodes at the front */
		while (cached < p + n &&
		       *cached == BTREE_PRIO)
			cached++, n--;

		for (i = 0; i < n; i++)
			sum += INITIAL_PRIO - cached[i];

		if (n)
			do_div(sum, n);

		for (i = 0; i < ARRAY_SIZE(q); i++)
			q[i] = INITIAL_PRIO - cached[n * (i + 1) /
				(ARRAY_SIZE(q) + 1)];

		vfree(p);

		ret = scnprintf(buf, PAGE_SIZE,
				"Unused: %zu%%\n"
				"Clean: %zu%%\n"
				"Dirty: %zu%%\n"
				"Metadata: %zu%%\n"
				"Average: %llu\n"
				"Sectors per Q: %zu\n"
				"Quantiles: [",
				unused * 100 / (size_t) ca->sb.nbuckets,
				available * 100 / (size_t) ca->sb.nbuckets,
				dirty * 100 / (size_t) ca->sb.nbuckets,
				meta * 100 / (size_t) ca->sb.nbuckets, sum,
				n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));

		for (i = 0; i < ARRAY_SIZE(q); i++)
			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
					 "%u ", q[i]);
		/* Overwrite the trailing space before closing the list */
		ret--;

		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "]\n");

		return ret;
	}

	return 0;
}
SHOW_LOCKED(bch_cache)

STORE(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);
	ssize_t v;

	if (attr == &sysfs_discard) {
		bool v = strtoul_or_return(buf);

		if (blk_queue_discard(bdev_get_queue(ca->bdev)))
			ca->discard = v;

		if (v != CACHE_DISCARD(&ca->sb)) {
			SET_CACHE_DISCARD(&ca->sb, v);
			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_cache_replacement_policy) {
		v = __sysfs_match_string(cache_replacement_policies, -1, buf);
		if (v < 0)
			return v;

		if ((unsigned int) v != CACHE_REPLACEMENT(&ca->sb)) {
			mutex_lock(&ca->set->bucket_lock);
			SET_CACHE_REPLACEMENT(&ca->sb, v);
			mutex_unlock(&ca->set->bucket_lock);

			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&ca->sectors_written, 0);
		atomic_long_set(&ca->btree_sectors_written, 0);
		atomic_long_set(&ca->meta_sectors_written, 0);
		atomic_set(&ca->io_count, 0);
		atomic_set(&ca->io_errors, 0);
	}

	return size;
}
STORE_LOCKED(bch_cache)

static struct attribute *bch_cache_files[] = {
	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_nbuckets,
	&sysfs_priority_stats,
	&sysfs_discard,
	&sysfs_written,
	&sysfs_btree_written,
	&sysfs_metadata_written,
	&sysfs_io_errors,
	&sysfs_clear_stats,
	&sysfs_cache_replacement_policy,
	NULL
};
KTYPE(bch_cache);