/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "sysfs.h"
#include "btree.h"
#include "request.h"
#include "writeback.h"

#include <linux/blkdev.h>
#include <linux/sort.h>

static const char * const cache_replacement_policies[] = {
	"lru",
	"fifo",
	"random",
	NULL
};

static const char * const error_actions[] = {
	"unregister",
	"panic",
	NULL
};

write_attribute(attach);
write_attribute(detach);
write_attribute(unregister);
write_attribute(stop);
write_attribute(clear_stats);
write_attribute(trigger_gc);
write_attribute(prune_cache);
write_attribute(flash_vol_create);

read_attribute(bucket_size);
read_attribute(block_size);
read_attribute(nbuckets);
read_attribute(tree_depth);
read_attribute(root_usage_percent);
read_attribute(priority_stats);
read_attribute(btree_cache_size);
read_attribute(btree_cache_max_chain);
read_attribute(cache_available_percent);
read_attribute(written);
read_attribute(btree_written);
read_attribute(metadata_written);
read_attribute(active_journal_entries);

sysfs_time_stats_attribute(btree_gc, sec, ms);
sysfs_time_stats_attribute(btree_split, sec, us);
sysfs_time_stats_attribute(btree_sort, ms, us);
sysfs_time_stats_attribute(btree_read, ms, us);
sysfs_time_stats_attribute(try_harder, ms, us);

read_attribute(btree_nodes);
read_attribute(btree_used_percent);
read_attribute(average_key_size);
read_attribute(dirty_data);
read_attribute(bset_tree_stats);

read_attribute(state);
read_attribute(cache_read_races);
read_attribute(writeback_keys_done);
read_attribute(writeback_keys_failed);
read_attribute(io_errors);
read_attribute(congested);
rw_attribute(congested_read_threshold_us);
rw_attribute(congested_write_threshold_us);

rw_attribute(sequential_cutoff);
rw_attribute(data_csum);
rw_attribute(cache_mode);
rw_attribute(writeback_metadata);
rw_attribute(writeback_running);
rw_attribute(writeback_percent);
rw_attribute(writeback_delay);
rw_attribute(writeback_rate);

rw_attribute(writeback_rate_update_seconds);
rw_attribute(writeback_rate_d_term);
rw_attribute(writeback_rate_p_term_inverse);
rw_attribute(writeback_rate_d_smooth);
read_attribute(writeback_rate_debug);

read_attribute(stripe_size);
read_attribute(partial_stripes_expensive);

rw_attribute(synchronous);
rw_attribute(journal_delay_ms);
rw_attribute(discard);
rw_attribute(running);
rw_attribute(label);
rw_attribute(readahead);
rw_attribute(errors);
rw_attribute(io_error_limit);
rw_attribute(io_error_halflife);
rw_attribute(verify);
rw_attribute(bypass_torture_test);
rw_attribute(key_merging_disabled);
rw_attribute(gc_always_rewrite);
rw_attribute(expensive_debug_checks);
rw_attribute(freelist_percent);
rw_attribute(cache_replacement_policy);
rw_attribute(btree_shrinker_disabled);
rw_attribute(copy_gc_enabled);
rw_attribute(size);
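/*
 * SHOW()/STORE() come from sysfs.h and expand to the kobject
 * ->show()/->store() callbacks, roughly
 *
 *	static ssize_t bch_cached_dev_show(struct kobject *kobj,
 *					   struct attribute *attr, char *buf)
 *
 * and the sysfs_print()/var_print() helpers each compare attr against
 * the matching sysfs_<name> attribute and return early on a hit, which
 * is why the show/store bodies below read as flat lists of statements.
 */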
SHOW(__bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	const char *states[] = { "no cache", "clean", "dirty", "inconsistent" };

#define var(stat)		(dc->stat)

	if (attr == &sysfs_cache_mode)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_cache_modes + 1,
					       BDEV_CACHE_MODE(&dc->sb));

	sysfs_printf(data_csum,		"%i", dc->disk.data_csum);
	var_printf(verify,		"%i");
	var_printf(bypass_torture_test,	"%i");
	var_printf(writeback_metadata,	"%i");
	var_printf(writeback_running,	"%i");
	var_print(writeback_delay);
	var_print(writeback_percent);
	sysfs_print(writeback_rate,	dc->writeback_rate.rate);

	var_print(writeback_rate_update_seconds);
	var_print(writeback_rate_d_term);
	var_print(writeback_rate_p_term_inverse);
	var_print(writeback_rate_d_smooth);

	if (attr == &sysfs_writeback_rate_debug) {
		char dirty[20];
		char derivative[20];
		char target[20];
		bch_hprint(dirty,
			   bcache_dev_sectors_dirty(&dc->disk) << 9);
		bch_hprint(derivative,	dc->writeback_rate_derivative << 9);
		bch_hprint(target,	dc->writeback_rate_target << 9);

		return sprintf(buf,
			       "rate:\t\t%u\n"
			       "change:\t\t%i\n"
			       "dirty:\t\t%s\n"
			       "derivative:\t%s\n"
			       "target:\t\t%s\n",
			       dc->writeback_rate.rate,
			       dc->writeback_rate_change,
			       dirty, derivative, target);
	}

	sysfs_hprint(dirty_data,
		     bcache_dev_sectors_dirty(&dc->disk) << 9);

	sysfs_hprint(stripe_size,	dc->disk.stripe_size << 9);
	var_printf(partial_stripes_expensive,	"%u");

	var_hprint(sequential_cutoff);
	var_hprint(readahead);

	sysfs_print(running,		atomic_read(&dc->running));
	sysfs_print(state,		states[BDEV_STATE(&dc->sb)]);

	if (attr == &sysfs_label) {
		/* sb.label may not be NUL terminated, terminate after copying */
		memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

#undef var
	return 0;
}
SHOW_LOCKED(bch_cached_dev)
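/*
 * SHOW_LOCKED() generates a bch_cached_dev_show() wrapper that takes
 * bch_register_lock around __bch_cached_dev_show(); the store side is
 * wrapped by the hand-rolled STORE(bch_cached_dev) further down
 * instead, since it needs the device again after the store to kick
 * writeback.
 */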
STORE(__cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	ssize_t v = size;
	struct cache_set *c;
	struct kobj_uevent_env *env;

#define d_strtoul(var)		sysfs_strtoul(var, dc->var)
#define d_strtoi_h(var)		sysfs_hatoi(var, dc->var)

	sysfs_strtoul(data_csum,	dc->disk.data_csum);
	d_strtoul(verify);
	d_strtoul(bypass_torture_test);
	d_strtoul(writeback_metadata);
	d_strtoul(writeback_running);
	d_strtoul(writeback_delay);
	sysfs_strtoul_clamp(writeback_rate,
			    dc->writeback_rate.rate, 1, 1000000);
	sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40);

	d_strtoul(writeback_rate_update_seconds);
	d_strtoul(writeback_rate_d_term);
	sysfs_strtoul_clamp(writeback_rate_p_term_inverse,
			    dc->writeback_rate_p_term_inverse, 1, INT_MAX);
	d_strtoul(writeback_rate_d_smooth);

	d_strtoi_h(sequential_cutoff);
	d_strtoi_h(readahead);

	if (attr == &sysfs_clear_stats)
		bch_cache_accounting_clear(&dc->accounting);

	if (attr == &sysfs_running &&
	    strtoul_or_return(buf))
		bch_cached_dev_run(dc);

	if (attr == &sysfs_cache_mode) {
		ssize_t v = bch_read_string_list(buf, bch_cache_modes + 1);

		if (v < 0)
			return v;

		if ((unsigned) v != BDEV_CACHE_MODE(&dc->sb)) {
			SET_BDEV_CACHE_MODE(&dc->sb, v);
			bch_write_bdev_super(dc, NULL);
		}
	}

	if (attr == &sysfs_label) {
		if (size > SB_LABEL_SIZE)
			return -EINVAL;
		memcpy(dc->sb.label, buf, size);
		if (size < SB_LABEL_SIZE)
			dc->sb.label[size] = '\0';
		if (size && dc->sb.label[size - 1] == '\n')
			dc->sb.label[size - 1] = '\0';
		bch_write_bdev_super(dc, NULL);
		if (dc->disk.c) {
			memcpy(dc->disk.c->uuids[dc->disk.id].label,
			       buf, SB_LABEL_SIZE);
			bch_uuid_write(dc->disk.c);
		}
		env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
		if (!env)
			return -ENOMEM;
		add_uevent_var(env, "DRIVER=bcache");
		add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid);
		add_uevent_var(env, "CACHED_LABEL=%s", buf);
		kobject_uevent_env(
			&disk_to_dev(dc->disk.disk)->kobj, KOBJ_CHANGE, env->envp);
		kfree(env);
	}

	if (attr == &sysfs_attach) {
		if (bch_parse_uuid(buf, dc->sb.set_uuid) < 16)
			return -EINVAL;

		list_for_each_entry(c, &bch_cache_sets, list) {
			v = bch_cached_dev_attach(dc, c);
			if (!v)
				return size;
		}

		pr_err("Can't attach %s: cache set not found", buf);
		size = v;
	}

	if (attr == &sysfs_detach && dc->disk.c)
		bch_cached_dev_detach(dc);

	if (attr == &sysfs_stop)
		bcache_device_stop(&dc->disk);

	return size;
}

STORE(bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);

	mutex_lock(&bch_register_lock);
	size = __cached_dev_store(kobj, attr, buf, size);

	if (attr == &sysfs_writeback_running)
		bch_writeback_queue(dc);

	if (attr == &sysfs_writeback_percent)
		schedule_delayed_work(&dc->writeback_rate_update,
				      dc->writeback_rate_update_seconds * HZ);

	mutex_unlock(&bch_register_lock);
	return size;
}

static struct attribute *bch_cached_dev_files[] = {
	&sysfs_attach,
	&sysfs_detach,
	&sysfs_stop,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_cache_mode,
	&sysfs_writeback_metadata,
	&sysfs_writeback_running,
	&sysfs_writeback_delay,
	&sysfs_writeback_percent,
	&sysfs_writeback_rate,
	&sysfs_writeback_rate_update_seconds,
	&sysfs_writeback_rate_d_term,
	&sysfs_writeback_rate_p_term_inverse,
	&sysfs_writeback_rate_d_smooth,
	&sysfs_writeback_rate_debug,
	&sysfs_dirty_data,
	&sysfs_stripe_size,
	&sysfs_partial_stripes_expensive,
	&sysfs_sequential_cutoff,
	&sysfs_clear_stats,
	&sysfs_running,
	&sysfs_state,
	&sysfs_label,
	&sysfs_readahead,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_bypass_torture_test,
#endif
	NULL
};
KTYPE(bch_cached_dev);
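/*
 * Flash-only volumes: bcache devices provisioned directly out of a
 * cache set, with no backing device, so only a handful of attributes
 * apply.
 */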
SHOW(bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_printf(data_csum,	"%i", d->data_csum);
	sysfs_hprint(size,	u->sectors << 9);

	if (attr == &sysfs_label) {
		/* u->label may not be NUL terminated, terminate after copying */
		memcpy(buf, u->label, SB_LABEL_SIZE);
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

	return 0;
}

STORE(__bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_strtoul(data_csum,	d->data_csum);

	if (attr == &sysfs_size) {
		uint64_t v;
		strtoi_h_or_return(buf, v);

		u->sectors = v >> 9;
		bch_uuid_write(d->c);
		set_capacity(d->disk, u->sectors);
	}

	if (attr == &sysfs_label) {
		memcpy(u->label, buf, SB_LABEL_SIZE);
		bch_uuid_write(d->c);
	}

	if (attr == &sysfs_unregister) {
		set_bit(BCACHE_DEV_DETACHING, &d->flags);
		bcache_device_stop(d);
	}

	return size;
}
STORE_LOCKED(bch_flash_dev)

static struct attribute *bch_flash_dev_files[] = {
	&sysfs_unregister,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_label,
	&sysfs_size,
	NULL
};
KTYPE(bch_flash_dev);
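/*
 * The stat helpers at the top of the show routine below are GCC nested
 * functions, local to __bch_cache_set_show(); they can be read top to
 * bottom as its outline.
 */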
SHOW(__bch_cache_set)
{
	unsigned root_usage(struct cache_set *c)
	{
		unsigned bytes = 0;
		struct bkey *k;
		struct btree *b;
		struct btree_iter iter;

		goto lock_root;

		/*
		 * The root can be replaced while we wait for the lock,
		 * so retry until we've locked the current root.
		 */
		do {
			rw_unlock(false, b);
lock_root:
			b = c->root;
			rw_lock(false, b, b->level);
		} while (b != c->root);

		for_each_key_filter(b, k, &iter, bch_ptr_bad)
			bytes += bkey_bytes(k);

		rw_unlock(false, b);

		return (bytes * 100) / btree_bytes(c);
	}

	size_t cache_size(struct cache_set *c)
	{
		size_t ret = 0;
		struct btree *b;

		mutex_lock(&c->bucket_lock);
		list_for_each_entry(b, &c->btree_cache, list)
			ret += 1 << (b->page_order + PAGE_SHIFT);

		mutex_unlock(&c->bucket_lock);
		return ret;
	}

	unsigned cache_max_chain(struct cache_set *c)
	{
		unsigned ret = 0;
		struct hlist_head *h;

		mutex_lock(&c->bucket_lock);

		for (h = c->bucket_hash;
		     h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
		     h++) {
			unsigned i = 0;
			struct hlist_node *p;

			hlist_for_each(p, h)
				i++;

			ret = max(ret, i);
		}

		mutex_unlock(&c->bucket_lock);
		return ret;
	}

	unsigned btree_used(struct cache_set *c)
	{
		return div64_u64(c->gc_stats.key_bytes * 100,
				 (c->gc_stats.nodes ?: 1) * btree_bytes(c));
	}

	unsigned average_key_size(struct cache_set *c)
	{
		return c->gc_stats.nkeys
			? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
			: 0;
	}

	struct cache_set *c = container_of(kobj, struct cache_set, kobj);

	sysfs_print(synchronous,		CACHE_SYNC(&c->sb));
	sysfs_print(journal_delay_ms,		c->journal_delay_ms);
	sysfs_hprint(bucket_size,		bucket_bytes(c));
	sysfs_hprint(block_size,		block_bytes(c));
	sysfs_print(tree_depth,			c->root->level);
	sysfs_print(root_usage_percent,		root_usage(c));

	sysfs_hprint(btree_cache_size,		cache_size(c));
	sysfs_print(btree_cache_max_chain,	cache_max_chain(c));
	sysfs_print(cache_available_percent,	100 - c->gc_stats.in_use);

	sysfs_print_time_stats(&c->btree_gc_time,	btree_gc, sec, ms);
	sysfs_print_time_stats(&c->btree_split_time,	btree_split, sec, us);
	sysfs_print_time_stats(&c->sort_time,		btree_sort, ms, us);
	sysfs_print_time_stats(&c->btree_read_time,	btree_read, ms, us);
	sysfs_print_time_stats(&c->try_harder_time,	try_harder, ms, us);

	sysfs_print(btree_used_percent,	btree_used(c));
	sysfs_print(btree_nodes,	c->gc_stats.nodes);
	sysfs_hprint(average_key_size,	average_key_size(c));

	sysfs_print(cache_read_races,
		    atomic_long_read(&c->cache_read_races));

	sysfs_print(writeback_keys_done,
		    atomic_long_read(&c->writeback_keys_done));
	sysfs_print(writeback_keys_failed,
		    atomic_long_read(&c->writeback_keys_failed));

	if (attr == &sysfs_errors)
		return bch_snprint_string_list(buf, PAGE_SIZE, error_actions,
					       c->on_error);

	/* See count_io_errors for why 88 */
	sysfs_print(io_error_halflife,	c->error_decay * 88);
	sysfs_print(io_error_limit,	c->error_limit >> IO_ERROR_SHIFT);

	sysfs_hprint(congested,
		     ((uint64_t) bch_get_congested(c)) << 9);
	sysfs_print(congested_read_threshold_us,
		    c->congested_read_threshold_us);
	sysfs_print(congested_write_threshold_us,
		    c->congested_write_threshold_us);

	sysfs_print(active_journal_entries,	fifo_used(&c->journal.pin));
	sysfs_printf(verify,			"%i", c->verify);
	sysfs_printf(key_merging_disabled,	"%i", c->key_merging_disabled);
	sysfs_printf(expensive_debug_checks,
		     "%i", c->expensive_debug_checks);
	sysfs_printf(gc_always_rewrite,		"%i", c->gc_always_rewrite);
	sysfs_printf(btree_shrinker_disabled,	"%i", c->shrinker_disabled);
	sysfs_printf(copy_gc_enabled,		"%i", c->copy_gc_enabled);

	if (attr == &sysfs_bset_tree_stats)
		return bch_bset_print_stats(c, buf);

	return 0;
}
SHOW_LOCKED(bch_cache_set)

STORE(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);

	if (attr == &sysfs_unregister)
		bch_cache_set_unregister(c);

	if (attr == &sysfs_stop)
		bch_cache_set_stop(c);

	if (attr == &sysfs_synchronous) {
		bool sync = strtoul_or_return(buf);

		if (sync != CACHE_SYNC(&c->sb)) {
			SET_CACHE_SYNC(&c->sb, sync);
			bcache_write_super(c);
		}
	}

	if (attr == &sysfs_flash_vol_create) {
		int r;
		uint64_t v;
		strtoi_h_or_return(buf, v);

		r = bch_flash_dev_create(c, v);
		if (r)
			return r;
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&c->writeback_keys_done, 0);
		atomic_long_set(&c->writeback_keys_failed, 0);

		memset(&c->gc_stats, 0, sizeof(struct gc_stat));
		bch_cache_accounting_clear(&c->accounting);
	}

	if (attr == &sysfs_trigger_gc)
		wake_up_gc(c);

	if (attr == &sysfs_prune_cache) {
		struct shrink_control sc;
		sc.gfp_mask = GFP_KERNEL;
		sc.nr_to_scan = strtoul_or_return(buf);
		c->shrink.scan_objects(&c->shrink, &sc);
	}

	sysfs_strtoul(congested_read_threshold_us,
		      c->congested_read_threshold_us);
	sysfs_strtoul(congested_write_threshold_us,
		      c->congested_write_threshold_us);

	if (attr == &sysfs_errors) {
		ssize_t v = bch_read_string_list(buf, error_actions);

		if (v < 0)
			return v;

		c->on_error = v;
	}

	if (attr == &sysfs_io_error_limit)
		c->error_limit = strtoul_or_return(buf) << IO_ERROR_SHIFT;

	/* See count_io_errors() for why 88 */
	if (attr == &sysfs_io_error_halflife)
		c->error_decay = strtoul_or_return(buf) / 88;

	sysfs_strtoul(journal_delay_ms,		c->journal_delay_ms);
	sysfs_strtoul(verify,			c->verify);
	sysfs_strtoul(key_merging_disabled,	c->key_merging_disabled);
	sysfs_strtoul(expensive_debug_checks,	c->expensive_debug_checks);
	sysfs_strtoul(gc_always_rewrite,	c->gc_always_rewrite);
	sysfs_strtoul(btree_shrinker_disabled,	c->shrinker_disabled);
	sysfs_strtoul(copy_gc_enabled,		c->copy_gc_enabled);

	return size;
}
STORE_LOCKED(bch_cache_set)
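/*
 * The "internal" directory is a second kobject embedded in struct
 * cache_set; its show/store translate back to the parent kobject so
 * both directories share one implementation.
 */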
SHOW(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);
	return bch_cache_set_show(&c->kobj, attr, buf);
}

STORE(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);
	return bch_cache_set_store(&c->kobj, attr, buf, size);
}

static void bch_cache_set_internal_release(struct kobject *k)
{
}

static struct attribute *bch_cache_set_files[] = {
	&sysfs_unregister,
	&sysfs_stop,
	&sysfs_synchronous,
	&sysfs_journal_delay_ms,
	&sysfs_flash_vol_create,

	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_tree_depth,
	&sysfs_root_usage_percent,
	&sysfs_btree_cache_size,
	&sysfs_cache_available_percent,

	&sysfs_average_key_size,

	&sysfs_errors,
	&sysfs_io_error_limit,
	&sysfs_io_error_halflife,
	&sysfs_congested,
	&sysfs_congested_read_threshold_us,
	&sysfs_congested_write_threshold_us,
	&sysfs_clear_stats,
	NULL
};
KTYPE(bch_cache_set);

static struct attribute *bch_cache_set_internal_files[] = {
	&sysfs_active_journal_entries,

	sysfs_time_stats_attribute_list(btree_gc, sec, ms)
	sysfs_time_stats_attribute_list(btree_split, sec, us)
	sysfs_time_stats_attribute_list(btree_sort, ms, us)
	sysfs_time_stats_attribute_list(btree_read, ms, us)
	sysfs_time_stats_attribute_list(try_harder, ms, us)

	&sysfs_btree_nodes,
	&sysfs_btree_used_percent,
	&sysfs_btree_cache_max_chain,

	&sysfs_bset_tree_stats,
	&sysfs_cache_read_races,
	&sysfs_writeback_keys_done,
	&sysfs_writeback_keys_failed,

	&sysfs_trigger_gc,
	&sysfs_prune_cache,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_key_merging_disabled,
	&sysfs_expensive_debug_checks,
#endif
	&sysfs_gc_always_rewrite,
	&sysfs_btree_shrinker_disabled,
	&sysfs_copy_gc_enabled,
	NULL
};
KTYPE(bch_cache_set_internal);
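/*
 * Per cache device (struct cache) attributes - each cache device in a
 * set gets its own sysfs directory.
 */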
SHOW(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);

	sysfs_hprint(bucket_size,	bucket_bytes(ca));
	sysfs_hprint(block_size,	block_bytes(ca));
	sysfs_print(nbuckets,		ca->sb.nbuckets);
	sysfs_print(discard,		ca->discard);
	sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9);
	sysfs_hprint(btree_written,
		     atomic_long_read(&ca->btree_sectors_written) << 9);
	sysfs_hprint(metadata_written,
		     (atomic_long_read(&ca->meta_sectors_written) +
		      atomic_long_read(&ca->btree_sectors_written)) << 9);

	sysfs_print(io_errors,
		    atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);

	sysfs_print(freelist_percent, ca->free.size * 100 /
		    ((size_t) ca->sb.nbuckets));

	if (attr == &sysfs_cache_replacement_policy)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       cache_replacement_policies,
					       CACHE_REPLACEMENT(&ca->sb));

	if (attr == &sysfs_priority_stats) {
		int cmp(const void *l, const void *r)
		{	return *((uint16_t *) r) - *((uint16_t *) l); }

		size_t n = ca->sb.nbuckets, i, unused, btree;
		uint64_t sum = 0;
		/* Compute 31 quantiles */
		uint16_t q[31], *p, *cached;
		ssize_t ret;

		/*
		 * vzalloc so the buckets below sb.first_bucket, which the
		 * copy loop never writes, sort as prio 0 (unused) rather
		 * than as uninitialized garbage.
		 */
		cached = p = vzalloc(ca->sb.nbuckets * sizeof(uint16_t));
		if (!p)
			return -ENOMEM;

		mutex_lock(&ca->set->bucket_lock);
		for (i = ca->sb.first_bucket; i < n; i++)
			p[i] = ca->buckets[i].prio;
		mutex_unlock(&ca->set->bucket_lock);

		/* Sort descending by priority */
		sort(p, n, sizeof(uint16_t), cmp, NULL);

		/* Trim the prio 0 (unused) buckets off the end */
		while (n &&
		       !cached[n - 1])
			--n;

		unused = ca->sb.nbuckets - n;

		/* Skip past the btree buckets at the front */
		while (cached < p + n &&
		       *cached == BTREE_PRIO)
			cached++;

		btree = cached - p;
		n -= btree;

		for (i = 0; i < n; i++)
			sum += INITIAL_PRIO - cached[i];

		if (n)
			do_div(sum, n);

		for (i = 0; i < ARRAY_SIZE(q); i++)
			q[i] = INITIAL_PRIO - cached[n * (i + 1) /
				(ARRAY_SIZE(q) + 1)];

		vfree(p);

		ret = scnprintf(buf, PAGE_SIZE,
				"Unused: %zu%%\n"
				"Metadata: %zu%%\n"
				"Average: %llu\n"
				"Sectors per Q: %zu\n"
				"Quantiles: [",
				unused * 100 / (size_t) ca->sb.nbuckets,
				btree * 100 / (size_t) ca->sb.nbuckets, sum,
				n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));

		for (i = 0; i < ARRAY_SIZE(q); i++)
			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
					 "%u ", q[i]);
		/* Back up over the trailing space so "]" overwrites it */
		ret--;

		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "]\n");

		return ret;
	}

	return 0;
}
SHOW_LOCKED(bch_cache)

STORE(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);

	if (attr == &sysfs_discard) {
		bool v = strtoul_or_return(buf);

		if (blk_queue_discard(bdev_get_queue(ca->bdev)))
			ca->discard = v;

		if (v != CACHE_DISCARD(&ca->sb)) {
			SET_CACHE_DISCARD(&ca->sb, v);
			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_cache_replacement_policy) {
		ssize_t v = bch_read_string_list(buf, cache_replacement_policies);

		if (v < 0)
			return v;

		if ((unsigned) v != CACHE_REPLACEMENT(&ca->sb)) {
			mutex_lock(&ca->set->bucket_lock);
			SET_CACHE_REPLACEMENT(&ca->sb, v);
			mutex_unlock(&ca->set->bucket_lock);

			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_freelist_percent) {
		DECLARE_FIFO(long, free);
		long i;
		size_t p = strtoul_or_return(buf);

		p = clamp_t(size_t,
			    ((size_t) ca->sb.nbuckets * p) / 100,
			    roundup_pow_of_two(ca->sb.nbuckets) >> 9,
			    ca->sb.nbuckets / 2);

		if (!init_fifo_exact(&free, p, GFP_KERNEL))
			return -ENOMEM;

		mutex_lock(&ca->set->bucket_lock);

		fifo_move(&free, &ca->free);
		fifo_swap(&free, &ca->free);

		mutex_unlock(&ca->set->bucket_lock);

		while (fifo_pop(&free, i))
			atomic_dec(&ca->buckets[i].pin);

		free_fifo(&free);
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&ca->sectors_written, 0);
		atomic_long_set(&ca->btree_sectors_written, 0);
		atomic_long_set(&ca->meta_sectors_written, 0);
		atomic_set(&ca->io_count, 0);
		atomic_set(&ca->io_errors, 0);
	}

	return size;
}
STORE_LOCKED(bch_cache)

static struct attribute *bch_cache_files[] = {
	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_nbuckets,
	&sysfs_priority_stats,
	&sysfs_discard,
	&sysfs_written,
	&sysfs_btree_written,
	&sysfs_metadata_written,
	&sysfs_io_errors,
	&sysfs_clear_stats,
	&sysfs_freelist_percent,
	&sysfs_cache_replacement_policy,
	NULL
};
KTYPE(bch_cache);