// SPDX-License-Identifier: GPL-2.0
/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "sysfs.h"
#include "btree.h"
#include "request.h"
#include "writeback.h"

#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/sched/clock.h>

/* Default is -1; we skip past it for struct cached_dev's cache mode */
static const char * const bch_cache_modes[] = {
	"writethrough",
	"writeback",
	"writearound",
	"none",
	NULL
};

/* Default is -1; we skip past it for stop_when_cache_set_failed */
static const char * const bch_stop_on_failure_modes[] = {
	"auto",
	"always",
	NULL
};

static const char * const cache_replacement_policies[] = {
	"lru",
	"fifo",
	"random",
	NULL
};

static const char * const error_actions[] = {
	"unregister",
	"panic",
	NULL
};

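/*
 * write_attribute(), read_attribute() and rw_attribute() (defined in
 * sysfs.h) each declare a struct attribute named sysfs_<name> with the
 * corresponding permission bits; the actual show/store logic lives in
 * the SHOW()/STORE() handlers further down.
 */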
write_attribute(attach);
write_attribute(detach);
write_attribute(unregister);
write_attribute(stop);
write_attribute(clear_stats);
write_attribute(trigger_gc);
write_attribute(prune_cache);
write_attribute(flash_vol_create);

read_attribute(bucket_size);
read_attribute(block_size);
read_attribute(nbuckets);
read_attribute(tree_depth);
read_attribute(root_usage_percent);
read_attribute(priority_stats);
read_attribute(btree_cache_size);
read_attribute(btree_cache_max_chain);
read_attribute(cache_available_percent);
read_attribute(written);
read_attribute(btree_written);
read_attribute(metadata_written);
read_attribute(active_journal_entries);

sysfs_time_stats_attribute(btree_gc,	sec, ms);
sysfs_time_stats_attribute(btree_split, sec, us);
sysfs_time_stats_attribute(btree_sort,	ms,  us);
sysfs_time_stats_attribute(btree_read,	ms,  us);

read_attribute(btree_nodes);
read_attribute(btree_used_percent);
read_attribute(average_key_size);
read_attribute(dirty_data);
read_attribute(bset_tree_stats);

read_attribute(state);
read_attribute(cache_read_races);
read_attribute(reclaim);
read_attribute(flush_write);
read_attribute(retry_flush_write);
read_attribute(writeback_keys_done);
read_attribute(writeback_keys_failed);
read_attribute(io_errors);
read_attribute(congested);
rw_attribute(congested_read_threshold_us);
rw_attribute(congested_write_threshold_us);

rw_attribute(sequential_cutoff);
rw_attribute(data_csum);
rw_attribute(cache_mode);
rw_attribute(stop_when_cache_set_failed);
rw_attribute(writeback_metadata);
rw_attribute(writeback_running);
rw_attribute(writeback_percent);
rw_attribute(writeback_delay);
rw_attribute(writeback_rate);

rw_attribute(writeback_rate_update_seconds);
rw_attribute(writeback_rate_i_term_inverse);
rw_attribute(writeback_rate_p_term_inverse);
rw_attribute(writeback_rate_minimum);
read_attribute(writeback_rate_debug);

read_attribute(stripe_size);
read_attribute(partial_stripes_expensive);

rw_attribute(synchronous);
rw_attribute(journal_delay_ms);
rw_attribute(io_disable);
rw_attribute(discard);
rw_attribute(running);
rw_attribute(label);
rw_attribute(readahead);
rw_attribute(errors);
rw_attribute(io_error_limit);
rw_attribute(io_error_halflife);
rw_attribute(verify);
rw_attribute(bypass_torture_test);
rw_attribute(key_merging_disabled);
rw_attribute(gc_always_rewrite);
rw_attribute(expensive_debug_checks);
rw_attribute(cache_replacement_policy);
rw_attribute(btree_shrinker_disabled);
rw_attribute(copy_gc_enabled);
rw_attribute(size);

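/*
 * Print a NULL-terminated list of strings to buf, marking the currently
 * selected entry with brackets, e.g. "writethrough [writeback] ...".
 * Returns the number of bytes written; the trailing space of the last
 * entry is replaced with a newline.
 */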
static ssize_t bch_snprint_string_list(char *buf,
				       size_t size,
				       const char * const list[],
				       size_t selected)
{
	char *out = buf;
	size_t i;

	/* scnprintf() returns bytes actually written, so out can't run past buf */
	for (i = 0; list[i]; i++)
		out += scnprintf(out, buf + size - out,
				 i == selected ? "[%s] " : "%s ", list[i]);

	out[-1] = '\n';
	return out - buf;
}

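/*
 * show() handler for a cached (backing) device, typically exposed under
 * /sys/block/<bdev>/bcache/. SHOW() (defined in sysfs.h) expands to the
 * function header; the var() macro below shortens access to struct
 * cached_dev members for the var_print*() helpers.
 */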
SHOW(__bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	char const *states[] = { "no cache", "clean", "dirty", "inconsistent" };
	int wb = dc->writeback_running;

#define var(stat)		(dc->stat)

	if (attr == &sysfs_cache_mode)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_cache_modes,
					       BDEV_CACHE_MODE(&dc->sb));

	if (attr == &sysfs_stop_when_cache_set_failed)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_stop_on_failure_modes,
					       dc->stop_when_cache_set_failed);

	sysfs_printf(data_csum,		"%i", dc->disk.data_csum);
	var_printf(verify,		"%i");
	var_printf(bypass_torture_test,	"%i");
	var_printf(writeback_metadata,	"%i");
	var_printf(writeback_running,	"%i");
	var_print(writeback_delay);
	var_print(writeback_percent);
	sysfs_hprint(writeback_rate,
		     wb ? atomic_long_read(&dc->writeback_rate.rate) << 9 : 0);
	sysfs_hprint(io_errors,		atomic_read(&dc->io_errors));
	sysfs_printf(io_error_limit,	"%i", dc->error_limit);
	sysfs_printf(io_disable,	"%i", dc->io_disable);
	var_print(writeback_rate_update_seconds);
	var_print(writeback_rate_i_term_inverse);
	var_print(writeback_rate_p_term_inverse);
	var_print(writeback_rate_minimum);

	if (attr == &sysfs_writeback_rate_debug) {
		char rate[20];
		char dirty[20];
		char target[20];
		char proportional[20];
		char integral[20];
		char change[20];
		s64 next_io;

		/*
		 * Except for dirty and target, other values should
		 * be 0 if writeback is not running.
		 */
		bch_hprint(rate,
			   wb ? atomic_long_read(&dc->writeback_rate.rate) << 9
			      : 0);
		bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9);
		bch_hprint(target, dc->writeback_rate_target << 9);
		bch_hprint(proportional,
			   wb ? dc->writeback_rate_proportional << 9 : 0);
		bch_hprint(integral,
			   wb ? dc->writeback_rate_integral_scaled << 9 : 0);
		bch_hprint(change, wb ? dc->writeback_rate_change << 9 : 0);
		next_io = wb ? div64_s64(dc->writeback_rate.next - local_clock(),
					 NSEC_PER_MSEC) : 0;

		return sprintf(buf,
			       "rate:\t\t%s/sec\n"
			       "dirty:\t\t%s\n"
			       "target:\t\t%s\n"
			       "proportional:\t%s\n"
			       "integral:\t%s\n"
			       "change:\t\t%s/sec\n"
			       "next io:\t%llims\n",
			       rate, dirty, target, proportional,
			       integral, change, next_io);
	}

	sysfs_hprint(dirty_data,
		     bcache_dev_sectors_dirty(&dc->disk) << 9);

	sysfs_hprint(stripe_size,	 ((uint64_t)dc->disk.stripe_size) << 9);
	var_printf(partial_stripes_expensive,	"%u");

	var_hprint(sequential_cutoff);
	var_hprint(readahead);

	sysfs_print(running,		atomic_read(&dc->running));
	sysfs_print(state,		states[BDEV_STATE(&dc->sb)]);

	if (attr == &sysfs_label) {
		memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

#undef var
	return 0;
}
SHOW_LOCKED(bch_cached_dev)

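/*
 * store() handler for a cached device's sysfs attributes. Writes arrive
 * as text, for example (paths are illustrative):
 *
 *	echo writeback > /sys/block/sdb/bcache/cache_mode
 *	echo <cache-set-uuid> > /sys/block/sdb/bcache/attach
 *
 * Most numeric attributes are parsed by the sysfs_strtoul*() helper
 * macros, which return from this function when the attribute matches.
 */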
STORE(__cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	ssize_t v;
	struct cache_set *c;
	struct kobj_uevent_env *env;

#define d_strtoul(var)		sysfs_strtoul(var, dc->var)
#define d_strtoul_nonzero(var)	sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX)
#define d_strtoi_h(var)		sysfs_hatoi(var, dc->var)

	sysfs_strtoul(data_csum,	dc->disk.data_csum);
	d_strtoul(verify);
	d_strtoul(bypass_torture_test);
	d_strtoul(writeback_metadata);
	d_strtoul(writeback_running);
	d_strtoul(writeback_delay);

	sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40);

	if (attr == &sysfs_writeback_rate) {
		ssize_t ret;
		long int v = atomic_long_read(&dc->writeback_rate.rate);

		ret = strtoul_safe_clamp(buf, v, 1, INT_MAX);

		if (!ret) {
			atomic_long_set(&dc->writeback_rate.rate, v);
			ret = size;
		}

		return ret;
	}

	sysfs_strtoul_clamp(writeback_rate_update_seconds,
			    dc->writeback_rate_update_seconds,
			    1, WRITEBACK_RATE_UPDATE_SECS_MAX);
	d_strtoul(writeback_rate_i_term_inverse);
	d_strtoul_nonzero(writeback_rate_p_term_inverse);

	sysfs_strtoul_clamp(io_error_limit, dc->error_limit, 0, INT_MAX);

	if (attr == &sysfs_io_disable) {
		int v = strtoul_or_return(buf);

		dc->io_disable = v ? 1 : 0;
	}

	d_strtoi_h(sequential_cutoff);
	d_strtoi_h(readahead);

	if (attr == &sysfs_clear_stats)
		bch_cache_accounting_clear(&dc->accounting);

	if (attr == &sysfs_running &&
	    strtoul_or_return(buf))
		bch_cached_dev_run(dc);

	if (attr == &sysfs_cache_mode) {
		v = __sysfs_match_string(bch_cache_modes, -1, buf);
		if (v < 0)
			return v;

		if ((unsigned int) v != BDEV_CACHE_MODE(&dc->sb)) {
			SET_BDEV_CACHE_MODE(&dc->sb, v);
			bch_write_bdev_super(dc, NULL);
		}
	}

	if (attr == &sysfs_stop_when_cache_set_failed) {
		v = __sysfs_match_string(bch_stop_on_failure_modes, -1, buf);
		if (v < 0)
			return v;

		dc->stop_when_cache_set_failed = v;
	}

	if (attr == &sysfs_label) {
		if (size > SB_LABEL_SIZE)
			return -EINVAL;
		memcpy(dc->sb.label, buf, size);
		if (size < SB_LABEL_SIZE)
			dc->sb.label[size] = '\0';
		if (size && dc->sb.label[size - 1] == '\n')
			dc->sb.label[size - 1] = '\0';
		bch_write_bdev_super(dc, NULL);
		if (dc->disk.c) {
			/* copy the sanitized label; buf may be shorter than SB_LABEL_SIZE */
			memcpy(dc->disk.c->uuids[dc->disk.id].label,
			       dc->sb.label, SB_LABEL_SIZE);
			bch_uuid_write(dc->disk.c);
		}
		env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
		if (!env)
			return -ENOMEM;
		add_uevent_var(env, "DRIVER=bcache");
		add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid);
		add_uevent_var(env, "CACHED_LABEL=%s", buf);
		kobject_uevent_env(&disk_to_dev(dc->disk.disk)->kobj,
				   KOBJ_CHANGE,
				   env->envp);
		kfree(env);
	}

	if (attr == &sysfs_attach) {
		uint8_t		set_uuid[16];

		if (bch_parse_uuid(buf, set_uuid) < 16)
			return -EINVAL;

		v = -ENOENT;
		list_for_each_entry(c, &bch_cache_sets, list) {
			v = bch_cached_dev_attach(dc, c, set_uuid);
			if (!v)
				return size;
		}
		if (v == -ENOENT)
			pr_err("Can't attach %s: cache set not found", buf);
		return v;
	}

	if (attr == &sysfs_detach && dc->disk.c)
		bch_cached_dev_detach(dc);

	if (attr == &sysfs_stop)
		bcache_device_stop(&dc->disk);

	return size;
}

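/*
 * Locked wrapper around __cached_dev_store(): takes bch_register_lock,
 * then kicks the writeback thread or reschedules the rate update work
 * if the corresponding attributes were written.
 */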
STORE(bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);

	mutex_lock(&bch_register_lock);
	size = __cached_dev_store(kobj, attr, buf, size);

	if (attr == &sysfs_writeback_running)
		bch_writeback_queue(dc);

	if (attr == &sysfs_writeback_percent)
		if (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
			schedule_delayed_work(&dc->writeback_rate_update,
				      dc->writeback_rate_update_seconds * HZ);

	mutex_unlock(&bch_register_lock);
	return size;
}

static struct attribute *bch_cached_dev_files[] = {
	&sysfs_attach,
	&sysfs_detach,
	&sysfs_stop,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_cache_mode,
	&sysfs_stop_when_cache_set_failed,
	&sysfs_writeback_metadata,
	&sysfs_writeback_running,
	&sysfs_writeback_delay,
	&sysfs_writeback_percent,
	&sysfs_writeback_rate,
	&sysfs_writeback_rate_update_seconds,
	&sysfs_writeback_rate_i_term_inverse,
	&sysfs_writeback_rate_p_term_inverse,
	&sysfs_writeback_rate_debug,
	&sysfs_errors,
	&sysfs_io_error_limit,
	&sysfs_io_disable,
	&sysfs_dirty_data,
	&sysfs_stripe_size,
	&sysfs_partial_stripes_expensive,
	&sysfs_sequential_cutoff,
	&sysfs_clear_stats,
	&sysfs_running,
	&sysfs_state,
	&sysfs_label,
	&sysfs_readahead,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_bypass_torture_test,
#endif
	NULL
};
KTYPE(bch_cached_dev);

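/* show() handler for a flash-only volume (a bcache device with no backing device) */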
SHOW(bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_printf(data_csum,	"%i", d->data_csum);
	sysfs_hprint(size,	u->sectors << 9);

	if (attr == &sysfs_label) {
		memcpy(buf, u->label, SB_LABEL_SIZE);
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

	return 0;
}

STORE(__bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_strtoul(data_csum,	d->data_csum);

	if (attr == &sysfs_size) {
		uint64_t v;

		strtoi_h_or_return(buf, v);

		u->sectors = v >> 9;
		bch_uuid_write(d->c);
		set_capacity(d->disk, u->sectors);
	}

	if (attr == &sysfs_label) {
		memcpy(u->label, buf, SB_LABEL_SIZE);
		bch_uuid_write(d->c);
	}

	if (attr == &sysfs_unregister) {
		set_bit(BCACHE_DEV_DETACHING, &d->flags);
		bcache_device_stop(d);
	}

	return size;
}
STORE_LOCKED(bch_flash_dev)

static struct attribute *bch_flash_dev_files[] = {
	&sysfs_unregister,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_label,
	&sysfs_size,
	NULL
};
KTYPE(bch_flash_dev);

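/*
 * bset_tree_stats is gathered by walking every btree node with
 * bch_btree_map_nodes(); bch_btree_bset_stats() is the per-node
 * callback, accumulating into struct bset_stats_op.
 */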
struct bset_stats_op {
	struct btree_op op;
	size_t nodes;
	struct bset_stats stats;
};

static int bch_btree_bset_stats(struct btree_op *b_op, struct btree *b)
{
	struct bset_stats_op *op = container_of(b_op, struct bset_stats_op, op);

	op->nodes++;
	bch_btree_keys_stats(&b->keys, &op->stats);

	return MAP_CONTINUE;
}

static int bch_bset_print_stats(struct cache_set *c, char *buf)
{
	struct bset_stats_op op;
	int ret;

	memset(&op, 0, sizeof(op));
	bch_btree_op_init(&op.op, -1);

	ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, bch_btree_bset_stats);
	if (ret < 0)
		return ret;

	return snprintf(buf, PAGE_SIZE,
			"btree nodes:		%zu\n"
			"written sets:		%zu\n"
			"unwritten sets:		%zu\n"
			"written key bytes:	%zu\n"
			"unwritten key bytes:	%zu\n"
			"floats:			%zu\n"
			"failed:			%zu\n",
			op.nodes,
			op.stats.sets_written, op.stats.sets_unwritten,
			op.stats.bytes_written, op.stats.bytes_unwritten,
			op.stats.floats, op.stats.failed);
}

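/*
 * Returns how full the root btree node is, as a percentage. The root
 * can be replaced while we sleep on its lock, hence the retry loop:
 * take the read lock, then check that c->root still points at the node
 * we locked.
 */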
static unsigned int bch_root_usage(struct cache_set *c)
{
	unsigned int bytes = 0;
	struct bkey *k;
	struct btree *b;
	struct btree_iter iter;

	goto lock_root;

	do {
		rw_unlock(false, b);
lock_root:
		b = c->root;
		rw_lock(false, b, b->level);
	} while (b != c->root);

	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
		bytes += bkey_bytes(k);

	rw_unlock(false, b);

	return (bytes * 100) / btree_bytes(c);
}

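/* Total bytes of memory currently used by the in-memory btree node cache */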
static size_t bch_cache_size(struct cache_set *c)
{
	size_t ret = 0;
	struct btree *b;

	mutex_lock(&c->bucket_lock);
	list_for_each_entry(b, &c->btree_cache, list)
		ret += 1 << (b->keys.page_order + PAGE_SHIFT);

	mutex_unlock(&c->bucket_lock);
	return ret;
}

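/*
 * Length of the longest chain in the btree node hash table - a rough
 * measure of how evenly the hash is distributing the cached nodes.
 */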
static unsigned int bch_cache_max_chain(struct cache_set *c)
{
	unsigned int ret = 0;
	struct hlist_head *h;

	mutex_lock(&c->bucket_lock);

	for (h = c->bucket_hash;
	     h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
	     h++) {
		unsigned int i = 0;
		struct hlist_node *p;

		hlist_for_each(p, h)
			i++;

		ret = max(ret, i);
	}

	mutex_unlock(&c->bucket_lock);
	return ret;
}

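/* Percentage of btree bytes holding live keys, per the last gc's stats */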
static unsigned int bch_btree_used(struct cache_set *c)
{
	return div64_u64(c->gc_stats.key_bytes * 100,
			 (c->gc_stats.nodes ?: 1) * btree_bytes(c));
}

static unsigned int bch_average_key_size(struct cache_set *c)
{
	return c->gc_stats.nkeys
		? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
		: 0;
}

SHOW(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);

	sysfs_print(synchronous,		CACHE_SYNC(&c->sb));
	sysfs_print(journal_delay_ms,		c->journal_delay_ms);
	sysfs_hprint(bucket_size,		bucket_bytes(c));
	sysfs_hprint(block_size,		block_bytes(c));
	sysfs_print(tree_depth,			c->root->level);
	sysfs_print(root_usage_percent,		bch_root_usage(c));

	sysfs_hprint(btree_cache_size,		bch_cache_size(c));
	sysfs_print(btree_cache_max_chain,	bch_cache_max_chain(c));
	sysfs_print(cache_available_percent,	100 - c->gc_stats.in_use);

	sysfs_print_time_stats(&c->btree_gc_time,	btree_gc, sec, ms);
	sysfs_print_time_stats(&c->btree_split_time,	btree_split, sec, us);
	sysfs_print_time_stats(&c->sort.time,		btree_sort, ms, us);
	sysfs_print_time_stats(&c->btree_read_time,	btree_read, ms, us);

	sysfs_print(btree_used_percent,	bch_btree_used(c));
	sysfs_print(btree_nodes,	c->gc_stats.nodes);
	sysfs_hprint(average_key_size,	bch_average_key_size(c));

	sysfs_print(cache_read_races,
		    atomic_long_read(&c->cache_read_races));

	sysfs_print(reclaim,
		    atomic_long_read(&c->reclaim));

	sysfs_print(flush_write,
		    atomic_long_read(&c->flush_write));

	sysfs_print(retry_flush_write,
		    atomic_long_read(&c->retry_flush_write));

	sysfs_print(writeback_keys_done,
		    atomic_long_read(&c->writeback_keys_done));
	sysfs_print(writeback_keys_failed,
		    atomic_long_read(&c->writeback_keys_failed));

	if (attr == &sysfs_errors)
		return bch_snprint_string_list(buf, PAGE_SIZE, error_actions,
					       c->on_error);

	/* See count_io_errors() for why 88 */
	sysfs_print(io_error_halflife,	c->error_decay * 88);
	sysfs_print(io_error_limit,	c->error_limit);

	sysfs_hprint(congested,
		     ((uint64_t) bch_get_congested(c)) << 9);
	sysfs_print(congested_read_threshold_us,
		    c->congested_read_threshold_us);
	sysfs_print(congested_write_threshold_us,
		    c->congested_write_threshold_us);

	sysfs_print(active_journal_entries,	fifo_used(&c->journal.pin));
	sysfs_printf(verify,			"%i", c->verify);
	sysfs_printf(key_merging_disabled,	"%i", c->key_merging_disabled);
	sysfs_printf(expensive_debug_checks,
		     "%i", c->expensive_debug_checks);
	sysfs_printf(gc_always_rewrite,		"%i", c->gc_always_rewrite);
	sysfs_printf(btree_shrinker_disabled,	"%i", c->shrinker_disabled);
	sysfs_printf(copy_gc_enabled,		"%i", c->copy_gc_enabled);
	sysfs_printf(io_disable,		"%i",
		     test_bit(CACHE_SET_IO_DISABLE, &c->flags));

	if (attr == &sysfs_bset_tree_stats)
		return bch_bset_print_stats(c, buf);

	return 0;
}
SHOW_LOCKED(bch_cache_set)

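/*
 * store() handler for a cache set, typically exposed under
 * /sys/fs/bcache/<set-uuid>/. Both the main and the "internal" kobject
 * funnel into this handler; for example (path illustrative):
 *
 *	echo 1 > /sys/fs/bcache/<set-uuid>/internal/trigger_gc
 */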
STORE(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);
	ssize_t v;

	if (attr == &sysfs_unregister)
		bch_cache_set_unregister(c);

	if (attr == &sysfs_stop)
		bch_cache_set_stop(c);

	if (attr == &sysfs_synchronous) {
		bool sync = strtoul_or_return(buf);

		if (sync != CACHE_SYNC(&c->sb)) {
			SET_CACHE_SYNC(&c->sb, sync);
			bcache_write_super(c);
		}
	}

	if (attr == &sysfs_flash_vol_create) {
		int r;
		uint64_t v;

		strtoi_h_or_return(buf, v);

		r = bch_flash_dev_create(c, v);
		if (r)
			return r;
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&c->writeback_keys_done,	0);
		atomic_long_set(&c->writeback_keys_failed,	0);

		memset(&c->gc_stats, 0, sizeof(struct gc_stat));
		bch_cache_accounting_clear(&c->accounting);
	}

	if (attr == &sysfs_trigger_gc) {
		/*
		 * The garbage collection thread only runs when
		 * sectors_to_gc < 0, so when users write to the sysfs entry
		 * trigger_gc they usually want to force garbage collection
		 * to run. Setting c->sectors_to_gc to -1 here gives
		 * gc_should_run() a chance to let the gc thread run -
		 * only "a chance" because, before gc_should_run() is
		 * reached, c->sectors_to_gc may still be set to another
		 * positive value, so writing to trigger_gc doesn't
		 * guarantee that the gc thread actually runs.
		 */
		atomic_set(&c->sectors_to_gc, -1);
		wake_up_gc(c);
	}

	if (attr == &sysfs_prune_cache) {
		struct shrink_control sc;

		sc.gfp_mask = GFP_KERNEL;
		sc.nr_to_scan = strtoul_or_return(buf);
		c->shrink.scan_objects(&c->shrink, &sc);
	}

	sysfs_strtoul(congested_read_threshold_us,
		      c->congested_read_threshold_us);
	sysfs_strtoul(congested_write_threshold_us,
		      c->congested_write_threshold_us);

	if (attr == &sysfs_errors) {
		v = __sysfs_match_string(error_actions, -1, buf);
		if (v < 0)
			return v;

		c->on_error = v;
	}

	if (attr == &sysfs_io_error_limit)
		c->error_limit = strtoul_or_return(buf);

	/* See count_io_errors() for why 88 */
	if (attr == &sysfs_io_error_halflife)
		c->error_decay = strtoul_or_return(buf) / 88;

	if (attr == &sysfs_io_disable) {
		v = strtoul_or_return(buf);
		if (v) {
			if (test_and_set_bit(CACHE_SET_IO_DISABLE,
					     &c->flags))
				pr_warn("CACHE_SET_IO_DISABLE already set");
		} else {
			if (!test_and_clear_bit(CACHE_SET_IO_DISABLE,
						&c->flags))
				pr_warn("CACHE_SET_IO_DISABLE already cleared");
		}
	}

	sysfs_strtoul(journal_delay_ms,		c->journal_delay_ms);
	sysfs_strtoul(verify,			c->verify);
	sysfs_strtoul(key_merging_disabled,	c->key_merging_disabled);
	sysfs_strtoul(expensive_debug_checks,	c->expensive_debug_checks);
	sysfs_strtoul(gc_always_rewrite,	c->gc_always_rewrite);
	sysfs_strtoul(btree_shrinker_disabled,	c->shrinker_disabled);
	sysfs_strtoul(copy_gc_enabled,		c->copy_gc_enabled);

	return size;
}
STORE_LOCKED(bch_cache_set)

SHOW(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);

	return bch_cache_set_show(&c->kobj, attr, buf);
}

STORE(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);

	return bch_cache_set_store(&c->kobj, attr, buf, size);
}

static void bch_cache_set_internal_release(struct kobject *k)
{
}

static struct attribute *bch_cache_set_files[] = {
	&sysfs_unregister,
	&sysfs_stop,
	&sysfs_synchronous,
	&sysfs_journal_delay_ms,
	&sysfs_flash_vol_create,

	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_tree_depth,
	&sysfs_root_usage_percent,
	&sysfs_btree_cache_size,
	&sysfs_cache_available_percent,

	&sysfs_average_key_size,

	&sysfs_errors,
	&sysfs_io_error_limit,
	&sysfs_io_error_halflife,
	&sysfs_congested,
	&sysfs_congested_read_threshold_us,
	&sysfs_congested_write_threshold_us,
	&sysfs_clear_stats,
	NULL
};
KTYPE(bch_cache_set);

static struct attribute *bch_cache_set_internal_files[] = {
	&sysfs_active_journal_entries,

	sysfs_time_stats_attribute_list(btree_gc, sec, ms)
	sysfs_time_stats_attribute_list(btree_split, sec, us)
	sysfs_time_stats_attribute_list(btree_sort, ms, us)
	sysfs_time_stats_attribute_list(btree_read, ms, us)

	&sysfs_btree_nodes,
	&sysfs_btree_used_percent,
	&sysfs_btree_cache_max_chain,

	&sysfs_bset_tree_stats,
	&sysfs_cache_read_races,
	&sysfs_reclaim,
	&sysfs_flush_write,
	&sysfs_retry_flush_write,
	&sysfs_writeback_keys_done,
	&sysfs_writeback_keys_failed,

	&sysfs_trigger_gc,
	&sysfs_prune_cache,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_key_merging_disabled,
	&sysfs_expensive_debug_checks,
#endif
	&sysfs_gc_always_rewrite,
	&sysfs_btree_shrinker_disabled,
	&sysfs_copy_gc_enabled,
	&sysfs_io_disable,
	NULL
};
KTYPE(bch_cache_set_internal);

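/* sort() comparator: descending order, so the highest priorities come first */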
static int __bch_cache_cmp(const void *l, const void *r)
{
	return *((uint16_t *)r) - *((uint16_t *)l);
}

SHOW(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);

	sysfs_hprint(bucket_size,	bucket_bytes(ca));
	sysfs_hprint(block_size,	block_bytes(ca));
	sysfs_print(nbuckets,		ca->sb.nbuckets);
	sysfs_print(discard,		ca->discard);
	sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9);
	sysfs_hprint(btree_written,
		     atomic_long_read(&ca->btree_sectors_written) << 9);
	sysfs_hprint(metadata_written,
		     (atomic_long_read(&ca->meta_sectors_written) +
		      atomic_long_read(&ca->btree_sectors_written)) << 9);

	sysfs_print(io_errors,
		    atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);

	if (attr == &sysfs_cache_replacement_policy)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       cache_replacement_policies,
					       CACHE_REPLACEMENT(&ca->sb));

	if (attr == &sysfs_priority_stats) {
		struct bucket *b;
		size_t n = ca->sb.nbuckets, i;
		size_t unused = 0, available = 0, dirty = 0, meta = 0;
		uint64_t sum = 0;
		/* Compute 31 quantiles */
		uint16_t q[31], *p, *cached;
		ssize_t ret;

		cached = p = vmalloc(array_size(sizeof(uint16_t),
						ca->sb.nbuckets));
		if (!p)
			return -ENOMEM;

		mutex_lock(&ca->set->bucket_lock);
		for_each_bucket(b, ca) {
			if (!GC_SECTORS_USED(b))
				unused++;
			if (GC_MARK(b) == GC_MARK_RECLAIMABLE)
				available++;
			if (GC_MARK(b) == GC_MARK_DIRTY)
				dirty++;
			if (GC_MARK(b) == GC_MARK_METADATA)
				meta++;
		}

		for (i = ca->sb.first_bucket; i < n; i++)
			p[i] = ca->buckets[i].prio;
		mutex_unlock(&ca->set->bucket_lock);

		sort(p, n, sizeof(uint16_t), __bch_cache_cmp, NULL);

		while (n &&
		       !cached[n - 1])
			--n;

		unused = ca->sb.nbuckets - n;

		while (cached < p + n &&
		       *cached == BTREE_PRIO)
			cached++, n--;

		for (i = 0; i < n; i++)
			sum += INITIAL_PRIO - cached[i];

		if (n)
			do_div(sum, n);

		for (i = 0; i < ARRAY_SIZE(q); i++)
			q[i] = INITIAL_PRIO - cached[n * (i + 1) /
				(ARRAY_SIZE(q) + 1)];

		vfree(p);

		ret = scnprintf(buf, PAGE_SIZE,
				"Unused:		%zu%%\n"
				"Clean:		%zu%%\n"
				"Dirty:		%zu%%\n"
				"Metadata:	%zu%%\n"
				"Average:	%llu\n"
				"Sectors per Q:	%zu\n"
				"Quantiles:	[",
				unused * 100 / (size_t) ca->sb.nbuckets,
				available * 100 / (size_t) ca->sb.nbuckets,
				dirty * 100 / (size_t) ca->sb.nbuckets,
				meta * 100 / (size_t) ca->sb.nbuckets, sum,
				n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));

		for (i = 0; i < ARRAY_SIZE(q); i++)
			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
					 "%u ", q[i]);
		ret--;

		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "]\n");

		return ret;
	}

	return 0;
}
SHOW_LOCKED(bch_cache)

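/*
 * store() handler for an individual cache device. Note that "discard"
 * is only honoured if the underlying block device actually supports
 * discard, but the superblock flag is updated either way.
 */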
STORE(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);
	ssize_t v;

	if (attr == &sysfs_discard) {
		bool v = strtoul_or_return(buf);

		if (blk_queue_discard(bdev_get_queue(ca->bdev)))
			ca->discard = v;

		if (v != CACHE_DISCARD(&ca->sb)) {
			SET_CACHE_DISCARD(&ca->sb, v);
			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_cache_replacement_policy) {
		v = __sysfs_match_string(cache_replacement_policies, -1, buf);
		if (v < 0)
			return v;

		if ((unsigned int) v != CACHE_REPLACEMENT(&ca->sb)) {
			mutex_lock(&ca->set->bucket_lock);
			SET_CACHE_REPLACEMENT(&ca->sb, v);
			mutex_unlock(&ca->set->bucket_lock);

			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&ca->sectors_written, 0);
		atomic_long_set(&ca->btree_sectors_written, 0);
		atomic_long_set(&ca->meta_sectors_written, 0);
		atomic_set(&ca->io_count, 0);
		atomic_set(&ca->io_errors, 0);
	}

	return size;
}
STORE_LOCKED(bch_cache)

static struct attribute *bch_cache_files[] = {
	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_nbuckets,
	&sysfs_priority_stats,
	&sysfs_discard,
	&sysfs_written,
	&sysfs_btree_written,
	&sysfs_metadata_written,
	&sysfs_io_errors,
	&sysfs_clear_stats,
	&sysfs_cache_replacement_policy,
	NULL
};
KTYPE(bch_cache);
1043