// SPDX-License-Identifier: GPL-2.0
/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "sysfs.h"
#include "btree.h"
#include "request.h"
#include "writeback.h"

#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/sched/clock.h>

/* Default is -1; we skip past it for struct cached_dev's cache mode */
static const char * const bch_cache_modes[] = {
	"writethrough",
	"writeback",
	"writearound",
	"none",
	NULL
};

/* Default is -1; we skip past it for stop_when_cache_set_failed */
static const char * const bch_stop_on_failure_modes[] = {
	"auto",
	"always",
	NULL
};

static const char * const cache_replacement_policies[] = {
	"lru",
	"fifo",
	"random",
	NULL
};

static const char * const error_actions[] = {
	"unregister",
	"panic",
	NULL
};

write_attribute(attach);
write_attribute(detach);
write_attribute(unregister);
write_attribute(stop);
write_attribute(clear_stats);
write_attribute(trigger_gc);
write_attribute(prune_cache);
write_attribute(flash_vol_create);

read_attribute(bucket_size);
read_attribute(block_size);
read_attribute(nbuckets);
read_attribute(tree_depth);
read_attribute(root_usage_percent);
read_attribute(priority_stats);
read_attribute(btree_cache_size);
read_attribute(btree_cache_max_chain);
read_attribute(cache_available_percent);
read_attribute(written);
read_attribute(btree_written);
read_attribute(metadata_written);
read_attribute(active_journal_entries);

sysfs_time_stats_attribute(btree_gc,	sec, ms);
sysfs_time_stats_attribute(btree_split, sec, us);
sysfs_time_stats_attribute(btree_sort,	ms,  us);
sysfs_time_stats_attribute(btree_read,	ms,  us);

read_attribute(btree_nodes);
read_attribute(btree_used_percent);
read_attribute(average_key_size);
read_attribute(dirty_data);
read_attribute(bset_tree_stats);

read_attribute(state);
read_attribute(cache_read_races);
read_attribute(reclaim);
read_attribute(flush_write);
read_attribute(retry_flush_write);
read_attribute(writeback_keys_done);
read_attribute(writeback_keys_failed);
read_attribute(io_errors);
read_attribute(congested);
rw_attribute(congested_read_threshold_us);
rw_attribute(congested_write_threshold_us);

rw_attribute(sequential_cutoff);
rw_attribute(data_csum);
rw_attribute(cache_mode);
rw_attribute(stop_when_cache_set_failed);
rw_attribute(writeback_metadata);
rw_attribute(writeback_running);
rw_attribute(writeback_percent);
rw_attribute(writeback_delay);
rw_attribute(writeback_rate);

rw_attribute(writeback_rate_update_seconds);
rw_attribute(writeback_rate_i_term_inverse);
rw_attribute(writeback_rate_p_term_inverse);
rw_attribute(writeback_rate_minimum);
read_attribute(writeback_rate_debug);

read_attribute(stripe_size);
read_attribute(partial_stripes_expensive);

rw_attribute(synchronous);
rw_attribute(journal_delay_ms);
rw_attribute(io_disable);
rw_attribute(discard);
rw_attribute(running);
rw_attribute(label);
rw_attribute(readahead);
rw_attribute(errors);
rw_attribute(io_error_limit);
rw_attribute(io_error_halflife);
rw_attribute(verify);
rw_attribute(bypass_torture_test);
rw_attribute(key_merging_disabled);
rw_attribute(gc_always_rewrite);
rw_attribute(expensive_debug_checks);
rw_attribute(cache_replacement_policy);
rw_attribute(btree_shrinker_disabled);
rw_attribute(copy_gc_enabled);
rw_attribute(size);

static ssize_t bch_snprint_string_list(char *buf, size_t size,
				       const char * const list[],
				       size_t selected)
{
	char *out = buf;
	size_t i;

	for (i = 0; list[i]; i++)
		out += snprintf(out, buf + size - out,
				i == selected ? "[%s] " : "%s ", list[i]);

	out[-1] = '\n';
	return out - buf;
}
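
/*
 * Example (illustrative): with bch_cache_modes and "writeback" selected,
 * bch_snprint_string_list() fills the buffer with
 *
 *	writethrough [writeback] writearound none
 *
 * and then rewrites the trailing space to a newline.
 */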

SHOW(__bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	const char *states[] = { "no cache", "clean", "dirty", "inconsistent" };
	int wb = dc->writeback_running;

#define var(stat)		(dc->stat)

	if (attr == &sysfs_cache_mode)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_cache_modes,
					       BDEV_CACHE_MODE(&dc->sb));

	if (attr == &sysfs_stop_when_cache_set_failed)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_stop_on_failure_modes,
					       dc->stop_when_cache_set_failed);

	sysfs_printf(data_csum,		"%i", dc->disk.data_csum);
	var_printf(verify,		"%i");
	var_printf(bypass_torture_test,	"%i");
	var_printf(writeback_metadata,	"%i");
	var_printf(writeback_running,	"%i");
	var_print(writeback_delay);
	var_print(writeback_percent);
	sysfs_hprint(writeback_rate,
		     wb ? atomic_long_read(&dc->writeback_rate.rate) << 9 : 0);
	sysfs_hprint(io_errors,		atomic_read(&dc->io_errors));
	sysfs_printf(io_error_limit,	"%i", dc->error_limit);
	sysfs_printf(io_disable,	"%i", dc->io_disable);
	var_print(writeback_rate_update_seconds);
	var_print(writeback_rate_i_term_inverse);
	var_print(writeback_rate_p_term_inverse);
	var_print(writeback_rate_minimum);

	if (attr == &sysfs_writeback_rate_debug) {
		char rate[20];
		char dirty[20];
		char target[20];
		char proportional[20];
		char integral[20];
		char change[20];
		s64 next_io;

		/*
		 * Except for dirty and target, other values should
		 * be 0 if writeback is not running.
		 */
		bch_hprint(rate,
			   wb ? atomic_long_read(&dc->writeback_rate.rate) << 9
			      : 0);
		bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9);
		bch_hprint(target, dc->writeback_rate_target << 9);
		bch_hprint(proportional,
			   wb ? dc->writeback_rate_proportional << 9 : 0);
		bch_hprint(integral,
			   wb ? dc->writeback_rate_integral_scaled << 9 : 0);
		bch_hprint(change, wb ? dc->writeback_rate_change << 9 : 0);
		next_io = wb ? div64_s64(dc->writeback_rate.next - local_clock(),
					 NSEC_PER_MSEC) : 0;

		return sprintf(buf,
			       "rate:\t\t%s/sec\n"
			       "dirty:\t\t%s\n"
			       "target:\t\t%s\n"
			       "proportional:\t%s\n"
			       "integral:\t%s\n"
			       "change:\t\t%s/sec\n"
			       "next io:\t%llims\n",
			       rate, dirty, target, proportional,
			       integral, change, next_io);
	}

	sysfs_hprint(dirty_data,
		     bcache_dev_sectors_dirty(&dc->disk) << 9);

	sysfs_hprint(stripe_size,	 ((uint64_t)dc->disk.stripe_size) << 9);
	var_printf(partial_stripes_expensive,	"%u");

	var_hprint(sequential_cutoff);
	var_hprint(readahead);

	sysfs_print(running,		atomic_read(&dc->running));
	sysfs_print(state,		states[BDEV_STATE(&dc->sb)]);

	if (attr == &sysfs_label) {
		memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

#undef var
	return 0;
}
SHOW_LOCKED(bch_cached_dev)

STORE(__cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	ssize_t v;
	struct cache_set *c;
	struct kobj_uevent_env *env;

#define d_strtoul(var)		sysfs_strtoul(var, dc->var)
#define d_strtoul_nonzero(var)	sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX)
#define d_strtoi_h(var)		sysfs_hatoi(var, dc->var)

	sysfs_strtoul(data_csum,	dc->disk.data_csum);
	d_strtoul(verify);
	d_strtoul(bypass_torture_test);
	d_strtoul(writeback_metadata);
	d_strtoul(writeback_running);
	d_strtoul(writeback_delay);

	sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40);

	if (attr == &sysfs_writeback_rate) {
		ssize_t ret;
		long int v = atomic_long_read(&dc->writeback_rate.rate);

		ret = strtoul_safe_clamp(buf, v, 1, INT_MAX);

		if (!ret) {
			atomic_long_set(&dc->writeback_rate.rate, v);
			ret = size;
		}

		return ret;
	}

	sysfs_strtoul_clamp(writeback_rate_update_seconds,
			    dc->writeback_rate_update_seconds,
			    1, WRITEBACK_RATE_UPDATE_SECS_MAX);
	d_strtoul(writeback_rate_i_term_inverse);
	d_strtoul_nonzero(writeback_rate_p_term_inverse);

	sysfs_strtoul_clamp(io_error_limit, dc->error_limit, 0, INT_MAX);

	if (attr == &sysfs_io_disable) {
		int v = strtoul_or_return(buf);

		dc->io_disable = v ? 1 : 0;
	}

	d_strtoi_h(sequential_cutoff);
	d_strtoi_h(readahead);

	if (attr == &sysfs_clear_stats)
		bch_cache_accounting_clear(&dc->accounting);

	if (attr == &sysfs_running &&
	    strtoul_or_return(buf))
		bch_cached_dev_run(dc);

	if (attr == &sysfs_cache_mode) {
		v = __sysfs_match_string(bch_cache_modes, -1, buf);
		if (v < 0)
			return v;

		if ((unsigned int) v != BDEV_CACHE_MODE(&dc->sb)) {
			SET_BDEV_CACHE_MODE(&dc->sb, v);
			bch_write_bdev_super(dc, NULL);
		}
	}

	if (attr == &sysfs_stop_when_cache_set_failed) {
		v = __sysfs_match_string(bch_stop_on_failure_modes, -1, buf);
		if (v < 0)
			return v;

		dc->stop_when_cache_set_failed = v;
	}

	if (attr == &sysfs_label) {
		if (size > SB_LABEL_SIZE)
			return -EINVAL;
		memcpy(dc->sb.label, buf, size);
		if (size < SB_LABEL_SIZE)
			dc->sb.label[size] = '\0';
		if (size && dc->sb.label[size - 1] == '\n')
			dc->sb.label[size - 1] = '\0';
		bch_write_bdev_super(dc, NULL);
		if (dc->disk.c) {
			memcpy(dc->disk.c->uuids[dc->disk.id].label,
			       buf, SB_LABEL_SIZE);
			bch_uuid_write(dc->disk.c);
		}
		env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
		if (!env)
			return -ENOMEM;
		add_uevent_var(env, "DRIVER=bcache");
		add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid);
		add_uevent_var(env, "CACHED_LABEL=%s", buf);
		kobject_uevent_env(
			&disk_to_dev(dc->disk.disk)->kobj, KOBJ_CHANGE, env->envp);
		kfree(env);
	}

	if (attr == &sysfs_attach) {
		uint8_t		set_uuid[16];

		if (bch_parse_uuid(buf, set_uuid) < 16)
			return -EINVAL;

		v = -ENOENT;
		list_for_each_entry(c, &bch_cache_sets, list) {
			v = bch_cached_dev_attach(dc, c, set_uuid);
			if (!v)
				return size;
		}
		if (v == -ENOENT)
			pr_err("Can't attach %s: cache set not found", buf);
		return v;
	}

	if (attr == &sysfs_detach && dc->disk.c)
		bch_cached_dev_detach(dc);

	if (attr == &sysfs_stop)
		bcache_device_stop(&dc->disk);

	return size;
}

STORE(bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);

	mutex_lock(&bch_register_lock);
	size = __cached_dev_store(kobj, attr, buf, size);

	if (attr == &sysfs_writeback_running)
		bch_writeback_queue(dc);

	if (attr == &sysfs_writeback_percent)
		if (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
			schedule_delayed_work(&dc->writeback_rate_update,
				      dc->writeback_rate_update_seconds * HZ);

	mutex_unlock(&bch_register_lock);
	return size;
}
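
/*
 * Illustrative usage from userspace; the /sys/block/bcache0 path assumes
 * the backing device was registered as bcache0:
 *
 *	echo writeback > /sys/block/bcache0/bcache/cache_mode
 *	echo 10 > /sys/block/bcache0/bcache/writeback_percent
 *	cat /sys/block/bcache0/bcache/writeback_rate_debug
 *
 * All writes are funnelled through __cached_dev_store() above, under
 * bch_register_lock.
 */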

static struct attribute *bch_cached_dev_files[] = {
	&sysfs_attach,
	&sysfs_detach,
	&sysfs_stop,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_cache_mode,
	&sysfs_stop_when_cache_set_failed,
	&sysfs_writeback_metadata,
	&sysfs_writeback_running,
	&sysfs_writeback_delay,
	&sysfs_writeback_percent,
	&sysfs_writeback_rate,
	&sysfs_writeback_rate_update_seconds,
	&sysfs_writeback_rate_i_term_inverse,
	&sysfs_writeback_rate_p_term_inverse,
	&sysfs_writeback_rate_debug,
	&sysfs_errors,
	&sysfs_io_error_limit,
	&sysfs_io_disable,
	&sysfs_dirty_data,
	&sysfs_stripe_size,
	&sysfs_partial_stripes_expensive,
	&sysfs_sequential_cutoff,
	&sysfs_clear_stats,
	&sysfs_running,
	&sysfs_state,
	&sysfs_label,
	&sysfs_readahead,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_bypass_torture_test,
#endif
	NULL
};
KTYPE(bch_cached_dev);
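
/*
 * KTYPE() (defined in sysfs.h) supplies the kobj_type boilerplate: it
 * pairs the default attribute list above with the show/store handlers
 * generated by the SHOW()/STORE() macros.
 */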

SHOW(bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_printf(data_csum,	"%i", d->data_csum);
	sysfs_hprint(size,	u->sectors << 9);

	if (attr == &sysfs_label) {
		memcpy(buf, u->label, SB_LABEL_SIZE);
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

	return 0;
}

STORE(__bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_strtoul(data_csum,	d->data_csum);

	if (attr == &sysfs_size) {
		uint64_t v;

		strtoi_h_or_return(buf, v);

		u->sectors = v >> 9;
		bch_uuid_write(d->c);
		set_capacity(d->disk, u->sectors);
	}

	if (attr == &sysfs_label) {
		memcpy(u->label, buf, SB_LABEL_SIZE);
		bch_uuid_write(d->c);
	}

	if (attr == &sysfs_unregister) {
		set_bit(BCACHE_DEV_DETACHING, &d->flags);
		bcache_device_stop(d);
	}

	return size;
}
STORE_LOCKED(bch_flash_dev)

static struct attribute *bch_flash_dev_files[] = {
	&sysfs_unregister,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_label,
	&sysfs_size,
	NULL
};
KTYPE(bch_flash_dev);
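
/*
 * Illustrative flash-only volume lifecycle (the UUID path is an
 * assumption about the local setup):
 *
 *	echo 128M > /sys/fs/bcache/<set-uuid>/flash_vol_create
 *	echo 256M > /sys/block/bcache<N>/bcache/size
 *
 * Creation is handled by __bch_cache_set_store() below; resizing by
 * __bch_flash_dev_store() above.
 */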

struct bset_stats_op {
	struct btree_op op;
	size_t nodes;
	struct bset_stats stats;
};

static int bch_btree_bset_stats(struct btree_op *b_op, struct btree *b)
{
	struct bset_stats_op *op = container_of(b_op, struct bset_stats_op, op);

	op->nodes++;
	bch_btree_keys_stats(&b->keys, &op->stats);

	return MAP_CONTINUE;
}

static int bch_bset_print_stats(struct cache_set *c, char *buf)
{
	struct bset_stats_op op;
	int ret;

	memset(&op, 0, sizeof(op));
	bch_btree_op_init(&op.op, -1);

	ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, bch_btree_bset_stats);
	if (ret < 0)
		return ret;

	return snprintf(buf, PAGE_SIZE,
			"btree nodes:		%zu\n"
			"written sets:		%zu\n"
			"unwritten sets:		%zu\n"
			"written key bytes:	%zu\n"
			"unwritten key bytes:	%zu\n"
			"floats:			%zu\n"
			"failed:			%zu\n",
			op.nodes,
			op.stats.sets_written, op.stats.sets_unwritten,
			op.stats.bytes_written, op.stats.bytes_unwritten,
			op.stats.floats, op.stats.failed);
}
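
/*
 * Sample bset_tree_stats output (numbers are illustrative only):
 *
 *	btree nodes:		120
 *	written sets:		260
 *	unwritten sets:		24
 *	written key bytes:	1531904
 *	unwritten key bytes:	131072
 *	floats:			22337
 *	failed:			214
 */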
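/*
 * Estimate how full the root btree node is.  The root may be replaced
 * (e.g. by a node split) between reading c->root and acquiring the read
 * lock, so the goto/do-while dance below retries until the pointer is
 * still c->root while held under the lock.
 */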
static unsigned int bch_root_usage(struct cache_set *c)
{
	unsigned int bytes = 0;
	struct bkey *k;
	struct btree *b;
	struct btree_iter iter;

	goto lock_root;

	do {
		rw_unlock(false, b);
lock_root:
		b = c->root;
		rw_lock(false, b, b->level);
	} while (b != c->root);

	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
		bytes += bkey_bytes(k);

	rw_unlock(false, b);

	return (bytes * 100) / btree_bytes(c);
}

static size_t bch_cache_size(struct cache_set *c)
{
	size_t ret = 0;
	struct btree *b;

	mutex_lock(&c->bucket_lock);
	list_for_each_entry(b, &c->btree_cache, list)
		ret += 1 << (b->keys.page_order + PAGE_SHIFT);

	mutex_unlock(&c->bucket_lock);
	return ret;
}

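/*
 * Report the longest collision chain in the btree node hash table
 * (exported as btree_cache_max_chain); a consistently large value
 * suggests the hash is unbalanced for the number of cached nodes.
 */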
static unsigned int bch_cache_max_chain(struct cache_set *c)
{
	unsigned int ret = 0;
	struct hlist_head *h;

	mutex_lock(&c->bucket_lock);

	for (h = c->bucket_hash;
	     h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
	     h++) {
		unsigned int i = 0;
		struct hlist_node *p;

		hlist_for_each(p, h)
			i++;

		ret = max(ret, i);
	}

	mutex_unlock(&c->bucket_lock);
	return ret;
}

static unsigned int bch_btree_used(struct cache_set *c)
{
	return div64_u64(c->gc_stats.key_bytes * 100,
			 (c->gc_stats.nodes ?: 1) * btree_bytes(c));
}

static unsigned int bch_average_key_size(struct cache_set *c)
{
	return c->gc_stats.nkeys
		? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
		: 0;
}

SHOW(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);

	sysfs_print(synchronous,		CACHE_SYNC(&c->sb));
	sysfs_print(journal_delay_ms,		c->journal_delay_ms);
	sysfs_hprint(bucket_size,		bucket_bytes(c));
	sysfs_hprint(block_size,		block_bytes(c));
	sysfs_print(tree_depth,			c->root->level);
	sysfs_print(root_usage_percent,		bch_root_usage(c));

	sysfs_hprint(btree_cache_size,		bch_cache_size(c));
	sysfs_print(btree_cache_max_chain,	bch_cache_max_chain(c));
	sysfs_print(cache_available_percent,	100 - c->gc_stats.in_use);

	sysfs_print_time_stats(&c->btree_gc_time,	btree_gc, sec, ms);
	sysfs_print_time_stats(&c->btree_split_time,	btree_split, sec, us);
	sysfs_print_time_stats(&c->sort.time,		btree_sort, ms, us);
	sysfs_print_time_stats(&c->btree_read_time,	btree_read, ms, us);

	sysfs_print(btree_used_percent,	bch_btree_used(c));
	sysfs_print(btree_nodes,	c->gc_stats.nodes);
	sysfs_hprint(average_key_size,	bch_average_key_size(c));

	sysfs_print(cache_read_races,
		    atomic_long_read(&c->cache_read_races));

	sysfs_print(reclaim,
		    atomic_long_read(&c->reclaim));

	sysfs_print(flush_write,
		    atomic_long_read(&c->flush_write));

	sysfs_print(retry_flush_write,
		    atomic_long_read(&c->retry_flush_write));

	sysfs_print(writeback_keys_done,
		    atomic_long_read(&c->writeback_keys_done));
	sysfs_print(writeback_keys_failed,
		    atomic_long_read(&c->writeback_keys_failed));

	if (attr == &sysfs_errors)
		return bch_snprint_string_list(buf, PAGE_SIZE, error_actions,
					       c->on_error);

	/* See count_io_errors() for why 88 */
	sysfs_print(io_error_halflife,	c->error_decay * 88);
	sysfs_print(io_error_limit,	c->error_limit);

	sysfs_hprint(congested,
		     ((uint64_t) bch_get_congested(c)) << 9);
	sysfs_print(congested_read_threshold_us,
		    c->congested_read_threshold_us);
	sysfs_print(congested_write_threshold_us,
		    c->congested_write_threshold_us);

	sysfs_print(active_journal_entries,	fifo_used(&c->journal.pin));
	sysfs_printf(verify,			"%i", c->verify);
	sysfs_printf(key_merging_disabled,	"%i", c->key_merging_disabled);
	sysfs_printf(expensive_debug_checks,
		     "%i", c->expensive_debug_checks);
	sysfs_printf(gc_always_rewrite,		"%i", c->gc_always_rewrite);
	sysfs_printf(btree_shrinker_disabled,	"%i", c->shrinker_disabled);
	sysfs_printf(copy_gc_enabled,		"%i", c->copy_gc_enabled);
	sysfs_printf(io_disable,		"%i",
		     test_bit(CACHE_SET_IO_DISABLE, &c->flags));

	if (attr == &sysfs_bset_tree_stats)
		return bch_bset_print_stats(c, buf);

	return 0;
}
SHOW_LOCKED(bch_cache_set)
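
/*
 * Cache-set attributes surface under /sys/fs/bcache/<set-uuid>/, with a
 * second internal/ directory (wired up below) that reuses these handlers
 * for the debugging knobs, e.g.:
 *
 *	cat /sys/fs/bcache/<set-uuid>/cache_available_percent
 */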

STORE(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);
	ssize_t v;

	if (attr == &sysfs_unregister)
		bch_cache_set_unregister(c);

	if (attr == &sysfs_stop)
		bch_cache_set_stop(c);

	if (attr == &sysfs_synchronous) {
		bool sync = strtoul_or_return(buf);

		if (sync != CACHE_SYNC(&c->sb)) {
			SET_CACHE_SYNC(&c->sb, sync);
			bcache_write_super(c);
		}
	}

	if (attr == &sysfs_flash_vol_create) {
		int r;
		uint64_t v;

		strtoi_h_or_return(buf, v);

		r = bch_flash_dev_create(c, v);
		if (r)
			return r;
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&c->writeback_keys_done,	0);
		atomic_long_set(&c->writeback_keys_failed,	0);

		memset(&c->gc_stats, 0, sizeof(struct gc_stat));
		bch_cache_accounting_clear(&c->accounting);
	}

	if (attr == &sysfs_trigger_gc) {
		/*
		 * The garbage collection thread only runs when
		 * sectors_to_gc < 0; when users write to the trigger_gc
		 * sysfs entry, most of the time they want to forcibly
		 * trigger garbage collection. Setting c->sectors_to_gc to
		 * -1 here gives gc_should_run() a chance to let the gc
		 * thread run: "a chance" because, before gc_should_run()
		 * is reached, c->sectors_to_gc may still be set to another
		 * positive value, so writing to trigger_gc does not
		 * guarantee that the gc thread actually runs.
		 */
		atomic_set(&c->sectors_to_gc, -1);
		wake_up_gc(c);
	}

	if (attr == &sysfs_prune_cache) {
		struct shrink_control sc;

		sc.gfp_mask = GFP_KERNEL;
		sc.nr_to_scan = strtoul_or_return(buf);
		c->shrink.scan_objects(&c->shrink, &sc);
	}

	sysfs_strtoul(congested_read_threshold_us,
		      c->congested_read_threshold_us);
	sysfs_strtoul(congested_write_threshold_us,
		      c->congested_write_threshold_us);

	if (attr == &sysfs_errors) {
		v = __sysfs_match_string(error_actions, -1, buf);
		if (v < 0)
			return v;

		c->on_error = v;
	}

	if (attr == &sysfs_io_error_limit)
		c->error_limit = strtoul_or_return(buf);

	/* See count_io_errors() for why 88 */
	if (attr == &sysfs_io_error_halflife)
		c->error_decay = strtoul_or_return(buf) / 88;

	if (attr == &sysfs_io_disable) {
		v = strtoul_or_return(buf);
		if (v) {
			if (test_and_set_bit(CACHE_SET_IO_DISABLE,
					     &c->flags))
				pr_warn("CACHE_SET_IO_DISABLE already set");
		} else {
			if (!test_and_clear_bit(CACHE_SET_IO_DISABLE,
						&c->flags))
				pr_warn("CACHE_SET_IO_DISABLE already cleared");
		}
	}

	sysfs_strtoul(journal_delay_ms,		c->journal_delay_ms);
	sysfs_strtoul(verify,			c->verify);
	sysfs_strtoul(key_merging_disabled,	c->key_merging_disabled);
	sysfs_strtoul(expensive_debug_checks,	c->expensive_debug_checks);
	sysfs_strtoul(gc_always_rewrite,	c->gc_always_rewrite);
	sysfs_strtoul(btree_shrinker_disabled,	c->shrinker_disabled);
	sysfs_strtoul(copy_gc_enabled,		c->copy_gc_enabled);

	return size;
}
STORE_LOCKED(bch_cache_set)

SHOW(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);

	return bch_cache_set_show(&c->kobj, attr, buf);
}

STORE(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);

	return bch_cache_set_store(&c->kobj, attr, buf, size);
}

static void bch_cache_set_internal_release(struct kobject *k)
{
}

static struct attribute *bch_cache_set_files[] = {
	&sysfs_unregister,
	&sysfs_stop,
	&sysfs_synchronous,
	&sysfs_journal_delay_ms,
	&sysfs_flash_vol_create,

	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_tree_depth,
	&sysfs_root_usage_percent,
	&sysfs_btree_cache_size,
	&sysfs_cache_available_percent,

	&sysfs_average_key_size,

	&sysfs_errors,
	&sysfs_io_error_limit,
	&sysfs_io_error_halflife,
	&sysfs_congested,
	&sysfs_congested_read_threshold_us,
	&sysfs_congested_write_threshold_us,
	&sysfs_clear_stats,
	NULL
};
KTYPE(bch_cache_set);

static struct attribute *bch_cache_set_internal_files[] = {
	&sysfs_active_journal_entries,

	sysfs_time_stats_attribute_list(btree_gc, sec, ms)
	sysfs_time_stats_attribute_list(btree_split, sec, us)
	sysfs_time_stats_attribute_list(btree_sort, ms, us)
	sysfs_time_stats_attribute_list(btree_read, ms, us)

	&sysfs_btree_nodes,
	&sysfs_btree_used_percent,
	&sysfs_btree_cache_max_chain,

	&sysfs_bset_tree_stats,
	&sysfs_cache_read_races,
	&sysfs_reclaim,
	&sysfs_flush_write,
	&sysfs_retry_flush_write,
	&sysfs_writeback_keys_done,
	&sysfs_writeback_keys_failed,

	&sysfs_trigger_gc,
	&sysfs_prune_cache,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_key_merging_disabled,
	&sysfs_expensive_debug_checks,
#endif
	&sysfs_gc_always_rewrite,
	&sysfs_btree_shrinker_disabled,
	&sysfs_copy_gc_enabled,
	&sysfs_io_disable,
	NULL
};
KTYPE(bch_cache_set_internal);

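/* Sort comparator for bucket priorities: descending order (r - l). */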
static int __bch_cache_cmp(const void *l, const void *r)
{
	return *((uint16_t *)r) - *((uint16_t *)l);
}

SHOW(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);

	sysfs_hprint(bucket_size,	bucket_bytes(ca));
	sysfs_hprint(block_size,	block_bytes(ca));
	sysfs_print(nbuckets,		ca->sb.nbuckets);
	sysfs_print(discard,		ca->discard);
	sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9);
	sysfs_hprint(btree_written,
		     atomic_long_read(&ca->btree_sectors_written) << 9);
	sysfs_hprint(metadata_written,
		     (atomic_long_read(&ca->meta_sectors_written) +
		      atomic_long_read(&ca->btree_sectors_written)) << 9);

	sysfs_print(io_errors,
		    atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);

	if (attr == &sysfs_cache_replacement_policy)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       cache_replacement_policies,
					       CACHE_REPLACEMENT(&ca->sb));

	if (attr == &sysfs_priority_stats) {
		struct bucket *b;
		size_t n = ca->sb.nbuckets, i;
		size_t unused = 0, available = 0, dirty = 0, meta = 0;
		uint64_t sum = 0;
		/* Compute 31 quantiles */
		uint16_t q[31], *p, *cached;
		ssize_t ret;

		cached = p = vmalloc(array_size(sizeof(uint16_t),
						ca->sb.nbuckets));
		if (!p)
			return -ENOMEM;

		mutex_lock(&ca->set->bucket_lock);
		for_each_bucket(b, ca) {
			if (!GC_SECTORS_USED(b))
				unused++;
			if (GC_MARK(b) == GC_MARK_RECLAIMABLE)
				available++;
			if (GC_MARK(b) == GC_MARK_DIRTY)
				dirty++;
			if (GC_MARK(b) == GC_MARK_METADATA)
				meta++;
		}

		for (i = ca->sb.first_bucket; i < n; i++)
			p[i] = ca->buckets[i].prio;
		mutex_unlock(&ca->set->bucket_lock);

		sort(p, n, sizeof(uint16_t), __bch_cache_cmp, NULL);

		while (n &&
		       !cached[n - 1])
			--n;

		unused = ca->sb.nbuckets - n;

		while (cached < p + n &&
		       *cached == BTREE_PRIO)
			cached++, n--;

		for (i = 0; i < n; i++)
			sum += INITIAL_PRIO - cached[i];

		if (n)
			do_div(sum, n);

		for (i = 0; i < ARRAY_SIZE(q); i++)
			q[i] = INITIAL_PRIO - cached[n * (i + 1) /
				(ARRAY_SIZE(q) + 1)];

		vfree(p);

		ret = scnprintf(buf, PAGE_SIZE,
				"Unused:		%zu%%\n"
				"Clean:		%zu%%\n"
				"Dirty:		%zu%%\n"
				"Metadata:	%zu%%\n"
				"Average:	%llu\n"
				"Sectors per Q:	%zu\n"
				"Quantiles:	[",
				unused * 100 / (size_t) ca->sb.nbuckets,
				available * 100 / (size_t) ca->sb.nbuckets,
				dirty * 100 / (size_t) ca->sb.nbuckets,
				meta * 100 / (size_t) ca->sb.nbuckets, sum,
				n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));

		for (i = 0; i < ARRAY_SIZE(q); i++)
			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
					 "%u ", q[i]);
		ret--;

		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "]\n");

		return ret;
	}

	return 0;
}
SHOW_LOCKED(bch_cache)
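
/*
 * Sample priority_stats output (values are illustrative only):
 *
 *	Unused:		2%
 *	Clean:		88%
 *	Dirty:		9%
 *	Metadata:	1%
 *	Average:	1024
 *	Sectors per Q:	64
 *	Quantiles:	[1 2 4 ... 1015]
 */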

STORE(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);
	ssize_t v;

	if (attr == &sysfs_discard) {
		bool v = strtoul_or_return(buf);

		if (blk_queue_discard(bdev_get_queue(ca->bdev)))
			ca->discard = v;

		if (v != CACHE_DISCARD(&ca->sb)) {
			SET_CACHE_DISCARD(&ca->sb, v);
			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_cache_replacement_policy) {
		v = __sysfs_match_string(cache_replacement_policies, -1, buf);
		if (v < 0)
			return v;

		if ((unsigned int) v != CACHE_REPLACEMENT(&ca->sb)) {
			mutex_lock(&ca->set->bucket_lock);
			SET_CACHE_REPLACEMENT(&ca->sb, v);
			mutex_unlock(&ca->set->bucket_lock);

			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&ca->sectors_written, 0);
		atomic_long_set(&ca->btree_sectors_written, 0);
		atomic_long_set(&ca->meta_sectors_written, 0);
		atomic_set(&ca->io_count, 0);
		atomic_set(&ca->io_errors, 0);
	}

	return size;
}
STORE_LOCKED(bch_cache)

static struct attribute *bch_cache_files[] = {
	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_nbuckets,
	&sysfs_priority_stats,
	&sysfs_discard,
	&sysfs_written,
	&sysfs_btree_written,
	&sysfs_metadata_written,
	&sysfs_io_errors,
	&sysfs_clear_stats,
	&sysfs_cache_replacement_policy,
	NULL
};
KTYPE(bch_cache);