// SPDX-License-Identifier: GPL-2.0
/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "sysfs.h"
#include "btree.h"
#include "request.h"
#include "writeback.h"

#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/sched/clock.h>

/* Default is 0 ("writethrough") */
static const char * const bch_cache_modes[] = {
	"writethrough",
	"writeback",
	"writearound",
	"none",
	NULL
};

/* Default is 0 ("auto") */
static const char * const bch_stop_on_failure_modes[] = {
	"auto",
	"always",
	NULL
};

static const char * const cache_replacement_policies[] = {
	"lru",
	"fifo",
	"random",
	NULL
};

static const char * const error_actions[] = {
	"unregister",
	"panic",
	NULL
};
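/*
 * The lists above are NULL-terminated so that they can be walked both by
 * bch_snprint_string_list() below (show side) and by the kernel's
 * __sysfs_match_string() helper (store side), which returns the index of
 * the matching entry or a negative errno; that index is what gets stored
 * in the corresponding superblock or cache_set field.
 */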

write_attribute(attach);
write_attribute(detach);
write_attribute(unregister);
write_attribute(stop);
write_attribute(clear_stats);
write_attribute(trigger_gc);
write_attribute(prune_cache);
write_attribute(flash_vol_create);

read_attribute(bucket_size);
read_attribute(block_size);
read_attribute(nbuckets);
read_attribute(tree_depth);
read_attribute(root_usage_percent);
read_attribute(priority_stats);
read_attribute(btree_cache_size);
read_attribute(btree_cache_max_chain);
read_attribute(cache_available_percent);
read_attribute(written);
read_attribute(btree_written);
read_attribute(metadata_written);
read_attribute(active_journal_entries);
read_attribute(backing_dev_name);
read_attribute(backing_dev_uuid);

sysfs_time_stats_attribute(btree_gc,	sec, ms);
sysfs_time_stats_attribute(btree_split, sec, us);
sysfs_time_stats_attribute(btree_sort,	ms,  us);
sysfs_time_stats_attribute(btree_read,	ms,  us);

read_attribute(btree_nodes);
read_attribute(btree_used_percent);
read_attribute(average_key_size);
read_attribute(dirty_data);
read_attribute(bset_tree_stats);

read_attribute(state);
read_attribute(cache_read_races);
read_attribute(reclaim);
read_attribute(flush_write);
read_attribute(retry_flush_write);
read_attribute(writeback_keys_done);
read_attribute(writeback_keys_failed);
read_attribute(io_errors);
read_attribute(congested);
read_attribute(cutoff_writeback);
read_attribute(cutoff_writeback_sync);
rw_attribute(congested_read_threshold_us);
rw_attribute(congested_write_threshold_us);

rw_attribute(sequential_cutoff);
rw_attribute(data_csum);
rw_attribute(cache_mode);
rw_attribute(stop_when_cache_set_failed);
rw_attribute(writeback_metadata);
rw_attribute(writeback_running);
rw_attribute(writeback_percent);
rw_attribute(writeback_delay);
rw_attribute(writeback_rate);

rw_attribute(writeback_rate_update_seconds);
rw_attribute(writeback_rate_i_term_inverse);
rw_attribute(writeback_rate_p_term_inverse);
rw_attribute(writeback_rate_minimum);
read_attribute(writeback_rate_debug);

read_attribute(stripe_size);
read_attribute(partial_stripes_expensive);

rw_attribute(synchronous);
rw_attribute(journal_delay_ms);
rw_attribute(io_disable);
rw_attribute(discard);
rw_attribute(running);
rw_attribute(label);
rw_attribute(readahead);
rw_attribute(errors);
rw_attribute(io_error_limit);
rw_attribute(io_error_halflife);
rw_attribute(verify);
rw_attribute(bypass_torture_test);
rw_attribute(key_merging_disabled);
rw_attribute(gc_always_rewrite);
rw_attribute(expensive_debug_checks);
rw_attribute(cache_replacement_policy);
rw_attribute(btree_shrinker_disabled);
rw_attribute(copy_gc_enabled);
rw_attribute(gc_after_writeback);
rw_attribute(size);

static ssize_t bch_snprint_string_list(char *buf,
				       size_t size,
				       const char * const list[],
				       size_t selected)
{
	char *out = buf;
	size_t i;

	/*
	 * Use scnprintf(), not snprintf(): scnprintf() returns the number
	 * of bytes actually written, so out can never advance past
	 * buf + size even if the list does not fit in the buffer.
	 */
	for (i = 0; list[i]; i++)
		out += scnprintf(out, buf + size - out,
				 i == selected ? "[%s] " : "%s ", list[i]);

	out[-1] = '\n';
	return out - buf;
}
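/*
 * Illustrative output: with the bch_cache_modes list above and
 * selected == 1, the generated string is
 *
 *	writethrough [writeback] writearound none
 *
 * i.e. all choices on one line, the active one in square brackets.
 */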

SHOW(__bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	char const *states[] = { "no cache", "clean", "dirty", "inconsistent" };
	int wb = dc->writeback_running;

#define var(stat)		(dc->stat)

	if (attr == &sysfs_cache_mode)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_cache_modes,
					       BDEV_CACHE_MODE(&dc->sb));

	if (attr == &sysfs_stop_when_cache_set_failed)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_stop_on_failure_modes,
					       dc->stop_when_cache_set_failed);

	sysfs_printf(data_csum,		"%i", dc->disk.data_csum);
	var_printf(verify,		"%i");
	var_printf(bypass_torture_test,	"%i");
	var_printf(writeback_metadata,	"%i");
	var_printf(writeback_running,	"%i");
	var_print(writeback_delay);
	var_print(writeback_percent);
	sysfs_hprint(writeback_rate,
		     wb ? atomic_long_read(&dc->writeback_rate.rate) << 9 : 0);
	sysfs_hprint(io_errors,		atomic_read(&dc->io_errors));
	sysfs_printf(io_error_limit,	"%i", dc->error_limit);
	sysfs_printf(io_disable,	"%i", dc->io_disable);
	var_print(writeback_rate_update_seconds);
	var_print(writeback_rate_i_term_inverse);
	var_print(writeback_rate_p_term_inverse);
	var_print(writeback_rate_minimum);

	if (attr == &sysfs_writeback_rate_debug) {
		char rate[20];
		char dirty[20];
		char target[20];
		char proportional[20];
		char integral[20];
		char change[20];
		s64 next_io;

		/*
		 * Except for dirty and target, the other values should
		 * be 0 if writeback is not running.
		 */
		bch_hprint(rate,
			   wb ? atomic_long_read(&dc->writeback_rate.rate) << 9
			      : 0);
		bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9);
		bch_hprint(target, dc->writeback_rate_target << 9);
		bch_hprint(proportional,
			   wb ? dc->writeback_rate_proportional << 9 : 0);
		bch_hprint(integral,
			   wb ? dc->writeback_rate_integral_scaled << 9 : 0);
		bch_hprint(change, wb ? dc->writeback_rate_change << 9 : 0);
		next_io = wb ? div64_s64(dc->writeback_rate.next - local_clock(),
					 NSEC_PER_MSEC) : 0;

		return sprintf(buf,
			       "rate:\t\t%s/sec\n"
			       "dirty:\t\t%s\n"
			       "target:\t\t%s\n"
			       "proportional:\t%s\n"
			       "integral:\t%s\n"
			       "change:\t\t%s/sec\n"
			       "next io:\t%llims\n",
			       rate, dirty, target, proportional,
			       integral, change, next_io);
	}

	sysfs_hprint(dirty_data,
		     bcache_dev_sectors_dirty(&dc->disk) << 9);

	sysfs_hprint(stripe_size,	 ((uint64_t)dc->disk.stripe_size) << 9);
	var_printf(partial_stripes_expensive,	"%u");

	var_hprint(sequential_cutoff);
	var_hprint(readahead);

	sysfs_print(running,		atomic_read(&dc->running));
	sysfs_print(state,		states[BDEV_STATE(&dc->sb)]);

	if (attr == &sysfs_label) {
		/*
		 * sb.label may not be NUL-terminated; terminate at index
		 * SB_LABEL_SIZE (not SB_LABEL_SIZE + 1, which would leave a
		 * byte of uninitialized memory in the middle of the string)
		 * before appending the newline.
		 */
		memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

	if (attr == &sysfs_backing_dev_name) {
		snprintf(buf, BDEVNAME_SIZE + 1, "%s", dc->backing_dev_name);
		strcat(buf, "\n");
		return strlen(buf);
	}

	if (attr == &sysfs_backing_dev_uuid) {
		/* convert binary uuid into 36-byte string plus '\0' */
		snprintf(buf, 36+1, "%pU", dc->sb.uuid);
		strcat(buf, "\n");
		return strlen(buf);
	}

#undef var
	return 0;
}
SHOW_LOCKED(bch_cached_dev)
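
/*
 * Typical use of the attributes above from userspace (paths are
 * illustrative and assume a backing device registered as bcache0):
 *
 *	cat /sys/block/bcache0/bcache/cache_mode
 *	echo writeback > /sys/block/bcache0/bcache/cache_mode
 *	cat /sys/block/bcache0/bcache/writeback_rate_debug
 */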

STORE(__cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	ssize_t v;
	struct cache_set *c;
	struct kobj_uevent_env *env;

#define d_strtoul(var)		sysfs_strtoul(var, dc->var)
#define d_strtoul_nonzero(var)	sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX)
#define d_strtoi_h(var)		sysfs_hatoi(var, dc->var)

	sysfs_strtoul(data_csum,	dc->disk.data_csum);
	d_strtoul(verify);
	sysfs_strtoul_bool(bypass_torture_test, dc->bypass_torture_test);
	sysfs_strtoul_bool(writeback_metadata, dc->writeback_metadata);
	sysfs_strtoul_bool(writeback_running, dc->writeback_running);
	sysfs_strtoul_clamp(writeback_delay, dc->writeback_delay, 0, UINT_MAX);

	sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent,
			    0, bch_cutoff_writeback);

	if (attr == &sysfs_writeback_rate) {
		ssize_t ret;
		long int v = atomic_long_read(&dc->writeback_rate.rate);

		ret = strtoul_safe_clamp(buf, v, 1, INT_MAX);

		if (!ret) {
			atomic_long_set(&dc->writeback_rate.rate, v);
			ret = size;
		}

		return ret;
	}

	sysfs_strtoul_clamp(writeback_rate_update_seconds,
			    dc->writeback_rate_update_seconds,
			    1, WRITEBACK_RATE_UPDATE_SECS_MAX);
	sysfs_strtoul_clamp(writeback_rate_i_term_inverse,
			    dc->writeback_rate_i_term_inverse,
			    1, UINT_MAX);
	sysfs_strtoul_clamp(writeback_rate_p_term_inverse,
			    dc->writeback_rate_p_term_inverse,
			    1, UINT_MAX);
	sysfs_strtoul_clamp(writeback_rate_minimum,
			    dc->writeback_rate_minimum,
			    1, UINT_MAX);

	sysfs_strtoul_clamp(io_error_limit, dc->error_limit, 0, INT_MAX);

	if (attr == &sysfs_io_disable) {
		int v = strtoul_or_return(buf);

		dc->io_disable = v ? 1 : 0;
	}

	sysfs_strtoul_clamp(sequential_cutoff,
			    dc->sequential_cutoff,
			    0, UINT_MAX);
	d_strtoi_h(readahead);

	if (attr == &sysfs_clear_stats)
		bch_cache_accounting_clear(&dc->accounting);

	if (attr == &sysfs_running &&
	    strtoul_or_return(buf))
		bch_cached_dev_run(dc);

	if (attr == &sysfs_cache_mode) {
		v = __sysfs_match_string(bch_cache_modes, -1, buf);
		if (v < 0)
			return v;

		if ((unsigned int) v != BDEV_CACHE_MODE(&dc->sb)) {
			SET_BDEV_CACHE_MODE(&dc->sb, v);
			bch_write_bdev_super(dc, NULL);
		}
	}

	if (attr == &sysfs_stop_when_cache_set_failed) {
		v = __sysfs_match_string(bch_stop_on_failure_modes, -1, buf);
		if (v < 0)
			return v;

		dc->stop_when_cache_set_failed = v;
	}

	if (attr == &sysfs_label) {
		if (size > SB_LABEL_SIZE)
			return -EINVAL;
		memcpy(dc->sb.label, buf, size);
		if (size < SB_LABEL_SIZE)
			dc->sb.label[size] = '\0';
		if (size && dc->sb.label[size - 1] == '\n')
			dc->sb.label[size - 1] = '\0';
		bch_write_bdev_super(dc, NULL);
		if (dc->disk.c) {
			memcpy(dc->disk.c->uuids[dc->disk.id].label,
			       buf, SB_LABEL_SIZE);
			bch_uuid_write(dc->disk.c);
		}
		env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
		if (!env)
			return -ENOMEM;
		add_uevent_var(env, "DRIVER=bcache");
		add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid);
		add_uevent_var(env, "CACHED_LABEL=%s", buf);
		kobject_uevent_env(&disk_to_dev(dc->disk.disk)->kobj,
				   KOBJ_CHANGE,
				   env->envp);
		kfree(env);
	}

	if (attr == &sysfs_attach) {
		uint8_t		set_uuid[16];

		if (bch_parse_uuid(buf, set_uuid) < 16)
			return -EINVAL;

		v = -ENOENT;
		list_for_each_entry(c, &bch_cache_sets, list) {
			v = bch_cached_dev_attach(dc, c, set_uuid);
			if (!v)
				return size;
		}
		if (v == -ENOENT)
			pr_err("Can't attach %s: cache set not found", buf);
		return v;
	}

	if (attr == &sysfs_detach && dc->disk.c)
		bch_cached_dev_detach(dc);

	if (attr == &sysfs_stop)
		bcache_device_stop(&dc->disk);

	return size;
}

STORE(bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);

	mutex_lock(&bch_register_lock);
	size = __cached_dev_store(kobj, attr, buf, size);

	if (attr == &sysfs_writeback_running) {
		/* dc->writeback_running changed in __cached_dev_store() */
		if (IS_ERR_OR_NULL(dc->writeback_thread)) {
			/*
			 * Reject setting it to 1 via sysfs if the writeback
			 * kthread has not been created yet.
			 */
			if (dc->writeback_running) {
				dc->writeback_running = false;
				pr_err("%s: failed to run non-existent writeback thread",
						dc->disk.disk->disk_name);
			}
		} else
			/*
			 * The writeback kthread itself checks whether
			 * dc->writeback_running is true or false.
			 */
			bch_writeback_queue(dc);
	}

	/*
	 * Only set BCACHE_DEV_WB_RUNNING when the cached device is attached
	 * to a cache set; otherwise it makes no sense.
	 */
	if (attr == &sysfs_writeback_percent)
		if ((dc->disk.c != NULL) &&
		    (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)))
			schedule_delayed_work(&dc->writeback_rate_update,
				      dc->writeback_rate_update_seconds * HZ);

	mutex_unlock(&bch_register_lock);
	return size;
}
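
/*
 * Illustrative writeback tuning from userspace: writeback_percent selects
 * the dirty-data target for the rate controller scheduled above, while
 * writeback_rate can be written to set the current rate directly, e.g.
 *
 *	echo 10 > /sys/block/bcache0/bcache/writeback_percent
 *	echo 1 > /sys/block/bcache0/bcache/writeback_running
 *
 * (paths assume a backing device registered as bcache0).
 */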

static struct attribute *bch_cached_dev_files[] = {
	&sysfs_attach,
	&sysfs_detach,
	&sysfs_stop,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_cache_mode,
	&sysfs_stop_when_cache_set_failed,
	&sysfs_writeback_metadata,
	&sysfs_writeback_running,
	&sysfs_writeback_delay,
	&sysfs_writeback_percent,
	&sysfs_writeback_rate,
	&sysfs_writeback_rate_update_seconds,
	&sysfs_writeback_rate_i_term_inverse,
	&sysfs_writeback_rate_p_term_inverse,
	&sysfs_writeback_rate_minimum,
	&sysfs_writeback_rate_debug,
	&sysfs_errors,
	&sysfs_io_error_limit,
	&sysfs_io_disable,
	&sysfs_dirty_data,
	&sysfs_stripe_size,
	&sysfs_partial_stripes_expensive,
	&sysfs_sequential_cutoff,
	&sysfs_clear_stats,
	&sysfs_running,
	&sysfs_state,
	&sysfs_label,
	&sysfs_readahead,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_bypass_torture_test,
#endif
	&sysfs_backing_dev_name,
	&sysfs_backing_dev_uuid,
	NULL
};
KTYPE(bch_cached_dev);

SHOW(bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_printf(data_csum,	"%i", d->data_csum);
	sysfs_hprint(size,	u->sectors << 9);

	if (attr == &sysfs_label) {
		/* Same off-by-one fix as in __bch_cached_dev's label show */
		memcpy(buf, u->label, SB_LABEL_SIZE);
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

	return 0;
}

STORE(__bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_strtoul(data_csum,	d->data_csum);

	if (attr == &sysfs_size) {
		uint64_t v;

		strtoi_h_or_return(buf, v);

		u->sectors = v >> 9;
		bch_uuid_write(d->c);
		set_capacity(d->disk, u->sectors);
	}

	if (attr == &sysfs_label) {
		memcpy(u->label, buf, SB_LABEL_SIZE);
		bch_uuid_write(d->c);
	}

	if (attr == &sysfs_unregister) {
		set_bit(BCACHE_DEV_DETACHING, &d->flags);
		bcache_device_stop(d);
	}

	return size;
}
STORE_LOCKED(bch_flash_dev)

static struct attribute *bch_flash_dev_files[] = {
	&sysfs_unregister,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_label,
	&sysfs_size,
	NULL
};
KTYPE(bch_flash_dev);
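
/*
 * Flash-only volumes are created via the cache set's flash_vol_create
 * attribute (see __bch_cache_set store below); the attributes above then
 * let an existing volume be relabelled or resized, e.g. (illustrative
 * paths, assuming the volume appeared as bcache1):
 *
 *	echo 100G > /sys/fs/bcache/<set-uuid>/flash_vol_create
 *	echo 120G > /sys/block/bcache1/bcache/size
 */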

struct bset_stats_op {
	struct btree_op op;
	size_t nodes;
	struct bset_stats stats;
};

static int bch_btree_bset_stats(struct btree_op *b_op, struct btree *b)
{
	struct bset_stats_op *op = container_of(b_op, struct bset_stats_op, op);

	op->nodes++;
	bch_btree_keys_stats(&b->keys, &op->stats);

	return MAP_CONTINUE;
}

static int bch_bset_print_stats(struct cache_set *c, char *buf)
{
	struct bset_stats_op op;
	int ret;

	memset(&op, 0, sizeof(op));
	bch_btree_op_init(&op.op, -1);

	ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, bch_btree_bset_stats);
	if (ret < 0)
		return ret;

	return snprintf(buf, PAGE_SIZE,
			"btree nodes:		%zu\n"
			"written sets:		%zu\n"
			"unwritten sets:		%zu\n"
			"written key bytes:	%zu\n"
			"unwritten key bytes:	%zu\n"
			"floats:			%zu\n"
			"failed:			%zu\n",
			op.nodes,
			op.stats.sets_written, op.stats.sets_unwritten,
			op.stats.bytes_written, op.stats.bytes_unwritten,
			op.stats.floats, op.stats.failed);
}

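/*
 * Report how full the btree root node is, as a percentage of a node's
 * byte capacity.  The lock_root loop below retries taking the read lock
 * whenever the root was replaced (e.g. by a split) between reading
 * c->root and acquiring the lock.
 */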
static unsigned int bch_root_usage(struct cache_set *c)
{
	unsigned int bytes = 0;
	struct bkey *k;
	struct btree *b;
	struct btree_iter iter;

	goto lock_root;

	do {
		rw_unlock(false, b);
lock_root:
		b = c->root;
		rw_lock(false, b, b->level);
	} while (b != c->root);

	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
		bytes += bkey_bytes(k);

	rw_unlock(false, b);

	return (bytes * 100) / btree_bytes(c);
}

static size_t bch_cache_size(struct cache_set *c)
{
	size_t ret = 0;
	struct btree *b;

	mutex_lock(&c->bucket_lock);
	list_for_each_entry(b, &c->btree_cache, list)
		ret += 1 << (b->keys.page_order + PAGE_SHIFT);

	mutex_unlock(&c->bucket_lock);
	return ret;
}

static unsigned int bch_cache_max_chain(struct cache_set *c)
{
	unsigned int ret = 0;
	struct hlist_head *h;

	mutex_lock(&c->bucket_lock);

	for (h = c->bucket_hash;
	     h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
	     h++) {
		unsigned int i = 0;
		struct hlist_node *p;

		hlist_for_each(p, h)
			i++;

		ret = max(ret, i);
	}

	mutex_unlock(&c->bucket_lock);
	return ret;
}

static unsigned int bch_btree_used(struct cache_set *c)
{
	return div64_u64(c->gc_stats.key_bytes * 100,
			 (c->gc_stats.nodes ?: 1) * btree_bytes(c));
}

static unsigned int bch_average_key_size(struct cache_set *c)
{
	return c->gc_stats.nkeys
		? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
		: 0;
}

SHOW(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);

	sysfs_print(synchronous,		CACHE_SYNC(&c->sb));
	sysfs_print(journal_delay_ms,		c->journal_delay_ms);
	sysfs_hprint(bucket_size,		bucket_bytes(c));
	sysfs_hprint(block_size,		block_bytes(c));
	sysfs_print(tree_depth,			c->root->level);
	sysfs_print(root_usage_percent,		bch_root_usage(c));

	sysfs_hprint(btree_cache_size,		bch_cache_size(c));
	sysfs_print(btree_cache_max_chain,	bch_cache_max_chain(c));
	sysfs_print(cache_available_percent,	100 - c->gc_stats.in_use);

	sysfs_print_time_stats(&c->btree_gc_time,	btree_gc, sec, ms);
	sysfs_print_time_stats(&c->btree_split_time,	btree_split, sec, us);
	sysfs_print_time_stats(&c->sort.time,		btree_sort, ms, us);
	sysfs_print_time_stats(&c->btree_read_time,	btree_read, ms, us);

	sysfs_print(btree_used_percent,	bch_btree_used(c));
	sysfs_print(btree_nodes,	c->gc_stats.nodes);
	sysfs_hprint(average_key_size,	bch_average_key_size(c));

	sysfs_print(cache_read_races,
		    atomic_long_read(&c->cache_read_races));

	sysfs_print(reclaim,
		    atomic_long_read(&c->reclaim));

	sysfs_print(flush_write,
		    atomic_long_read(&c->flush_write));

	sysfs_print(retry_flush_write,
		    atomic_long_read(&c->retry_flush_write));

	sysfs_print(writeback_keys_done,
		    atomic_long_read(&c->writeback_keys_done));
	sysfs_print(writeback_keys_failed,
		    atomic_long_read(&c->writeback_keys_failed));

	if (attr == &sysfs_errors)
		return bch_snprint_string_list(buf, PAGE_SIZE, error_actions,
					       c->on_error);

	/* See count_io_errors() for why 88 */
	sysfs_print(io_error_halflife,	c->error_decay * 88);
	sysfs_print(io_error_limit,	c->error_limit);

	sysfs_hprint(congested,
		     ((uint64_t) bch_get_congested(c)) << 9);
	sysfs_print(congested_read_threshold_us,
		    c->congested_read_threshold_us);
	sysfs_print(congested_write_threshold_us,
		    c->congested_write_threshold_us);

	sysfs_print(cutoff_writeback, bch_cutoff_writeback);
	sysfs_print(cutoff_writeback_sync, bch_cutoff_writeback_sync);

	sysfs_print(active_journal_entries,	fifo_used(&c->journal.pin));
	sysfs_printf(verify,			"%i", c->verify);
	sysfs_printf(key_merging_disabled,	"%i", c->key_merging_disabled);
	sysfs_printf(expensive_debug_checks,
		     "%i", c->expensive_debug_checks);
	sysfs_printf(gc_always_rewrite,		"%i", c->gc_always_rewrite);
	sysfs_printf(btree_shrinker_disabled,	"%i", c->shrinker_disabled);
	sysfs_printf(copy_gc_enabled,		"%i", c->copy_gc_enabled);
	sysfs_printf(gc_after_writeback,	"%i", c->gc_after_writeback);
	sysfs_printf(io_disable,		"%i",
		     test_bit(CACHE_SET_IO_DISABLE, &c->flags));

	if (attr == &sysfs_bset_tree_stats)
		return bch_bset_print_stats(c, buf);

	return 0;
}
SHOW_LOCKED(bch_cache_set)

STORE(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);
	ssize_t v;

	if (attr == &sysfs_unregister)
		bch_cache_set_unregister(c);

	if (attr == &sysfs_stop)
		bch_cache_set_stop(c);

	if (attr == &sysfs_synchronous) {
		bool sync = strtoul_or_return(buf);

		if (sync != CACHE_SYNC(&c->sb)) {
			SET_CACHE_SYNC(&c->sb, sync);
			bcache_write_super(c);
		}
	}

	if (attr == &sysfs_flash_vol_create) {
		int r;
		uint64_t v;

		strtoi_h_or_return(buf, v);

		r = bch_flash_dev_create(c, v);
		if (r)
			return r;
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&c->writeback_keys_done,	0);
		atomic_long_set(&c->writeback_keys_failed,	0);

		memset(&c->gc_stats, 0, sizeof(struct gc_stat));
		bch_cache_accounting_clear(&c->accounting);
	}

	if (attr == &sysfs_trigger_gc)
		force_wake_up_gc(c);

	if (attr == &sysfs_prune_cache) {
		struct shrink_control sc;

		sc.gfp_mask = GFP_KERNEL;
		sc.nr_to_scan = strtoul_or_return(buf);
		c->shrink.scan_objects(&c->shrink, &sc);
	}

	sysfs_strtoul_clamp(congested_read_threshold_us,
			    c->congested_read_threshold_us,
			    0, UINT_MAX);
	sysfs_strtoul_clamp(congested_write_threshold_us,
			    c->congested_write_threshold_us,
			    0, UINT_MAX);

	if (attr == &sysfs_errors) {
		v = __sysfs_match_string(error_actions, -1, buf);
		if (v < 0)
			return v;

		c->on_error = v;
	}

	sysfs_strtoul_clamp(io_error_limit, c->error_limit, 0, UINT_MAX);

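	/*
	 * Sketch of the arithmetic behind the 88 (assuming count_io_errors()
	 * still decays the error count by 127/128 every error_decay IOs):
	 * (127/128)^k = 1/2 gives k = ln(2) / ln(128/127) ~= 88 decay
	 * periods per half-life, hence halflife ~= error_decay * 88 and,
	 * conversely, error_decay = halflife / 88 below.
	 */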
	/* See count_io_errors() for why 88 */
	if (attr == &sysfs_io_error_halflife) {
		unsigned long v = 0;
		ssize_t ret;

		ret = strtoul_safe_clamp(buf, v, 0, UINT_MAX);
		if (!ret) {
			c->error_decay = v / 88;
			return size;
		}
		return ret;
	}

	if (attr == &sysfs_io_disable) {
		v = strtoul_or_return(buf);
		if (v) {
			if (test_and_set_bit(CACHE_SET_IO_DISABLE,
					     &c->flags))
				pr_warn("CACHE_SET_IO_DISABLE already set");
		} else {
			if (!test_and_clear_bit(CACHE_SET_IO_DISABLE,
						&c->flags))
				pr_warn("CACHE_SET_IO_DISABLE already cleared");
		}
	}

	sysfs_strtoul_clamp(journal_delay_ms,
			    c->journal_delay_ms,
			    0, USHRT_MAX);
	sysfs_strtoul_bool(verify,		c->verify);
	sysfs_strtoul_bool(key_merging_disabled, c->key_merging_disabled);
	sysfs_strtoul(expensive_debug_checks,	c->expensive_debug_checks);
	sysfs_strtoul_bool(gc_always_rewrite,	c->gc_always_rewrite);
	sysfs_strtoul_bool(btree_shrinker_disabled, c->shrinker_disabled);
	sysfs_strtoul_bool(copy_gc_enabled,	c->copy_gc_enabled);
	/*
	 * Writing to gc_after_writeback here may overwrite an already-set
	 * BCH_DO_AUTO_GC; that is harmless, because the flag will simply
	 * be set again at the next opportunity.
	 */
	sysfs_strtoul_clamp(gc_after_writeback, c->gc_after_writeback, 0, 1);

	return size;
}
STORE_LOCKED(bch_cache_set)

SHOW(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);

	return bch_cache_set_show(&c->kobj, attr, buf);
}

STORE(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);

	return bch_cache_set_store(&c->kobj, attr, buf, size);
}

static void bch_cache_set_internal_release(struct kobject *k)
{
}

static struct attribute *bch_cache_set_files[] = {
	&sysfs_unregister,
	&sysfs_stop,
	&sysfs_synchronous,
	&sysfs_journal_delay_ms,
	&sysfs_flash_vol_create,

	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_tree_depth,
	&sysfs_root_usage_percent,
	&sysfs_btree_cache_size,
	&sysfs_cache_available_percent,

	&sysfs_average_key_size,

	&sysfs_errors,
	&sysfs_io_error_limit,
	&sysfs_io_error_halflife,
	&sysfs_congested,
	&sysfs_congested_read_threshold_us,
	&sysfs_congested_write_threshold_us,
	&sysfs_clear_stats,
	NULL
};
KTYPE(bch_cache_set);

static struct attribute *bch_cache_set_internal_files[] = {
	&sysfs_active_journal_entries,

	sysfs_time_stats_attribute_list(btree_gc, sec, ms)
	sysfs_time_stats_attribute_list(btree_split, sec, us)
	sysfs_time_stats_attribute_list(btree_sort, ms, us)
	sysfs_time_stats_attribute_list(btree_read, ms, us)

	&sysfs_btree_nodes,
	&sysfs_btree_used_percent,
	&sysfs_btree_cache_max_chain,

	&sysfs_bset_tree_stats,
	&sysfs_cache_read_races,
	&sysfs_reclaim,
	&sysfs_flush_write,
	&sysfs_retry_flush_write,
	&sysfs_writeback_keys_done,
	&sysfs_writeback_keys_failed,

	&sysfs_trigger_gc,
	&sysfs_prune_cache,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_key_merging_disabled,
	&sysfs_expensive_debug_checks,
#endif
	&sysfs_gc_always_rewrite,
	&sysfs_btree_shrinker_disabled,
	&sysfs_copy_gc_enabled,
	&sysfs_gc_after_writeback,
	&sysfs_io_disable,
	&sysfs_cutoff_writeback,
	&sysfs_cutoff_writeback_sync,
	NULL
};
KTYPE(bch_cache_set_internal);

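/*
 * sort() comparator for the priority_stats quantiles below; note the
 * swapped operands, which give a descending sort (highest prio first).
 */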
static int __bch_cache_cmp(const void *l, const void *r)
{
	return *((uint16_t *)r) - *((uint16_t *)l);
}

SHOW(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);

	sysfs_hprint(bucket_size,	bucket_bytes(ca));
	sysfs_hprint(block_size,	block_bytes(ca));
	sysfs_print(nbuckets,		ca->sb.nbuckets);
	sysfs_print(discard,		ca->discard);
	sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9);
	sysfs_hprint(btree_written,
		     atomic_long_read(&ca->btree_sectors_written) << 9);
	sysfs_hprint(metadata_written,
		     (atomic_long_read(&ca->meta_sectors_written) +
		      atomic_long_read(&ca->btree_sectors_written)) << 9);

	sysfs_print(io_errors,
		    atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);

	if (attr == &sysfs_cache_replacement_policy)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       cache_replacement_policies,
					       CACHE_REPLACEMENT(&ca->sb));

	if (attr == &sysfs_priority_stats) {
		struct bucket *b;
		size_t n = ca->sb.nbuckets, i;
		size_t unused = 0, available = 0, dirty = 0, meta = 0;
		uint64_t sum = 0;
		/* Compute 31 quantiles */
		uint16_t q[31], *p, *cached;
		ssize_t ret;

		cached = p = vmalloc(array_size(sizeof(uint16_t),
						ca->sb.nbuckets));
		if (!p)
			return -ENOMEM;

		mutex_lock(&ca->set->bucket_lock);
		for_each_bucket(b, ca) {
			if (!GC_SECTORS_USED(b))
				unused++;
			if (GC_MARK(b) == GC_MARK_RECLAIMABLE)
				available++;
			if (GC_MARK(b) == GC_MARK_DIRTY)
				dirty++;
			if (GC_MARK(b) == GC_MARK_METADATA)
				meta++;
		}

		for (i = ca->sb.first_bucket; i < n; i++)
			p[i] = ca->buckets[i].prio;
		mutex_unlock(&ca->set->bucket_lock);

		sort(p, n, sizeof(uint16_t), __bch_cache_cmp, NULL);

		/*
		 * After the descending sort, trim zero-prio buckets off the
		 * tail and skip BTREE_PRIO (btree node) buckets at the head,
		 * so that only cached data buckets contribute to the stats.
		 */
		while (n &&
		       !cached[n - 1])
			--n;

		while (cached < p + n &&
		       *cached == BTREE_PRIO)
			cached++, n--;

		for (i = 0; i < n; i++)
			sum += INITIAL_PRIO - cached[i];

		if (n)
			do_div(sum, n);

		for (i = 0; i < ARRAY_SIZE(q); i++)
			q[i] = INITIAL_PRIO - cached[n * (i + 1) /
				(ARRAY_SIZE(q) + 1)];

		vfree(p);

		ret = scnprintf(buf, PAGE_SIZE,
				"Unused:		%zu%%\n"
				"Clean:		%zu%%\n"
				"Dirty:		%zu%%\n"
				"Metadata:	%zu%%\n"
				"Average:	%llu\n"
				"Sectors per Q:	%zu\n"
				"Quantiles:	[",
				unused * 100 / (size_t) ca->sb.nbuckets,
				available * 100 / (size_t) ca->sb.nbuckets,
				dirty * 100 / (size_t) ca->sb.nbuckets,
				meta * 100 / (size_t) ca->sb.nbuckets, sum,
				n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));

		for (i = 0; i < ARRAY_SIZE(q); i++)
			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
					 "%u ", q[i]);
		ret--;	/* strip the trailing space before the ']' */

		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "]\n");

		return ret;
	}

	return 0;
}
SHOW_LOCKED(bch_cache)

STORE(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);
	ssize_t v;

	if (attr == &sysfs_discard) {
		bool v = strtoul_or_return(buf);

		if (blk_queue_discard(bdev_get_queue(ca->bdev)))
			ca->discard = v;

		if (v != CACHE_DISCARD(&ca->sb)) {
			SET_CACHE_DISCARD(&ca->sb, v);
			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_cache_replacement_policy) {
		v = __sysfs_match_string(cache_replacement_policies, -1, buf);
		if (v < 0)
			return v;

		if ((unsigned int) v != CACHE_REPLACEMENT(&ca->sb)) {
			mutex_lock(&ca->set->bucket_lock);
			SET_CACHE_REPLACEMENT(&ca->sb, v);
			mutex_unlock(&ca->set->bucket_lock);

			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&ca->sectors_written, 0);
		atomic_long_set(&ca->btree_sectors_written, 0);
		atomic_long_set(&ca->meta_sectors_written, 0);
		atomic_set(&ca->io_count, 0);
		atomic_set(&ca->io_errors, 0);
	}

	return size;
}
STORE_LOCKED(bch_cache)

static struct attribute *bch_cache_files[] = {
	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_nbuckets,
	&sysfs_priority_stats,
	&sysfs_discard,
	&sysfs_written,
	&sysfs_btree_written,
	&sysfs_metadata_written,
	&sysfs_io_errors,
	&sysfs_clear_stats,
	&sysfs_cache_replacement_policy,
	NULL
};
KTYPE(bch_cache);
1106