// SPDX-License-Identifier: GPL-2.0
/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "sysfs.h"
#include "btree.h"
#include "request.h"
#include "writeback.h"

#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/sched/clock.h>

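/*
 * All of the show() handlers below are handed a PAGE_SIZE buffer by the
 * sysfs core and return the number of bytes written; the store() handlers
 * return the number of bytes consumed (normally the full write size) or a
 * negative errno.
 */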
/*
 * These lists must stay NULL-terminated: bch_snprint_string_list() walks
 * them until it hits the sentinel.  sysfs_match_string() also stops at a
 * NULL entry, so the sentinel is safe for the store paths too.
 */

/* Default is 0 ("writethrough") */
static const char * const bch_cache_modes[] = {
	"writethrough",
	"writeback",
	"writearound",
	"none",
	NULL
};

/* Default is 0 ("auto") */
static const char * const bch_stop_on_failure_modes[] = {
	"auto",
	"always",
	NULL
};

static const char * const cache_replacement_policies[] = {
	"lru",
	"fifo",
	"random",
	NULL
};

static const char * const error_actions[] = {
	"unregister",
	"panic",
	NULL
};

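/*
 * Illustrative use of the mode lists above (device paths are examples
 * only): reading the attribute prints every mode with the current one
 * bracketed, and writing one of the strings selects it, e.g.
 *
 *	# cat /sys/block/bcache0/bcache/cache_mode
 *	writethrough [writeback] writearound none
 *	# echo writearound > /sys/block/bcache0/bcache/cache_mode
 */

/*
 * The write_attribute()/read_attribute()/rw_attribute() macros (defined in
 * sysfs.h) each expand to a static struct attribute named sysfs_<name>,
 * with write-only, read-only or read-write permissions respectively.
 */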
write_attribute(attach);
write_attribute(detach);
write_attribute(unregister);
write_attribute(stop);
write_attribute(clear_stats);
write_attribute(trigger_gc);
write_attribute(prune_cache);
write_attribute(flash_vol_create);

read_attribute(bucket_size);
read_attribute(block_size);
read_attribute(nbuckets);
read_attribute(tree_depth);
read_attribute(root_usage_percent);
read_attribute(priority_stats);
read_attribute(btree_cache_size);
read_attribute(btree_cache_max_chain);
read_attribute(cache_available_percent);
read_attribute(written);
read_attribute(btree_written);
read_attribute(metadata_written);
read_attribute(active_journal_entries);
read_attribute(backing_dev_name);
read_attribute(backing_dev_uuid);

sysfs_time_stats_attribute(btree_gc,	sec, ms);
sysfs_time_stats_attribute(btree_split, sec, us);
sysfs_time_stats_attribute(btree_sort,	ms,  us);
sysfs_time_stats_attribute(btree_read,	ms,  us);

read_attribute(btree_nodes);
read_attribute(btree_used_percent);
read_attribute(average_key_size);
read_attribute(dirty_data);
read_attribute(bset_tree_stats);

read_attribute(state);
read_attribute(cache_read_races);
read_attribute(reclaim);
read_attribute(flush_write);
read_attribute(retry_flush_write);
read_attribute(writeback_keys_done);
read_attribute(writeback_keys_failed);
read_attribute(io_errors);
read_attribute(congested);
read_attribute(cutoff_writeback);
read_attribute(cutoff_writeback_sync);
rw_attribute(congested_read_threshold_us);
rw_attribute(congested_write_threshold_us);

rw_attribute(sequential_cutoff);
rw_attribute(data_csum);
rw_attribute(cache_mode);
rw_attribute(stop_when_cache_set_failed);
rw_attribute(writeback_metadata);
rw_attribute(writeback_running);
rw_attribute(writeback_percent);
rw_attribute(writeback_delay);
rw_attribute(writeback_rate);

rw_attribute(writeback_rate_update_seconds);
rw_attribute(writeback_rate_i_term_inverse);
rw_attribute(writeback_rate_p_term_inverse);
rw_attribute(writeback_rate_minimum);
read_attribute(writeback_rate_debug);

read_attribute(stripe_size);
read_attribute(partial_stripes_expensive);

rw_attribute(synchronous);
rw_attribute(journal_delay_ms);
rw_attribute(io_disable);
rw_attribute(discard);
rw_attribute(running);
rw_attribute(label);
rw_attribute(readahead);
rw_attribute(errors);
rw_attribute(io_error_limit);
rw_attribute(io_error_halflife);
rw_attribute(verify);
rw_attribute(bypass_torture_test);
rw_attribute(key_merging_disabled);
rw_attribute(gc_always_rewrite);
rw_attribute(expensive_debug_checks);
rw_attribute(cache_replacement_policy);
rw_attribute(btree_shrinker_disabled);
rw_attribute(copy_gc_enabled);
rw_attribute(gc_after_writeback);
rw_attribute(size);

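/*
 * Print a NULL-terminated string list into buf, marking entry 'selected'
 * with brackets, e.g. "writethrough [writeback] writearound none\n".
 * scnprintf() (rather than snprintf()) is required here: it returns the
 * number of bytes actually written, so 'out' can never advance past
 * buf + size.
 */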
static ssize_t bch_snprint_string_list(char *buf,
				       size_t size,
				       const char * const list[],
				       size_t selected)
{
	char *out = buf;
	size_t i;

	for (i = 0; list[i]; i++)
		out += scnprintf(out, buf + size - out,
				 i == selected ? "[%s] " : "%s ", list[i]);

	out[-1] = '\n';
	return out - buf;
}

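/*
 * SHOW()/STORE() (see sysfs.h) expand to the show/store methods for the
 * given kobject.  The sysfs_print()/var_printf()-style helpers used below
 * compare 'attr' against the matching sysfs_<name> attribute and, on a
 * match, print the value and return immediately.
 */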
SHOW(__bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	char const *states[] = { "no cache", "clean", "dirty", "inconsistent" };
	int wb = dc->writeback_running;

#define var(stat)		(dc->stat)

	if (attr == &sysfs_cache_mode)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_cache_modes,
					       BDEV_CACHE_MODE(&dc->sb));

	if (attr == &sysfs_stop_when_cache_set_failed)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_stop_on_failure_modes,
					       dc->stop_when_cache_set_failed);
	sysfs_printf(data_csum,		"%i", dc->disk.data_csum);
	var_printf(verify,		"%i");
	var_printf(bypass_torture_test,	"%i");
	var_printf(writeback_metadata,	"%i");
	var_printf(writeback_running,	"%i");
	var_print(writeback_delay);
	var_print(writeback_percent);
	sysfs_hprint(writeback_rate,
		     wb ? atomic_long_read(&dc->writeback_rate.rate) << 9 : 0);
	sysfs_printf(io_errors,		"%i", atomic_read(&dc->io_errors));
	sysfs_printf(io_error_limit,	"%i", dc->error_limit);
	sysfs_printf(io_disable,	"%i", dc->io_disable);
	var_print(writeback_rate_update_seconds);
	var_print(writeback_rate_i_term_inverse);
	var_print(writeback_rate_p_term_inverse);
	var_print(writeback_rate_minimum);

	if (attr == &sysfs_writeback_rate_debug) {
		char rate[20];
		char dirty[20];
		char target[20];
		char proportional[20];
		char integral[20];
		char change[20];
		s64 next_io;

		/*
		 * Except for dirty and target, the other values should
		 * be 0 if writeback is not running.
		 */
		bch_hprint(rate,
			   wb ? atomic_long_read(&dc->writeback_rate.rate) << 9
			      : 0);
		bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9);
		bch_hprint(target, dc->writeback_rate_target << 9);
		bch_hprint(proportional,
			   wb ? dc->writeback_rate_proportional << 9 : 0);
		bch_hprint(integral,
			   wb ? dc->writeback_rate_integral_scaled << 9 : 0);
		bch_hprint(change, wb ? dc->writeback_rate_change << 9 : 0);
		next_io = wb ? div64_s64(dc->writeback_rate.next - local_clock(),
					 NSEC_PER_MSEC) : 0;

		return sprintf(buf,
			       "rate:\t\t%s/sec\n"
			       "dirty:\t\t%s\n"
			       "target:\t\t%s\n"
			       "proportional:\t%s\n"
			       "integral:\t%s\n"
			       "change:\t\t%s/sec\n"
			       "next io:\t%llims\n",
			       rate, dirty, target, proportional,
			       integral, change, next_io);
	}

	sysfs_hprint(dirty_data,
		     bcache_dev_sectors_dirty(&dc->disk) << 9);

	sysfs_hprint(stripe_size,	 ((uint64_t)dc->disk.stripe_size) << 9);
	var_printf(partial_stripes_expensive,	"%u");

	var_hprint(sequential_cutoff);
	var_hprint(readahead);

	sysfs_print(running,		atomic_read(&dc->running));
	sysfs_print(state,		states[BDEV_STATE(&dc->sb)]);

	if (attr == &sysfs_label) {
		memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
		/* sb.label is not necessarily NUL terminated */
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

	if (attr == &sysfs_backing_dev_name) {
		snprintf(buf, BDEVNAME_SIZE + 1, "%s", dc->backing_dev_name);
		strcat(buf, "\n");
		return strlen(buf);
	}

	if (attr == &sysfs_backing_dev_uuid) {
		/* convert binary uuid into 36-byte string plus '\0' */
		snprintf(buf, 36+1, "%pU", dc->sb.uuid);
		strcat(buf, "\n");
		return strlen(buf);
	}

#undef var
	return 0;
}
SHOW_LOCKED(bch_cached_dev)

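/*
 * Store path for a cached device.  The sysfs_strtoul*() helpers parse and
 * assign in place when 'attr' matches, returning early on parse errors;
 * anything not claimed by a helper falls through to the explicit attr
 * checks below, and 'size' is returned once the write has been consumed.
 */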
STORE(__cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	ssize_t v;
	struct cache_set *c;
	struct kobj_uevent_env *env;

#define d_strtoul(var)		sysfs_strtoul(var, dc->var)
#define d_strtoul_nonzero(var)	sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX)
#define d_strtoi_h(var)		sysfs_hatoi(var, dc->var)

	sysfs_strtoul(data_csum,	dc->disk.data_csum);
	d_strtoul(verify);
	sysfs_strtoul_bool(bypass_torture_test, dc->bypass_torture_test);
	sysfs_strtoul_bool(writeback_metadata, dc->writeback_metadata);
	sysfs_strtoul_bool(writeback_running, dc->writeback_running);
	sysfs_strtoul_clamp(writeback_delay, dc->writeback_delay, 0, UINT_MAX);

	sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent,
			    0, bch_cutoff_writeback);

	if (attr == &sysfs_writeback_rate) {
		ssize_t ret;
		long int v = atomic_long_read(&dc->writeback_rate.rate);

		ret = strtoul_safe_clamp(buf, v, 1, INT_MAX);

		if (!ret) {
			atomic_long_set(&dc->writeback_rate.rate, v);
			ret = size;
		}

		return ret;
	}

	sysfs_strtoul_clamp(writeback_rate_update_seconds,
			    dc->writeback_rate_update_seconds,
			    1, WRITEBACK_RATE_UPDATE_SECS_MAX);
	sysfs_strtoul_clamp(writeback_rate_i_term_inverse,
			    dc->writeback_rate_i_term_inverse,
			    1, UINT_MAX);
	sysfs_strtoul_clamp(writeback_rate_p_term_inverse,
			    dc->writeback_rate_p_term_inverse,
			    1, UINT_MAX);
	sysfs_strtoul_clamp(writeback_rate_minimum,
			    dc->writeback_rate_minimum,
			    1, UINT_MAX);

	sysfs_strtoul_clamp(io_error_limit, dc->error_limit, 0, INT_MAX);

	if (attr == &sysfs_io_disable) {
		int v = strtoul_or_return(buf);

		dc->io_disable = v ? 1 : 0;
	}

	sysfs_strtoul_clamp(sequential_cutoff,
			    dc->sequential_cutoff,
			    0, UINT_MAX);
	d_strtoi_h(readahead);

	if (attr == &sysfs_clear_stats)
		bch_cache_accounting_clear(&dc->accounting);

	if (attr == &sysfs_running &&
	    strtoul_or_return(buf)) {
		v = bch_cached_dev_run(dc);
		if (v)
			return v;
	}

	if (attr == &sysfs_cache_mode) {
		v = sysfs_match_string(bch_cache_modes, buf);
		if (v < 0)
			return v;

		if ((unsigned int) v != BDEV_CACHE_MODE(&dc->sb)) {
			SET_BDEV_CACHE_MODE(&dc->sb, v);
			bch_write_bdev_super(dc, NULL);
		}
	}

	if (attr == &sysfs_stop_when_cache_set_failed) {
		v = sysfs_match_string(bch_stop_on_failure_modes, buf);
		if (v < 0)
			return v;

		dc->stop_when_cache_set_failed = v;
	}

	if (attr == &sysfs_label) {
		if (size > SB_LABEL_SIZE)
			return -EINVAL;
		memcpy(dc->sb.label, buf, size);
		if (size < SB_LABEL_SIZE)
			dc->sb.label[size] = '\0';
		if (size && dc->sb.label[size - 1] == '\n')
			dc->sb.label[size - 1] = '\0';
		bch_write_bdev_super(dc, NULL);
		if (dc->disk.c) {
			memcpy(dc->disk.c->uuids[dc->disk.id].label,
			       buf, SB_LABEL_SIZE);
			bch_uuid_write(dc->disk.c);
		}
		env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
		if (!env)
			return -ENOMEM;
		add_uevent_var(env, "DRIVER=bcache");
		add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid);
		add_uevent_var(env, "CACHED_LABEL=%s", buf);
		kobject_uevent_env(&disk_to_dev(dc->disk.disk)->kobj,
				   KOBJ_CHANGE,
				   env->envp);
		kfree(env);
	}

	if (attr == &sysfs_attach) {
		uint8_t		set_uuid[16];

		if (bch_parse_uuid(buf, set_uuid) < 16)
			return -EINVAL;

		v = -ENOENT;
		list_for_each_entry(c, &bch_cache_sets, list) {
			v = bch_cached_dev_attach(dc, c, set_uuid);
			if (!v)
				return size;
		}
		if (v == -ENOENT)
			pr_err("Can't attach %s: cache set not found\n", buf);
		return v;
	}

	if (attr == &sysfs_detach && dc->disk.c)
		bch_cached_dev_detach(dc);

	if (attr == &sysfs_stop)
		bcache_device_stop(&dc->disk);

	return size;
}

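/*
 * Wrapper around __cached_dev_store(): takes bch_register_lock for the
 * duration and performs the side effects (kicking the writeback thread,
 * scheduling the rate update worker) that must happen after the new
 * values are visible.
 */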
STORE(bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);

	mutex_lock(&bch_register_lock);
	size = __cached_dev_store(kobj, attr, buf, size);

	if (attr == &sysfs_writeback_running) {
		/* dc->writeback_running changed in __cached_dev_store() */
		if (IS_ERR_OR_NULL(dc->writeback_thread)) {
			/*
			 * reject setting it to 1 via sysfs if writeback
			 * kthread is not created yet.
			 */
			if (dc->writeback_running) {
				dc->writeback_running = false;
				pr_err("%s: failed to run non-existent writeback thread\n",
						dc->disk.disk->disk_name);
			}
		} else
			/*
			 * writeback kthread will check if dc->writeback_running
			 * is true or false.
			 */
			bch_writeback_queue(dc);
	}

	/*
	 * Only set BCACHE_DEV_WB_RUNNING when the cached device is
	 * attached to a cache set; otherwise it doesn't make sense.
	 */
	if (attr == &sysfs_writeback_percent)
		if ((dc->disk.c != NULL) &&
		    (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)))
			schedule_delayed_work(&dc->writeback_rate_update,
				      dc->writeback_rate_update_seconds * HZ);

	mutex_unlock(&bch_register_lock);
	return size;
}

static struct attribute *bch_cached_dev_files[] = {
	&sysfs_attach,
	&sysfs_detach,
	&sysfs_stop,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_cache_mode,
	&sysfs_stop_when_cache_set_failed,
	&sysfs_writeback_metadata,
	&sysfs_writeback_running,
	&sysfs_writeback_delay,
	&sysfs_writeback_percent,
	&sysfs_writeback_rate,
	&sysfs_writeback_rate_update_seconds,
	&sysfs_writeback_rate_i_term_inverse,
	&sysfs_writeback_rate_p_term_inverse,
	&sysfs_writeback_rate_minimum,
	&sysfs_writeback_rate_debug,
	&sysfs_io_errors,
	&sysfs_io_error_limit,
	&sysfs_io_disable,
	&sysfs_dirty_data,
	&sysfs_stripe_size,
	&sysfs_partial_stripes_expensive,
	&sysfs_sequential_cutoff,
	&sysfs_clear_stats,
	&sysfs_running,
	&sysfs_state,
	&sysfs_label,
	&sysfs_readahead,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_bypass_torture_test,
#endif
	&sysfs_backing_dev_name,
	&sysfs_backing_dev_uuid,
	NULL
};
KTYPE(bch_cached_dev);

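/*
 * Flash-only volumes: bcache devices carved directly out of a cache set,
 * with no backing device, so only size, label and unregister are exposed.
 */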
SHOW(bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_printf(data_csum,	"%i", d->data_csum);
	sysfs_hprint(size,	u->sectors << 9);

	if (attr == &sysfs_label) {
		memcpy(buf, u->label, SB_LABEL_SIZE);
		/* u->label is not necessarily NUL terminated */
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

	return 0;
}

STORE(__bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_strtoul(data_csum,	d->data_csum);

	if (attr == &sysfs_size) {
		uint64_t v;

		strtoi_h_or_return(buf, v);

		u->sectors = v >> 9;
		bch_uuid_write(d->c);
		set_capacity(d->disk, u->sectors);
	}

	if (attr == &sysfs_label) {
		memcpy(u->label, buf, SB_LABEL_SIZE);
		bch_uuid_write(d->c);
	}

	if (attr == &sysfs_unregister) {
		set_bit(BCACHE_DEV_DETACHING, &d->flags);
		bcache_device_stop(d);
	}

	return size;
}
STORE_LOCKED(bch_flash_dev)

static struct attribute *bch_flash_dev_files[] = {
	&sysfs_unregister,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_label,
	&sysfs_size,
	NULL
};
KTYPE(bch_flash_dev);

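/*
 * bset_tree_stats: walk every btree node via bch_btree_map_nodes() and
 * accumulate per-bset statistics into one struct, then format the totals.
 */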
struct bset_stats_op {
	struct btree_op op;
	size_t nodes;
	struct bset_stats stats;
};

static int bch_btree_bset_stats(struct btree_op *b_op, struct btree *b)
{
	struct bset_stats_op *op = container_of(b_op, struct bset_stats_op, op);

	op->nodes++;
	bch_btree_keys_stats(&b->keys, &op->stats);

	return MAP_CONTINUE;
}

static int bch_bset_print_stats(struct cache_set *c, char *buf)
{
	struct bset_stats_op op;
	int ret;

	memset(&op, 0, sizeof(op));
	bch_btree_op_init(&op.op, -1);

	ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, bch_btree_bset_stats);
	if (ret < 0)
		return ret;

	return snprintf(buf, PAGE_SIZE,
			"btree nodes:		%zu\n"
			"written sets:		%zu\n"
			"unwritten sets:		%zu\n"
			"written key bytes:	%zu\n"
			"unwritten key bytes:	%zu\n"
			"floats:			%zu\n"
			"failed:			%zu\n",
			op.nodes,
			op.stats.sets_written, op.stats.sets_unwritten,
			op.stats.bytes_written, op.stats.bytes_unwritten,
			op.stats.floats, op.stats.failed);
}

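/*
 * bch_root_usage() needs a read lock on the root node, but the root can
 * be replaced while we sleep on the lock; hence the retry loop below,
 * which re-checks that b is still c->root once the lock is acquired.
 */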
static unsigned int bch_root_usage(struct cache_set *c)
{
	unsigned int bytes = 0;
	struct bkey *k;
	struct btree *b;
	struct btree_iter iter;

	goto lock_root;

	do {
		rw_unlock(false, b);
lock_root:
		b = c->root;
		rw_lock(false, b, b->level);
	} while (b != c->root);

	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
		bytes += bkey_bytes(k);

	rw_unlock(false, b);

	return (bytes * 100) / btree_bytes(c);
}

static size_t bch_cache_size(struct cache_set *c)
{
	size_t ret = 0;
	struct btree *b;

	mutex_lock(&c->bucket_lock);
	list_for_each_entry(b, &c->btree_cache, list)
		ret += 1 << (b->keys.page_order + PAGE_SHIFT);

	mutex_unlock(&c->bucket_lock);
	return ret;
}

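/*
 * Longest chain in the bucket hash table; a large value here means the
 * hash is degenerating into linear searches.
 */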
static unsigned int bch_cache_max_chain(struct cache_set *c)
{
	unsigned int ret = 0;
	struct hlist_head *h;

	mutex_lock(&c->bucket_lock);

	for (h = c->bucket_hash;
	     h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
	     h++) {
		unsigned int i = 0;
		struct hlist_node *p;

		hlist_for_each(p, h)
			i++;

		ret = max(ret, i);
	}

	mutex_unlock(&c->bucket_lock);
	return ret;
}

static unsigned int bch_btree_used(struct cache_set *c)
{
	return div64_u64(c->gc_stats.key_bytes * 100,
			 (c->gc_stats.nodes ?: 1) * btree_bytes(c));
}

static unsigned int bch_average_key_size(struct cache_set *c)
{
	return c->gc_stats.nkeys
		? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
		: 0;
}

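/*
 * Cache-set-wide statistics and tunables.  The same show/store pair also
 * backs the <cache set>/internal directory (see
 * bch_cache_set_internal_files further down), which simply forwards here.
 */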
SHOW(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);

	sysfs_print(synchronous,		CACHE_SYNC(&c->sb));
	sysfs_print(journal_delay_ms,		c->journal_delay_ms);
	sysfs_hprint(bucket_size,		bucket_bytes(c));
	sysfs_hprint(block_size,		block_bytes(c));
	sysfs_print(tree_depth,			c->root->level);
	sysfs_print(root_usage_percent,		bch_root_usage(c));

	sysfs_hprint(btree_cache_size,		bch_cache_size(c));
	sysfs_print(btree_cache_max_chain,	bch_cache_max_chain(c));
	sysfs_print(cache_available_percent,	100 - c->gc_stats.in_use);

	sysfs_print_time_stats(&c->btree_gc_time,	btree_gc, sec, ms);
	sysfs_print_time_stats(&c->btree_split_time,	btree_split, sec, us);
	sysfs_print_time_stats(&c->sort.time,		btree_sort, ms, us);
	sysfs_print_time_stats(&c->btree_read_time,	btree_read, ms, us);

	sysfs_print(btree_used_percent,	bch_btree_used(c));
	sysfs_print(btree_nodes,	c->gc_stats.nodes);
	sysfs_hprint(average_key_size,	bch_average_key_size(c));

	sysfs_print(cache_read_races,
		    atomic_long_read(&c->cache_read_races));

	sysfs_print(reclaim,
		    atomic_long_read(&c->reclaim));

	sysfs_print(flush_write,
		    atomic_long_read(&c->flush_write));

	sysfs_print(retry_flush_write,
		    atomic_long_read(&c->retry_flush_write));

	sysfs_print(writeback_keys_done,
		    atomic_long_read(&c->writeback_keys_done));
	sysfs_print(writeback_keys_failed,
		    atomic_long_read(&c->writeback_keys_failed));

	if (attr == &sysfs_errors)
		return bch_snprint_string_list(buf, PAGE_SIZE, error_actions,
					       c->on_error);

	/* See count_io_errors() for why 88 */
	sysfs_print(io_error_halflife,	c->error_decay * 88);
	sysfs_print(io_error_limit,	c->error_limit);

	sysfs_hprint(congested,
		     ((uint64_t) bch_get_congested(c)) << 9);
	sysfs_print(congested_read_threshold_us,
		    c->congested_read_threshold_us);
	sysfs_print(congested_write_threshold_us,
		    c->congested_write_threshold_us);

	sysfs_print(cutoff_writeback, bch_cutoff_writeback);
	sysfs_print(cutoff_writeback_sync, bch_cutoff_writeback_sync);

	sysfs_print(active_journal_entries,	fifo_used(&c->journal.pin));
	sysfs_printf(verify,			"%i", c->verify);
	sysfs_printf(key_merging_disabled,	"%i", c->key_merging_disabled);
	sysfs_printf(expensive_debug_checks,
		     "%i", c->expensive_debug_checks);
	sysfs_printf(gc_always_rewrite,		"%i", c->gc_always_rewrite);
	sysfs_printf(btree_shrinker_disabled,	"%i", c->shrinker_disabled);
	sysfs_printf(copy_gc_enabled,		"%i", c->copy_gc_enabled);
	sysfs_printf(gc_after_writeback,	"%i", c->gc_after_writeback);
	sysfs_printf(io_disable,		"%i",
		     test_bit(CACHE_SET_IO_DISABLE, &c->flags));

	if (attr == &sysfs_bset_tree_stats)
		return bch_bset_print_stats(c, buf);

	return 0;
}
SHOW_LOCKED(bch_cache_set)

STORE(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);
	ssize_t v;

	if (attr == &sysfs_unregister)
		bch_cache_set_unregister(c);

	if (attr == &sysfs_stop)
		bch_cache_set_stop(c);

	if (attr == &sysfs_synchronous) {
		bool sync = strtoul_or_return(buf);

		if (sync != CACHE_SYNC(&c->sb)) {
			SET_CACHE_SYNC(&c->sb, sync);
			bcache_write_super(c);
		}
	}

	if (attr == &sysfs_flash_vol_create) {
		int r;
		uint64_t v;

		strtoi_h_or_return(buf, v);

		r = bch_flash_dev_create(c, v);
		if (r)
			return r;
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&c->writeback_keys_done,	0);
		atomic_long_set(&c->writeback_keys_failed,	0);

		memset(&c->gc_stats, 0, sizeof(struct gc_stat));
		bch_cache_accounting_clear(&c->accounting);
	}

	if (attr == &sysfs_trigger_gc)
		force_wake_up_gc(c);

	if (attr == &sysfs_prune_cache) {
		struct shrink_control sc;

		sc.gfp_mask = GFP_KERNEL;
		sc.nr_to_scan = strtoul_or_return(buf);
		c->shrink.scan_objects(&c->shrink, &sc);
	}

	sysfs_strtoul_clamp(congested_read_threshold_us,
			    c->congested_read_threshold_us,
			    0, UINT_MAX);
	sysfs_strtoul_clamp(congested_write_threshold_us,
			    c->congested_write_threshold_us,
			    0, UINT_MAX);

	if (attr == &sysfs_errors) {
		v = sysfs_match_string(error_actions, buf);
		if (v < 0)
			return v;

		c->on_error = v;
	}

	sysfs_strtoul_clamp(io_error_limit, c->error_limit, 0, UINT_MAX);

	/* See count_io_errors() for why 88 */
	if (attr == &sysfs_io_error_halflife) {
		unsigned long v = 0;
		ssize_t ret;

		ret = strtoul_safe_clamp(buf, v, 0, UINT_MAX);
		if (!ret) {
			c->error_decay = v / 88;
			return size;
		}
		return ret;
	}

	if (attr == &sysfs_io_disable) {
		v = strtoul_or_return(buf);
		if (v) {
			if (test_and_set_bit(CACHE_SET_IO_DISABLE,
					     &c->flags))
				pr_warn("CACHE_SET_IO_DISABLE already set\n");
		} else {
			if (!test_and_clear_bit(CACHE_SET_IO_DISABLE,
						&c->flags))
				pr_warn("CACHE_SET_IO_DISABLE already cleared\n");
		}
	}

	sysfs_strtoul_clamp(journal_delay_ms,
			    c->journal_delay_ms,
			    0, USHRT_MAX);
	sysfs_strtoul_bool(verify,		c->verify);
	sysfs_strtoul_bool(key_merging_disabled, c->key_merging_disabled);
	sysfs_strtoul(expensive_debug_checks,	c->expensive_debug_checks);
	sysfs_strtoul_bool(gc_always_rewrite,	c->gc_always_rewrite);
	sysfs_strtoul_bool(btree_shrinker_disabled, c->shrinker_disabled);
	sysfs_strtoul_bool(copy_gc_enabled,	c->copy_gc_enabled);
	/*
	 * Writing gc_after_writeback here may overwrite an already set
	 * BCH_DO_AUTO_GC; that is harmless, because the flag will simply
	 * be set again on the next chance.
	 */
	sysfs_strtoul_clamp(gc_after_writeback, c->gc_after_writeback, 0, 1);

	return size;
}
STORE_LOCKED(bch_cache_set)

SHOW(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);

	return bch_cache_set_show(&c->kobj, attr, buf);
}

STORE(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);

	return bch_cache_set_store(&c->kobj, attr, buf, size);
}

static void bch_cache_set_internal_release(struct kobject *k)
{
}

static struct attribute *bch_cache_set_files[] = {
	&sysfs_unregister,
	&sysfs_stop,
	&sysfs_synchronous,
	&sysfs_journal_delay_ms,
	&sysfs_flash_vol_create,

	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_tree_depth,
	&sysfs_root_usage_percent,
	&sysfs_btree_cache_size,
	&sysfs_cache_available_percent,

	&sysfs_average_key_size,

	&sysfs_errors,
	&sysfs_io_error_limit,
	&sysfs_io_error_halflife,
	&sysfs_congested,
	&sysfs_congested_read_threshold_us,
	&sysfs_congested_write_threshold_us,
	&sysfs_clear_stats,
	NULL
};
KTYPE(bch_cache_set);

static struct attribute *bch_cache_set_internal_files[] = {
	&sysfs_active_journal_entries,

	sysfs_time_stats_attribute_list(btree_gc, sec, ms)
	sysfs_time_stats_attribute_list(btree_split, sec, us)
	sysfs_time_stats_attribute_list(btree_sort, ms, us)
	sysfs_time_stats_attribute_list(btree_read, ms, us)

	&sysfs_btree_nodes,
	&sysfs_btree_used_percent,
	&sysfs_btree_cache_max_chain,

	&sysfs_bset_tree_stats,
	&sysfs_cache_read_races,
	&sysfs_reclaim,
	&sysfs_flush_write,
	&sysfs_retry_flush_write,
	&sysfs_writeback_keys_done,
	&sysfs_writeback_keys_failed,

	&sysfs_trigger_gc,
	&sysfs_prune_cache,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_key_merging_disabled,
	&sysfs_expensive_debug_checks,
#endif
	&sysfs_gc_always_rewrite,
	&sysfs_btree_shrinker_disabled,
	&sysfs_copy_gc_enabled,
	&sysfs_gc_after_writeback,
	&sysfs_io_disable,
	&sysfs_cutoff_writeback,
	&sysfs_cutoff_writeback_sync,
	NULL
};
KTYPE(bch_cache_set_internal);

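/* Comparator for sort(): descending order of bucket priority. */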
static int __bch_cache_cmp(const void *l, const void *r)
{
	return *((uint16_t *)r) - *((uint16_t *)l);
}

SHOW(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);

	sysfs_hprint(bucket_size,	bucket_bytes(ca));
	sysfs_hprint(block_size,	block_bytes(ca));
	sysfs_print(nbuckets,		ca->sb.nbuckets);
	sysfs_print(discard,		ca->discard);
	sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9);
	sysfs_hprint(btree_written,
		     atomic_long_read(&ca->btree_sectors_written) << 9);
	sysfs_hprint(metadata_written,
		     (atomic_long_read(&ca->meta_sectors_written) +
		      atomic_long_read(&ca->btree_sectors_written)) << 9);

	sysfs_print(io_errors,
		    atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);

	if (attr == &sysfs_cache_replacement_policy)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       cache_replacement_policies,
					       CACHE_REPLACEMENT(&ca->sb));

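	/*
	 * priority_stats: classify every bucket (unused/clean/dirty/metadata),
	 * then take a sorted snapshot of all bucket priorities and report the
	 * average plus 31 evenly spaced quantiles of the cached-data buckets.
	 */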
	if (attr == &sysfs_priority_stats) {
		struct bucket *b;
		size_t n = ca->sb.nbuckets, i;
		size_t unused = 0, available = 0, dirty = 0, meta = 0;
		uint64_t sum = 0;
		/* Compute 31 quantiles */
		uint16_t q[31], *p, *cached;
		ssize_t ret;

		cached = p = vmalloc(array_size(sizeof(uint16_t),
						ca->sb.nbuckets));
		if (!p)
			return -ENOMEM;

		mutex_lock(&ca->set->bucket_lock);
		for_each_bucket(b, ca) {
			if (!GC_SECTORS_USED(b))
				unused++;
			if (GC_MARK(b) == GC_MARK_RECLAIMABLE)
				available++;
			if (GC_MARK(b) == GC_MARK_DIRTY)
				dirty++;
			if (GC_MARK(b) == GC_MARK_METADATA)
				meta++;
		}

		for (i = ca->sb.first_bucket; i < n; i++)
			p[i] = ca->buckets[i].prio;
		mutex_unlock(&ca->set->bucket_lock);

		sort(p, n, sizeof(uint16_t), __bch_cache_cmp, NULL);

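		/*
		 * Priorities are sorted in descending order, so unused
		 * buckets (prio 0) sit at the tail and btree buckets
		 * (BTREE_PRIO) at the front; trim both so only cached-data
		 * buckets remain.
		 */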
		while (n &&
		       !cached[n - 1])
			--n;

		while (cached < p + n &&
		       *cached == BTREE_PRIO)
			cached++, n--;

		for (i = 0; i < n; i++)
			sum += INITIAL_PRIO - cached[i];

		if (n)
			do_div(sum, n);

		for (i = 0; i < ARRAY_SIZE(q); i++)
			q[i] = INITIAL_PRIO - cached[n * (i + 1) /
				(ARRAY_SIZE(q) + 1)];

		vfree(p);

		ret = scnprintf(buf, PAGE_SIZE,
				"Unused:		%zu%%\n"
				"Clean:		%zu%%\n"
				"Dirty:		%zu%%\n"
				"Metadata:	%zu%%\n"
				"Average:	%llu\n"
				"Sectors per Q:	%zu\n"
				"Quantiles:	[",
				unused * 100 / (size_t) ca->sb.nbuckets,
				available * 100 / (size_t) ca->sb.nbuckets,
				dirty * 100 / (size_t) ca->sb.nbuckets,
				meta * 100 / (size_t) ca->sb.nbuckets, sum,
				n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));

		for (i = 0; i < ARRAY_SIZE(q); i++)
			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
					 "%u ", q[i]);
		ret--;

		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "]\n");

		return ret;
	}

	return 0;
}
SHOW_LOCKED(bch_cache)

STORE(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);
	ssize_t v;

	if (attr == &sysfs_discard) {
		bool v = strtoul_or_return(buf);

		if (blk_queue_discard(bdev_get_queue(ca->bdev)))
			ca->discard = v;

		if (v != CACHE_DISCARD(&ca->sb)) {
			SET_CACHE_DISCARD(&ca->sb, v);
			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_cache_replacement_policy) {
		v = sysfs_match_string(cache_replacement_policies, buf);
		if (v < 0)
			return v;

		if ((unsigned int) v != CACHE_REPLACEMENT(&ca->sb)) {
			mutex_lock(&ca->set->bucket_lock);
			SET_CACHE_REPLACEMENT(&ca->sb, v);
			mutex_unlock(&ca->set->bucket_lock);

			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&ca->sectors_written, 0);
		atomic_long_set(&ca->btree_sectors_written, 0);
		atomic_long_set(&ca->meta_sectors_written, 0);
		atomic_set(&ca->io_count, 0);
		atomic_set(&ca->io_errors, 0);
	}

	return size;
}
STORE_LOCKED(bch_cache)

static struct attribute *bch_cache_files[] = {
	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_nbuckets,
	&sysfs_priority_stats,
	&sysfs_discard,
	&sysfs_written,
	&sysfs_btree_written,
	&sysfs_metadata_written,
	&sysfs_io_errors,
	&sysfs_clear_stats,
	&sysfs_cache_replacement_policy,
	NULL
};
KTYPE(bch_cache);
1105