xref: /openbmc/linux/drivers/md/bcache/sysfs.c (revision f35e839a)
/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "sysfs.h"
#include "btree.h"
#include "request.h"

#include <linux/sort.h>

static const char * const cache_replacement_policies[] = {
	"lru",
	"fifo",
	"random",
	NULL
};

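/*
 * Attribute declarations.  write_attribute(), read_attribute() and
 * rw_attribute() come from sysfs.h; each expands to a static
 * struct attribute named sysfs_<name> with the matching permission
 * bits (write-only, read-only and read-write respectively).
 */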
write_attribute(attach);
write_attribute(detach);
write_attribute(unregister);
write_attribute(stop);
write_attribute(clear_stats);
write_attribute(trigger_gc);
write_attribute(prune_cache);
write_attribute(flash_vol_create);

read_attribute(bucket_size);
read_attribute(block_size);
read_attribute(nbuckets);
read_attribute(tree_depth);
read_attribute(root_usage_percent);
read_attribute(priority_stats);
read_attribute(btree_cache_size);
read_attribute(btree_cache_max_chain);
read_attribute(cache_available_percent);
read_attribute(written);
read_attribute(btree_written);
read_attribute(metadata_written);
read_attribute(active_journal_entries);

sysfs_time_stats_attribute(btree_gc,	sec, ms);
sysfs_time_stats_attribute(btree_split, sec, us);
sysfs_time_stats_attribute(btree_sort,	ms,  us);
sysfs_time_stats_attribute(btree_read,	ms,  us);
sysfs_time_stats_attribute(try_harder,	ms,  us);

read_attribute(btree_nodes);
read_attribute(btree_used_percent);
read_attribute(average_key_size);
read_attribute(dirty_data);
read_attribute(bset_tree_stats);

read_attribute(state);
read_attribute(cache_read_races);
read_attribute(writeback_keys_done);
read_attribute(writeback_keys_failed);
read_attribute(io_errors);
read_attribute(congested);
rw_attribute(congested_read_threshold_us);
rw_attribute(congested_write_threshold_us);

rw_attribute(sequential_cutoff);
rw_attribute(sequential_merge);
rw_attribute(data_csum);
rw_attribute(cache_mode);
rw_attribute(writeback_metadata);
rw_attribute(writeback_running);
rw_attribute(writeback_percent);
rw_attribute(writeback_delay);
rw_attribute(writeback_rate);

rw_attribute(writeback_rate_update_seconds);
rw_attribute(writeback_rate_d_term);
rw_attribute(writeback_rate_p_term_inverse);
rw_attribute(writeback_rate_d_smooth);
read_attribute(writeback_rate_debug);

rw_attribute(synchronous);
rw_attribute(journal_delay_ms);
rw_attribute(discard);
rw_attribute(running);
rw_attribute(label);
rw_attribute(readahead);
rw_attribute(io_error_limit);
rw_attribute(io_error_halflife);
rw_attribute(verify);
rw_attribute(key_merging_disabled);
rw_attribute(gc_always_rewrite);
rw_attribute(freelist_percent);
rw_attribute(cache_replacement_policy);
rw_attribute(btree_shrinker_disabled);
rw_attribute(copy_gc_enabled);
rw_attribute(size);

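/*
 * SHOW(fn) and STORE(fn) (sysfs.h) expand to kobject show/store method
 * signatures, so SHOW(__bch_cached_dev) defines __bch_cached_dev_show().
 * The sysfs_print()/sysfs_printf() and var_print()/var_printf() helpers
 * each test attr against the corresponding sysfs_<name> attribute and
 * return the formatted value on a match, which keeps each method a flat
 * list of candidate attributes.
 */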
SHOW(__bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	const char *states[] = { "no cache", "clean", "dirty", "inconsistent" };

#define var(stat)		(dc->stat)

	if (attr == &sysfs_cache_mode)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_cache_modes + 1,
					       BDEV_CACHE_MODE(&dc->sb));

	sysfs_printf(data_csum,		"%i", dc->disk.data_csum);
	var_printf(verify,		"%i");
	var_printf(writeback_metadata,	"%i");
	var_printf(writeback_running,	"%i");
	var_print(writeback_delay);
	var_print(writeback_percent);
	sysfs_print(writeback_rate,	dc->writeback_rate.rate);

	var_print(writeback_rate_update_seconds);
	var_print(writeback_rate_d_term);
	var_print(writeback_rate_p_term_inverse);
	var_print(writeback_rate_d_smooth);

	if (attr == &sysfs_writeback_rate_debug) {
		char dirty[20];
		char derivative[20];
		char target[20];
		bch_hprint(dirty,
		       atomic_long_read(&dc->disk.sectors_dirty) << 9);
		bch_hprint(derivative,	dc->writeback_rate_derivative << 9);
		bch_hprint(target,	dc->writeback_rate_target << 9);

		return sprintf(buf,
			       "rate:\t\t%u\n"
			       "change:\t\t%i\n"
			       "dirty:\t\t%s\n"
			       "derivative:\t%s\n"
			       "target:\t\t%s\n",
			       dc->writeback_rate.rate,
			       dc->writeback_rate_change,
			       dirty, derivative, target);
	}

	sysfs_hprint(dirty_data,
		     atomic_long_read(&dc->disk.sectors_dirty) << 9);

	var_printf(sequential_merge,	"%i");
	var_hprint(sequential_cutoff);
	var_hprint(readahead);

	sysfs_print(running,		atomic_read(&dc->running));
	sysfs_print(state,		states[BDEV_STATE(&dc->sb)]);

	if (attr == &sysfs_label) {
		memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
		/* the on-disk label need not be NUL-terminated */
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

#undef var
	return 0;
}
SHOW_LOCKED(bch_cached_dev)

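/*
 * The store path mirrors the show path: sysfs_strtoul() and the other
 * parse helpers return when attr matches, so the ordering below matters
 * only for readability.
 */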
STORE(__cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	struct cache_set *c;

#define d_strtoul(var)		sysfs_strtoul(var, dc->var)
#define d_strtoi_h(var)		sysfs_hatoi(var, dc->var)

	sysfs_strtoul(data_csum,	dc->disk.data_csum);
	d_strtoul(verify);
	d_strtoul(writeback_metadata);
	d_strtoul(writeback_running);
	d_strtoul(writeback_delay);
	sysfs_strtoul_clamp(writeback_rate,
			    dc->writeback_rate.rate, 1, 1000000);
	sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40);

	d_strtoul(writeback_rate_update_seconds);
	d_strtoul(writeback_rate_d_term);
	/*
	 * sysfs_strtoul() returns on an attribute match, so a plain
	 * d_strtoul() here would make the clamp below unreachable.
	 */
	sysfs_strtoul_clamp(writeback_rate_p_term_inverse,
			    dc->writeback_rate_p_term_inverse, 1, INT_MAX);
	d_strtoul(writeback_rate_d_smooth);

	d_strtoul(sequential_merge);
	d_strtoi_h(sequential_cutoff);
	d_strtoi_h(readahead);

	if (attr == &sysfs_clear_stats)
		bch_cache_accounting_clear(&dc->accounting);

	if (attr == &sysfs_running &&
	    strtoul_or_return(buf))
		bch_cached_dev_run(dc);

	if (attr == &sysfs_cache_mode) {
		ssize_t v = bch_read_string_list(buf, bch_cache_modes + 1);

		if (v < 0)
			return v;

		if ((unsigned) v != BDEV_CACHE_MODE(&dc->sb)) {
			SET_BDEV_CACHE_MODE(&dc->sb, v);
			bch_write_bdev_super(dc, NULL);
		}
	}

	if (attr == &sysfs_label) {
		memcpy(dc->sb.label, buf, SB_LABEL_SIZE);
		bch_write_bdev_super(dc, NULL);
		if (dc->disk.c) {
			memcpy(dc->disk.c->uuids[dc->disk.id].label,
			       buf, SB_LABEL_SIZE);
			bch_uuid_write(dc->disk.c);
		}
	}

	if (attr == &sysfs_attach) {
		ssize_t v = -ENOENT;

		if (bch_parse_uuid(buf, dc->sb.set_uuid) < 16)
			return -EINVAL;

		list_for_each_entry(c, &bch_cache_sets, list) {
			v = bch_cached_dev_attach(dc, c);
			if (!v)
				return size;
		}

		pr_err("Can't attach %s: cache set not found", buf);
		/* propagate the failure instead of returning size */
		return v;
	}

	if (attr == &sysfs_detach && dc->disk.c)
		bch_cached_dev_detach(dc);

	if (attr == &sysfs_stop)
		bcache_device_stop(&dc->disk);

	return size;
}

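/*
 * Open-coded instead of STORE_LOCKED() because, after storing the new
 * values, the writeback thread and the rate-update worker may need a
 * kick while bch_register_lock is still held.
 */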
STORE(bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);

	mutex_lock(&bch_register_lock);
	size = __cached_dev_store(kobj, attr, buf, size);

	if (attr == &sysfs_writeback_running)
		bch_writeback_queue(dc);

	if (attr == &sysfs_writeback_percent)
		schedule_delayed_work(&dc->writeback_rate_update,
				      dc->writeback_rate_update_seconds * HZ);

	mutex_unlock(&bch_register_lock);
	return size;
}

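/*
 * Attribute list for the cached-device kobject; KTYPE() (sysfs.h) builds
 * the kobj_type that ties these files to the show/store methods above.
 */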
static struct attribute *bch_cached_dev_files[] = {
	&sysfs_attach,
	&sysfs_detach,
	&sysfs_stop,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_cache_mode,
	&sysfs_writeback_metadata,
	&sysfs_writeback_running,
	&sysfs_writeback_delay,
	&sysfs_writeback_percent,
	&sysfs_writeback_rate,
	&sysfs_writeback_rate_update_seconds,
	&sysfs_writeback_rate_d_term,
	&sysfs_writeback_rate_p_term_inverse,
	&sysfs_writeback_rate_d_smooth,
	&sysfs_writeback_rate_debug,
	&sysfs_dirty_data,
	&sysfs_sequential_cutoff,
	&sysfs_sequential_merge,
	&sysfs_clear_stats,
	&sysfs_running,
	&sysfs_state,
	&sysfs_label,
	&sysfs_readahead,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
#endif
	NULL
};
KTYPE(bch_cached_dev);

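/*
 * Flash-only volumes are bcache devices carved entirely out of the cache
 * set, with no backing device; their size and label live in the cache
 * set's uuid entry.
 */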
SHOW(bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_printf(data_csum,	"%i", d->data_csum);
	sysfs_hprint(size,	u->sectors << 9);

	if (attr == &sysfs_label) {
		memcpy(buf, u->label, SB_LABEL_SIZE);
		/* the stored label need not be NUL-terminated */
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

	return 0;
}

STORE(__bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_strtoul(data_csum,	d->data_csum);

	if (attr == &sysfs_size) {
		uint64_t v;
		strtoi_h_or_return(buf, v);

		u->sectors = v >> 9;
		bch_uuid_write(d->c);
		set_capacity(d->disk, u->sectors);
	}

	if (attr == &sysfs_label) {
		memcpy(u->label, buf, SB_LABEL_SIZE);
		bch_uuid_write(d->c);
	}

	if (attr == &sysfs_unregister) {
		atomic_set(&d->detaching, 1);
		bcache_device_stop(d);
	}

	return size;
}
STORE_LOCKED(bch_flash_dev)

static struct attribute *bch_flash_dev_files[] = {
	&sysfs_unregister,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_label,
	&sysfs_size,
	NULL
};
KTYPE(bch_flash_dev);

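/*
 * The helpers below are nested functions (a GCC extension); each one
 * computes a derived statistic over the cache set and is only called
 * from the sysfs_print()/sysfs_hprint() lines further down.
 */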
SHOW(__bch_cache_set)
{
	unsigned root_usage(struct cache_set *c)
	{
		unsigned bytes = 0;
		struct bkey *k;
		struct btree *b;
		struct btree_iter iter;

		goto lock_root;

		do {
			rw_unlock(false, b);
lock_root:
			b = c->root;
			rw_lock(false, b, b->level);
		} while (b != c->root);

		for_each_key_filter(b, k, &iter, bch_ptr_bad)
			bytes += bkey_bytes(k);

		rw_unlock(false, b);

		return (bytes * 100) / btree_bytes(c);
	}

	size_t cache_size(struct cache_set *c)
	{
		size_t ret = 0;
		struct btree *b;

		mutex_lock(&c->bucket_lock);
		list_for_each_entry(b, &c->btree_cache, list)
			ret += 1 << (b->page_order + PAGE_SHIFT);

		mutex_unlock(&c->bucket_lock);
		return ret;
	}

	unsigned cache_max_chain(struct cache_set *c)
	{
		unsigned ret = 0;
		struct hlist_head *h;

		mutex_lock(&c->bucket_lock);

		for (h = c->bucket_hash;
		     h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
		     h++) {
			unsigned i = 0;
			struct hlist_node *p;

			hlist_for_each(p, h)
				i++;

			ret = max(ret, i);
		}

		mutex_unlock(&c->bucket_lock);
		return ret;
	}

	unsigned btree_used(struct cache_set *c)
	{
		return div64_u64(c->gc_stats.key_bytes * 100,
				 (c->gc_stats.nodes ?: 1) * btree_bytes(c));
	}

	unsigned average_key_size(struct cache_set *c)
	{
		return c->gc_stats.nkeys
			? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
			: 0;
	}

	struct cache_set *c = container_of(kobj, struct cache_set, kobj);

	sysfs_print(synchronous,		CACHE_SYNC(&c->sb));
	sysfs_print(journal_delay_ms,		c->journal_delay_ms);
	sysfs_hprint(bucket_size,		bucket_bytes(c));
	sysfs_hprint(block_size,		block_bytes(c));
	sysfs_print(tree_depth,			c->root->level);
	sysfs_print(root_usage_percent,		root_usage(c));

	sysfs_hprint(btree_cache_size,		cache_size(c));
	sysfs_print(btree_cache_max_chain,	cache_max_chain(c));
	sysfs_print(cache_available_percent,	100 - c->gc_stats.in_use);

	sysfs_print_time_stats(&c->btree_gc_time,	btree_gc, sec, ms);
	sysfs_print_time_stats(&c->btree_split_time,	btree_split, sec, us);
	sysfs_print_time_stats(&c->sort_time,		btree_sort, ms, us);
	sysfs_print_time_stats(&c->btree_read_time,	btree_read, ms, us);
	sysfs_print_time_stats(&c->try_harder_time,	try_harder, ms, us);

	sysfs_print(btree_used_percent,	btree_used(c));
	sysfs_print(btree_nodes,	c->gc_stats.nodes);
	sysfs_hprint(dirty_data,	c->gc_stats.dirty);
	sysfs_hprint(average_key_size,	average_key_size(c));

	sysfs_print(cache_read_races,
		    atomic_long_read(&c->cache_read_races));

	sysfs_print(writeback_keys_done,
		    atomic_long_read(&c->writeback_keys_done));
	sysfs_print(writeback_keys_failed,
		    atomic_long_read(&c->writeback_keys_failed));

	/* See count_io_errors() for why 88 */
	sysfs_print(io_error_halflife,	c->error_decay * 88);
	sysfs_print(io_error_limit,	c->error_limit >> IO_ERROR_SHIFT);

	sysfs_hprint(congested,
		     ((uint64_t) bch_get_congested(c)) << 9);
	sysfs_print(congested_read_threshold_us,
		    c->congested_read_threshold_us);
	sysfs_print(congested_write_threshold_us,
		    c->congested_write_threshold_us);

	sysfs_print(active_journal_entries,	fifo_used(&c->journal.pin));
	sysfs_printf(verify,			"%i", c->verify);
	sysfs_printf(key_merging_disabled,	"%i", c->key_merging_disabled);
	sysfs_printf(gc_always_rewrite,		"%i", c->gc_always_rewrite);
	sysfs_printf(btree_shrinker_disabled,	"%i", c->shrinker_disabled);
	sysfs_printf(copy_gc_enabled,		"%i", c->copy_gc_enabled);

	if (attr == &sysfs_bset_tree_stats)
		return bch_bset_print_stats(c, buf);

	return 0;
}
SHOW_LOCKED(bch_cache_set)

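/*
 * Cache set store: most writes are plain strtoul assignments; the
 * write-only trigger files (unregister, stop, trigger_gc, prune_cache,
 * flash_vol_create, clear_stats) kick off work instead of setting a
 * field.
 */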
STORE(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);

	if (attr == &sysfs_unregister)
		bch_cache_set_unregister(c);

	if (attr == &sysfs_stop)
		bch_cache_set_stop(c);

	if (attr == &sysfs_synchronous) {
		bool sync = strtoul_or_return(buf);

		if (sync != CACHE_SYNC(&c->sb)) {
			SET_CACHE_SYNC(&c->sb, sync);
			bcache_write_super(c);
		}
	}

	if (attr == &sysfs_flash_vol_create) {
		int r;
		uint64_t v;
		strtoi_h_or_return(buf, v);

		r = bch_flash_dev_create(c, v);
		if (r)
			return r;
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&c->writeback_keys_done,	0);
		atomic_long_set(&c->writeback_keys_failed,	0);

		memset(&c->gc_stats, 0, sizeof(struct gc_stat));
		bch_cache_accounting_clear(&c->accounting);
	}

	if (attr == &sysfs_trigger_gc)
		bch_queue_gc(c);

	if (attr == &sysfs_prune_cache) {
		struct shrink_control sc;
		sc.gfp_mask = GFP_KERNEL;
		sc.nr_to_scan = strtoul_or_return(buf);
		c->shrink.shrink(&c->shrink, &sc);
	}

	sysfs_strtoul(congested_read_threshold_us,
		      c->congested_read_threshold_us);
	sysfs_strtoul(congested_write_threshold_us,
		      c->congested_write_threshold_us);

	if (attr == &sysfs_io_error_limit)
		c->error_limit = strtoul_or_return(buf) << IO_ERROR_SHIFT;

	/* See count_io_errors() for why 88 */
	if (attr == &sysfs_io_error_halflife)
		c->error_decay = strtoul_or_return(buf) / 88;

	sysfs_strtoul(journal_delay_ms,		c->journal_delay_ms);
	sysfs_strtoul(verify,			c->verify);
	sysfs_strtoul(key_merging_disabled,	c->key_merging_disabled);
	sysfs_strtoul(gc_always_rewrite,	c->gc_always_rewrite);
	sysfs_strtoul(btree_shrinker_disabled,	c->shrinker_disabled);
	sysfs_strtoul(copy_gc_enabled,		c->copy_gc_enabled);

	return size;
}
STORE_LOCKED(bch_cache_set)

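/*
 * The "internal" directory is a second kobject embedded in struct
 * cache_set; its show/store simply redirect to the main kobject's
 * methods, so both directories share one implementation.
 */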
SHOW(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);
	return bch_cache_set_show(&c->kobj, attr, buf);
}

STORE(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);
	return bch_cache_set_store(&c->kobj, attr, buf, size);
}

static void bch_cache_set_internal_release(struct kobject *k)
{
}

static struct attribute *bch_cache_set_files[] = {
	&sysfs_unregister,
	&sysfs_stop,
	&sysfs_synchronous,
	&sysfs_journal_delay_ms,
	&sysfs_flash_vol_create,

	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_tree_depth,
	&sysfs_root_usage_percent,
	&sysfs_btree_cache_size,
	&sysfs_cache_available_percent,

	&sysfs_average_key_size,
	&sysfs_dirty_data,

	&sysfs_io_error_limit,
	&sysfs_io_error_halflife,
	&sysfs_congested,
	&sysfs_congested_read_threshold_us,
	&sysfs_congested_write_threshold_us,
	&sysfs_clear_stats,
	NULL
};
KTYPE(bch_cache_set);

static struct attribute *bch_cache_set_internal_files[] = {
	&sysfs_active_journal_entries,

	sysfs_time_stats_attribute_list(btree_gc, sec, ms)
	sysfs_time_stats_attribute_list(btree_split, sec, us)
	sysfs_time_stats_attribute_list(btree_sort, ms, us)
	sysfs_time_stats_attribute_list(btree_read, ms, us)
	sysfs_time_stats_attribute_list(try_harder, ms, us)

	&sysfs_btree_nodes,
	&sysfs_btree_used_percent,
	&sysfs_btree_cache_max_chain,

	&sysfs_bset_tree_stats,
	&sysfs_cache_read_races,
	&sysfs_writeback_keys_done,
	&sysfs_writeback_keys_failed,

	&sysfs_trigger_gc,
	&sysfs_prune_cache,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_key_merging_disabled,
#endif
	&sysfs_gc_always_rewrite,
	&sysfs_btree_shrinker_disabled,
	&sysfs_copy_gc_enabled,
	NULL
};
KTYPE(bch_cache_set_internal);

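/*
 * Per cache device (struct cache) statistics.  Sector counts are kept in
 * 512-byte sectors, hence the << 9 when printing byte totals.
 */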
SHOW(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);

	sysfs_hprint(bucket_size,	bucket_bytes(ca));
	sysfs_hprint(block_size,	block_bytes(ca));
	sysfs_print(nbuckets,		ca->sb.nbuckets);
	sysfs_print(discard,		ca->discard);
	sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9);
	sysfs_hprint(btree_written,
		     atomic_long_read(&ca->btree_sectors_written) << 9);
	sysfs_hprint(metadata_written,
		     (atomic_long_read(&ca->meta_sectors_written) +
		      atomic_long_read(&ca->btree_sectors_written)) << 9);

	sysfs_print(io_errors,
		    atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);

	sysfs_print(freelist_percent, ca->free.size * 100 /
		    ((size_t) ca->sb.nbuckets));

	if (attr == &sysfs_cache_replacement_policy)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       cache_replacement_policies,
					       CACHE_REPLACEMENT(&ca->sb));

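	/*
	 * priority_stats: snapshot every bucket's priority, sort
	 * descending, drop unused (zero priority) buckets from the tail
	 * and btree (BTREE_PRIO) buckets from the front, then report the
	 * average and nq evenly spaced quantiles of what remains.
	 */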
	if (attr == &sysfs_priority_stats) {
		int cmp(const void *l, const void *r)
		{	return *((uint16_t *) r) - *((uint16_t *) l); }

		/* Number of quantiles we compute */
		const unsigned nq = 31;

		size_t n = ca->sb.nbuckets, i, unused, btree;
		uint64_t sum = 0;
		uint16_t q[nq], *p, *cached;
		ssize_t ret;

		cached = p = vmalloc(ca->sb.nbuckets * sizeof(uint16_t));
		if (!p)
			return -ENOMEM;

		mutex_lock(&ca->set->bucket_lock);
		for (i = ca->sb.first_bucket; i < n; i++)
			p[i] = ca->buckets[i].prio;
		mutex_unlock(&ca->set->bucket_lock);

		sort(p, n, sizeof(uint16_t), cmp, NULL);

		while (n &&
		       !cached[n - 1])
			--n;

		unused = ca->sb.nbuckets - n;

		while (cached < p + n &&
		       *cached == BTREE_PRIO)
			cached++;

		btree = cached - p;
		n -= btree;

		for (i = 0; i < n; i++)
			sum += INITIAL_PRIO - cached[i];

		if (n)
			do_div(sum, n);

		for (i = 0; i < nq; i++)
			q[i] = INITIAL_PRIO - cached[n * (i + 1) / (nq + 1)];

		vfree(p);

		ret = snprintf(buf, PAGE_SIZE,
			       "Unused:		%zu%%\n"
			       "Metadata:	%zu%%\n"
			       "Average:	%llu\n"
			       "Sectors per Q:	%zu\n"
			       "Quantiles:	[",
			       unused * 100 / (size_t) ca->sb.nbuckets,
			       btree * 100 / (size_t) ca->sb.nbuckets, sum,
			       n * ca->sb.bucket_size / (nq + 1));

		for (i = 0; i < nq && ret < (ssize_t) PAGE_SIZE; i++)
			ret += snprintf(buf + ret, PAGE_SIZE - ret,
					i < nq - 1 ? "%u " : "%u]\n", q[i]);

		buf[PAGE_SIZE - 1] = '\0';
		return ret;
	}

	return 0;
}
SHOW_LOCKED(bch_cache)

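/*
 * Cache device store: discard and the replacement policy are persisted
 * in the superblock; freelist_percent resizes the free-bucket FIFO in
 * place under bucket_lock.
 */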
STORE(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);

	if (attr == &sysfs_discard) {
		bool v = strtoul_or_return(buf);

		if (blk_queue_discard(bdev_get_queue(ca->bdev)))
			ca->discard = v;

		if (v != CACHE_DISCARD(&ca->sb)) {
			SET_CACHE_DISCARD(&ca->sb, v);
			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_cache_replacement_policy) {
		ssize_t v = bch_read_string_list(buf, cache_replacement_policies);

		if (v < 0)
			return v;

		if ((unsigned) v != CACHE_REPLACEMENT(&ca->sb)) {
			mutex_lock(&ca->set->bucket_lock);
			SET_CACHE_REPLACEMENT(&ca->sb, v);
			mutex_unlock(&ca->set->bucket_lock);

			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_freelist_percent) {
		DECLARE_FIFO(long, free);
		long i;
		size_t p = strtoul_or_return(buf);

		p = clamp_t(size_t,
			    ((size_t) ca->sb.nbuckets * p) / 100,
			    roundup_pow_of_two(ca->sb.nbuckets) >> 9,
			    ca->sb.nbuckets / 2);

		if (!init_fifo_exact(&free, p, GFP_KERNEL))
			return -ENOMEM;

		mutex_lock(&ca->set->bucket_lock);

		fifo_move(&free, &ca->free);
		fifo_swap(&free, &ca->free);

		mutex_unlock(&ca->set->bucket_lock);

		while (fifo_pop(&free, i))
			atomic_dec(&ca->buckets[i].pin);

		free_fifo(&free);
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&ca->sectors_written, 0);
		atomic_long_set(&ca->btree_sectors_written, 0);
		atomic_long_set(&ca->meta_sectors_written, 0);
		atomic_set(&ca->io_count, 0);
		atomic_set(&ca->io_errors, 0);
	}

	return size;
}
STORE_LOCKED(bch_cache)

static struct attribute *bch_cache_files[] = {
	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_nbuckets,
	&sysfs_priority_stats,
	&sysfs_discard,
	&sysfs_written,
	&sysfs_btree_written,
	&sysfs_metadata_written,
	&sysfs_io_errors,
	&sysfs_clear_stats,
	&sysfs_freelist_percent,
	&sysfs_cache_replacement_policy,
	NULL
};
KTYPE(bch_cache);
818