// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014 Red Hat, Inc.
 * All Rights Reserved.
 */

#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sysfs.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_mount.h"

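/*
 * XFS-specific sysfs attribute: wraps struct attribute with show/store
 * callbacks that only need the enclosing kobject, so the individual
 * attribute handlers below stay small.
 */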
struct xfs_sysfs_attr {
	struct attribute attr;
	ssize_t (*show)(struct kobject *kobject, char *buf);
	ssize_t (*store)(struct kobject *kobject, const char *buf,
			 size_t count);
};

static inline struct xfs_sysfs_attr *
to_attr(struct attribute *attr)
{
	return container_of(attr, struct xfs_sysfs_attr, attr);
}

#define XFS_SYSFS_ATTR_RW(name) \
	static struct xfs_sysfs_attr xfs_sysfs_attr_##name = __ATTR_RW(name)
#define XFS_SYSFS_ATTR_RO(name) \
	static struct xfs_sysfs_attr xfs_sysfs_attr_##name = __ATTR_RO(name)
#define XFS_SYSFS_ATTR_WO(name) \
	static struct xfs_sysfs_attr xfs_sysfs_attr_##name = __ATTR_WO(name)

#define ATTR_LIST(name) &xfs_sysfs_attr_##name.attr

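/*
 * Generic sysfs_ops handlers: look up the xfs_sysfs_attr behind the raw
 * attribute and dispatch to its ->show/->store callback, or return 0 if
 * the callback is not implemented.
 */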
STATIC ssize_t
xfs_sysfs_object_show(
	struct kobject		*kobject,
	struct attribute	*attr,
	char			*buf)
{
	struct xfs_sysfs_attr *xfs_attr = to_attr(attr);

	return xfs_attr->show ? xfs_attr->show(kobject, buf) : 0;
}

STATIC ssize_t
xfs_sysfs_object_store(
	struct kobject		*kobject,
	struct attribute	*attr,
	const char		*buf,
	size_t			count)
{
	struct xfs_sysfs_attr *xfs_attr = to_attr(attr);

	return xfs_attr->store ? xfs_attr->store(kobject, buf, count) : 0;
}

static const struct sysfs_ops xfs_sysfs_ops = {
	.show = xfs_sysfs_object_show,
	.store = xfs_sysfs_object_store,
};

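/*
 * The per-mount kobject exports no attributes of its own; it exists to
 * provide the per-device directory that child kobjects (e.g. the error/
 * tree set up below) are attached to.
 */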
static struct attribute *xfs_mp_attrs[] = {
	NULL,
};

struct kobj_type xfs_mp_ktype = {
	.release = xfs_sysfs_release,
	.sysfs_ops = &xfs_sysfs_ops,
	.default_attrs = xfs_mp_attrs,
};

#ifdef DEBUG
/* debug */
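/*
 * Global debug tunables, exported under /sys/fs/xfs/debug/ on
 * CONFIG_XFS_DEBUG kernels.
 */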

STATIC ssize_t
bug_on_assert_store(
	struct kobject		*kobject,
	const char		*buf,
	size_t			count)
{
	int			ret;
	int			val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	if (val == 1)
		xfs_globals.bug_on_assert = true;
	else if (val == 0)
		xfs_globals.bug_on_assert = false;
	else
		return -EINVAL;

	return count;
}

STATIC ssize_t
bug_on_assert_show(
	struct kobject		*kobject,
	char			*buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", xfs_globals.bug_on_assert ? 1 : 0);
}
XFS_SYSFS_ATTR_RW(bug_on_assert);

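/* Artificial delay, in seconds (0-60), applied before log recovery runs. */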
STATIC ssize_t
log_recovery_delay_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	int		ret;
	int		val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	if (val < 0 || val > 60)
		return -EINVAL;

	xfs_globals.log_recovery_delay = val;

	return count;
}

STATIC ssize_t
log_recovery_delay_show(
	struct kobject	*kobject,
	char		*buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", xfs_globals.log_recovery_delay);
}
XFS_SYSFS_ATTR_RW(log_recovery_delay);

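/* Artificial delay, in seconds (0-60), applied during mount (debug aid). */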
STATIC ssize_t
mount_delay_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	int		ret;
	int		val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	if (val < 0 || val > 60)
		return -EINVAL;

	xfs_globals.mount_delay = val;

	return count;
}

STATIC ssize_t
mount_delay_show(
	struct kobject	*kobject,
	char		*buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", xfs_globals.mount_delay);
}
XFS_SYSFS_ATTR_RW(mount_delay);

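/*
 * Debug switch that forces copy-on-write for all writes, not just writes
 * to shared extents.  Parsed with kstrtobool(), so the usual boolean
 * strings are accepted.
 */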
static ssize_t
always_cow_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	ssize_t		ret;

	ret = kstrtobool(buf, &xfs_globals.always_cow);
	if (ret < 0)
		return ret;
	return count;
}

static ssize_t
always_cow_show(
	struct kobject	*kobject,
	char		*buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", xfs_globals.always_cow);
}
XFS_SYSFS_ATTR_RW(always_cow);

#ifdef DEBUG
/*
 * Override how many threads the parallel work queue is allowed to create.
 * This has to be a debug-only global (instead of an errortag) because one of
 * the main users of parallel workqueues is mount time quotacheck.
 */
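/* -1 (no override) through num_possible_cpus() are accepted below. */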
STATIC ssize_t
pwork_threads_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	int		ret;
	int		val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	if (val < -1 || val > num_possible_cpus())
		return -EINVAL;

	xfs_globals.pwork_threads = val;

	return count;
}

STATIC ssize_t
pwork_threads_show(
	struct kobject	*kobject,
	char		*buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", xfs_globals.pwork_threads);
}
XFS_SYSFS_ATTR_RW(pwork_threads);
#endif /* DEBUG */

static struct attribute *xfs_dbg_attrs[] = {
	ATTR_LIST(bug_on_assert),
	ATTR_LIST(log_recovery_delay),
	ATTR_LIST(mount_delay),
	ATTR_LIST(always_cow),
#ifdef DEBUG
	ATTR_LIST(pwork_threads),
#endif
	NULL,
};

struct kobj_type xfs_dbg_ktype = {
	.release = xfs_sysfs_release,
	.sysfs_ops = &xfs_sysfs_ops,
	.default_attrs = xfs_dbg_attrs,
};

#endif /* DEBUG */

/* stats */
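/*
 * "stats" dumps the statistics counters via xfs_stats_format(); writing 1
 * to "stats_clear" resets them.
 */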

static inline struct xstats *
to_xstats(struct kobject *kobject)
{
	struct xfs_kobj *kobj = to_kobj(kobject);

	return container_of(kobj, struct xstats, xs_kobj);
}

STATIC ssize_t
stats_show(
	struct kobject	*kobject,
	char		*buf)
{
	struct xstats	*stats = to_xstats(kobject);

	return xfs_stats_format(stats->xs_stats, buf);
}
XFS_SYSFS_ATTR_RO(stats);

STATIC ssize_t
stats_clear_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	int		ret;
	int		val;
	struct xstats	*stats = to_xstats(kobject);

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	if (val != 1)
		return -EINVAL;

	xfs_stats_clearall(stats->xs_stats);
	return count;
}
XFS_SYSFS_ATTR_WO(stats_clear);

static struct attribute *xfs_stats_attrs[] = {
	ATTR_LIST(stats),
	ATTR_LIST(stats_clear),
	NULL,
};

struct kobj_type xfs_stats_ktype = {
	.release = xfs_sysfs_release,
	.sysfs_ops = &xfs_sysfs_ops,
	.default_attrs = xfs_stats_attrs,
};

/* xlog */
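/*
 * Read-only log position attributes.  Values are reported as
 * "cycle:block" (head/tail) or "cycle:bytes" (grant heads).
 */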

static inline struct xlog *
to_xlog(struct kobject *kobject)
{
	struct xfs_kobj *kobj = to_kobj(kobject);

	return container_of(kobj, struct xlog, l_kobj);
}

STATIC ssize_t
log_head_lsn_show(
	struct kobject	*kobject,
	char		*buf)
{
	int cycle;
	int block;
	struct xlog *log = to_xlog(kobject);

	spin_lock(&log->l_icloglock);
	cycle = log->l_curr_cycle;
	block = log->l_curr_block;
	spin_unlock(&log->l_icloglock);

	return snprintf(buf, PAGE_SIZE, "%d:%d\n", cycle, block);
}
XFS_SYSFS_ATTR_RO(log_head_lsn);

STATIC ssize_t
log_tail_lsn_show(
	struct kobject	*kobject,
	char		*buf)
{
	int cycle;
	int block;
	struct xlog *log = to_xlog(kobject);

	xlog_crack_atomic_lsn(&log->l_tail_lsn, &cycle, &block);
	return snprintf(buf, PAGE_SIZE, "%d:%d\n", cycle, block);
}
XFS_SYSFS_ATTR_RO(log_tail_lsn);

STATIC ssize_t
reserve_grant_head_show(
	struct kobject	*kobject,
	char		*buf)
{
	int cycle;
	int bytes;
	struct xlog *log = to_xlog(kobject);

	xlog_crack_grant_head(&log->l_reserve_head.grant, &cycle, &bytes);
	return snprintf(buf, PAGE_SIZE, "%d:%d\n", cycle, bytes);
}
XFS_SYSFS_ATTR_RO(reserve_grant_head);

STATIC ssize_t
write_grant_head_show(
	struct kobject	*kobject,
	char		*buf)
{
	int cycle;
	int bytes;
	struct xlog *log = to_xlog(kobject);

	xlog_crack_grant_head(&log->l_write_head.grant, &cycle, &bytes);
	return snprintf(buf, PAGE_SIZE, "%d:%d\n", cycle, bytes);
}
XFS_SYSFS_ATTR_RO(write_grant_head);

static struct attribute *xfs_log_attrs[] = {
	ATTR_LIST(log_head_lsn),
	ATTR_LIST(log_tail_lsn),
	ATTR_LIST(reserve_grant_head),
	ATTR_LIST(write_grant_head),
	NULL,
};

struct kobj_type xfs_log_ktype = {
	.release = xfs_sysfs_release,
	.sysfs_ops = &xfs_sysfs_ops,
	.default_attrs = xfs_log_attrs,
};

/*
 * Metadata IO error configuration
 *
 * The sysfs structure here is:
 *	...xfs/<dev>/error/<class>/<errno>/<error_attrs>
 *
 * where <class> allows us to discriminate between data IO and metadata IO,
 * and any other future type of IO (e.g. special inode or directory error
 * handling) we care to support.
 */
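/*
 * With the metadata class and errno table defined below, this yields
 * paths such as /sys/fs/xfs/<dev>/error/metadata/EIO/max_retries.
 */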
static inline struct xfs_error_cfg *
to_error_cfg(struct kobject *kobject)
{
	struct xfs_kobj *kobj = to_kobj(kobject);
	return container_of(kobj, struct xfs_error_cfg, kobj);
}

static inline struct xfs_mount *
err_to_mp(struct kobject *kobject)
{
	struct xfs_kobj *kobj = to_kobj(kobject);
	return container_of(kobj, struct xfs_mount, m_error_kobj);
}

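/*
 * max_retries: -1 maps to XFS_ERR_RETRY_FOREVER on both show and store;
 * other negative values are rejected.
 */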
static ssize_t
max_retries_show(
	struct kobject	*kobject,
	char		*buf)
{
	int		retries;
	struct xfs_error_cfg *cfg = to_error_cfg(kobject);

	if (cfg->max_retries == XFS_ERR_RETRY_FOREVER)
		retries = -1;
	else
		retries = cfg->max_retries;

	return snprintf(buf, PAGE_SIZE, "%d\n", retries);
}

static ssize_t
max_retries_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	struct xfs_error_cfg *cfg = to_error_cfg(kobject);
	int		ret;
	int		val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	if (val < -1)
		return -EINVAL;

	if (val == -1)
		cfg->max_retries = XFS_ERR_RETRY_FOREVER;
	else
		cfg->max_retries = val;
	return count;
}
XFS_SYSFS_ATTR_RW(max_retries);

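/*
 * retry_timeout_seconds: -1 means retry forever, otherwise 0-86400 seconds
 * (one day); the value is stored internally in jiffies.
 */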
static ssize_t
retry_timeout_seconds_show(
	struct kobject	*kobject,
	char		*buf)
{
	int		timeout;
	struct xfs_error_cfg *cfg = to_error_cfg(kobject);

	if (cfg->retry_timeout == XFS_ERR_RETRY_FOREVER)
		timeout = -1;
	else
		timeout = jiffies_to_msecs(cfg->retry_timeout) / MSEC_PER_SEC;

	return snprintf(buf, PAGE_SIZE, "%d\n", timeout);
}

static ssize_t
retry_timeout_seconds_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	struct xfs_error_cfg *cfg = to_error_cfg(kobject);
	int		ret;
	int		val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	/* 1 day timeout maximum, -1 means infinite */
	if (val < -1 || val > 86400)
		return -EINVAL;

	if (val == -1)
		cfg->retry_timeout = XFS_ERR_RETRY_FOREVER;
	else {
		cfg->retry_timeout = msecs_to_jiffies(val * MSEC_PER_SEC);
		ASSERT(msecs_to_jiffies(val * MSEC_PER_SEC) < LONG_MAX);
	}
	return count;
}
XFS_SYSFS_ATTR_RW(retry_timeout_seconds);

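/*
 * fail_at_unmount: boolean.  When set, error retries are abandoned at
 * unmount time so a persistent I/O error cannot hang the unmount.
 */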
static ssize_t
fail_at_unmount_show(
	struct kobject	*kobject,
	char		*buf)
{
	struct xfs_mount	*mp = err_to_mp(kobject);

	return snprintf(buf, PAGE_SIZE, "%d\n", mp->m_fail_unmount);
}

static ssize_t
fail_at_unmount_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	struct xfs_mount	*mp = err_to_mp(kobject);
	int		ret;
	int		val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	if (val < 0 || val > 1)
		return -EINVAL;

	mp->m_fail_unmount = val;
	return count;
}
XFS_SYSFS_ATTR_RW(fail_at_unmount);

static struct attribute *xfs_error_attrs[] = {
	ATTR_LIST(max_retries),
	ATTR_LIST(retry_timeout_seconds),
	NULL,
};

static struct kobj_type xfs_error_cfg_ktype = {
	.release = xfs_sysfs_release,
	.sysfs_ops = &xfs_sysfs_ops,
	.default_attrs = xfs_error_attrs,
};

static struct kobj_type xfs_error_ktype = {
	.release = xfs_sysfs_release,
	.sysfs_ops = &xfs_sysfs_ops,
};

/*
 * Error initialization tables.  These must be in the same order as the
 * enums used to index the array.  All class init tables need to define a
 * "default" behaviour as the first entry; all other entries can be empty.
 */
struct xfs_error_init {
	char		*name;
	int		max_retries;
	int		retry_timeout;	/* in seconds */
};

static const struct xfs_error_init xfs_error_meta_init[XFS_ERR_ERRNO_MAX] = {
	{ .name = "default",
	  .max_retries = XFS_ERR_RETRY_FOREVER,
	  .retry_timeout = XFS_ERR_RETRY_FOREVER,
	},
	{ .name = "EIO",
	  .max_retries = XFS_ERR_RETRY_FOREVER,
	  .retry_timeout = XFS_ERR_RETRY_FOREVER,
	},
	{ .name = "ENOSPC",
	  .max_retries = XFS_ERR_RETRY_FOREVER,
	  .retry_timeout = XFS_ERR_RETRY_FOREVER,
	},
	{ .name = "ENODEV",
	  .max_retries = 0,	/* We can't recover from devices disappearing */
	  .retry_timeout = 0,
	},
};

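/*
 * Create the kobject for one error class under error/ and one child kobject
 * per errno, seeding each config from the init table.  On failure, unwind
 * whatever was created.
 */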
static int
xfs_error_sysfs_init_class(
	struct xfs_mount	*mp,
	int			class,
	const char		*parent_name,
	struct xfs_kobj		*parent_kobj,
	const struct xfs_error_init init[])
{
	struct xfs_error_cfg	*cfg;
	int			error;
	int			i;

	ASSERT(class < XFS_ERR_CLASS_MAX);

	error = xfs_sysfs_init(parent_kobj, &xfs_error_ktype,
				&mp->m_error_kobj, parent_name);
	if (error)
		return error;

	for (i = 0; i < XFS_ERR_ERRNO_MAX; i++) {
		cfg = &mp->m_error_cfg[class][i];
		error = xfs_sysfs_init(&cfg->kobj, &xfs_error_cfg_ktype,
					parent_kobj, init[i].name);
		if (error)
			goto out_error;

		cfg->max_retries = init[i].max_retries;
		if (init[i].retry_timeout == XFS_ERR_RETRY_FOREVER)
			cfg->retry_timeout = XFS_ERR_RETRY_FOREVER;
		else
			cfg->retry_timeout = msecs_to_jiffies(
					init[i].retry_timeout * MSEC_PER_SEC);
	}
	return 0;

out_error:
	/* unwind the entries that succeeded */
	for (i--; i >= 0; i--) {
		cfg = &mp->m_error_cfg[class][i];
		xfs_sysfs_del(&cfg->kobj);
	}
	xfs_sysfs_del(parent_kobj);
	return error;
}

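/*
 * Set up .../xfs/<dev>/error/, the fail_at_unmount attribute and the
 * metadata error class.
 */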
int
xfs_error_sysfs_init(
	struct xfs_mount	*mp)
{
	int			error;

	/* .../xfs/<dev>/error/ */
	error = xfs_sysfs_init(&mp->m_error_kobj, &xfs_error_ktype,
				&mp->m_kobj, "error");
	if (error)
		return error;

	error = sysfs_create_file(&mp->m_error_kobj.kobject,
				  ATTR_LIST(fail_at_unmount));
	if (error)
		goto out_error;

	/* .../xfs/<dev>/error/metadata/ */
	error = xfs_error_sysfs_init_class(mp, XFS_ERR_METADATA,
				"metadata", &mp->m_error_meta_kobj,
				xfs_error_meta_init);
	if (error)
		goto out_error;

	return 0;

out_error:
	xfs_sysfs_del(&mp->m_error_kobj);
	return error;
}

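/*
 * Tear down everything xfs_error_sysfs_init() created: every per-errno
 * config kobject, then the class and error directories.
 */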
void
xfs_error_sysfs_del(
	struct xfs_mount	*mp)
{
	struct xfs_error_cfg	*cfg;
	int			i, j;

	for (i = 0; i < XFS_ERR_CLASS_MAX; i++) {
		for (j = 0; j < XFS_ERR_ERRNO_MAX; j++) {
			cfg = &mp->m_error_cfg[i][j];

			xfs_sysfs_del(&cfg->kobj);
		}
	}
	xfs_sysfs_del(&mp->m_error_meta_kobj);
	xfs_sysfs_del(&mp->m_error_kobj);
}

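/*
 * Map an errno (positive or negative) to the matching per-class error
 * config; unrecognised errnos fall back to the default entry.
 */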
struct xfs_error_cfg *
xfs_error_get_cfg(
	struct xfs_mount	*mp,
	int			error_class,
	int			error)
{
	struct xfs_error_cfg	*cfg;

	if (error < 0)
		error = -error;

	switch (error) {
	case EIO:
		cfg = &mp->m_error_cfg[error_class][XFS_ERR_EIO];
		break;
	case ENOSPC:
		cfg = &mp->m_error_cfg[error_class][XFS_ERR_ENOSPC];
		break;
	case ENODEV:
		cfg = &mp->m_error_cfg[error_class][XFS_ERR_ENODEV];
		break;
	default:
		cfg = &mp->m_error_cfg[error_class][XFS_ERR_DEFAULT];
		break;
	}

	return cfg;
}