xref: /openbmc/linux/fs/xfs/xfs_sysfs.c (revision b830f94f)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014 Red Hat, Inc.
 * All Rights Reserved.
 */

#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sysfs.h"
#include "xfs_log_priv.h"
#include "xfs_mount.h"

struct xfs_sysfs_attr {
	struct attribute attr;
	ssize_t (*show)(struct kobject *kobject, char *buf);
	ssize_t (*store)(struct kobject *kobject, const char *buf,
			 size_t count);
};

static inline struct xfs_sysfs_attr *
to_attr(struct attribute *attr)
{
	return container_of(attr, struct xfs_sysfs_attr, attr);
}

#define XFS_SYSFS_ATTR_RW(name) \
	static struct xfs_sysfs_attr xfs_sysfs_attr_##name = __ATTR_RW(name)
#define XFS_SYSFS_ATTR_RO(name) \
	static struct xfs_sysfs_attr xfs_sysfs_attr_##name = __ATTR_RO(name)
#define XFS_SYSFS_ATTR_WO(name) \
	static struct xfs_sysfs_attr xfs_sysfs_attr_##name = __ATTR_WO(name)

#define ATTR_LIST(name) &xfs_sysfs_attr_##name.attr
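
/*
 * Note: __ATTR_RW(name) (from <linux/sysfs.h>) names the attribute "name"
 * and points its .show/.store at name##_show()/name##_store(); __ATTR_RO and
 * __ATTR_WO wire up only the corresponding half. So, for example,
 * XFS_SYSFS_ATTR_RW(bug_on_assert) below picks up bug_on_assert_show() and
 * bug_on_assert_store() automatically.
 */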

STATIC ssize_t
xfs_sysfs_object_show(
	struct kobject		*kobject,
	struct attribute	*attr,
	char			*buf)
{
	struct xfs_sysfs_attr *xfs_attr = to_attr(attr);

	return xfs_attr->show ? xfs_attr->show(kobject, buf) : 0;
}

STATIC ssize_t
xfs_sysfs_object_store(
	struct kobject		*kobject,
	struct attribute	*attr,
	const char		*buf,
	size_t			count)
{
	struct xfs_sysfs_attr *xfs_attr = to_attr(attr);

	return xfs_attr->store ? xfs_attr->store(kobject, buf, count) : 0;
}

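/*
 * Shared sysfs_ops for every XFS kobj_type in this file: sysfs hands us the
 * generic attribute, to_attr() recovers the containing xfs_sysfs_attr, and
 * its kobject-only show/store handler (if any) is called.
 */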
static const struct sysfs_ops xfs_sysfs_ops = {
	.show = xfs_sysfs_object_show,
	.store = xfs_sysfs_object_store,
};

/*
 * xfs_mount kobject. The mp kobject also serves as the per-mount parent object
 * that is identified by the fsname under sysfs.
 */

static inline struct xfs_mount *
to_mp(struct kobject *kobject)
{
	struct xfs_kobj *kobj = to_kobj(kobject);

	return container_of(kobj, struct xfs_mount, m_kobj);
}

static struct attribute *xfs_mp_attrs[] = {
	NULL,
};

struct kobj_type xfs_mp_ktype = {
	.release = xfs_sysfs_release,
	.sysfs_ops = &xfs_sysfs_ops,
	.default_attrs = xfs_mp_attrs,
};
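
/*
 * In practice the mp kobject shows up as /sys/fs/xfs/<fsname>/ (e.g.
 * /sys/fs/xfs/sda1/, assuming sysfs is mounted at /sys), and the per-mount
 * log, error and stats kobjects below are created underneath it.
 */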

#ifdef DEBUG
/* debug */

STATIC ssize_t
bug_on_assert_store(
	struct kobject		*kobject,
	const char		*buf,
	size_t			count)
{
	int			ret;
	int			val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	if (val == 1)
		xfs_globals.bug_on_assert = true;
	else if (val == 0)
		xfs_globals.bug_on_assert = false;
	else
		return -EINVAL;

	return count;
}

STATIC ssize_t
bug_on_assert_show(
	struct kobject		*kobject,
	char			*buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", xfs_globals.bug_on_assert ? 1 : 0);
}
XFS_SYSFS_ATTR_RW(bug_on_assert);
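
/*
 * Illustrative usage: on a CONFIG_XFS_DEBUG kernel these global knobs are
 * typically exposed under /sys/fs/xfs/debug/, so
 * "echo 0 > /sys/fs/xfs/debug/bug_on_assert" makes failed ASSERTs warn
 * instead of calling BUG().
 */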

STATIC ssize_t
log_recovery_delay_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	int		ret;
	int		val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	if (val < 0 || val > 60)
		return -EINVAL;

	xfs_globals.log_recovery_delay = val;

	return count;
}

STATIC ssize_t
log_recovery_delay_show(
	struct kobject	*kobject,
	char		*buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", xfs_globals.log_recovery_delay);
}
XFS_SYSFS_ATTR_RW(log_recovery_delay);

STATIC ssize_t
mount_delay_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	int		ret;
	int		val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	if (val < 0 || val > 60)
		return -EINVAL;

	xfs_globals.mount_delay = val;

	return count;
}

STATIC ssize_t
mount_delay_show(
	struct kobject	*kobject,
	char		*buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", xfs_globals.mount_delay);
}
XFS_SYSFS_ATTR_RW(mount_delay);

static ssize_t
always_cow_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	ssize_t		ret;

	ret = kstrtobool(buf, &xfs_globals.always_cow);
	if (ret < 0)
		return ret;
	return count;
}

static ssize_t
always_cow_show(
	struct kobject	*kobject,
	char		*buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", xfs_globals.always_cow);
}
XFS_SYSFS_ATTR_RW(always_cow);

#ifdef DEBUG
/*
 * Override how many threads the parallel work queue is allowed to create.
 * This has to be a debug-only global (instead of an errortag) because one of
 * the main users of parallel workqueues is mount time quotacheck.
 */
STATIC ssize_t
pwork_threads_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	int		ret;
	int		val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	if (val < -1 || val > num_possible_cpus())
		return -EINVAL;

	xfs_globals.pwork_threads = val;

	return count;
}

STATIC ssize_t
pwork_threads_show(
	struct kobject	*kobject,
	char		*buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", xfs_globals.pwork_threads);
}
XFS_SYSFS_ATTR_RW(pwork_threads);
#endif /* DEBUG */

static struct attribute *xfs_dbg_attrs[] = {
	ATTR_LIST(bug_on_assert),
	ATTR_LIST(log_recovery_delay),
	ATTR_LIST(mount_delay),
	ATTR_LIST(always_cow),
#ifdef DEBUG
	ATTR_LIST(pwork_threads),
#endif
	NULL,
};

struct kobj_type xfs_dbg_ktype = {
	.release = xfs_sysfs_release,
	.sysfs_ops = &xfs_sysfs_ops,
	.default_attrs = xfs_dbg_attrs,
};

#endif /* DEBUG */

/* stats */

static inline struct xstats *
to_xstats(struct kobject *kobject)
{
	struct xfs_kobj *kobj = to_kobj(kobject);

	return container_of(kobj, struct xstats, xs_kobj);
}

STATIC ssize_t
stats_show(
	struct kobject	*kobject,
	char		*buf)
{
	struct xstats	*stats = to_xstats(kobject);

	return xfs_stats_format(stats->xs_stats, buf);
}
XFS_SYSFS_ATTR_RO(stats);

STATIC ssize_t
stats_clear_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	int		ret;
	int		val;
	struct xstats	*stats = to_xstats(kobject);

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	if (val != 1)
		return -EINVAL;

	xfs_stats_clearall(stats->xs_stats);
	return count;
}
XFS_SYSFS_ATTR_WO(stats_clear);

static struct attribute *xfs_stats_attrs[] = {
	ATTR_LIST(stats),
	ATTR_LIST(stats_clear),
	NULL,
};

struct kobj_type xfs_stats_ktype = {
	.release = xfs_sysfs_release,
	.sysfs_ops = &xfs_sysfs_ops,
	.default_attrs = xfs_stats_attrs,
};
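
/*
 * This ktype is used for both the global statistics (/sys/fs/xfs/stats/) and
 * the per-mount copy (/sys/fs/xfs/<fsname>/stats/). For example,
 * "echo 1 > /sys/fs/xfs/stats/stats_clear" zeroes the counters reported by
 * the read-only "stats" file.
 */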

/* xlog */

static inline struct xlog *
to_xlog(struct kobject *kobject)
{
	struct xfs_kobj *kobj = to_kobj(kobject);

	return container_of(kobj, struct xlog, l_kobj);
}

STATIC ssize_t
log_head_lsn_show(
	struct kobject	*kobject,
	char		*buf)
{
	int cycle;
	int block;
	struct xlog *log = to_xlog(kobject);

	spin_lock(&log->l_icloglock);
	cycle = log->l_curr_cycle;
	block = log->l_curr_block;
	spin_unlock(&log->l_icloglock);

	return snprintf(buf, PAGE_SIZE, "%d:%d\n", cycle, block);
}
XFS_SYSFS_ATTR_RO(log_head_lsn);

STATIC ssize_t
log_tail_lsn_show(
	struct kobject	*kobject,
	char		*buf)
{
	int cycle;
	int block;
	struct xlog *log = to_xlog(kobject);

	xlog_crack_atomic_lsn(&log->l_tail_lsn, &cycle, &block);
	return snprintf(buf, PAGE_SIZE, "%d:%d\n", cycle, block);
}
XFS_SYSFS_ATTR_RO(log_tail_lsn);

STATIC ssize_t
reserve_grant_head_show(
	struct kobject	*kobject,
	char		*buf)
{
	int cycle;
	int bytes;
	struct xlog *log = to_xlog(kobject);

	xlog_crack_grant_head(&log->l_reserve_head.grant, &cycle, &bytes);
	return snprintf(buf, PAGE_SIZE, "%d:%d\n", cycle, bytes);
}
XFS_SYSFS_ATTR_RO(reserve_grant_head);

STATIC ssize_t
write_grant_head_show(
	struct kobject	*kobject,
	char		*buf)
{
	int cycle;
	int bytes;
	struct xlog *log = to_xlog(kobject);

	xlog_crack_grant_head(&log->l_write_head.grant, &cycle, &bytes);
	return snprintf(buf, PAGE_SIZE, "%d:%d\n", cycle, bytes);
}
XFS_SYSFS_ATTR_RO(write_grant_head);

static struct attribute *xfs_log_attrs[] = {
	ATTR_LIST(log_head_lsn),
	ATTR_LIST(log_tail_lsn),
	ATTR_LIST(reserve_grant_head),
	ATTR_LIST(write_grant_head),
	NULL,
};

struct kobj_type xfs_log_ktype = {
	.release = xfs_sysfs_release,
	.sysfs_ops = &xfs_sysfs_ops,
	.default_attrs = xfs_log_attrs,
};
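
/*
 * These read-only attributes appear under /sys/fs/xfs/<fsname>/log/. Each
 * file reports a "cycle:block" pair (or "cycle:bytes" for the grant heads);
 * e.g. "cat /sys/fs/xfs/sda1/log/log_head_lsn" might print "1:2" on a
 * freshly mounted filesystem.
 */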

/*
 * Metadata IO error configuration
 *
 * The sysfs structure here is:
 *	...xfs/<dev>/error/<class>/<errno>/<error_attrs>
 *
 * where <class> allows us to discriminate between data IO and metadata IO,
 * and any other future type of IO (e.g. special inode or directory error
 * handling) we care to support.
 */
static inline struct xfs_error_cfg *
to_error_cfg(struct kobject *kobject)
{
	struct xfs_kobj *kobj = to_kobj(kobject);
	return container_of(kobj, struct xfs_error_cfg, kobj);
}

static inline struct xfs_mount *
err_to_mp(struct kobject *kobject)
{
	struct xfs_kobj *kobj = to_kobj(kobject);
	return container_of(kobj, struct xfs_mount, m_error_kobj);
}

static ssize_t
max_retries_show(
	struct kobject	*kobject,
	char		*buf)
{
	int		retries;
	struct xfs_error_cfg *cfg = to_error_cfg(kobject);

	if (cfg->max_retries == XFS_ERR_RETRY_FOREVER)
		retries = -1;
	else
		retries = cfg->max_retries;

	return snprintf(buf, PAGE_SIZE, "%d\n", retries);
}

static ssize_t
max_retries_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	struct xfs_error_cfg *cfg = to_error_cfg(kobject);
	int		ret;
	int		val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	if (val < -1)
		return -EINVAL;

	if (val == -1)
		cfg->max_retries = XFS_ERR_RETRY_FOREVER;
	else
		cfg->max_retries = val;
	return count;
}
XFS_SYSFS_ATTR_RW(max_retries);

static ssize_t
retry_timeout_seconds_show(
	struct kobject	*kobject,
	char		*buf)
{
	int		timeout;
	struct xfs_error_cfg *cfg = to_error_cfg(kobject);

	if (cfg->retry_timeout == XFS_ERR_RETRY_FOREVER)
		timeout = -1;
	else
		timeout = jiffies_to_msecs(cfg->retry_timeout) / MSEC_PER_SEC;

	return snprintf(buf, PAGE_SIZE, "%d\n", timeout);
}

static ssize_t
retry_timeout_seconds_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	struct xfs_error_cfg *cfg = to_error_cfg(kobject);
	int		ret;
	int		val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	/* 1 day timeout maximum, -1 means infinite */
	if (val < -1 || val > 86400)
		return -EINVAL;

	if (val == -1)
		cfg->retry_timeout = XFS_ERR_RETRY_FOREVER;
	else {
		cfg->retry_timeout = msecs_to_jiffies(val * MSEC_PER_SEC);
		ASSERT(msecs_to_jiffies(val * MSEC_PER_SEC) < LONG_MAX);
	}
	return count;
}
XFS_SYSFS_ATTR_RW(retry_timeout_seconds);
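
/*
 * Writing -1 to either file selects XFS_ERR_RETRY_FOREVER. Both limits are
 * consulted by the metadata buffer I/O error handling in xfs_buf.c: a failed
 * write stops being retried once the retry count exceeds max_retries or the
 * retry_timeout expires, whichever happens first.
 */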

static ssize_t
fail_at_unmount_show(
	struct kobject	*kobject,
	char		*buf)
{
	struct xfs_mount	*mp = err_to_mp(kobject);

	return snprintf(buf, PAGE_SIZE, "%d\n", mp->m_fail_unmount);
}

static ssize_t
fail_at_unmount_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	struct xfs_mount	*mp = err_to_mp(kobject);
	int		ret;
	int		val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	if (val < 0 || val > 1)
		return -EINVAL;

	mp->m_fail_unmount = val;
	return count;
}
XFS_SYSFS_ATTR_RW(fail_at_unmount);
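
/*
 * fail_at_unmount sits directly in the error directory; e.g.
 * "echo 1 > /sys/fs/xfs/sda1/error/fail_at_unmount" tells XFS to stop
 * retrying failed metadata writes once unmount starts, so the unmount can
 * complete rather than hang.
 */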

static struct attribute *xfs_error_attrs[] = {
	ATTR_LIST(max_retries),
	ATTR_LIST(retry_timeout_seconds),
	NULL,
};

static struct kobj_type xfs_error_cfg_ktype = {
	.release = xfs_sysfs_release,
	.sysfs_ops = &xfs_sysfs_ops,
	.default_attrs = xfs_error_attrs,
};

static struct kobj_type xfs_error_ktype = {
	.release = xfs_sysfs_release,
	.sysfs_ops = &xfs_sysfs_ops,
};

/*
 * Error initialization tables. These need to be kept in the same order as
 * the enums used to index the array. All class init tables need to define a
 * "default" behaviour as the first entry; all other entries can be empty.
 */
struct xfs_error_init {
	char		*name;
	int		max_retries;
	int		retry_timeout;	/* in seconds */
};

static const struct xfs_error_init xfs_error_meta_init[XFS_ERR_ERRNO_MAX] = {
	{ .name = "default",
	  .max_retries = XFS_ERR_RETRY_FOREVER,
	  .retry_timeout = XFS_ERR_RETRY_FOREVER,
	},
	{ .name = "EIO",
	  .max_retries = XFS_ERR_RETRY_FOREVER,
	  .retry_timeout = XFS_ERR_RETRY_FOREVER,
	},
	{ .name = "ENOSPC",
	  .max_retries = XFS_ERR_RETRY_FOREVER,
	  .retry_timeout = XFS_ERR_RETRY_FOREVER,
	},
	{ .name = "ENODEV",
	  .max_retries = 0,	/* We can't recover from devices disappearing */
	  .retry_timeout = 0,
	},
};
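
/*
 * For the metadata class this produces a tree like:
 *
 *	/sys/fs/xfs/<fsname>/error/metadata/default/
 *	/sys/fs/xfs/<fsname>/error/metadata/EIO/
 *	/sys/fs/xfs/<fsname>/error/metadata/ENOSPC/
 *	/sys/fs/xfs/<fsname>/error/metadata/ENODEV/
 *
 * with max_retries and retry_timeout_seconds inside each directory.
 */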

static int
xfs_error_sysfs_init_class(
	struct xfs_mount	*mp,
	int			class,
	const char		*parent_name,
	struct xfs_kobj		*parent_kobj,
	const struct xfs_error_init init[])
{
	struct xfs_error_cfg	*cfg;
	int			error;
	int			i;

	ASSERT(class < XFS_ERR_CLASS_MAX);

	error = xfs_sysfs_init(parent_kobj, &xfs_error_ktype,
				&mp->m_error_kobj, parent_name);
	if (error)
		return error;

	for (i = 0; i < XFS_ERR_ERRNO_MAX; i++) {
		cfg = &mp->m_error_cfg[class][i];
		error = xfs_sysfs_init(&cfg->kobj, &xfs_error_cfg_ktype,
					parent_kobj, init[i].name);
		if (error)
			goto out_error;

		cfg->max_retries = init[i].max_retries;
		if (init[i].retry_timeout == XFS_ERR_RETRY_FOREVER)
			cfg->retry_timeout = XFS_ERR_RETRY_FOREVER;
		else
			cfg->retry_timeout = msecs_to_jiffies(
					init[i].retry_timeout * MSEC_PER_SEC);
	}
	return 0;

out_error:
	/* unwind the entries that succeeded */
	for (i--; i >= 0; i--) {
		cfg = &mp->m_error_cfg[class][i];
		xfs_sysfs_del(&cfg->kobj);
	}
	xfs_sysfs_del(parent_kobj);
	return error;
}

int
xfs_error_sysfs_init(
	struct xfs_mount	*mp)
{
	int			error;

	/* .../xfs/<dev>/error/ */
	error = xfs_sysfs_init(&mp->m_error_kobj, &xfs_error_ktype,
				&mp->m_kobj, "error");
	if (error)
		return error;

	error = sysfs_create_file(&mp->m_error_kobj.kobject,
				  ATTR_LIST(fail_at_unmount));
	if (error)
		goto out_error;

	/* .../xfs/<dev>/error/metadata/ */
	error = xfs_error_sysfs_init_class(mp, XFS_ERR_METADATA,
				"metadata", &mp->m_error_meta_kobj,
				xfs_error_meta_init);
	if (error)
		goto out_error;

	return 0;

out_error:
	xfs_sysfs_del(&mp->m_error_kobj);
	return error;
}

void
xfs_error_sysfs_del(
	struct xfs_mount	*mp)
{
	struct xfs_error_cfg	*cfg;
	int			i, j;

	for (i = 0; i < XFS_ERR_CLASS_MAX; i++) {
		for (j = 0; j < XFS_ERR_ERRNO_MAX; j++) {
			cfg = &mp->m_error_cfg[i][j];

			xfs_sysfs_del(&cfg->kobj);
		}
	}
	xfs_sysfs_del(&mp->m_error_meta_kobj);
	xfs_sysfs_del(&mp->m_error_kobj);
}

struct xfs_error_cfg *
xfs_error_get_cfg(
	struct xfs_mount	*mp,
	int			error_class,
	int			error)
{
	struct xfs_error_cfg	*cfg;

	if (error < 0)
		error = -error;

	switch (error) {
	case EIO:
		cfg = &mp->m_error_cfg[error_class][XFS_ERR_EIO];
		break;
	case ENOSPC:
		cfg = &mp->m_error_cfg[error_class][XFS_ERR_ENOSPC];
		break;
	case ENODEV:
		cfg = &mp->m_error_cfg[error_class][XFS_ERR_ENODEV];
		break;
	default:
		cfg = &mp->m_error_cfg[error_class][XFS_ERR_DEFAULT];
		break;
	}

	return cfg;
}
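
/*
 * Illustrative caller: the metadata buffer write error path in xfs_buf.c
 * looks up its policy with something like
 *
 *	cfg = xfs_error_get_cfg(mp, XFS_ERR_METADATA, bp->b_error);
 *
 * and then compares the buffer's retry count and first-retry timestamp
 * against cfg->max_retries and cfg->retry_timeout.
 */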