// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include <linux/proc_fs.h>

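/*
 * Global XFS statistics object.  The live per-CPU counters hang off
 * xfsstats.xs_stats; the helpers below fold them into single values.
 */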
struct xstats xfsstats;

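/*
 * Sum a single 32-bit counter, located at word offset @idx within
 * struct xfsstats, across all possible CPUs.
 */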
static int counter_val(struct xfsstats __percpu *stats, int idx)
{
	int val = 0, cpu;

	for_each_possible_cpu(cpu)
		val += *(((__u32 *)per_cpu_ptr(stats, cpu) + idx));
	return val;
}

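/*
 * Format every statistics group into @buf, one "name counter counter ..."
 * line per group, followed by the 64-bit "xpc" byte counters and a
 * "debug" flag line.  Returns the accumulated length of the output.
 */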
int xfs_stats_format(struct xfsstats __percpu *stats, char *buf)
{
	int		i, j;
	int		len = 0;
	uint64_t	xs_xstrat_bytes = 0;
	uint64_t	xs_write_bytes = 0;
	uint64_t	xs_read_bytes = 0;

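	/*
	 * Each entry names one stats group; ->endpoint is the word offset
	 * into struct xfsstats just past the group's last counter, so the
	 * loop below prints offsets [previous endpoint, endpoint) per group.
	 */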
	static const struct xstats_entry {
		char	*desc;
		int	endpoint;
	} xstats[] = {
		{ "extent_alloc",	xfsstats_offset(xs_abt_lookup)	},
		{ "abt",		xfsstats_offset(xs_blk_mapr)	},
		{ "blk_map",		xfsstats_offset(xs_bmbt_lookup)	},
		{ "bmbt",		xfsstats_offset(xs_dir_lookup)	},
		{ "dir",		xfsstats_offset(xs_trans_sync)	},
		{ "trans",		xfsstats_offset(xs_ig_attempts)	},
		{ "ig",			xfsstats_offset(xs_log_writes)	},
		{ "log",		xfsstats_offset(xs_try_logspace)},
		{ "push_ail",		xfsstats_offset(xs_xstrat_quick)},
		{ "xstrat",		xfsstats_offset(xs_write_calls)	},
		{ "rw",			xfsstats_offset(xs_attr_get)	},
		{ "attr",		xfsstats_offset(xs_iflush_count)},
		{ "icluster",		xfsstats_offset(vn_active)	},
		{ "vnodes",		xfsstats_offset(xb_get)		},
		{ "buf",		xfsstats_offset(xs_abtb_2)	},
		{ "abtb2",		xfsstats_offset(xs_abtc_2)	},
		{ "abtc2",		xfsstats_offset(xs_bmbt_2)	},
		{ "bmbt2",		xfsstats_offset(xs_ibt_2)	},
		{ "ibt2",		xfsstats_offset(xs_fibt_2)	},
		{ "fibt2",		xfsstats_offset(xs_rmap_2)	},
		{ "rmapbt",		xfsstats_offset(xs_refcbt_2)	},
		{ "refcntbt",		xfsstats_offset(xs_qm_dqreclaims)},
		/* we print both series of quota information together */
		{ "qm",			xfsstats_offset(xs_xstrat_bytes)},
	};

	/* Loop over all stats groups */

	for (i = j = 0; i < ARRAY_SIZE(xstats); i++) {
		len += snprintf(buf + len, PATH_MAX - len, "%s",
				xstats[i].desc);
		/* inner loop prints each counter in this group */
		for (; j < xstats[i].endpoint; j++)
			len += snprintf(buf + len, PATH_MAX - len, " %u",
					counter_val(stats, j));
		len += snprintf(buf + len, PATH_MAX - len, "\n");
	}
	/* extra precision: 64-bit byte counters, summed for the "xpc" line */
	for_each_possible_cpu(i) {
		xs_xstrat_bytes += per_cpu_ptr(stats, i)->s.xs_xstrat_bytes;
		xs_write_bytes += per_cpu_ptr(stats, i)->s.xs_write_bytes;
		xs_read_bytes += per_cpu_ptr(stats, i)->s.xs_read_bytes;
	}

	len += snprintf(buf + len, PATH_MAX-len, "xpc %Lu %Lu %Lu\n",
			xs_xstrat_bytes, xs_write_bytes, xs_read_bytes);
	len += snprintf(buf + len, PATH_MAX-len, "debug %u\n",
#if defined(DEBUG)
		1);
#else
		0);
#endif

	return len;
}

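/*
 * Reset every per-CPU counter to zero.  vn_active is a level (currently
 * active vnodes) rather than an event count, so it is preserved across
 * the clear.
 */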
void xfs_stats_clearall(struct xfsstats __percpu *stats)
{
	int		c;
	uint32_t	vn_active;

	xfs_notice(NULL, "Clearing xfsstats");
	for_each_possible_cpu(c) {
		preempt_disable();
		/* save vn_active, it's a universal truth! */
		vn_active = per_cpu_ptr(stats, c)->s.vn_active;
		memset(per_cpu_ptr(stats, c), 0, sizeof(*stats));
		per_cpu_ptr(stats, c)->s.vn_active = vn_active;
		preempt_enable();
	}
}

#ifdef CONFIG_PROC_FS
/* legacy quota interfaces */
#ifdef CONFIG_XFS_QUOTA

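/* Word offsets bounding the quota manager event counters in struct xfsstats. */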
#define XFSSTAT_START_XQMSTAT xfsstats_offset(xs_qm_dqreclaims)
#define XFSSTAT_END_XQMSTAT xfsstats_offset(xs_qm_dquot)

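/*
 * /proc/fs/xfs/xqm: legacy four-column dquot summary; the first and
 * third columns are hardcoded to zero.
 */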
static int xqm_proc_show(struct seq_file *m, void *v)
{
	/* maximum; incore; ratio free to inuse; freelist */
	seq_printf(m, "%d\t%d\t%d\t%u\n",
		   0, counter_val(xfsstats.xs_stats, XFSSTAT_END_XQMSTAT),
		   0, counter_val(xfsstats.xs_stats, XFSSTAT_END_XQMSTAT + 1));
	return 0;
}

/* second legacy quota stats interface: /proc/fs/xfs/xqmstat */
static int xqmstat_proc_show(struct seq_file *m, void *v)
{
	int j;

	seq_printf(m, "qm");
	for (j = XFSSTAT_START_XQMSTAT; j < XFSSTAT_END_XQMSTAT; j++)
		seq_printf(m, " %u", counter_val(xfsstats.xs_stats, j));
	seq_putc(m, '\n');
	return 0;
}
#endif /* CONFIG_XFS_QUOTA */

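/*
 * Create the legacy /proc/fs/xfs hierarchy.  "stat" is only a symlink to
 * the sysfs stats file; the quota entries exist when CONFIG_XFS_QUOTA is
 * enabled.  On any failure the whole subtree is torn down again.
 */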
int
xfs_init_procfs(void)
{
	if (!proc_mkdir("fs/xfs", NULL))
		return -ENOMEM;

	if (!proc_symlink("fs/xfs/stat", NULL,
			  "/sys/fs/xfs/stats/stats"))
		goto out;

#ifdef CONFIG_XFS_QUOTA
	if (!proc_create_single("fs/xfs/xqmstat", 0, NULL, xqmstat_proc_show))
		goto out;
	if (!proc_create_single("fs/xfs/xqm", 0, NULL, xqm_proc_show))
		goto out;
#endif
	return 0;

out:
	remove_proc_subtree("fs/xfs", NULL);
	return -ENOMEM;
}

void
xfs_cleanup_procfs(void)
{
	remove_proc_subtree("fs/xfs", NULL);
}
#endif /* CONFIG_PROC_FS */