// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"

struct xstats xfsstats;

static int counter_val(struct xfsstats __percpu *stats, int idx)
{
	int val = 0, cpu;

	for_each_possible_cpu(cpu)
		val += *(((__u32 *)per_cpu_ptr(stats, cpu) + idx));
	return val;
}

int xfs_stats_format(struct xfsstats __percpu *stats, char *buf)
{
	int		i, j;
	int		len = 0;
	uint64_t	xs_xstrat_bytes = 0;
	uint64_t	xs_write_bytes = 0;
	uint64_t	xs_read_bytes = 0;
	uint64_t	defer_relog = 0;

	static const struct xstats_entry {
		char	*desc;
		int	endpoint;
	} xstats[] = {
		{ "extent_alloc",	xfsstats_offset(xs_abt_lookup)	},
		{ "abt",		xfsstats_offset(xs_blk_mapr)	},
		{ "blk_map",		xfsstats_offset(xs_bmbt_lookup)	},
		{ "bmbt",		xfsstats_offset(xs_dir_lookup)	},
		{ "dir",		xfsstats_offset(xs_trans_sync)	},
		{ "trans",		xfsstats_offset(xs_ig_attempts)	},
		{ "ig",			xfsstats_offset(xs_log_writes)	},
		{ "log",		xfsstats_offset(xs_try_logspace)},
		{ "push_ail",		xfsstats_offset(xs_xstrat_quick)},
		{ "xstrat",		xfsstats_offset(xs_write_calls)	},
		{ "rw",			xfsstats_offset(xs_attr_get)	},
		{ "attr",		xfsstats_offset(xs_iflush_count)},
		{ "icluster",		xfsstats_offset(vn_active)	},
		{ "vnodes",		xfsstats_offset(xb_get)		},
		{ "buf",		xfsstats_offset(xs_abtb_2)	},
		{ "abtb2",		xfsstats_offset(xs_abtc_2)	},
		{ "abtc2",		xfsstats_offset(xs_bmbt_2)	},
		{ "bmbt2",		xfsstats_offset(xs_ibt_2)	},
		{ "ibt2",		xfsstats_offset(xs_fibt_2)	},
		{ "fibt2",		xfsstats_offset(xs_rmap_2)	},
		{ "rmapbt",		xfsstats_offset(xs_refcbt_2)	},
		{ "refcntbt",		xfsstats_offset(xs_qm_dqreclaims)},
		/* we print both series of quota information together */
		{ "qm",			xfsstats_offset(xs_xstrat_bytes)},
	};

	/* Loop over all stats groups */

	for (i = j = 0; i < ARRAY_SIZE(xstats); i++) {
		len += scnprintf(buf + len, PATH_MAX - len, "%s",
				xstats[i].desc);
		/* inner loop does each group */
		for (; j < xstats[i].endpoint; j++)
			len += scnprintf(buf + len, PATH_MAX - len, " %u",
					counter_val(stats, j));
		len += scnprintf(buf + len, PATH_MAX - len, "\n");
	}
	/* extra precision counters */
	for_each_possible_cpu(i) {
		xs_xstrat_bytes += per_cpu_ptr(stats, i)->s.xs_xstrat_bytes;
		xs_write_bytes += per_cpu_ptr(stats, i)->s.xs_write_bytes;
		xs_read_bytes += per_cpu_ptr(stats, i)->s.xs_read_bytes;
		defer_relog += per_cpu_ptr(stats, i)->s.defer_relog;
	}

	len += scnprintf(buf + len, PATH_MAX-len, "xpc %llu %llu %llu\n",
			xs_xstrat_bytes, xs_write_bytes, xs_read_bytes);
	len += scnprintf(buf + len, PATH_MAX-len, "defer_relog %llu\n",
			defer_relog);
	len += scnprintf(buf + len, PATH_MAX-len, "debug %u\n",
#if defined(DEBUG)
		1);
#else
		0);
#endif

	return len;
}

void xfs_stats_clearall(struct xfsstats __percpu *stats)
{
	int		c;
	uint32_t	vn_active;

	xfs_notice(NULL, "Clearing xfsstats");
	for_each_possible_cpu(c) {
		preempt_disable();
		/* save vn_active, it's a universal truth! */
		vn_active = per_cpu_ptr(stats, c)->s.vn_active;
		memset(per_cpu_ptr(stats, c), 0, sizeof(*stats));
		per_cpu_ptr(stats, c)->s.vn_active = vn_active;
		preempt_enable();
	}
}

#ifdef CONFIG_PROC_FS
/* legacy quota interfaces */
#ifdef CONFIG_XFS_QUOTA

#define XFSSTAT_START_XQMSTAT	xfsstats_offset(xs_qm_dqreclaims)
#define XFSSTAT_END_XQMSTAT	xfsstats_offset(xs_qm_dquot)

static int xqm_proc_show(struct seq_file *m, void *v)
{
	/* maximum; incore; ratio free to inuse; freelist */
	seq_printf(m, "%d\t%d\t%d\t%u\n",
		   0, counter_val(xfsstats.xs_stats, XFSSTAT_END_XQMSTAT),
		   0, counter_val(xfsstats.xs_stats, XFSSTAT_END_XQMSTAT + 1));
	return 0;
}

/* legacy quota stats interface no 2 */
static int xqmstat_proc_show(struct seq_file *m, void *v)
{
	int j;

	seq_puts(m, "qm");
	for (j = XFSSTAT_START_XQMSTAT; j < XFSSTAT_END_XQMSTAT; j++)
		seq_printf(m, " %u", counter_val(xfsstats.xs_stats, j));
	seq_putc(m, '\n');
	return 0;
}
#endif /* CONFIG_XFS_QUOTA */

int
xfs_init_procfs(void)
{
	if (!proc_mkdir("fs/xfs", NULL))
		return -ENOMEM;

	if (!proc_symlink("fs/xfs/stat", NULL,
			  "/sys/fs/xfs/stats/stats"))
		goto out;

#ifdef CONFIG_XFS_QUOTA
	if (!proc_create_single("fs/xfs/xqmstat", 0, NULL, xqmstat_proc_show))
		goto out;
	if (!proc_create_single("fs/xfs/xqm", 0, NULL, xqm_proc_show))
		goto out;
#endif
	return 0;

out:
	remove_proc_subtree("fs/xfs", NULL);
	return -ENOMEM;
}

void
xfs_cleanup_procfs(void)
{
	remove_proc_subtree("fs/xfs", NULL);
}
#endif /* CONFIG_PROC_FS */
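
/*
 * Illustrative only, not part of this file or the kernel build: a minimal
 * userspace sketch that reads the statistics exposed through the
 * /proc/fs/xfs/stat symlink created in xfs_init_procfs() above.  Each line
 * it echoes is one "<group> <counter> <counter> ..." row emitted by
 * xfs_stats_format(), followed by the "xpc", "defer_relog" and "debug"
 * summary lines.  The 1024-byte line buffer is an assumption for the
 * example only.
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char line[1024];
 *		FILE *f = fopen("/proc/fs/xfs/stat", "r");
 *
 *		if (!f)
 *			return 1;
 *		while (fgets(line, sizeof(line), f))
 *			fputs(line, stdout);
 *		fclose(f);
 *		return 0;
 *	}
 */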