// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/net/sunrpc/stats.c
 *
 * procfs-based user access to generic RPC statistics. The stats files
 * reside in /proc/net/rpc.
 *
 * The read routines assume that the buffer passed in is just big enough.
 * If you implement an RPC service that has its own stats routine which
 * appends the generic RPC stats, make sure you don't exceed the PAGE_SIZE
 * limit.
 *
 * Copyright (C) 1995, 1996, 1997 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/slab.h>

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/metrics.h>
#include <linux/rcupdate.h>

#include <trace/events/sunrpc.h>

#include "netns.h"

#define RPCDBG_FACILITY	RPCDBG_MISC

/*
 * Get RPC client stats.
 *
 * seq_file show routine for a per-program client stats file under
 * /proc/net/rpc.  seq->private is the struct rpc_stat passed to
 * rpc_proc_register() below.  Emits a "net" line, an "rpc" line, and
 * one "procNN" line per registered program version listing the
 * per-procedure call counts.
 */
static int rpc_proc_show(struct seq_file *seq, void *v) {
	const struct rpc_stat	*statp = seq->private;
	const struct rpc_program *prog = statp->program;
	unsigned int i, j;

	seq_printf(seq,
		"net %u %u %u %u\n",
			statp->netcnt,
			statp->netudpcnt,
			statp->nettcpcnt,
			statp->nettcpconn);
	seq_printf(seq,
		"rpc %u %u %u\n",
			statp->rpccnt,
			statp->rpcretrans,
			statp->rpcauthrefresh);

	for (i = 0; i < prog->nrvers; i++) {
		const struct rpc_version *vers = prog->version[i];
		if (!vers)
			continue;	/* version slot not implemented */
		seq_printf(seq, "proc%u %u",
				vers->number, vers->nrprocs);
		for (j = 0; j < vers->nrprocs; j++)
			seq_printf(seq, " %u", vers->counts[j]);
		seq_putc(seq, '\n');
	}
	return 0;
}

/* Bind the proc entry's private data (the rpc_stat) to the seq_file. */
static int rpc_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, rpc_proc_show, PDE_DATA(inode));
}

/* file_operations for the per-program client stats files. */
static const struct file_operations rpc_proc_fops = {
	.owner = THIS_MODULE,
	.open = rpc_proc_open,
	.read  = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/*
 * Get RPC server stats.
 *
 * Helper for RPC services' own procfs show routines: dumps the generic
 * server-side counters from @statp into @seq.  The second field of the
 * "rpc" line is the sum of the three bad-request counters that follow it.
 * Per-procedure lines are keyed by version array index @i (printed as
 * "proc%d"), unlike the client side which prints vers->number.
 */
void svc_seq_show(struct seq_file *seq, const struct svc_stat *statp)
{
	const struct svc_program *prog = statp->program;
	const struct svc_version *vers;
	unsigned int i, j;

	seq_printf(seq,
		"net %u %u %u %u\n",
			statp->netcnt,
			statp->netudpcnt,
			statp->nettcpcnt,
			statp->nettcpconn);
	seq_printf(seq,
		"rpc %u %u %u %u %u\n",
			statp->rpccnt,
			statp->rpcbadfmt+statp->rpcbadauth+statp->rpcbadclnt,
			statp->rpcbadfmt,
			statp->rpcbadauth,
			statp->rpcbadclnt);

	for (i = 0; i < prog->pg_nvers; i++) {
		vers = prog->pg_vers[i];
		if (!vers)
			continue;	/* version slot not implemented */
		seq_printf(seq, "proc%d %u", i, vers->vs_nproc);
		for (j = 0; j < vers->vs_nproc; j++)
			seq_printf(seq, " %u", vers->vs_count[j]);
		seq_putc(seq, '\n');
	}
}
EXPORT_SYMBOL_GPL(svc_seq_show);

/**
 * rpc_alloc_iostats - allocate an rpc_iostats structure
 * @clnt: RPC program, version, and xprt
 *
 * Allocates a zeroed array of one rpc_iostats per procedure
 * (clnt->cl_maxproc entries) and initializes each entry's om_lock.
 * Returns NULL on allocation failure; caller frees with
 * rpc_free_iostats().
 */
struct rpc_iostats *rpc_alloc_iostats(struct rpc_clnt *clnt)
{
	struct rpc_iostats *stats;
	int i;

	stats = kcalloc(clnt->cl_maxproc, sizeof(*stats), GFP_KERNEL);
	if (stats) {
		for (i = 0; i < clnt->cl_maxproc; i++)
			spin_lock_init(&stats[i].om_lock);
	}
	return stats;
}
EXPORT_SYMBOL_GPL(rpc_alloc_iostats);

/**
 * rpc_free_iostats - release an rpc_iostats structure
 * @stats: doomed rpc_iostats structure
 *
 * Safe to call with NULL (kfree(NULL) is a no-op).
 */
void rpc_free_iostats(struct rpc_iostats *stats)
{
	kfree(stats);
}
EXPORT_SYMBOL_GPL(rpc_free_iostats);

/**
 * rpc_count_iostats_metrics - tally up per-task stats
 * @task: completed rpc_task
 * @op_metrics: stat structure for OP that will accumulate stats from @task
 *
 * Folds the completed task's counters and timings into @op_metrics
 * under om_lock.  No-op if @op_metrics is NULL or the task has no
 * rqst.  Also fires the rpc_stats_latency tracepoint (outside the
 * lock) with the backlog/rtt/execute samples for this task.
 */
void rpc_count_iostats_metrics(const struct rpc_task *task,
			       struct rpc_iostats *op_metrics)
{
	struct rpc_rqst *req = task->tk_rqstp;
	ktime_t backlog, execute, now;

	if (!op_metrics || !req)
		return;

	now = ktime_get();
	spin_lock(&op_metrics->om_lock);

	op_metrics->om_ops++;
	/* kernel API: om_ops must never become larger than om_ntrans */
	op_metrics->om_ntrans += max(req->rq_ntrans, 1);
	op_metrics->om_timeouts += task->tk_timeouts;

	op_metrics->om_bytes_sent += req->rq_xmit_bytes_sent;
	op_metrics->om_bytes_recv += req->rq_reply_bytes_recvd;

	backlog = 0;
	/*
	 * Only account queue (backlog) time when rq_xtime is set —
	 * presumably it is zero when the request was never transmitted.
	 */
	if (ktime_to_ns(req->rq_xtime)) {
		backlog = ktime_sub(req->rq_xtime, task->tk_start);
		op_metrics->om_queue = ktime_add(op_metrics->om_queue, backlog);
	}

	op_metrics->om_rtt = ktime_add(op_metrics->om_rtt, req->rq_rtt);

	/* Total wall-clock time from task start to now. */
	execute = ktime_sub(now, task->tk_start);
	op_metrics->om_execute = ktime_add(op_metrics->om_execute, execute);
	if (task->tk_status < 0)
		op_metrics->om_error_status++;

	spin_unlock(&op_metrics->om_lock);

	trace_rpc_stats_latency(req->rq_task, backlog, req->rq_rtt, execute);
}
EXPORT_SYMBOL_GPL(rpc_count_iostats_metrics);

/**
 * rpc_count_iostats - tally up per-task stats
 * @task: completed rpc_task
 * @stats: array of stat structures
 *
 * Uses the statidx from @task to select the per-procedure slot in
 * @stats, then delegates to rpc_count_iostats_metrics().
 */
void rpc_count_iostats(const struct rpc_task *task, struct rpc_iostats *stats)
{
	rpc_count_iostats_metrics(task,
				  &stats[task->tk_msg.rpc_proc->p_statidx]);
}
EXPORT_SYMBOL_GPL(rpc_count_iostats);

/*
 * Print the row label for procedure @op: its name if known, "NULL" for
 * procedure 0 (the conventional RPC NULL procedure), or the bare index.
 */
static void _print_name(struct seq_file *seq, unsigned int op,
			const struct rpc_procinfo *procs)
{
	if (procs[op].p_name)
		seq_printf(seq, "\t%12s: ", procs[op].p_name);
	else if (op == 0)
		seq_printf(seq, "\t        NULL: ");
	else
		seq_printf(seq, "\t%12u: ", op);
}

/* Accumulate every counter and ktime field of @b into @a. */
static void _add_rpc_iostats(struct rpc_iostats *a, struct rpc_iostats *b)
{
	a->om_ops += b->om_ops;
	a->om_ntrans += b->om_ntrans;
	a->om_timeouts += b->om_timeouts;
	a->om_bytes_sent += b->om_bytes_sent;
	a->om_bytes_recv += b->om_bytes_recv;
	a->om_queue = ktime_add(a->om_queue, b->om_queue);
	a->om_rtt = ktime_add(a->om_rtt, b->om_rtt);
	a->om_execute = ktime_add(a->om_execute, b->om_execute);
	a->om_error_status += b->om_error_status;
}

/*
 * Emit one per-op row: label followed by the counters, with the ktime
 * accumulators converted to milliseconds.
 */
static void _print_rpc_iostats(struct seq_file *seq, struct rpc_iostats *stats,
			       int op, const struct rpc_procinfo *procs)
{
	_print_name(seq, op, procs);
	seq_printf(seq, "%lu %lu %lu %llu %llu %llu %llu %llu %lu\n",
		   stats->om_ops,
		   stats->om_ntrans,
		   stats->om_timeouts,
		   stats->om_bytes_sent,
		   stats->om_bytes_recv,
		   ktime_to_ms(stats->om_queue),
		   ktime_to_ms(stats->om_rtt),
		   ktime_to_ms(stats->om_execute),
		   stats->om_error_status);
}

/*
 * Per-xprt callback for rpc_clnt_iterate_for_each_xprt(): let each
 * transport print its own stats into the seq_file passed via @seqv.
 */
static int do_print_stats(struct rpc_clnt *clnt, struct rpc_xprt *xprt, void *seqv)
{
	struct seq_file *seq = seqv;

	xprt->ops->print_stats(xprt, seq);
	return 0;
}

/*
 * Dump a client's full iostats report into @seq: version banner,
 * per-transport stats, then per-op rows.  Each per-op row sums the
 * metrics of @clnt and every ancestor up the cl_parent chain (the
 * chain is terminated either by a NULL parent or by a client that is
 * its own parent).  No output if the client has no metrics allocated.
 */
void rpc_clnt_show_stats(struct seq_file *seq, struct rpc_clnt *clnt)
{
	unsigned int op, maxproc = clnt->cl_maxproc;

	if (!clnt->cl_metrics)
		return;

	seq_printf(seq, "\tRPC iostats version: %s  ", RPC_IOSTATS_VERS);
	seq_printf(seq, "p/v: %u/%u (%s)\n",
			clnt->cl_prog, clnt->cl_vers, clnt->cl_program->name);

	rpc_clnt_iterate_for_each_xprt(clnt, do_print_stats, seq);

	seq_printf(seq, "\tper-op statistics\n");
	for (op = 0; op < maxproc; op++) {
		struct rpc_iostats stats = {};
		struct rpc_clnt *next = clnt;
		do {
			_add_rpc_iostats(&stats, &next->cl_metrics[op]);
			if (next == next->cl_parent)
				break;	/* self-parented: end of chain */
			next = next->cl_parent;
		} while (next);
		_print_rpc_iostats(seq, &stats, op, clnt->cl_procinfo);
	}
}
EXPORT_SYMBOL_GPL(rpc_clnt_show_stats);

/*
 * Register/unregister RPC proc files
 */

/* Create /proc/net/rpc/<name> in @net's namespace, with @data as PDE data. */
static inline struct proc_dir_entry *
do_register(struct net *net, const char *name, void *data,
	    const struct file_operations *fops)
{
	struct sunrpc_net *sn;

	dprintk("RPC:       registering /proc/net/rpc/%s\n", name);
	sn = net_generic(net, sunrpc_net_id);
	return proc_create_data(name, 0, sn->proc_net_rpc, fops, data);
}

/* Register a client program's stats file, named after the program. */
struct proc_dir_entry *
rpc_proc_register(struct net *net, struct rpc_stat *statp)
{
	return do_register(net, statp->program->name, statp, &rpc_proc_fops);
}
EXPORT_SYMBOL_GPL(rpc_proc_register);

/* Remove a client program's stats file from /proc/net/rpc. */
void
rpc_proc_unregister(struct net *net, const char *name)
{
	struct sunrpc_net *sn;

	sn = net_generic(net, sunrpc_net_id);
	remove_proc_entry(name, sn->proc_net_rpc);
}
EXPORT_SYMBOL_GPL(rpc_proc_unregister);

/* Register a server program's stats file; the service supplies its own fops. */
struct proc_dir_entry *
svc_proc_register(struct net *net, struct svc_stat *statp, const struct file_operations *fops)
{
	return do_register(net, statp->program->pg_name, statp, fops);
}
EXPORT_SYMBOL_GPL(svc_proc_register);

/* Remove a server program's stats file from /proc/net/rpc. */
void
svc_proc_unregister(struct net *net, const char *name)
{
	struct sunrpc_net *sn;

	sn = net_generic(net, sunrpc_net_id);
	remove_proc_entry(name, sn->proc_net_rpc);
}
EXPORT_SYMBOL_GPL(svc_proc_unregister);

/* Per-netns init: create the /proc/net/rpc directory. */
int rpc_proc_init(struct net *net)
{
	struct sunrpc_net *sn;

	dprintk("RPC:       registering /proc/net/rpc\n");
	sn = net_generic(net, sunrpc_net_id);
	sn->proc_net_rpc = proc_mkdir("rpc", net->proc_net);
	if (sn->proc_net_rpc == NULL)
		return -ENOMEM;

	return 0;
}

/* Per-netns teardown: remove the /proc/net/rpc directory. */
void rpc_proc_exit(struct net *net)
{
	dprintk("RPC:       unregistering /proc/net/rpc\n");
	remove_proc_entry("rpc", net->proc_net);
}