/*
 * linux/net/sunrpc/stats.c
 *
 * procfs-based user access to generic RPC statistics. The stats files
 * reside in /proc/net/rpc.
 *
 * The read routines assume that the buffer passed in is just big enough.
 * If you implement an RPC service that has its own stats routine which
 * appends the generic RPC stats, make sure you don't exceed the PAGE_SIZE
 * limit.
 *
 * Copyright (C) 1995, 1996, 1997 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/slab.h>

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/metrics.h>
#include <linux/rcupdate.h>

#include <trace/events/sunrpc.h>

#include "netns.h"

#define RPCDBG_FACILITY	RPCDBG_MISC

/*
 * Get RPC client stats
 */
static int rpc_proc_show(struct seq_file *seq, void *v)
{
	const struct rpc_stat *statp = seq->private;
	const struct rpc_program *prog = statp->program;
	unsigned int i, j;

	seq_printf(seq,
		   "net %u %u %u %u\n",
		   statp->netcnt,
		   statp->netudpcnt,
		   statp->nettcpcnt,
		   statp->nettcpconn);
	seq_printf(seq,
		   "rpc %u %u %u\n",
		   statp->rpccnt,
		   statp->rpcretrans,
		   statp->rpcauthrefresh);

	for (i = 0; i < prog->nrvers; i++) {
		const struct rpc_version *vers = prog->version[i];
		if (!vers)
			continue;
		seq_printf(seq, "proc%u %u",
			   vers->number, vers->nrprocs);
		for (j = 0; j < vers->nrprocs; j++)
			seq_printf(seq, " %u", vers->counts[j]);
		seq_putc(seq, '\n');
	}
	return 0;
}

static int rpc_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, rpc_proc_show, PDE_DATA(inode));
}

static const struct file_operations rpc_proc_fops = {
	.owner = THIS_MODULE,
	.open = rpc_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/*
 * Get RPC server stats
 */
void svc_seq_show(struct seq_file *seq, const struct svc_stat *statp)
{
	const struct svc_program *prog = statp->program;
	const struct svc_version *vers;
	unsigned int i, j;

	seq_printf(seq,
		   "net %u %u %u %u\n",
		   statp->netcnt,
		   statp->netudpcnt,
		   statp->nettcpcnt,
		   statp->nettcpconn);
	seq_printf(seq,
		   "rpc %u %u %u %u %u\n",
		   statp->rpccnt,
		   statp->rpcbadfmt + statp->rpcbadauth + statp->rpcbadclnt,
		   statp->rpcbadfmt,
		   statp->rpcbadauth,
		   statp->rpcbadclnt);

	for (i = 0; i < prog->pg_nvers; i++) {
		vers = prog->pg_vers[i];
		if (!vers)
			continue;
		seq_printf(seq, "proc%d %u", i, vers->vs_nproc);
		for (j = 0; j < vers->vs_nproc; j++)
			seq_printf(seq, " %u", vers->vs_count[j]);
		seq_putc(seq, '\n');
	}
}
EXPORT_SYMBOL_GPL(svc_seq_show);

/**
 * rpc_alloc_iostats - allocate an rpc_iostats structure
 * @clnt: RPC program, version, and xprt
 *
 */
struct rpc_iostats *rpc_alloc_iostats(struct rpc_clnt *clnt)
{
	struct rpc_iostats *stats;
	int i;

	stats = kcalloc(clnt->cl_maxproc, sizeof(*stats), GFP_KERNEL);
	if (stats) {
		for (i = 0; i < clnt->cl_maxproc; i++)
			spin_lock_init(&stats[i].om_lock);
	}
	return stats;
}
EXPORT_SYMBOL_GPL(rpc_alloc_iostats);

/**
 * rpc_free_iostats - release an rpc_iostats structure
 * @stats: doomed rpc_iostats structure
 *
 */
void rpc_free_iostats(struct rpc_iostats *stats)
{
	kfree(stats);
}
EXPORT_SYMBOL_GPL(rpc_free_iostats);
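/*
 * Illustrative sketch only, not code used by this file: how a client-side
 * caller would typically pair the two helpers above.  In-tree, the RPC
 * client core owns this lifecycle through clnt->cl_metrics (see
 * <linux/sunrpc/clnt.h>); the snippet simply restates that pattern.
 *
 *	clnt->cl_metrics = rpc_alloc_iostats(clnt);	// one slot per procedure
 *	if (!clnt->cl_metrics)
 *		;	// run without per-op metrics
 *	...
 *	rpc_free_iostats(clnt->cl_metrics);		// on client teardown
 *	clnt->cl_metrics = NULL;
 */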
/**
 * rpc_count_iostats_metrics - tally up per-task stats
 * @task: completed rpc_task
 * @op_metrics: stat structure for OP that will accumulate stats from @task
 */
void rpc_count_iostats_metrics(const struct rpc_task *task,
			       struct rpc_iostats *op_metrics)
{
	struct rpc_rqst *req = task->tk_rqstp;
	ktime_t backlog, execute, now;

	if (!op_metrics || !req)
		return;

	now = ktime_get();
	spin_lock(&op_metrics->om_lock);

	op_metrics->om_ops++;
	/* kernel API: om_ops must never become larger than om_ntrans */
	op_metrics->om_ntrans += max(req->rq_ntrans, 1);
	op_metrics->om_timeouts += task->tk_timeouts;

	op_metrics->om_bytes_sent += req->rq_xmit_bytes_sent;
	op_metrics->om_bytes_recv += req->rq_reply_bytes_recvd;

	backlog = 0;
	if (ktime_to_ns(req->rq_xtime)) {
		backlog = ktime_sub(req->rq_xtime, task->tk_start);
		op_metrics->om_queue = ktime_add(op_metrics->om_queue, backlog);
	}

	op_metrics->om_rtt = ktime_add(op_metrics->om_rtt, req->rq_rtt);

	execute = ktime_sub(now, task->tk_start);
	op_metrics->om_execute = ktime_add(op_metrics->om_execute, execute);

	spin_unlock(&op_metrics->om_lock);

	trace_rpc_stats_latency(req->rq_task, backlog, req->rq_rtt, execute);
}
EXPORT_SYMBOL_GPL(rpc_count_iostats_metrics);

/**
 * rpc_count_iostats - tally up per-task stats
 * @task: completed rpc_task
 * @stats: array of stat structures
 *
 * Uses the statidx from @task
 */
void rpc_count_iostats(const struct rpc_task *task, struct rpc_iostats *stats)
{
	rpc_count_iostats_metrics(task,
				  &stats[task->tk_msg.rpc_proc->p_statidx]);
}
EXPORT_SYMBOL_GPL(rpc_count_iostats);

static void _print_name(struct seq_file *seq, unsigned int op,
			const struct rpc_procinfo *procs)
{
	if (procs[op].p_name)
		seq_printf(seq, "\t%12s: ", procs[op].p_name);
	else if (op == 0)
		seq_printf(seq, "\t        NULL: ");
	else
		seq_printf(seq, "\t%12u: ", op);
}

static void _add_rpc_iostats(struct rpc_iostats *a, struct rpc_iostats *b)
{
	a->om_ops += b->om_ops;
	a->om_ntrans += b->om_ntrans;
	a->om_timeouts += b->om_timeouts;
	a->om_bytes_sent += b->om_bytes_sent;
	a->om_bytes_recv += b->om_bytes_recv;
	a->om_queue = ktime_add(a->om_queue, b->om_queue);
	a->om_rtt = ktime_add(a->om_rtt, b->om_rtt);
	a->om_execute = ktime_add(a->om_execute, b->om_execute);
}

static void _print_rpc_iostats(struct seq_file *seq, struct rpc_iostats *stats,
			       int op, const struct rpc_procinfo *procs)
{
	_print_name(seq, op, procs);
	seq_printf(seq, "%lu %lu %lu %Lu %Lu %Lu %Lu %Lu\n",
		   stats->om_ops,
		   stats->om_ntrans,
		   stats->om_timeouts,
		   stats->om_bytes_sent,
		   stats->om_bytes_recv,
		   ktime_to_ms(stats->om_queue),
		   ktime_to_ms(stats->om_rtt),
		   ktime_to_ms(stats->om_execute));
}
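/*
 * For reference: each per-op row built by _print_name() plus
 * _print_rpc_iostats() above comes out as a single line of the form
 *
 *	<procedure>: <ops> <ntrans> <timeouts> <bytes_sent> <bytes_recv>
 *		     <queue ms> <rtt ms> <execute ms>
 *
 * i.e. eight numeric fields after the procedure name, the last three
 * converted from ktime to milliseconds.  Consumers such as the NFS
 * client's /proc/self/mountstats output include these rows via
 * rpc_clnt_show_stats() below.
 */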
"\tper-op statistics\n"); 257 for (op = 0; op < maxproc; op++) { 258 struct rpc_iostats stats = {}; 259 struct rpc_clnt *next = clnt; 260 do { 261 _add_rpc_iostats(&stats, &next->cl_metrics[op]); 262 if (next == next->cl_parent) 263 break; 264 next = next->cl_parent; 265 } while (next); 266 _print_rpc_iostats(seq, &stats, op, clnt->cl_procinfo); 267 } 268 } 269 EXPORT_SYMBOL_GPL(rpc_clnt_show_stats); 270 271 /* 272 * Register/unregister RPC proc files 273 */ 274 static inline struct proc_dir_entry * 275 do_register(struct net *net, const char *name, void *data, 276 const struct file_operations *fops) 277 { 278 struct sunrpc_net *sn; 279 280 dprintk("RPC: registering /proc/net/rpc/%s\n", name); 281 sn = net_generic(net, sunrpc_net_id); 282 return proc_create_data(name, 0, sn->proc_net_rpc, fops, data); 283 } 284 285 struct proc_dir_entry * 286 rpc_proc_register(struct net *net, struct rpc_stat *statp) 287 { 288 return do_register(net, statp->program->name, statp, &rpc_proc_fops); 289 } 290 EXPORT_SYMBOL_GPL(rpc_proc_register); 291 292 void 293 rpc_proc_unregister(struct net *net, const char *name) 294 { 295 struct sunrpc_net *sn; 296 297 sn = net_generic(net, sunrpc_net_id); 298 remove_proc_entry(name, sn->proc_net_rpc); 299 } 300 EXPORT_SYMBOL_GPL(rpc_proc_unregister); 301 302 struct proc_dir_entry * 303 svc_proc_register(struct net *net, struct svc_stat *statp, const struct file_operations *fops) 304 { 305 return do_register(net, statp->program->pg_name, statp, fops); 306 } 307 EXPORT_SYMBOL_GPL(svc_proc_register); 308 309 void 310 svc_proc_unregister(struct net *net, const char *name) 311 { 312 struct sunrpc_net *sn; 313 314 sn = net_generic(net, sunrpc_net_id); 315 remove_proc_entry(name, sn->proc_net_rpc); 316 } 317 EXPORT_SYMBOL_GPL(svc_proc_unregister); 318 319 int rpc_proc_init(struct net *net) 320 { 321 struct sunrpc_net *sn; 322 323 dprintk("RPC: registering /proc/net/rpc\n"); 324 sn = net_generic(net, sunrpc_net_id); 325 sn->proc_net_rpc = proc_mkdir("rpc", net->proc_net); 326 if (sn->proc_net_rpc == NULL) 327 return -ENOMEM; 328 329 return 0; 330 } 331 332 void rpc_proc_exit(struct net *net) 333 { 334 dprintk("RPC: unregistering /proc/net/rpc\n"); 335 remove_proc_entry("rpc", net->proc_net); 336 } 337