xref: /openbmc/linux/include/trace/events/rpcrdma.h (revision 3a9568fedccc6cf26c1a87621c3bfed7b7432119)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Copyright (c) 2017, 2018 Oracle.  All rights reserved.
4  *
5  * Trace point definitions for the "rpcrdma" subsystem.
6  */
7 #undef TRACE_SYSTEM
8 #define TRACE_SYSTEM rpcrdma
9 
10 #if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ)
11 #define _TRACE_RPCRDMA_H
12 
13 #include <linux/scatterlist.h>
14 #include <linux/sunrpc/rpc_rdma_cid.h>
15 #include <linux/tracepoint.h>
16 #include <rdma/ib_cm.h>
17 #include <trace/events/rdma.h>
18 
19 /**
20  ** Event classes
21  **/
22 
/*
 * Generic RDMA completion event: records the CQ/completion IDs and the
 * work completion status. The vendor error is captured only when the
 * WC reports a failure, so successful completions always log 0.
 */
DECLARE_EVENT_CLASS(rpcrdma_completion_class,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid
	),

	TP_ARGS(wc, cid),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned long, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->status = wc->status;
		/* vendor_err is meaningful only on failed completions */
		if (wc->status)
			__entry->vendor_err = wc->vendor_err;
		else
			__entry->vendor_err = 0;
	),

	TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
		__entry->cq_id, __entry->completion_id,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

/* Instantiate a named event backed by rpcrdma_completion_class */
#define DEFINE_COMPLETION_EVENT(name)					\
		DEFINE_EVENT(rpcrdma_completion_class, name,		\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpc_rdma_cid *cid	\
				),					\
				TP_ARGS(wc, cid))
62 
/*
 * Reply-header event: captures the transport header fields (XID,
 * version, procedure) of a received RPC/RDMA reply along with the
 * peer's address/port strings.
 */
DECLARE_EVENT_CLASS(xprtrdma_reply_class,
	TP_PROTO(
		const struct rpcrdma_rep *rep
	),

	TP_ARGS(rep),

	TP_STRUCT__entry(
		__field(u32, xid)
		__field(u32, version)
		__field(u32, proc)
		__string(addr, rpcrdma_addrstr(rep->rr_rxprt))
		__string(port, rpcrdma_portstr(rep->rr_rxprt))
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpu(rep->rr_xid);
		__entry->version = be32_to_cpu(rep->rr_vers);
		__entry->proc = be32_to_cpu(rep->rr_proc);
		__assign_str(addr, rpcrdma_addrstr(rep->rr_rxprt));
		__assign_str(port, rpcrdma_portstr(rep->rr_rxprt));
	),

	TP_printk("peer=[%s]:%s xid=0x%08x version=%u proc=%u",
		__get_str(addr), __get_str(port),
		__entry->xid, __entry->version, __entry->proc
	)
);

/* Instantiate xprtrdma_reply_<name>_err backed by xprtrdma_reply_class */
#define DEFINE_REPLY_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_reply_class,			\
				xprtrdma_reply_##name##_err,		\
				TP_PROTO(				\
					const struct rpcrdma_rep *rep	\
				),					\
				TP_ARGS(rep))
99 
/*
 * Transport-instance event: records only the rpcrdma_xprt pointer and
 * the peer's address/port strings.
 */
DECLARE_EVENT_CLASS(xprtrdma_rxprt,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt
	),

	TP_ARGS(r_xprt),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p",
		__get_str(addr), __get_str(port), __entry->r_xprt
	)
);

/* Instantiate a named event backed by xprtrdma_rxprt */
#define DEFINE_RXPRT_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_rxprt, name,			\
				TP_PROTO(				\
					const struct rpcrdma_xprt *r_xprt \
				),					\
				TP_ARGS(r_xprt))
130 
/*
 * Connection state-change event: records the caller's return code and
 * the endpoint's current connect status alongside the peer identity.
 */
DECLARE_EVENT_CLASS(xprtrdma_connect_class,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		int rc
	),

	TP_ARGS(r_xprt, rc),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(int, rc)
		__field(int, connect_status)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->rc = rc;
		__entry->connect_status = r_xprt->rx_ep->re_connect_status;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: rc=%d connection status=%d",
		__get_str(addr), __get_str(port), __entry->r_xprt,
		__entry->rc, __entry->connect_status
	)
);

/* Instantiate xprtrdma_<name> backed by xprtrdma_connect_class */
#define DEFINE_CONN_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_connect_class, xprtrdma_##name,	\
				TP_PROTO(				\
					const struct rpcrdma_xprt *r_xprt, \
					int rc				\
				),					\
				TP_ARGS(r_xprt, rc))
168 
/*
 * Read-chunk marshaling event: one MR's worth of a Read list segment
 * (handle/length/offset), plus the XDR position and how many segments
 * remain. "more"/"last" is derived by comparing nents to nsegs.
 */
DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
	TP_PROTO(
		const struct rpc_task *task,
		unsigned int pos,
		struct rpcrdma_mr *mr,
		int nsegs
	),

	TP_ARGS(task, pos, mr, nsegs),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(unsigned int, pos)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(int, nsegs)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->pos = pos;
		__entry->nents = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->nsegs = nsegs;
	),

	TP_printk("task:%u@%u pos=%u %u@0x%016llx:0x%08x (%s)",
		__entry->task_id, __entry->client_id,
		__entry->pos, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		__entry->nents < __entry->nsegs ? "more" : "last"
	)
);

/* Instantiate xprtrdma_chunk_<name> backed by xprtrdma_rdch_event */
#define DEFINE_RDCH_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_rdch_event, xprtrdma_chunk_##name,\
				TP_PROTO(				\
					const struct rpc_task *task,	\
					unsigned int pos,		\
					struct rpcrdma_mr *mr,		\
					int nsegs			\
				),					\
				TP_ARGS(task, pos, mr, nsegs))
218 
/*
 * Write/Reply-chunk marshaling event: identical to xprtrdma_rdch_event
 * except that Write list and Reply chunk segments carry no XDR position.
 */
DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
	TP_PROTO(
		const struct rpc_task *task,
		struct rpcrdma_mr *mr,
		int nsegs
	),

	TP_ARGS(task, mr, nsegs),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(int, nsegs)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->nents = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->nsegs = nsegs;
	),

	TP_printk("task:%u@%u %u@0x%016llx:0x%08x (%s)",
		__entry->task_id, __entry->client_id,
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle,
		__entry->nents < __entry->nsegs ? "more" : "last"
	)
);

/* Instantiate xprtrdma_chunk_<name> backed by xprtrdma_wrch_event */
#define DEFINE_WRCH_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_wrch_event, xprtrdma_chunk_##name,\
				TP_PROTO(				\
					const struct rpc_task *task,	\
					struct rpcrdma_mr *mr,		\
					int nsegs			\
				),					\
				TP_ARGS(task, mr, nsegs))
264 
/* Export DMA direction values so userspace trace tools can decode them */
TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL);
TRACE_DEFINE_ENUM(DMA_TO_DEVICE);
TRACE_DEFINE_ENUM(DMA_FROM_DEVICE);
TRACE_DEFINE_ENUM(DMA_NONE);

/* Pretty-print a DMA data direction in TP_printk output */
#define xprtrdma_show_direction(x)					\
		__print_symbolic(x,					\
				{ DMA_BIDIRECTIONAL, "BIDIR" },		\
				{ DMA_TO_DEVICE, "TO_DEVICE" },		\
				{ DMA_FROM_DEVICE, "FROM_DEVICE" },	\
				{ DMA_NONE, "NONE" })
276 
/*
 * Memory-region lifecycle event: snapshots an MR's resource ID,
 * registration (handle/length/offset), SG entry count, and DMA
 * mapping direction.
 */
DECLARE_EVENT_CLASS(xprtrdma_mr,
	TP_PROTO(
		const struct rpcrdma_mr *mr
	),

	TP_ARGS(mr),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(u32, dir)
	),

	TP_fast_assign(
		__entry->mr_id  = mr->frwr.fr_mr->res.id;
		__entry->nents  = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->dir    = mr->mr_dir;
	),

	TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)",
		__entry->mr_id, __entry->nents, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		xprtrdma_show_direction(__entry->dir)
	)
);

/* Instantiate xprtrdma_mr_<name> backed by xprtrdma_mr */
#define DEFINE_MR_EVENT(name) \
		DEFINE_EVENT(xprtrdma_mr, xprtrdma_mr_##name, \
				TP_PROTO( \
					const struct rpcrdma_mr *mr \
				), \
				TP_ARGS(mr))
315 
/*
 * Backchannel event: associates an rpc_rqst with its rpcrdma_req and
 * reply buffer pointers, keyed by XID.
 */
DECLARE_EVENT_CLASS(xprtrdma_cb_event,
	TP_PROTO(
		const struct rpc_rqst *rqst
	),

	TP_ARGS(rqst),

	TP_STRUCT__entry(
		__field(const void *, rqst)
		__field(const void *, rep)
		__field(const void *, req)
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->rqst = rqst;
		__entry->req = rpcr_to_rdmar(rqst);
		__entry->rep = rpcr_to_rdmar(rqst)->rl_reply;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
	),

	TP_printk("xid=0x%08x, rqst=%p req=%p rep=%p",
		__entry->xid, __entry->rqst, __entry->req, __entry->rep
	)
);

/* Instantiate a named event backed by xprtrdma_cb_event */
#define DEFINE_CB_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_cb_event, name,			\
				TP_PROTO(				\
					const struct rpc_rqst *rqst	\
				),					\
				TP_ARGS(rqst))
348 
349 /**
350  ** Connection events
351  **/
352 
/*
 * Fires when inline thresholds are (re)computed for an endpoint:
 * records the negotiated send/recv inline sizes, the computed maxima,
 * and both connection endpoint addresses (raw sockaddr bytes, decoded
 * by %pISpc in TP_printk).
 */
TRACE_EVENT(xprtrdma_inline_thresh,
	TP_PROTO(
		const struct rpcrdma_ep *ep
	),

	TP_ARGS(ep),

	TP_STRUCT__entry(
		__field(unsigned int, inline_send)
		__field(unsigned int, inline_recv)
		__field(unsigned int, max_send)
		__field(unsigned int, max_recv)
		__array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
		__array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
	),

	TP_fast_assign(
		const struct rdma_cm_id *id = ep->re_id;

		__entry->inline_send = ep->re_inline_send;
		__entry->inline_recv = ep->re_inline_recv;
		__entry->max_send = ep->re_max_inline_send;
		__entry->max_recv = ep->re_max_inline_recv;
		memcpy(__entry->srcaddr, &id->route.addr.src_addr,
		       sizeof(struct sockaddr_in6));
		memcpy(__entry->dstaddr, &id->route.addr.dst_addr,
		       sizeof(struct sockaddr_in6));
	),

	TP_printk("%pISpc -> %pISpc neg send/recv=%u/%u, calc send/recv=%u/%u",
		__entry->srcaddr, __entry->dstaddr,
		__entry->inline_send, __entry->inline_recv,
		__entry->max_send, __entry->max_recv
	)
);

/* Connection state-change tracepoints */
DEFINE_CONN_EVENT(connect);
DEFINE_CONN_EVENT(disconnect);

DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc);
393 
/* Fires when a (re)connect is scheduled; delay is in jiffies */
TRACE_EVENT(xprtrdma_op_connect,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned long delay
	),

	TP_ARGS(r_xprt, delay),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned long, delay)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->delay = delay;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p delay=%lu",
		__get_str(addr), __get_str(port), __entry->r_xprt,
		__entry->delay
	)
);


/*
 * Fires when connect timeouts are updated; values are stored in
 * jiffies and printed in seconds (divided by HZ).
 */
TRACE_EVENT(xprtrdma_op_set_cto,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned long connect,
		unsigned long reconnect
	),

	TP_ARGS(r_xprt, connect, reconnect),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned long, connect)
		__field(unsigned long, reconnect)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->connect = connect;
		__entry->reconnect = reconnect;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: connect=%lu reconnect=%lu",
		__get_str(addr), __get_str(port), __entry->r_xprt,
		__entry->connect / HZ, __entry->reconnect / HZ
	)
);
453 
/*
 * Fires on an IB QP asynchronous event: records the event code, the
 * device name, and both connection endpoint addresses.
 */
TRACE_EVENT(xprtrdma_qp_event,
	TP_PROTO(
		const struct rpcrdma_ep *ep,
		const struct ib_event *event
	),

	TP_ARGS(ep, event),

	TP_STRUCT__entry(
		__field(unsigned long, event)
		__string(name, event->device->name)
		__array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
		__array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
	),

	TP_fast_assign(
		const struct rdma_cm_id *id = ep->re_id;

		__entry->event = event->event;
		__assign_str(name, event->device->name);
		memcpy(__entry->srcaddr, &id->route.addr.src_addr,
		       sizeof(struct sockaddr_in6));
		memcpy(__entry->dstaddr, &id->route.addr.dst_addr,
		       sizeof(struct sockaddr_in6));
	),

	TP_printk("%pISpc -> %pISpc device=%s %s (%lu)",
		__entry->srcaddr, __entry->dstaddr, __get_str(name),
		rdma_show_ib_event(__entry->event), __entry->event
	)
);
485 
486 /**
487  ** Call events
488  **/
489 
/* Fires after a batch of MRs is allocated for a transport */
TRACE_EVENT(xprtrdma_createmrs,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int count
	),

	TP_ARGS(r_xprt, count),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
		__field(unsigned int, count)
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->count = count;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: created %u MRs",
		__get_str(addr), __get_str(port), __entry->r_xprt,
		__entry->count
	)
);

/* Fires when an MR is taken for a request */
TRACE_EVENT(xprtrdma_mr_get,
	TP_PROTO(
		const struct rpcrdma_req *req
	),

	TP_ARGS(req),

	TP_STRUCT__entry(
		__field(const void *, req)
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;

		__entry->req = req;
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
	),

	TP_printk("task:%u@%u xid=0x%08x req=%p",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->req
	)
);

/*
 * Fires when no MR is available for a request; records the same
 * fields as xprtrdma_mr_get so the two can be correlated.
 */
TRACE_EVENT(xprtrdma_nomrs,
	TP_PROTO(
		const struct rpcrdma_req *req
	),

	TP_ARGS(req),

	TP_STRUCT__entry(
		__field(const void *, req)
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;

		__entry->req = req;
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
	),

	TP_printk("task:%u@%u xid=0x%08x req=%p",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->req
	)
);
575 
/* Chunk-marshaling tracepoints */
DEFINE_RDCH_EVENT(read);
DEFINE_WRCH_EVENT(write);
DEFINE_WRCH_EVENT(reply);

/* Export chunk-type values so userspace trace tools can decode them */
TRACE_DEFINE_ENUM(rpcrdma_noch);
TRACE_DEFINE_ENUM(rpcrdma_noch_pullup);
TRACE_DEFINE_ENUM(rpcrdma_noch_mapped);
TRACE_DEFINE_ENUM(rpcrdma_readch);
TRACE_DEFINE_ENUM(rpcrdma_areadch);
TRACE_DEFINE_ENUM(rpcrdma_writech);
TRACE_DEFINE_ENUM(rpcrdma_replych);

/* Pretty-print an rpcrdma chunk type in TP_printk output */
#define xprtrdma_show_chunktype(x)					\
		__print_symbolic(x,					\
				{ rpcrdma_noch, "inline" },		\
				{ rpcrdma_noch_pullup, "pullup" },	\
				{ rpcrdma_noch_mapped, "mapped" },	\
				{ rpcrdma_readch, "read list" },	\
				{ rpcrdma_areadch, "*read list" },	\
				{ rpcrdma_writech, "write list" },	\
				{ rpcrdma_replych, "reply chunk" })
597 
/*
 * Fires after a request is marshaled: records the transport header
 * length, the send buffer's head/page/tail lengths, and the chosen
 * read/write chunk types.
 */
TRACE_EVENT(xprtrdma_marshal,
	TP_PROTO(
		const struct rpcrdma_req *req,
		unsigned int rtype,
		unsigned int wtype
	),

	TP_ARGS(req, rtype, wtype),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(unsigned int, hdrlen)
		__field(unsigned int, headlen)
		__field(unsigned int, pagelen)
		__field(unsigned int, taillen)
		__field(unsigned int, rtype)
		__field(unsigned int, wtype)
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;

		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->hdrlen = req->rl_hdrbuf.len;
		__entry->headlen = rqst->rq_snd_buf.head[0].iov_len;
		__entry->pagelen = rqst->rq_snd_buf.page_len;
		__entry->taillen = rqst->rq_snd_buf.tail[0].iov_len;
		__entry->rtype = rtype;
		__entry->wtype = wtype;
	),

	TP_printk("task:%u@%u xid=0x%08x: hdr=%u xdr=%u/%u/%u %s/%s",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->hdrlen,
		__entry->headlen, __entry->pagelen, __entry->taillen,
		xprtrdma_show_chunktype(__entry->rtype),
		xprtrdma_show_chunktype(__entry->wtype)
	)
);
641 
/* Fires when request marshaling fails; ret is the error code */
TRACE_EVENT(xprtrdma_marshal_failed,
	TP_PROTO(const struct rpc_rqst *rqst,
		 int ret
	),

	TP_ARGS(rqst, ret),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->ret = ret;
	),

	TP_printk("task:%u@%u xid=0x%08x: ret=%d",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->ret
	)
);

/* Fires when Send WR preparation fails; ret is the error code */
TRACE_EVENT(xprtrdma_prepsend_failed,
	TP_PROTO(const struct rpc_rqst *rqst,
		 int ret
	),

	TP_ARGS(rqst, ret),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->ret = ret;
	),

	TP_printk("task:%u@%u xid=0x%08x: ret=%d",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->ret
	)
);
695 
/*
 * Fires when a Send WR is posted: records the send context's CQ and
 * completion IDs, the SGE count, and whether the WR is signaled.
 * client_id is -1 for requests with no rpc_clnt (e.g. backchannel).
 */
TRACE_EVENT(xprtrdma_post_send,
	TP_PROTO(
		const struct rpcrdma_req *req
	),

	TP_ARGS(req),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, num_sge)
		__field(int, signaled)
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;
		const struct rpcrdma_sendctx *sc = req->rl_sendctx;

		__entry->cq_id = sc->sc_cid.ci_queue_id;
		__entry->completion_id = sc->sc_cid.ci_completion_id;
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client ?
				     rqst->rq_task->tk_client->cl_clid : -1;
		__entry->num_sge = req->rl_wr.num_sge;
		__entry->signaled = req->rl_wr.send_flags & IB_SEND_SIGNALED;
	),

	TP_printk("task:%u@%u cq.id=%u cid=%d (%d SGE%s) %s",
		__entry->task_id, __entry->client_id,
		__entry->cq_id, __entry->completion_id,
		__entry->num_sge, (__entry->num_sge == 1 ? "" : "s"),
		(__entry->signaled ? "signaled" : "")
	)
);
732 
733 TRACE_EVENT(xprtrdma_post_recv,
734 	TP_PROTO(
735 		const struct rpcrdma_rep *rep
736 	),
737 
738 	TP_ARGS(rep),
739 
740 	TP_STRUCT__entry(
741 		__field(u32, cq_id)
742 		__field(int, completion_id)
743 	),
744 
745 	TP_fast_assign(
746 		__entry->cq_id = rep->rr_cid.ci_queue_id;
747 		__entry->completion_id = rep->rr_cid.ci_completion_id;
748 	),
749 
750 	TP_printk("cq.id=%d cid=%d",
751 		__entry->cq_id, __entry->completion_id
752 	)
753 );
754 
/*
 * Fires after a batch of Receives is posted: records how many new
 * Receives were requested, the endpoint's resulting active Receive
 * count, and the posting status code.
 */
TRACE_EVENT(xprtrdma_post_recvs,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int count,
		int status
	),

	TP_ARGS(r_xprt, count, status),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned int, count)
		__field(int, status)
		__field(int, posted)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->count = count;
		__entry->status = status;
		__entry->posted = r_xprt->rx_ep->re_receive_count;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: %u new recvs, %d active (rc %d)",
		__get_str(addr), __get_str(port), __entry->r_xprt,
		__entry->count, __entry->posted, __entry->status
	)
);
787 
/* Fires when posting a LocalInv WR fails; status is the error code */
TRACE_EVENT(xprtrdma_post_linv_err,
	TP_PROTO(
		const struct rpcrdma_req *req,
		int status
	),

	TP_ARGS(req, status),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, status)
	),

	TP_fast_assign(
		const struct rpc_task *task = req->rl_slot.rq_task;

		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->status = status;
	),

	TP_printk("task:%u@%u status=%d",
		__entry->task_id, __entry->client_id, __entry->status
	)
);

/**
 ** Completion events
 **/

DEFINE_COMPLETION_EVENT(xprtrdma_wc_receive);
DEFINE_COMPLETION_EVENT(xprtrdma_wc_send);
DEFINE_COMPLETION_EVENT(xprtrdma_wc_fastreg);
DEFINE_COMPLETION_EVENT(xprtrdma_wc_li);
DEFINE_COMPLETION_EVENT(xprtrdma_wc_li_wake);
DEFINE_COMPLETION_EVENT(xprtrdma_wc_li_done);
825 
/* Fires when an FRWR MR allocation completes; rc is the result code */
TRACE_EVENT(xprtrdma_frwr_alloc,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int rc
	),

	TP_ARGS(mr, rc),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(int, rc)
	),

	TP_fast_assign(
		__entry->mr_id = mr->frwr.fr_mr->res.id;
		__entry->rc = rc;
	),

	TP_printk("mr.id=%u: rc=%d",
		__entry->mr_id, __entry->rc
	)
);
848 
/*
 * Fires when an FRWR MR is deregistered: records the MR's identity
 * and registration details plus the deregistration result code.
 */
TRACE_EVENT(xprtrdma_frwr_dereg,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int rc
	),

	TP_ARGS(mr, rc),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(u32, dir)
		__field(int, rc)
	),

	TP_fast_assign(
		__entry->mr_id  = mr->frwr.fr_mr->res.id;
		__entry->nents  = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->dir    = mr->mr_dir;
		__entry->rc	= rc;
	),

	TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s): rc=%d",
		__entry->mr_id, __entry->nents, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		xprtrdma_show_direction(__entry->dir),
		__entry->rc
	)
);
884 
/* Fires when ib_map_mr_sg() returns an error; nents is its return value */
TRACE_EVENT(xprtrdma_frwr_sgerr,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int sg_nents
	),

	TP_ARGS(mr, sg_nents),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(u64, addr)
		__field(u32, dir)
		__field(int, nents)
	),

	TP_fast_assign(
		__entry->mr_id = mr->frwr.fr_mr->res.id;
		__entry->addr = mr->mr_sg->dma_address;
		__entry->dir = mr->mr_dir;
		__entry->nents = sg_nents;
	),

	TP_printk("mr.id=%u DMA addr=0x%llx (%s) sg_nents=%d",
		__entry->mr_id, __entry->addr,
		xprtrdma_show_direction(__entry->dir),
		__entry->nents
	)
);

/*
 * Fires when DMA mapping maps fewer entries than requested:
 * num_mapped is what the mapping produced, nents what was asked for.
 */
TRACE_EVENT(xprtrdma_frwr_maperr,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int num_mapped
	),

	TP_ARGS(mr, num_mapped),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(u64, addr)
		__field(u32, dir)
		__field(int, num_mapped)
		__field(int, nents)
	),

	TP_fast_assign(
		__entry->mr_id = mr->frwr.fr_mr->res.id;
		__entry->addr = mr->mr_sg->dma_address;
		__entry->dir = mr->mr_dir;
		__entry->num_mapped = num_mapped;
		__entry->nents = mr->mr_nents;
	),

	TP_printk("mr.id=%u DMA addr=0x%llx (%s) nents=%d of %d",
		__entry->mr_id, __entry->addr,
		xprtrdma_show_direction(__entry->dir),
		__entry->num_mapped, __entry->nents
	)
);
944 
/* MR lifecycle tracepoints (see xprtrdma_mr event class) */
DEFINE_MR_EVENT(localinv);
DEFINE_MR_EVENT(map);
DEFINE_MR_EVENT(unmap);
DEFINE_MR_EVENT(reminv);
DEFINE_MR_EVENT(recycle);
950 
951 TRACE_EVENT(xprtrdma_dma_maperr,
952 	TP_PROTO(
953 		u64 addr
954 	),
955 
956 	TP_ARGS(addr),
957 
958 	TP_STRUCT__entry(
959 		__field(u64, addr)
960 	),
961 
962 	TP_fast_assign(
963 		__entry->addr = addr;
964 	),
965 
966 	TP_printk("dma addr=0x%llx\n", __entry->addr)
967 );
968 
969 /**
970  ** Reply events
971  **/
972 
/*
 * Fires when an incoming reply is matched to its request: records the
 * rep/req pointers, the reply XID, and the credit grant.
 */
TRACE_EVENT(xprtrdma_reply,
	TP_PROTO(
		const struct rpc_task *task,
		const struct rpcrdma_rep *rep,
		const struct rpcrdma_req *req,
		unsigned int credits
	),

	TP_ARGS(task, rep, req, credits),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, rep)
		__field(const void *, req)
		__field(u32, xid)
		__field(unsigned int, credits)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->rep = rep;
		__entry->req = req;
		__entry->xid = be32_to_cpu(rep->rr_xid);
		__entry->credits = credits;
	),

	TP_printk("task:%u@%u xid=0x%08x, %u credits, rep=%p -> req=%p",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->credits, __entry->rep, __entry->req
	)
);
1006 
/* Fires when reply completion is deferred to a work item */
TRACE_EVENT(xprtrdma_defer_cmp,
	TP_PROTO(
		const struct rpcrdma_rep *rep
	),

	TP_ARGS(rep),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, rep)
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->task_id = rep->rr_rqst->rq_task->tk_pid;
		__entry->client_id = rep->rr_rqst->rq_task->tk_client->cl_clid;
		__entry->rep = rep;
		__entry->xid = be32_to_cpu(rep->rr_xid);
	),

	TP_printk("task:%u@%u xid=0x%08x rep=%p",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->rep
	)
);

/* Reply-parsing error tracepoints (xprtrdma_reply_<name>_err) */
DEFINE_REPLY_EVENT(vers);
DEFINE_REPLY_EVENT(rqst);
DEFINE_REPLY_EVENT(short);
DEFINE_REPLY_EVENT(hdr);
1038 
/*
 * Fires on an RDMA_ERROR/ERR_VERS reply: records the version range
 * the server says it supports.
 */
TRACE_EVENT(xprtrdma_err_vers,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		__be32 *min,
		__be32 *max
	),

	TP_ARGS(rqst, min, max),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(u32, min)
		__field(u32, max)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->min = be32_to_cpup(min);
		__entry->max = be32_to_cpup(max);
	),

	TP_printk("task:%u@%u xid=0x%08x versions=[%u, %u]",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->min, __entry->max
	)
);
1069 
/* Fires on an RDMA_ERROR/ERR_CHUNK reply */
TRACE_EVENT(xprtrdma_err_chunk,
	TP_PROTO(
		const struct rpc_rqst *rqst
	),

	TP_ARGS(rqst),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
	),

	TP_printk("task:%u@%u xid=0x%08x",
		__entry->task_id, __entry->client_id, __entry->xid
	)
);
1093 
1094 TRACE_EVENT(xprtrdma_err_unrecognized,
1095 	TP_PROTO(
1096 		const struct rpc_rqst *rqst,
1097 		__be32 *procedure
1098 	),
1099 
1100 	TP_ARGS(rqst, procedure),
1101 
1102 	TP_STRUCT__entry(
1103 		__field(unsigned int, task_id)
1104 		__field(unsigned int, client_id)
1105 		__field(u32, xid)
1106 		__field(u32, procedure)
1107 	),
1108 
1109 	TP_fast_assign(
1110 		__entry->task_id = rqst->rq_task->tk_pid;
1111 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
1112 		__entry->procedure = be32_to_cpup(procedure);
1113 	),
1114 
1115 	TP_printk("task:%u@%u xid=0x%08x procedure=%u",
1116 		__entry->task_id, __entry->client_id, __entry->xid,
1117 		__entry->procedure
1118 	)
1119 );
1120 
/*
 * Fires when inline reply data is copied ("fixed up") into the
 * receive buffer: records how many bytes were copied and the
 * receive buffer's head/page/tail lengths.
 */
TRACE_EVENT(xprtrdma_fixup,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		unsigned long fixup
	),

	TP_ARGS(rqst, fixup),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(unsigned long, fixup)
		__field(size_t, headlen)
		__field(unsigned int, pagelen)
		__field(size_t, taillen)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->fixup = fixup;
		__entry->headlen = rqst->rq_rcv_buf.head[0].iov_len;
		__entry->pagelen = rqst->rq_rcv_buf.page_len;
		__entry->taillen = rqst->rq_rcv_buf.tail[0].iov_len;
	),

	TP_printk("task:%u@%u fixup=%lu xdr=%zu/%u/%zu",
		__entry->task_id, __entry->client_id, __entry->fixup,
		__entry->headlen, __entry->pagelen, __entry->taillen
	)
);
1152 
/* Fires for each RDMA segment decoded from a reply's chunk lists */
TRACE_EVENT(xprtrdma_decode_seg,
	TP_PROTO(
		u32 handle,
		u32 length,
		u64 offset
	),

	TP_ARGS(handle, length, offset),

	TP_STRUCT__entry(
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->handle = handle;
		__entry->length = length;
		__entry->offset = offset;
	),

	TP_printk("%u@0x%016llx:0x%08x",
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle
	)
);
1179 
1180 /**
1181  ** Callback events
1182  **/
1183 
/* Records backchannel setup on a client transport: the transport
 * instance, the number of reverse-direction requests provisioned,
 * and the peer's presentation address/port strings.
 */
TRACE_EVENT(xprtrdma_cb_setup,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int reqs
	),

	TP_ARGS(r_xprt, reqs),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned int, reqs)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->reqs = reqs;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: %u reqs",
		__get_str(addr), __get_str(port),
		__entry->r_xprt, __entry->reqs
	)
);
1211 
/* Backchannel call/reply events; DEFINE_CB_EVENT is declared
 * earlier in this header.
 */
DEFINE_CB_EVENT(xprtrdma_cb_call);
DEFINE_CB_EVENT(xprtrdma_cb_reply);
1214 
/* Fires when a Receive buffer (rep) associated with an RPC cannot
 * be released normally; records the owning task, the XID, and the
 * leaked rep's address for post-mortem correlation.
 */
TRACE_EVENT(xprtrdma_leaked_rep,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		const struct rpcrdma_rep *rep
	),

	TP_ARGS(rqst, rep),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(const void *, rep)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->rep = rep;
	),

	TP_printk("task:%u@%u xid=0x%08x rep=%p",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->rep
	)
);
1242 
1243 /**
1244  ** Server-side RPC/RDMA events
1245  **/
1246 
/* Event class for server-side connection-accept failures: records
 * the failing status code and the remote peer's address string.
 */
DECLARE_EVENT_CLASS(svcrdma_accept_class,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		long status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(long, status)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s status=%ld",
		__get_str(addr), __entry->status
	)
);
1269 
/* Instantiates an svcrdma_accept_class event named
 * svcrdma_<name>_err for each accept-path failure site.
 */
#define DEFINE_ACCEPT_EVENT(name) \
		DEFINE_EVENT(svcrdma_accept_class, svcrdma_##name##_err, \
				TP_PROTO( \
					const struct svcxprt_rdma *rdma, \
					long status \
				), \
				TP_ARGS(rdma, status))

DEFINE_ACCEPT_EVENT(pd);
DEFINE_ACCEPT_EVENT(qp);
DEFINE_ACCEPT_EVENT(fabric);
DEFINE_ACCEPT_EVENT(initdepth);
DEFINE_ACCEPT_EVENT(accept);
1283 
/* Export the RPC/RDMA procedure enum values so user-space trace
 * tools can resolve the symbolic names below.
 */
TRACE_DEFINE_ENUM(RDMA_MSG);
TRACE_DEFINE_ENUM(RDMA_NOMSG);
TRACE_DEFINE_ENUM(RDMA_MSGP);
TRACE_DEFINE_ENUM(RDMA_DONE);
TRACE_DEFINE_ENUM(RDMA_ERROR);

/* Map an rdma_proc value to its protocol name for TP_printk */
#define show_rpcrdma_proc(x)						\
		__print_symbolic(x,					\
				{ RDMA_MSG, "RDMA_MSG" },		\
				{ RDMA_NOMSG, "RDMA_NOMSG" },		\
				{ RDMA_MSGP, "RDMA_MSGP" },		\
				{ RDMA_DONE, "RDMA_DONE" },		\
				{ RDMA_ERROR, "RDMA_ERROR" })
1297 
/* Fires after the server successfully parses an ingress RPC/RDMA
 * transport header. @p points at the first XDR word of that header;
 * @hdrlen is the number of header bytes consumed.
 */
TRACE_EVENT(svcrdma_decode_rqst,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		__be32 *p,
		unsigned int hdrlen
	),

	TP_ARGS(ctxt, p, hdrlen),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
		__field(unsigned int, hdrlen)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		/* On-the-wire header order: xid, vers, credit, proc */
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
		__entry->hdrlen = hdrlen;
	),

	TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%s hdrlen=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->xid, __entry->vers, __entry->credits,
		show_rpcrdma_proc(__entry->proc), __entry->hdrlen)
);
1332 
/* Fires when an ingress message is too short to contain a complete
 * RPC/RDMA transport header; @hdrlen is the actual received length.
 */
TRACE_EVENT(svcrdma_decode_short_err,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		unsigned int hdrlen
	),

	TP_ARGS(ctxt, hdrlen),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned int, hdrlen)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		__entry->hdrlen = hdrlen;
	),

	TP_printk("cq.id=%u cid=%d hdrlen=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->hdrlen)
);
1357 
/* Event class for transport headers that parsed far enough to
 * extract the fixed fields but were then rejected. @p points at
 * the first XDR word of the header.
 */
DECLARE_EVENT_CLASS(svcrdma_badreq_event,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		__be32 *p
	),

	TP_ARGS(ctxt, p),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		/* On-the-wire header order: xid, vers, credit, proc */
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
	),

	/* proc printed numerically: it may be outside the known enum */
	TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->xid, __entry->vers, __entry->credits, __entry->proc)
);
1388 
/* Instantiates an svcrdma_badreq_event named
 * svcrdma_decode_<name>_err for each header-rejection reason.
 */
#define DEFINE_BADREQ_EVENT(name)					\
		DEFINE_EVENT(svcrdma_badreq_event,			\
			     svcrdma_decode_##name##_err,		\
				TP_PROTO(				\
					const struct svc_rdma_recv_ctxt *ctxt,	\
					__be32 *p			\
				),					\
				TP_ARGS(ctxt, p))

DEFINE_BADREQ_EVENT(badvers);
DEFINE_BADREQ_EVENT(drop);
DEFINE_BADREQ_EVENT(badproc);
DEFINE_BADREQ_EVENT(parse);
1402 
/* Event class for one RDMA segment handled on the server side:
 * steering tag (handle), byte length, and remote offset.
 */
DECLARE_EVENT_CLASS(svcrdma_segment_event,
	TP_PROTO(
		u32 handle,
		u32 length,
		u64 offset
	),

	TP_ARGS(handle, length, offset),

	TP_STRUCT__entry(
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->handle = handle;
		__entry->length = length;
		__entry->offset = offset;
	),

	/* Conventional "length@offset:handle" segment notation */
	TP_printk("%u@0x%016llx:0x%08x",
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle
	)
);
1429 
/* Instantiates an svcrdma_segment_event named svcrdma_<name> for
 * each decode/encode/send segment site.
 */
#define DEFINE_SEGMENT_EVENT(name)					\
		DEFINE_EVENT(svcrdma_segment_event, svcrdma_##name,\
				TP_PROTO(				\
					u32 handle,			\
					u32 length,			\
					u64 offset			\
				),					\
				TP_ARGS(handle, length, offset))

DEFINE_SEGMENT_EVENT(decode_wseg);
DEFINE_SEGMENT_EVENT(encode_rseg);
DEFINE_SEGMENT_EVENT(send_rseg);
DEFINE_SEGMENT_EVENT(encode_wseg);
DEFINE_SEGMENT_EVENT(send_wseg);
1444 
/* Event class recording only the byte length of a chunk being
 * encoded or transmitted.
 */
DECLARE_EVENT_CLASS(svcrdma_chunk_event,
	TP_PROTO(
		u32 length
	),

	TP_ARGS(length),

	TP_STRUCT__entry(
		__field(u32, length)
	),

	TP_fast_assign(
		__entry->length = length;
	),

	TP_printk("length=%u",
		__entry->length
	)
);
1464 
/* Instantiates an svcrdma_chunk_event named svcrdma_<name> for
 * each chunk encode/send site.
 */
#define DEFINE_CHUNK_EVENT(name)					\
		DEFINE_EVENT(svcrdma_chunk_event, svcrdma_##name,	\
				TP_PROTO(				\
					u32 length			\
				),					\
				TP_ARGS(length))

DEFINE_CHUNK_EVENT(send_pzr);
DEFINE_CHUNK_EVENT(encode_write_chunk);
DEFINE_CHUNK_EVENT(send_write_chunk);
DEFINE_CHUNK_EVENT(encode_read_chunk);
DEFINE_CHUNK_EVENT(send_reply_chunk);
1477 
/* Like the chunk events above, but Read chunks also carry an XDR
 * position, so this event records both length and position.
 */
TRACE_EVENT(svcrdma_send_read_chunk,
	TP_PROTO(
		u32 length,
		u32 position
	),

	TP_ARGS(length, position),

	TP_STRUCT__entry(
		__field(u32, length)
		__field(u32, position)
	),

	TP_fast_assign(
		__entry->length = length;
		__entry->position = position;
	),

	TP_printk("length=%u position=%u",
		__entry->length, __entry->position
	)
);
1500 
/* Event class fired when the server sends an RDMA_ERROR reply;
 * records only the XID of the failing request (converted from
 * wire byte order).
 */
DECLARE_EVENT_CLASS(svcrdma_error_event,
	TP_PROTO(
		__be32 xid
	),

	TP_ARGS(xid),

	TP_STRUCT__entry(
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpu(xid);
	),

	TP_printk("xid=0x%08x",
		__entry->xid
	)
);
1520 
/* Instantiates an svcrdma_error_event named svcrdma_err_<name>
 * for each RDMA_ERROR reply type.
 */
#define DEFINE_ERROR_EVENT(name)					\
		DEFINE_EVENT(svcrdma_error_event, svcrdma_err_##name,	\
				TP_PROTO(				\
					__be32 xid			\
				),					\
				TP_ARGS(xid))

DEFINE_ERROR_EVENT(vers);
DEFINE_ERROR_EVENT(chunk);
1530 
1531 /**
1532  ** Server-side RDMA API events
1533  **/
1534 
/* Event class for DMA map/unmap operations on the server: records
 * the mapped address and length plus the device name and remote
 * peer address for correlation.
 */
DECLARE_EVENT_CLASS(svcrdma_dma_map_class,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		u64 dma_addr,
		u32 length
	),

	TP_ARGS(rdma, dma_addr, length),

	TP_STRUCT__entry(
		__field(u64, dma_addr)
		__field(u32, length)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->dma_addr = dma_addr;
		__entry->length = length;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s dma_addr=%llu length=%u",
		__get_str(addr), __get_str(device),
		__entry->dma_addr, __entry->length
	)
);
1563 
/* Instantiates an svcrdma_dma_map_class event named
 * svcrdma_<name> for page map and unmap.
 */
#define DEFINE_SVC_DMA_EVENT(name)					\
		DEFINE_EVENT(svcrdma_dma_map_class, svcrdma_##name,	\
				TP_PROTO(				\
					const struct svcxprt_rdma *rdma,\
					u64 dma_addr,			\
					u32 length			\
				),					\
				TP_ARGS(rdma, dma_addr, length))

DEFINE_SVC_DMA_EVENT(dma_map_page);
DEFINE_SVC_DMA_EVENT(dma_unmap_page);
1575 
/* Fires when DMA-mapping a scatterlist for an RDMA Read/Write
 * fails; records the element count and the failing status.
 */
TRACE_EVENT(svcrdma_dma_map_rw_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		unsigned int nents,
		int status
	),

	TP_ARGS(rdma, nents, status),

	TP_STRUCT__entry(
		__field(int, status)
		__field(unsigned int, nents)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__entry->nents = nents;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s nents=%u status=%d",
		__get_str(addr), __get_str(device), __entry->nents,
		__entry->status
	)
);
1604 
1605 TRACE_EVENT(svcrdma_no_rwctx_err,
1606 	TP_PROTO(
1607 		const struct svcxprt_rdma *rdma,
1608 		unsigned int num_sges
1609 	),
1610 
1611 	TP_ARGS(rdma, num_sges),
1612 
1613 	TP_STRUCT__entry(
1614 		__field(unsigned int, num_sges)
1615 		__string(device, rdma->sc_cm_id->device->name)
1616 		__string(addr, rdma->sc_xprt.xpt_remotebuf)
1617 	),
1618 
1619 	TP_fast_assign(
1620 		__entry->num_sges = num_sges;
1621 		__assign_str(device, rdma->sc_cm_id->device->name);
1622 		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1623 	),
1624 
1625 	TP_printk("addr=%s device=%s num_sges=%d",
1626 		__get_str(addr), __get_str(device), __entry->num_sges
1627 	)
1628 );
1629 
/* Fires when a received payload would overrun the pages available
 * in the svc_rqst; records the offending page index and the XID.
 */
TRACE_EVENT(svcrdma_page_overrun_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		const struct svc_rqst *rqst,
		unsigned int pageno
	),

	TP_ARGS(rdma, rqst, pageno),

	TP_STRUCT__entry(
		__field(unsigned int, pageno)
		__field(u32, xid)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->pageno = pageno;
		__entry->xid = __be32_to_cpu(rqst->rq_xid);
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s xid=0x%08x pageno=%u", __get_str(addr),
		__get_str(device), __entry->xid, __entry->pageno
	)
);
1657 
/* Fires when a client-provided Write chunk is too small for the
 * reply payload: @remaining bytes still to send when segment
 * @seg_no of @num_segs has been consumed.
 */
TRACE_EVENT(svcrdma_small_wrch_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		unsigned int remaining,
		unsigned int seg_no,
		unsigned int num_segs
	),

	TP_ARGS(rdma, remaining, seg_no, num_segs),

	TP_STRUCT__entry(
		__field(unsigned int, remaining)
		__field(unsigned int, seg_no)
		__field(unsigned int, num_segs)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->remaining = remaining;
		__entry->seg_no = seg_no;
		__entry->num_segs = num_segs;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s remaining=%u seg_no=%u num_segs=%u",
		__get_str(addr), __get_str(device), __entry->remaining,
		__entry->seg_no, __entry->num_segs
	)
);
1689 
/* Fires when a Send payload is pulled up (copied) into a single
 * buffer; records the number of bytes copied.
 */
TRACE_EVENT(svcrdma_send_pullup,
	TP_PROTO(
		unsigned int len
	),

	TP_ARGS(len),

	TP_STRUCT__entry(
		__field(unsigned int, len)
	),

	TP_fast_assign(
		__entry->len = len;
	),

	TP_printk("len=%u", __entry->len)
);
1707 
/* Fires when sending an RPC reply fails; records the error status,
 * the request's XID, and the remote peer address.
 */
TRACE_EVENT(svcrdma_send_err,
	TP_PROTO(
		const struct svc_rqst *rqst,
		int status
	),

	TP_ARGS(rqst, status),

	TP_STRUCT__entry(
		__field(int, status)
		__field(u32, xid)
		__string(addr, rqst->rq_xprt->xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__entry->xid = __be32_to_cpu(rqst->rq_xid);
		__assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
	),

	TP_printk("addr=%s xid=0x%08x status=%d", __get_str(addr),
		__entry->xid, __entry->status
	)
);
1732 
/* Fires when the server posts a Send WR; records the completion
 * identity, the SGE count, and the rkey being remotely invalidated
 * (0 when the WR is a plain IB_WR_SEND).
 */
TRACE_EVENT(svcrdma_post_send,
	TP_PROTO(
		const struct svc_rdma_send_ctxt *ctxt
	),

	TP_ARGS(ctxt),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned int, num_sge)
		__field(u32, inv_rkey)
	),

	TP_fast_assign(
		const struct ib_send_wr *wr = &ctxt->sc_send_wr;

		__entry->cq_id = ctxt->sc_cid.ci_queue_id;
		__entry->completion_id = ctxt->sc_cid.ci_completion_id;
		__entry->num_sge = wr->num_sge;
		/* Only Send-with-Invalidate carries a valid rkey */
		__entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ?
					wr->ex.invalidate_rkey : 0;
	),

	/* NOTE(review): "cq_id=" differs from the "cq.id=" label used by
	 * sibling events; left unchanged since trace consumers may parse it.
	 */
	TP_printk("cq_id=%u cid=%d num_sge=%u inv_rkey=0x%08x",
		__entry->cq_id, __entry->completion_id,
		__entry->num_sge, __entry->inv_rkey
	)
);
1762 
/* Send completion; uses the rpcrdma_completion_class defined above */
DEFINE_COMPLETION_EVENT(svcrdma_wc_send);
1764 
1765 TRACE_EVENT(svcrdma_post_recv,
1766 	TP_PROTO(
1767 		const struct svc_rdma_recv_ctxt *ctxt
1768 	),
1769 
1770 	TP_ARGS(ctxt),
1771 
1772 	TP_STRUCT__entry(
1773 		__field(u32, cq_id)
1774 		__field(int, completion_id)
1775 	),
1776 
1777 	TP_fast_assign(
1778 		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
1779 		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
1780 	),
1781 
1782 	TP_printk("cq.id=%d cid=%d",
1783 		__entry->cq_id, __entry->completion_id
1784 	)
1785 );
1786 
/* Receive completion; uses the rpcrdma_completion_class defined above */
DEFINE_COMPLETION_EVENT(svcrdma_wc_receive);
1788 
/* Fires when posting to the Receive queue fails; records the
 * failing status and the remote peer address.
 */
TRACE_EVENT(svcrdma_rq_post_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		int status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(int, status)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s status=%d",
		__get_str(addr), __entry->status
	)
);
1811 
/* Fires when a chunk's RDMA Read/Write WR chain is posted;
 * @sqecount is the number of Send Queue entries consumed.
 */
TRACE_EVENT(svcrdma_post_chunk,
	TP_PROTO(
		const struct rpc_rdma_cid *cid,
		int sqecount
	),

	TP_ARGS(cid, sqecount),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(int, sqecount)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->sqecount = sqecount;
	),

	TP_printk("cq.id=%u cid=%d sqecount=%d",
		__entry->cq_id, __entry->completion_id,
		__entry->sqecount
	)
);
1837 
/* RDMA Read/Write completions; use the rpcrdma_completion_class above */
DEFINE_COMPLETION_EVENT(svcrdma_wc_read);
DEFINE_COMPLETION_EVENT(svcrdma_wc_write);
1840 
1841 TRACE_EVENT(svcrdma_qp_error,
1842 	TP_PROTO(
1843 		const struct ib_event *event,
1844 		const struct sockaddr *sap
1845 	),
1846 
1847 	TP_ARGS(event, sap),
1848 
1849 	TP_STRUCT__entry(
1850 		__field(unsigned int, event)
1851 		__string(device, event->device->name)
1852 		__array(__u8, addr, INET6_ADDRSTRLEN + 10)
1853 	),
1854 
1855 	TP_fast_assign(
1856 		__entry->event = event->event;
1857 		__assign_str(device, event->device->name);
1858 		snprintf(__entry->addr, sizeof(__entry->addr) - 1,
1859 			 "%pISpc", sap);
1860 	),
1861 
1862 	TP_printk("addr=%s dev=%s event=%s (%u)",
1863 		__entry->addr, __get_str(device),
1864 		rdma_show_ib_event(__entry->event), __entry->event
1865 	)
1866 );
1867 
/* Event class snapshotting Send Queue occupancy: available entries
 * (read atomically) versus total configured depth.
 */
DECLARE_EVENT_CLASS(svcrdma_sendqueue_event,
	TP_PROTO(
		const struct svcxprt_rdma *rdma
	),

	TP_ARGS(rdma),

	TP_STRUCT__entry(
		__field(int, avail)
		__field(int, depth)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->avail = atomic_read(&rdma->sc_sq_avail);
		__entry->depth = rdma->sc_sq_depth;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s sc_sq_avail=%d/%d",
		__get_str(addr), __entry->avail, __entry->depth
	)
);
1891 
/* Instantiates an svcrdma_sendqueue_event named svcrdma_sq_<name>
 * for SQ-full and SQ-retry conditions.
 */
#define DEFINE_SQ_EVENT(name)						\
		DEFINE_EVENT(svcrdma_sendqueue_event, svcrdma_sq_##name,\
				TP_PROTO(				\
					const struct svcxprt_rdma *rdma \
				),					\
				TP_ARGS(rdma))

DEFINE_SQ_EVENT(full);
DEFINE_SQ_EVENT(retry);
1901 
/* Fires when posting to the Send Queue fails; records SQ occupancy
 * at the time of failure along with the error status.
 */
TRACE_EVENT(svcrdma_sq_post_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		int status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(int, avail)
		__field(int, depth)
		__field(int, status)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->avail = atomic_read(&rdma->sc_sq_avail);
		__entry->depth = rdma->sc_sq_depth;
		__entry->status = status;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s sc_sq_avail=%d/%d status=%d",
		__get_str(addr), __entry->avail, __entry->depth,
		__entry->status
	)
);
1929 
1930 #endif /* _TRACE_RPCRDMA_H */
1931 
1932 #include <trace/define_trace.h>
1933