xref: /openbmc/linux/include/trace/events/rpcrdma.h (revision 8dcc5721)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Copyright (c) 2017, 2018 Oracle.  All rights reserved.
4  *
5  * Trace point definitions for the "rpcrdma" subsystem.
6  */
7 #undef TRACE_SYSTEM
8 #define TRACE_SYSTEM rpcrdma
9 
10 #if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ)
11 #define _TRACE_RPCRDMA_H
12 
13 #include <linux/scatterlist.h>
14 #include <linux/sunrpc/rpc_rdma_cid.h>
15 #include <linux/tracepoint.h>
16 #include <rdma/ib_cm.h>
17 #include <trace/events/rdma.h>
18 
19 /**
20  ** Event classes
21  **/
22 
/*
 * Generic WR completion: records the completing CQ/completion IDs
 * and the ib_wc status, plus the vendor error when the WR failed.
 */
DECLARE_EVENT_CLASS(rpcrdma_completion_class,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid
	),

	TP_ARGS(wc, cid),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned long, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->status = wc->status;
		/* wc->vendor_err is meaningful only on failure */
		if (wc->status)
			__entry->vendor_err = wc->vendor_err;
		else
			__entry->vendor_err = 0;
	),

	TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
		__entry->cq_id, __entry->completion_id,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

#define DEFINE_COMPLETION_EVENT(name)					\
		DEFINE_EVENT(rpcrdma_completion_class, name,		\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpc_rdma_cid *cid	\
				),					\
				TP_ARGS(wc, cid))
62 
/*
 * MR-related WR completion (fastreg/localinv). Identical capture to
 * rpcrdma_completion_class, but the completion ID identifies an MR,
 * so the output labels it "mr.id" instead of "cid".
 */
DECLARE_EVENT_CLASS(rpcrdma_mr_completion_class,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid
	),

	TP_ARGS(wc, cid),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned long, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->status = wc->status;
		/* wc->vendor_err is meaningful only on failure */
		if (wc->status)
			__entry->vendor_err = wc->vendor_err;
		else
			__entry->vendor_err = 0;
	),

	TP_printk("cq.id=%u mr.id=%d status=%s (%lu/0x%x)",
		__entry->cq_id, __entry->completion_id,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

#define DEFINE_MR_COMPLETION_EVENT(name)				\
		DEFINE_EVENT(rpcrdma_mr_completion_class, name,		\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpc_rdma_cid *cid	\
				),					\
				TP_ARGS(wc, cid))
102 
/*
 * Receive WR completion: in addition to status/vendor error, records
 * the number of bytes received (wc->byte_len) on success, 0 on failure.
 */
DECLARE_EVENT_CLASS(rpcrdma_receive_completion_class,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid
	),

	TP_ARGS(wc, cid),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, received)
		__field(unsigned long, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->status = wc->status;
		/* byte_len and vendor_err are each valid only on one side */
		if (wc->status) {
			__entry->received = 0;
			__entry->vendor_err = wc->vendor_err;
		} else {
			__entry->received = wc->byte_len;
			__entry->vendor_err = 0;
		}
	),

	TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x) received=%u",
		__entry->cq_id, __entry->completion_id,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err,
		__entry->received
	)
);

#define DEFINE_RECEIVE_COMPLETION_EVENT(name)				\
		DEFINE_EVENT(rpcrdma_receive_completion_class, name,	\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpc_rdma_cid *cid	\
				),					\
				TP_ARGS(wc, cid))
147 
/*
 * Successful Receive completion: caller guarantees wc->status is OK,
 * so only the IDs and the received byte count are captured.
 */
DECLARE_EVENT_CLASS(rpcrdma_receive_success_class,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid
	),

	TP_ARGS(wc, cid),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, received)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->received = wc->byte_len;
	),

	TP_printk("cq.id=%u cid=%d received=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->received
	)
);

#define DEFINE_RECEIVE_SUCCESS_EVENT(name)				\
		DEFINE_EVENT(rpcrdma_receive_success_class, name,	\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpc_rdma_cid *cid	\
				),					\
				TP_ARGS(wc, cid))
181 
/*
 * Flushed/failed Receive completion: caller guarantees wc->status is
 * an error, so vendor_err is captured unconditionally.
 */
DECLARE_EVENT_CLASS(rpcrdma_receive_flush_class,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid
	),

	TP_ARGS(wc, cid),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned long, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->status = wc->status;
		__entry->vendor_err = wc->vendor_err;
	),

	TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
		__entry->cq_id, __entry->completion_id,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

#define DEFINE_RECEIVE_FLUSH_EVENT(name)				\
		DEFINE_EVENT(rpcrdma_receive_flush_class, name,		\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpc_rdma_cid *cid	\
				),					\
				TP_ARGS(wc, cid))
218 
/*
 * RPC/RDMA reply parsing events: records the reply's XID, protocol
 * version, and procedure, plus the peer address/port strings.
 * Instances are named xprtrdma_reply_<name>_err by DEFINE_REPLY_EVENT.
 */
DECLARE_EVENT_CLASS(xprtrdma_reply_class,
	TP_PROTO(
		const struct rpcrdma_rep *rep
	),

	TP_ARGS(rep),

	TP_STRUCT__entry(
		__field(u32, xid)
		__field(u32, version)
		__field(u32, proc)
		__string(addr, rpcrdma_addrstr(rep->rr_rxprt))
		__string(port, rpcrdma_portstr(rep->rr_rxprt))
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpu(rep->rr_xid);
		__entry->version = be32_to_cpu(rep->rr_vers);
		__entry->proc = be32_to_cpu(rep->rr_proc);
		__assign_str(addr, rpcrdma_addrstr(rep->rr_rxprt));
		__assign_str(port, rpcrdma_portstr(rep->rr_rxprt));
	),

	TP_printk("peer=[%s]:%s xid=0x%08x version=%u proc=%u",
		__get_str(addr), __get_str(port),
		__entry->xid, __entry->version, __entry->proc
	)
);

#define DEFINE_REPLY_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_reply_class,			\
				xprtrdma_reply_##name##_err,		\
				TP_PROTO(				\
					const struct rpcrdma_rep *rep	\
				),					\
				TP_ARGS(rep))
255 
/*
 * Minimal per-transport event: records only the peer address and
 * port of the rpcrdma transport.
 */
DECLARE_EVENT_CLASS(xprtrdma_rxprt,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt
	),

	TP_ARGS(r_xprt),

	TP_STRUCT__entry(
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s",
		__get_str(addr), __get_str(port)
	)
);

#define DEFINE_RXPRT_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_rxprt, name,			\
				TP_PROTO(				\
					const struct rpcrdma_xprt *r_xprt \
				),					\
				TP_ARGS(r_xprt))
284 
/*
 * Connection state-change events: records the result code passed by
 * the caller plus the endpoint's current re_connect_status.
 * Instances are named xprtrdma_<name> by DEFINE_CONN_EVENT.
 */
DECLARE_EVENT_CLASS(xprtrdma_connect_class,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		int rc
	),

	TP_ARGS(r_xprt, rc),

	TP_STRUCT__entry(
		__field(int, rc)
		__field(int, connect_status)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->rc = rc;
		__entry->connect_status = r_xprt->rx_ep->re_connect_status;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s rc=%d connection status=%d",
		__get_str(addr), __get_str(port),
		__entry->rc, __entry->connect_status
	)
);

#define DEFINE_CONN_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_connect_class, xprtrdma_##name,	\
				TP_PROTO(				\
					const struct rpcrdma_xprt *r_xprt, \
					int rc				\
				),					\
				TP_ARGS(r_xprt, rc))
320 
/*
 * Read chunk registration: records the RPC task, the XDR stream
 * position of the chunk, and the registered MR's handle/length/offset.
 * "(more)" vs "(last)" indicates whether further segments remain.
 */
DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
	TP_PROTO(
		const struct rpc_task *task,
		unsigned int pos,
		struct rpcrdma_mr *mr,
		int nsegs
	),

	TP_ARGS(task, pos, mr, nsegs),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(unsigned int, pos)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(int, nsegs)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->pos = pos;
		__entry->nents = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->nsegs = nsegs;
	),

	TP_printk("task:%u@%u pos=%u %u@0x%016llx:0x%08x (%s)",
		__entry->task_id, __entry->client_id,
		__entry->pos, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		__entry->nents < __entry->nsegs ? "more" : "last"
	)
);

#define DEFINE_RDCH_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_rdch_event, xprtrdma_chunk_##name,\
				TP_PROTO(				\
					const struct rpc_task *task,	\
					unsigned int pos,		\
					struct rpcrdma_mr *mr,		\
					int nsegs			\
				),					\
				TP_ARGS(task, pos, mr, nsegs))
370 
/*
 * Write/Reply chunk registration: same capture as xprtrdma_rdch_event
 * but without an XDR position (write/reply chunks have none).
 */
DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
	TP_PROTO(
		const struct rpc_task *task,
		struct rpcrdma_mr *mr,
		int nsegs
	),

	TP_ARGS(task, mr, nsegs),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(int, nsegs)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->nents = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->nsegs = nsegs;
	),

	TP_printk("task:%u@%u %u@0x%016llx:0x%08x (%s)",
		__entry->task_id, __entry->client_id,
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle,
		__entry->nents < __entry->nsegs ? "more" : "last"
	)
);

#define DEFINE_WRCH_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_wrch_event, xprtrdma_chunk_##name,\
				TP_PROTO(				\
					const struct rpc_task *task,	\
					struct rpcrdma_mr *mr,		\
					int nsegs			\
				),					\
				TP_ARGS(task, mr, nsegs))
416 
/* Export DMA direction values so user space can decode them by name */
TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL);
TRACE_DEFINE_ENUM(DMA_TO_DEVICE);
TRACE_DEFINE_ENUM(DMA_FROM_DEVICE);
TRACE_DEFINE_ENUM(DMA_NONE);

#define xprtrdma_show_direction(x)					\
		__print_symbolic(x,					\
				{ DMA_BIDIRECTIONAL, "BIDIR" },		\
				{ DMA_TO_DEVICE, "TO_DEVICE" },		\
				{ DMA_FROM_DEVICE, "FROM_DEVICE" },	\
				{ DMA_NONE, "NONE" })
428 
/*
 * MR lifecycle events for an MR attached to an rpcrdma_req: records
 * the owning RPC task, the MR's resource ID, and its registration
 * parameters (nents, handle, length, offset, DMA direction).
 */
DECLARE_EVENT_CLASS(xprtrdma_mr_class,
	TP_PROTO(
		const struct rpcrdma_mr *mr
	),

	TP_ARGS(mr),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, mr_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(u32, dir)
	),

	TP_fast_assign(
		/* walk back from the MR to the task that owns the request */
		const struct rpcrdma_req *req = mr->mr_req;
		const struct rpc_task *task = req->rl_slot.rq_task;

		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->mr_id  = mr->mr_ibmr->res.id;
		__entry->nents  = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->dir    = mr->mr_dir;
	),

	TP_printk("task:%u@%u mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)",
		__entry->task_id, __entry->client_id,
		__entry->mr_id, __entry->nents, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		xprtrdma_show_direction(__entry->dir)
	)
);

#define DEFINE_MR_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_mr_class,				\
				xprtrdma_mr_##name,			\
				TP_PROTO(				\
					const struct rpcrdma_mr *mr	\
				),					\
				TP_ARGS(mr))
476 
/*
 * MR events for an MR not (or no longer) associated with an RPC task:
 * same registration details as xprtrdma_mr_class, without task fields.
 */
DECLARE_EVENT_CLASS(xprtrdma_anonymous_mr_class,
	TP_PROTO(
		const struct rpcrdma_mr *mr
	),

	TP_ARGS(mr),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(u32, dir)
	),

	TP_fast_assign(
		__entry->mr_id  = mr->mr_ibmr->res.id;
		__entry->nents  = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->dir    = mr->mr_dir;
	),

	TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)",
		__entry->mr_id, __entry->nents, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		xprtrdma_show_direction(__entry->dir)
	)
);

#define DEFINE_ANON_MR_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_anonymous_mr_class,		\
				xprtrdma_mr_##name,			\
				TP_PROTO(				\
					const struct rpcrdma_mr *mr	\
				),					\
				TP_ARGS(mr))
516 
/*
 * Backchannel (callback) request events: records the callback's XID
 * and the peer address/port. Instances are named xprtrdma_cb_<name>.
 */
DECLARE_EVENT_CLASS(xprtrdma_callback_class,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		const struct rpc_rqst *rqst
	),

	TP_ARGS(r_xprt, rqst),

	TP_STRUCT__entry(
		__field(u32, xid)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s xid=0x%08x",
		__get_str(addr), __get_str(port), __entry->xid
	)
);

#define DEFINE_CALLBACK_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_callback_class,			\
				xprtrdma_cb_##name,			\
				TP_PROTO(				\
					const struct rpcrdma_xprt *r_xprt, \
					const struct rpc_rqst *rqst	\
				),					\
				TP_ARGS(r_xprt, rqst))
550 
551 /**
552  ** Connection events
553  **/
554 
/*
 * Reports the negotiated vs. calculated inline send/receive
 * thresholds for a newly established connection, along with the
 * connection's source and destination socket addresses.
 */
TRACE_EVENT(xprtrdma_inline_thresh,
	TP_PROTO(
		const struct rpcrdma_ep *ep
	),

	TP_ARGS(ep),

	TP_STRUCT__entry(
		__field(unsigned int, inline_send)
		__field(unsigned int, inline_recv)
		__field(unsigned int, max_send)
		__field(unsigned int, max_recv)
		/* sockaddr_in6-sized buffers hold either IPv4 or IPv6 */
		__array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
		__array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
	),

	TP_fast_assign(
		const struct rdma_cm_id *id = ep->re_id;

		__entry->inline_send = ep->re_inline_send;
		__entry->inline_recv = ep->re_inline_recv;
		__entry->max_send = ep->re_max_inline_send;
		__entry->max_recv = ep->re_max_inline_recv;
		memcpy(__entry->srcaddr, &id->route.addr.src_addr,
		       sizeof(struct sockaddr_in6));
		memcpy(__entry->dstaddr, &id->route.addr.dst_addr,
		       sizeof(struct sockaddr_in6));
	),

	TP_printk("%pISpc -> %pISpc neg send/recv=%u/%u, calc send/recv=%u/%u",
		__entry->srcaddr, __entry->dstaddr,
		__entry->inline_send, __entry->inline_recv,
		__entry->max_send, __entry->max_recv
	)
);
590 
/* Instantiate connection state-change and transport events */
DEFINE_CONN_EVENT(connect);
DEFINE_CONN_EVENT(disconnect);

DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc);
595 
/*
 * Fires when a (re)connect is scheduled; records the delay (in
 * jiffies) before the connect worker will run.
 */
TRACE_EVENT(xprtrdma_op_connect,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned long delay
	),

	TP_ARGS(r_xprt, delay),

	TP_STRUCT__entry(
		__field(unsigned long, delay)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->delay = delay;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s delay=%lu",
		__get_str(addr), __get_str(port), __entry->delay
	)
);
620 
621 
/*
 * Records newly set connect/reconnect timeouts. Values are stored in
 * jiffies but printed in seconds (divided by HZ).
 */
TRACE_EVENT(xprtrdma_op_set_cto,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned long connect,
		unsigned long reconnect
	),

	TP_ARGS(r_xprt, connect, reconnect),

	TP_STRUCT__entry(
		__field(unsigned long, connect)
		__field(unsigned long, reconnect)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->connect = connect;
		__entry->reconnect = reconnect;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s connect=%lu reconnect=%lu",
		__get_str(addr), __get_str(port),
		__entry->connect / HZ, __entry->reconnect / HZ
	)
);
650 
651 /**
652  ** Call events
653  **/
654 
/*
 * Records how many new MRs were created for a transport.
 */
TRACE_EVENT(xprtrdma_createmrs,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int count
	),

	TP_ARGS(r_xprt, count),

	TP_STRUCT__entry(
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
		__field(unsigned int, count)
	),

	TP_fast_assign(
		__entry->count = count;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s created %u MRs",
		__get_str(addr), __get_str(port), __entry->count
	)
);
679 
/*
 * Fires when a request could not obtain the MRs it needs; identifies
 * the stalled RPC task and the peer.
 */
TRACE_EVENT(xprtrdma_nomrs_err,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		const struct rpcrdma_req *req
	),

	TP_ARGS(r_xprt, req),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;

		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s task:%u@%u",
		__get_str(addr), __get_str(port),
		__entry->task_id, __entry->client_id
	)
);
709 
/* Instantiate chunk registration events */
DEFINE_RDCH_EVENT(read);
DEFINE_WRCH_EVENT(write);
DEFINE_WRCH_EVENT(reply);

/* Export chunk type values so user space can decode them by name */
TRACE_DEFINE_ENUM(rpcrdma_noch);
TRACE_DEFINE_ENUM(rpcrdma_noch_pullup);
TRACE_DEFINE_ENUM(rpcrdma_noch_mapped);
TRACE_DEFINE_ENUM(rpcrdma_readch);
TRACE_DEFINE_ENUM(rpcrdma_areadch);
TRACE_DEFINE_ENUM(rpcrdma_writech);
TRACE_DEFINE_ENUM(rpcrdma_replych);

#define xprtrdma_show_chunktype(x)					\
		__print_symbolic(x,					\
				{ rpcrdma_noch, "inline" },		\
				{ rpcrdma_noch_pullup, "pullup" },	\
				{ rpcrdma_noch_mapped, "mapped" },	\
				{ rpcrdma_readch, "read list" },	\
				{ rpcrdma_areadch, "*read list" },	\
				{ rpcrdma_writech, "write list" },	\
				{ rpcrdma_replych, "reply chunk" })
731 
/*
 * Records the result of marshaling an RPC call: the RPC/RDMA header
 * length, the XDR send buffer geometry (head/page/tail lengths), and
 * the chosen read and write chunk types.
 */
TRACE_EVENT(xprtrdma_marshal,
	TP_PROTO(
		const struct rpcrdma_req *req,
		unsigned int rtype,
		unsigned int wtype
	),

	TP_ARGS(req, rtype, wtype),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(unsigned int, hdrlen)
		__field(unsigned int, headlen)
		__field(unsigned int, pagelen)
		__field(unsigned int, taillen)
		__field(unsigned int, rtype)
		__field(unsigned int, wtype)
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;

		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->hdrlen = req->rl_hdrbuf.len;
		__entry->headlen = rqst->rq_snd_buf.head[0].iov_len;
		__entry->pagelen = rqst->rq_snd_buf.page_len;
		__entry->taillen = rqst->rq_snd_buf.tail[0].iov_len;
		__entry->rtype = rtype;
		__entry->wtype = wtype;
	),

	TP_printk("task:%u@%u xid=0x%08x: hdr=%u xdr=%u/%u/%u %s/%s",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->hdrlen,
		__entry->headlen, __entry->pagelen, __entry->taillen,
		xprtrdma_show_chunktype(__entry->rtype),
		xprtrdma_show_chunktype(__entry->wtype)
	)
);
775 
/*
 * Fires when marshaling an RPC call fails; records the task, XID,
 * and the error returned by the marshaling code.
 */
TRACE_EVENT(xprtrdma_marshal_failed,
	TP_PROTO(const struct rpc_rqst *rqst,
		 int ret
	),

	TP_ARGS(rqst, ret),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->ret = ret;
	),

	TP_printk("task:%u@%u xid=0x%08x: ret=%d",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->ret
	)
);
802 
/*
 * Fires when preparing the Send WR for a marshaled request fails;
 * records the task, XID, and error code.
 */
TRACE_EVENT(xprtrdma_prepsend_failed,
	TP_PROTO(const struct rpc_rqst *rqst,
		 int ret
	),

	TP_ARGS(rqst, ret),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->ret = ret;
	),

	TP_printk("task:%u@%u xid=0x%08x: ret=%d",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->ret
	)
);
829 
/*
 * Fires when a Send WR is posted: records the send context's CQ and
 * completion IDs, the number of SGEs, and whether the WR requests a
 * signaled completion.
 */
TRACE_EVENT(xprtrdma_post_send,
	TP_PROTO(
		const struct rpcrdma_req *req
	),

	TP_ARGS(req),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, num_sge)
		__field(int, signaled)
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;
		const struct rpcrdma_sendctx *sc = req->rl_sendctx;

		__entry->cq_id = sc->sc_cid.ci_queue_id;
		__entry->completion_id = sc->sc_cid.ci_completion_id;
		__entry->task_id = rqst->rq_task->tk_pid;
		/* backchannel requests may have no rpc_clnt */
		__entry->client_id = rqst->rq_task->tk_client ?
				     rqst->rq_task->tk_client->cl_clid : -1;
		__entry->num_sge = req->rl_wr.num_sge;
		__entry->signaled = req->rl_wr.send_flags & IB_SEND_SIGNALED;
	),

	TP_printk("task:%u@%u cq.id=%u cid=%d (%d SGE%s) %s",
		__entry->task_id, __entry->client_id,
		__entry->cq_id, __entry->completion_id,
		__entry->num_sge, (__entry->num_sge == 1 ? "" : "s"),
		(__entry->signaled ? "signaled" : "")
	)
);
866 
/*
 * Fires when posting a Send WR fails; records the task and the
 * ib_post_send() return code. cq_id falls back to 0 when the
 * endpoint has already been torn down.
 */
TRACE_EVENT(xprtrdma_post_send_err,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		const struct rpcrdma_req *req,
		int rc
	),

	TP_ARGS(r_xprt, req, rc),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, rc)
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;
		const struct rpcrdma_ep *ep = r_xprt->rx_ep;

		__entry->cq_id = ep ? ep->re_attr.recv_cq->res.id : 0;
		__entry->task_id = rqst->rq_task->tk_pid;
		/* backchannel requests may have no rpc_clnt */
		__entry->client_id = rqst->rq_task->tk_client ?
				     rqst->rq_task->tk_client->cl_clid : -1;
		__entry->rc = rc;
	),

	TP_printk("task:%u@%u cq.id=%u rc=%d",
		__entry->task_id, __entry->client_id,
		__entry->cq_id, __entry->rc
	)
);
899 
900 TRACE_EVENT(xprtrdma_post_recv,
901 	TP_PROTO(
902 		const struct rpcrdma_rep *rep
903 	),
904 
905 	TP_ARGS(rep),
906 
907 	TP_STRUCT__entry(
908 		__field(u32, cq_id)
909 		__field(int, completion_id)
910 	),
911 
912 	TP_fast_assign(
913 		__entry->cq_id = rep->rr_cid.ci_queue_id;
914 		__entry->completion_id = rep->rr_cid.ci_completion_id;
915 	),
916 
917 	TP_printk("cq.id=%d cid=%d",
918 		__entry->cq_id, __entry->completion_id
919 	)
920 );
921 
922 TRACE_EVENT(xprtrdma_post_recvs,
923 	TP_PROTO(
924 		const struct rpcrdma_xprt *r_xprt,
925 		unsigned int count
926 	),
927 
928 	TP_ARGS(r_xprt, count),
929 
930 	TP_STRUCT__entry(
931 		__field(u32, cq_id)
932 		__field(unsigned int, count)
933 		__field(int, posted)
934 		__string(addr, rpcrdma_addrstr(r_xprt))
935 		__string(port, rpcrdma_portstr(r_xprt))
936 	),
937 
938 	TP_fast_assign(
939 		const struct rpcrdma_ep *ep = r_xprt->rx_ep;
940 
941 		__entry->cq_id = ep->re_attr.recv_cq->res.id;
942 		__entry->count = count;
943 		__entry->posted = ep->re_receive_count;
944 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
945 		__assign_str(port, rpcrdma_portstr(r_xprt));
946 	),
947 
948 	TP_printk("peer=[%s]:%s cq.id=%d %u new recvs, %d active",
949 		__get_str(addr), __get_str(port), __entry->cq_id,
950 		__entry->count, __entry->posted
951 	)
952 );
953 
954 TRACE_EVENT(xprtrdma_post_recvs_err,
955 	TP_PROTO(
956 		const struct rpcrdma_xprt *r_xprt,
957 		int status
958 	),
959 
960 	TP_ARGS(r_xprt, status),
961 
962 	TP_STRUCT__entry(
963 		__field(u32, cq_id)
964 		__field(int, status)
965 		__string(addr, rpcrdma_addrstr(r_xprt))
966 		__string(port, rpcrdma_portstr(r_xprt))
967 	),
968 
969 	TP_fast_assign(
970 		const struct rpcrdma_ep *ep = r_xprt->rx_ep;
971 
972 		__entry->cq_id = ep->re_attr.recv_cq->res.id;
973 		__entry->status = status;
974 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
975 		__assign_str(port, rpcrdma_portstr(r_xprt));
976 	),
977 
978 	TP_printk("peer=[%s]:%s cq.id=%d rc=%d",
979 		__get_str(addr), __get_str(port), __entry->cq_id,
980 		__entry->status
981 	)
982 );
983 
/*
 * Fires when posting LocalInv WRs for a request fails; records the
 * owning task and the post status code.
 */
TRACE_EVENT(xprtrdma_post_linv_err,
	TP_PROTO(
		const struct rpcrdma_req *req,
		int status
	),

	TP_ARGS(req, status),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, status)
	),

	TP_fast_assign(
		const struct rpc_task *task = req->rl_slot.rq_task;

		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->status = status;
	),

	TP_printk("task:%u@%u status=%d",
		__entry->task_id, __entry->client_id, __entry->status
	)
);
1010 
1011 /**
1012  ** Completion events
1013  **/
1014 
1015 DEFINE_RECEIVE_COMPLETION_EVENT(xprtrdma_wc_receive);
1016 
1017 DEFINE_COMPLETION_EVENT(xprtrdma_wc_send);
1018 DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_fastreg);
1019 DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li);
1020 DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li_wake);
1021 DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li_done);
1022 
/*
 * Records the result of allocating/initializing an FRWR MR.
 */
TRACE_EVENT(xprtrdma_frwr_alloc,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int rc
	),

	TP_ARGS(mr, rc),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(int, rc)
	),

	TP_fast_assign(
		__entry->mr_id = mr->mr_ibmr->res.id;
		__entry->rc = rc;
	),

	TP_printk("mr.id=%u: rc=%d",
		__entry->mr_id, __entry->rc
	)
);
1045 
/*
 * Records deregistration of an FRWR MR: the MR's registration
 * parameters plus the ib_dereg_mr() return code.
 */
TRACE_EVENT(xprtrdma_frwr_dereg,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int rc
	),

	TP_ARGS(mr, rc),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(u32, dir)
		__field(int, rc)
	),

	TP_fast_assign(
		__entry->mr_id  = mr->mr_ibmr->res.id;
		__entry->nents  = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->dir    = mr->mr_dir;
		__entry->rc	= rc;
	),

	TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s): rc=%d",
		__entry->mr_id, __entry->nents, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		xprtrdma_show_direction(__entry->dir),
		__entry->rc
	)
);
1081 
/*
 * Fires when building the MR's scatterlist fails; records the first
 * segment's DMA address, the mapping direction, and the sg count.
 */
TRACE_EVENT(xprtrdma_frwr_sgerr,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int sg_nents
	),

	TP_ARGS(mr, sg_nents),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(u64, addr)
		__field(u32, dir)
		__field(int, nents)
	),

	TP_fast_assign(
		__entry->mr_id = mr->mr_ibmr->res.id;
		__entry->addr = mr->mr_sg->dma_address;
		__entry->dir = mr->mr_dir;
		__entry->nents = sg_nents;
	),

	TP_printk("mr.id=%u DMA addr=0x%llx (%s) sg_nents=%d",
		__entry->mr_id, __entry->addr,
		xprtrdma_show_direction(__entry->dir),
		__entry->nents
	)
);
1110 
/*
 * Fires when DMA-mapping an MR's scatterlist maps fewer entries than
 * expected; records how many were mapped out of mr_nents.
 */
TRACE_EVENT(xprtrdma_frwr_maperr,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int num_mapped
	),

	TP_ARGS(mr, num_mapped),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(u64, addr)
		__field(u32, dir)
		__field(int, num_mapped)
		__field(int, nents)
	),

	TP_fast_assign(
		__entry->mr_id = mr->mr_ibmr->res.id;
		__entry->addr = mr->mr_sg->dma_address;
		__entry->dir = mr->mr_dir;
		__entry->num_mapped = num_mapped;
		__entry->nents = mr->mr_nents;
	),

	TP_printk("mr.id=%u DMA addr=0x%llx (%s) nents=%d of %d",
		__entry->mr_id, __entry->addr,
		xprtrdma_show_direction(__entry->dir),
		__entry->num_mapped, __entry->nents
	)
);
1141 
/* Instantiate MR lifecycle events */
DEFINE_MR_EVENT(fastreg);
DEFINE_MR_EVENT(localinv);
DEFINE_MR_EVENT(reminv);
DEFINE_MR_EVENT(map);

/* unmap can fire after the MR is detached from its request */
DEFINE_ANON_MR_EVENT(unmap);
1148 
1149 TRACE_EVENT(xprtrdma_dma_maperr,
1150 	TP_PROTO(
1151 		u64 addr
1152 	),
1153 
1154 	TP_ARGS(addr),
1155 
1156 	TP_STRUCT__entry(
1157 		__field(u64, addr)
1158 	),
1159 
1160 	TP_fast_assign(
1161 		__entry->addr = addr;
1162 	),
1163 
1164 	TP_printk("dma addr=0x%llx\n", __entry->addr)
1165 );
1166 
1167 /**
1168  ** Reply events
1169  **/
1170 
/*
 * Fires when an incoming reply is matched to its RPC; records the
 * reply XID and the credit grant carried in the reply.
 */
TRACE_EVENT(xprtrdma_reply,
	TP_PROTO(
		const struct rpc_task *task,
		const struct rpcrdma_rep *rep,
		unsigned int credits
	),

	TP_ARGS(task, rep, credits),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(unsigned int, credits)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rep->rr_xid);
		__entry->credits = credits;
	),

	TP_printk("task:%u@%u xid=0x%08x credits=%u",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->credits
	)
);
1199 
/* Reply-processing outcomes; event class declared earlier in this file */
DEFINE_REPLY_EVENT(vers);
DEFINE_REPLY_EVENT(rqst);
DEFINE_REPLY_EVENT(short);
DEFINE_REPLY_EVENT(hdr);
1204 
/*
 * Records an RDMA_ERROR reply reporting a version mismatch, along
 * with the [min, max] protocol version range the peer advertised.
 */
TRACE_EVENT(xprtrdma_err_vers,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		__be32 *min,
		__be32 *max
	),

	TP_ARGS(rqst, min, max),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(u32, min)
		__field(u32, max)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		/* min/max point at on-the-wire XDR words */
		__entry->min = be32_to_cpup(min);
		__entry->max = be32_to_cpup(max);
	),

	TP_printk("task:%u@%u xid=0x%08x versions=[%u, %u]",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->min, __entry->max
	)
);
1235 
/*
 * Records a chunk-related error for the given request, identified by
 * task/client ids and XID.
 */
TRACE_EVENT(xprtrdma_err_chunk,
	TP_PROTO(
		const struct rpc_rqst *rqst
	),

	TP_ARGS(rqst),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
	),

	TP_printk("task:%u@%u xid=0x%08x",
		__entry->task_id, __entry->client_id, __entry->xid
	)
);
1259 
1260 TRACE_EVENT(xprtrdma_err_unrecognized,
1261 	TP_PROTO(
1262 		const struct rpc_rqst *rqst,
1263 		__be32 *procedure
1264 	),
1265 
1266 	TP_ARGS(rqst, procedure),
1267 
1268 	TP_STRUCT__entry(
1269 		__field(unsigned int, task_id)
1270 		__field(unsigned int, client_id)
1271 		__field(u32, xid)
1272 		__field(u32, procedure)
1273 	),
1274 
1275 	TP_fast_assign(
1276 		__entry->task_id = rqst->rq_task->tk_pid;
1277 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
1278 		__entry->procedure = be32_to_cpup(procedure);
1279 	),
1280 
1281 	TP_printk("task:%u@%u xid=0x%08x procedure=%u",
1282 		__entry->task_id, __entry->client_id, __entry->xid,
1283 		__entry->procedure
1284 	)
1285 );
1286 
/*
 * Records the number of bytes copied ("fixed up") while placing reply
 * data, together with the receive buffer's head/page/tail geometry.
 */
TRACE_EVENT(xprtrdma_fixup,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		unsigned long fixup
	),

	TP_ARGS(rqst, fixup),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(unsigned long, fixup)
		__field(size_t, headlen)
		__field(unsigned int, pagelen)
		__field(size_t, taillen)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->fixup = fixup;
		/* Snapshot of the xdr_buf receive geometry */
		__entry->headlen = rqst->rq_rcv_buf.head[0].iov_len;
		__entry->pagelen = rqst->rq_rcv_buf.page_len;
		__entry->taillen = rqst->rq_rcv_buf.tail[0].iov_len;
	),

	TP_printk("task:%u@%u fixup=%lu xdr=%zu/%u/%zu",
		__entry->task_id, __entry->client_id, __entry->fixup,
		__entry->headlen, __entry->pagelen, __entry->taillen
	)
);
1318 
/*
 * Records one decoded RDMA segment as handle/length/offset, displayed
 * in the conventional length@offset:handle form.
 */
TRACE_EVENT(xprtrdma_decode_seg,
	TP_PROTO(
		u32 handle,
		u32 length,
		u64 offset
	),

	TP_ARGS(handle, length, offset),

	TP_STRUCT__entry(
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->handle = handle;
		__entry->length = length;
		__entry->offset = offset;
	),

	TP_printk("%u@0x%016llx:0x%08x",
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle
	)
);
1345 
/*
 * Marks an MR zap operation on behalf of the given RPC task.
 */
TRACE_EVENT(xprtrdma_mrs_zap,
	TP_PROTO(
		const struct rpc_task *task
	),

	TP_ARGS(task),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
	),

	TP_printk("task:%u@%u",
		__entry->task_id, __entry->client_id
	)
);
1367 
1368 /**
1369  ** Callback events
1370  **/
1371 
/*
 * Records backchannel setup: the peer address/port of the transport
 * and the number of backchannel requests provisioned.
 */
TRACE_EVENT(xprtrdma_cb_setup,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int reqs
	),

	TP_ARGS(reqs, r_xprt) == 0 ? TP_ARGS(r_xprt, reqs) : TP_ARGS(r_xprt, reqs),

	TP_STRUCT__entry(
		__field(unsigned int, reqs)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->reqs = reqs;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s %u reqs",
		__get_str(addr), __get_str(port), __entry->reqs
	)
);
1396 
/* Backchannel call/reply events; class declared earlier in this file */
DEFINE_CALLBACK_EVENT(call);
DEFINE_CALLBACK_EVENT(reply);
1399 
1400 /**
1401  ** Server-side RPC/RDMA events
1402  **/
1403 
/*
 * Event class for server-side accept-path failures: records the peer
 * address and a status code for each stage of connection acceptance.
 */
DECLARE_EVENT_CLASS(svcrdma_accept_class,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		long status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(long, status)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s status=%ld",
		__get_str(addr), __entry->status
	)
);
1426 
/* Instantiates svcrdma_<name>_err from the accept-failure class above */
#define DEFINE_ACCEPT_EVENT(name) \
		DEFINE_EVENT(svcrdma_accept_class, svcrdma_##name##_err, \
				TP_PROTO( \
					const struct svcxprt_rdma *rdma, \
					long status \
				), \
				TP_ARGS(rdma, status))

DEFINE_ACCEPT_EVENT(pd);
DEFINE_ACCEPT_EVENT(qp);
DEFINE_ACCEPT_EVENT(fabric);
DEFINE_ACCEPT_EVENT(initdepth);
DEFINE_ACCEPT_EVENT(accept);
1440 
/* Export the RPC/RDMA procedure values so user-space trace tooling
 * can resolve the symbolic names used by show_rpcrdma_proc().
 */
TRACE_DEFINE_ENUM(RDMA_MSG);
TRACE_DEFINE_ENUM(RDMA_NOMSG);
TRACE_DEFINE_ENUM(RDMA_MSGP);
TRACE_DEFINE_ENUM(RDMA_DONE);
TRACE_DEFINE_ENUM(RDMA_ERROR);

/* Map an RPC/RDMA procedure number to its protocol name */
#define show_rpcrdma_proc(x)						\
		__print_symbolic(x,					\
				{ RDMA_MSG, "RDMA_MSG" },		\
				{ RDMA_NOMSG, "RDMA_NOMSG" },		\
				{ RDMA_MSGP, "RDMA_MSGP" },		\
				{ RDMA_DONE, "RDMA_DONE" },		\
				{ RDMA_ERROR, "RDMA_ERROR" })
1454 
/*
 * Records a successfully decoded RPC/RDMA transport header: XID,
 * protocol version, credit request, procedure, and header length.
 */
TRACE_EVENT(svcrdma_decode_rqst,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		__be32 *p,
		unsigned int hdrlen
	),

	TP_ARGS(ctxt, p, hdrlen),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
		__field(unsigned int, hdrlen)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		/* p walks the header's XDR words in wire order:
		 * xid, vers, credits, proc — do not reorder.
		 */
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
		__entry->hdrlen = hdrlen;
	),

	TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%s hdrlen=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->xid, __entry->vers, __entry->credits,
		show_rpcrdma_proc(__entry->proc), __entry->hdrlen)
);
1489 
/*
 * Records an ingress message too short to contain a complete
 * RPC/RDMA transport header.
 */
TRACE_EVENT(svcrdma_decode_short_err,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		unsigned int hdrlen
	),

	TP_ARGS(ctxt, hdrlen),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned int, hdrlen)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		__entry->hdrlen = hdrlen;
	),

	TP_printk("cq.id=%u cid=%d hdrlen=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->hdrlen)
);
1514 
/*
 * Event class for transport-header decode failures. Captures the raw
 * header fields so the offending request can be identified.
 */
DECLARE_EVENT_CLASS(svcrdma_badreq_event,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		__be32 *p
	),

	TP_ARGS(ctxt, p),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		/* Wire order: xid, vers, credits, proc — do not reorder */
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
	),

	TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->xid, __entry->vers, __entry->credits, __entry->proc)
);
1545 
/* Instantiates svcrdma_decode_<name>_err from the bad-request class */
#define DEFINE_BADREQ_EVENT(name)					\
		DEFINE_EVENT(svcrdma_badreq_event,			\
			     svcrdma_decode_##name##_err,		\
				TP_PROTO(				\
					const struct svc_rdma_recv_ctxt *ctxt,	\
					__be32 *p			\
				),					\
				TP_ARGS(ctxt, p))

DEFINE_BADREQ_EVENT(badvers);
DEFINE_BADREQ_EVENT(drop);
DEFINE_BADREQ_EVENT(badproc);
DEFINE_BADREQ_EVENT(parse);
1559 
/*
 * Records one Write segment as it is encoded into a reply, keyed by
 * the send context's completion id and the segment's ordinal number.
 */
TRACE_EVENT(svcrdma_encode_wseg,
	TP_PROTO(
		const struct svc_rdma_send_ctxt *ctxt,
		u32 segno,
		u32 handle,
		u32 length,
		u64 offset
	),

	TP_ARGS(ctxt, segno, handle, length, offset),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, segno)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->sc_cid.ci_queue_id;
		__entry->completion_id = ctxt->sc_cid.ci_completion_id;
		__entry->segno = segno;
		__entry->handle = handle;
		__entry->length = length;
		__entry->offset = offset;
	),

	TP_printk("cq_id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
		__entry->cq_id, __entry->completion_id,
		__entry->segno, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle
	)
);
1595 
/*
 * Records one decoded Read segment: the chunk's segment count and
 * position plus the segment's handle/length/offset triple.
 */
TRACE_EVENT(svcrdma_decode_rseg,
	TP_PROTO(
		const struct rpc_rdma_cid *cid,
		const struct svc_rdma_chunk *chunk,
		const struct svc_rdma_segment *segment
	),

	TP_ARGS(cid, chunk, segment),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, segno)
		__field(u32, position)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		/* Note: "segno" here is the chunk's running segment count */
		__entry->segno = chunk->ch_segcount;
		__entry->position = chunk->ch_position;
		__entry->handle = segment->rs_handle;
		__entry->length = segment->rs_length;
		__entry->offset = segment->rs_offset;
	),

	TP_printk("cq_id=%u cid=%d segno=%u position=%u %u@0x%016llx:0x%08x",
		__entry->cq_id, __entry->completion_id,
		__entry->segno, __entry->position, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle
	)
);
1631 
/*
 * Records one decoded Write segment, looked up by index within the
 * chunk's segment array.
 */
TRACE_EVENT(svcrdma_decode_wseg,
	TP_PROTO(
		const struct rpc_rdma_cid *cid,
		const struct svc_rdma_chunk *chunk,
		u32 segno
	),

	TP_ARGS(cid, chunk, segno),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, segno)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		/* Caller guarantees segno indexes a valid segment */
		const struct svc_rdma_segment *segment =
			&chunk->ch_segments[segno];

		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->segno = segno;
		__entry->handle = segment->rs_handle;
		__entry->length = segment->rs_length;
		__entry->offset = segment->rs_offset;
	),

	TP_printk("cq_id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
		__entry->cq_id, __entry->completion_id,
		__entry->segno, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle
	)
);
1668 
/*
 * Event class for server-side RDMA_ERROR replies: records only the
 * XID of the failing request (converted to host byte order).
 */
DECLARE_EVENT_CLASS(svcrdma_error_event,
	TP_PROTO(
		__be32 xid
	),

	TP_ARGS(xid),

	TP_STRUCT__entry(
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpu(xid);
	),

	TP_printk("xid=0x%08x",
		__entry->xid
	)
);
1688 
/* Instantiates svcrdma_err_<name> from the error-reply class above */
#define DEFINE_ERROR_EVENT(name)					\
		DEFINE_EVENT(svcrdma_error_event, svcrdma_err_##name,	\
				TP_PROTO(				\
					__be32 xid			\
				),					\
				TP_ARGS(xid))

DEFINE_ERROR_EVENT(vers);
DEFINE_ERROR_EVENT(chunk);
1698 
1699 /**
1700  ** Server-side RDMA API events
1701  **/
1702 
/*
 * Event class for server-side DMA map/unmap activity: records the
 * DMA address and length plus the device and peer address strings.
 */
DECLARE_EVENT_CLASS(svcrdma_dma_map_class,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		u64 dma_addr,
		u32 length
	),

	TP_ARGS(rdma, dma_addr, length),

	TP_STRUCT__entry(
		__field(u64, dma_addr)
		__field(u32, length)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->dma_addr = dma_addr;
		__entry->length = length;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s dma_addr=%llu length=%u",
		__get_str(addr), __get_str(device),
		__entry->dma_addr, __entry->length
	)
);
1731 
/* Instantiates svcrdma_<name> from the DMA-map class above */
#define DEFINE_SVC_DMA_EVENT(name)					\
		DEFINE_EVENT(svcrdma_dma_map_class, svcrdma_##name,	\
				TP_PROTO(				\
					const struct svcxprt_rdma *rdma,\
					u64 dma_addr,			\
					u32 length			\
				),					\
				TP_ARGS(rdma, dma_addr, length))

DEFINE_SVC_DMA_EVENT(dma_map_page);
DEFINE_SVC_DMA_EVENT(dma_map_err);
DEFINE_SVC_DMA_EVENT(dma_unmap_page);
1744 
/*
 * Records a failure to DMA-map a scatterlist for an RDMA Read/Write:
 * the entry count being mapped and the resulting status code.
 */
TRACE_EVENT(svcrdma_dma_map_rw_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		unsigned int nents,
		int status
	),

	TP_ARGS(rdma, nents, status),

	TP_STRUCT__entry(
		__field(int, status)
		__field(unsigned int, nents)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__entry->nents = nents;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s nents=%u status=%d",
		__get_str(addr), __get_str(device), __entry->nents,
		__entry->status
	)
);
1773 
/*
 * Records failure to obtain a read/write context, along with the
 * number of SGEs that were requested.
 */
TRACE_EVENT(svcrdma_no_rwctx_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		unsigned int num_sges
	),

	TP_ARGS(rdma, num_sges),

	TP_STRUCT__entry(
		__field(unsigned int, num_sges)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->num_sges = num_sges;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s num_sges=%d",
		__get_str(addr), __get_str(device), __entry->num_sges
	)
);
1798 
/*
 * Records a page-array overrun while processing the given request:
 * the page index that exceeded the available pages, plus the XID.
 */
TRACE_EVENT(svcrdma_page_overrun_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		const struct svc_rqst *rqst,
		unsigned int pageno
	),

	TP_ARGS(rdma, rqst, pageno),

	TP_STRUCT__entry(
		__field(unsigned int, pageno)
		__field(u32, xid)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->pageno = pageno;
		__entry->xid = __be32_to_cpu(rqst->rq_xid);
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s xid=0x%08x pageno=%u", __get_str(addr),
		__get_str(device), __entry->xid, __entry->pageno
	)
);
1826 
/*
 * Records a Write chunk too small for the payload: the bytes still
 * unconsumed, and how far through the chunk's segments we got.
 */
TRACE_EVENT(svcrdma_small_wrch_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		unsigned int remaining,
		unsigned int seg_no,
		unsigned int num_segs
	),

	TP_ARGS(rdma, remaining, seg_no, num_segs),

	TP_STRUCT__entry(
		__field(unsigned int, remaining)
		__field(unsigned int, seg_no)
		__field(unsigned int, num_segs)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->remaining = remaining;
		__entry->seg_no = seg_no;
		__entry->num_segs = num_segs;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s remaining=%u seg_no=%u num_segs=%u",
		__get_str(addr), __get_str(device), __entry->remaining,
		__entry->seg_no, __entry->num_segs
	)
);
1858 
1859 TRACE_EVENT(svcrdma_send_pullup,
1860 	TP_PROTO(
1861 		const struct svc_rdma_send_ctxt *ctxt,
1862 		unsigned int msglen
1863 	),
1864 
1865 	TP_ARGS(ctxt, msglen),
1866 
1867 	TP_STRUCT__entry(
1868 		__field(u32, cq_id)
1869 		__field(int, completion_id)
1870 		__field(unsigned int, hdrlen)
1871 		__field(unsigned int, msglen)
1872 	),
1873 
1874 	TP_fast_assign(
1875 		__entry->cq_id = ctxt->sc_cid.ci_queue_id;
1876 		__entry->completion_id = ctxt->sc_cid.ci_completion_id;
1877 		__entry->hdrlen = ctxt->sc_hdrbuf.len,
1878 		__entry->msglen = msglen;
1879 	),
1880 
1881 	TP_printk("cq_id=%u cid=%d hdr=%u msg=%u (total %u)",
1882 		__entry->cq_id, __entry->completion_id,
1883 		__entry->hdrlen, __entry->msglen,
1884 		__entry->hdrlen + __entry->msglen)
1885 );
1886 
/*
 * Records a failure to send a reply, identified by peer address,
 * XID, and status code.
 */
TRACE_EVENT(svcrdma_send_err,
	TP_PROTO(
		const struct svc_rqst *rqst,
		int status
	),

	TP_ARGS(rqst, status),

	TP_STRUCT__entry(
		__field(int, status)
		__field(u32, xid)
		__string(addr, rqst->rq_xprt->xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__entry->xid = __be32_to_cpu(rqst->rq_xid);
		__assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
	),

	TP_printk("addr=%s xid=0x%08x status=%d", __get_str(addr),
		__entry->xid, __entry->status
	)
);
1911 
/*
 * Records a posted Send WR: SGE count and, for Send-with-Invalidate,
 * the rkey being invalidated (0 for a plain Send).
 */
TRACE_EVENT(svcrdma_post_send,
	TP_PROTO(
		const struct svc_rdma_send_ctxt *ctxt
	),

	TP_ARGS(ctxt),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned int, num_sge)
		__field(u32, inv_rkey)
	),

	TP_fast_assign(
		const struct ib_send_wr *wr = &ctxt->sc_send_wr;

		__entry->cq_id = ctxt->sc_cid.ci_queue_id;
		__entry->completion_id = ctxt->sc_cid.ci_completion_id;
		__entry->num_sge = wr->num_sge;
		/* ex.invalidate_rkey is valid only for SEND_WITH_INV */
		__entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ?
					wr->ex.invalidate_rkey : 0;
	),

	TP_printk("cq_id=%u cid=%d num_sge=%u inv_rkey=0x%08x",
		__entry->cq_id, __entry->completion_id,
		__entry->num_sge, __entry->inv_rkey
	)
);

/* Send completion; class declared at the top of this file */
DEFINE_COMPLETION_EVENT(svcrdma_wc_send);
1943 
1944 TRACE_EVENT(svcrdma_post_recv,
1945 	TP_PROTO(
1946 		const struct svc_rdma_recv_ctxt *ctxt
1947 	),
1948 
1949 	TP_ARGS(ctxt),
1950 
1951 	TP_STRUCT__entry(
1952 		__field(u32, cq_id)
1953 		__field(int, completion_id)
1954 	),
1955 
1956 	TP_fast_assign(
1957 		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
1958 		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
1959 	),
1960 
1961 	TP_printk("cq.id=%d cid=%d",
1962 		__entry->cq_id, __entry->completion_id
1963 	)
1964 );
1965 
/* Receive completions; classes declared earlier in this file */
DEFINE_RECEIVE_SUCCESS_EVENT(svcrdma_wc_recv);
DEFINE_RECEIVE_FLUSH_EVENT(svcrdma_wc_recv_flush);
DEFINE_RECEIVE_FLUSH_EVENT(svcrdma_wc_recv_err);
1969 
/*
 * Records a failure to post to the Receive Queue.
 */
TRACE_EVENT(svcrdma_rq_post_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		int status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(int, status)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s status=%d",
		__get_str(addr), __entry->status
	)
);
1992 
/*
 * Event class for posting chunk transfers: records how many Send
 * Queue entries the chunk's WR chain consumes.
 */
DECLARE_EVENT_CLASS(svcrdma_post_chunk_class,
	TP_PROTO(
		const struct rpc_rdma_cid *cid,
		int sqecount
	),

	TP_ARGS(cid, sqecount),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(int, sqecount)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->sqecount = sqecount;
	),

	TP_printk("cq.id=%u cid=%d sqecount=%d",
		__entry->cq_id, __entry->completion_id,
		__entry->sqecount
	)
);
2018 
/* Instantiates svcrdma_post_<name>_chunk from the class above */
#define DEFINE_POST_CHUNK_EVENT(name)					\
		DEFINE_EVENT(svcrdma_post_chunk_class,			\
				svcrdma_post_##name##_chunk,		\
				TP_PROTO(				\
					const struct rpc_rdma_cid *cid,	\
					int sqecount			\
				),					\
				TP_ARGS(cid, sqecount))

DEFINE_POST_CHUNK_EVENT(read);
DEFINE_POST_CHUNK_EVENT(write);
DEFINE_POST_CHUNK_EVENT(reply);

/* RDMA Read/Write completions; class declared at the top of this file */
DEFINE_COMPLETION_EVENT(svcrdma_wc_read);
DEFINE_COMPLETION_EVENT(svcrdma_wc_write);
2034 
/*
 * Records an asynchronous QP event, with the peer address formatted
 * into a fixed-size array at event time.
 */
TRACE_EVENT(svcrdma_qp_error,
	TP_PROTO(
		const struct ib_event *event,
		const struct sockaddr *sap
	),

	TP_ARGS(event, sap),

	TP_STRUCT__entry(
		__field(unsigned int, event)
		__string(device, event->device->name)
		/* Formatted "addr:port"; sized for INET6 plus port text */
		__array(__u8, addr, INET6_ADDRSTRLEN + 10)
	),

	TP_fast_assign(
		__entry->event = event->event;
		__assign_str(device, event->device->name);
		/* size - 1 is belt-and-braces; snprintf already
		 * NUL-terminates within the given size
		 */
		snprintf(__entry->addr, sizeof(__entry->addr) - 1,
			 "%pISpc", sap);
	),

	TP_printk("addr=%s dev=%s event=%s (%u)",
		__entry->addr, __get_str(device),
		rdma_show_ib_event(__entry->event), __entry->event
	)
);
2061 
/*
 * Event class for Send Queue accounting: snapshots available SQ
 * entries against the configured SQ depth.
 */
DECLARE_EVENT_CLASS(svcrdma_sendqueue_event,
	TP_PROTO(
		const struct svcxprt_rdma *rdma
	),

	TP_ARGS(rdma),

	TP_STRUCT__entry(
		__field(int, avail)
		__field(int, depth)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		/* atomic_read: avail may change immediately after */
		__entry->avail = atomic_read(&rdma->sc_sq_avail);
		__entry->depth = rdma->sc_sq_depth;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s sc_sq_avail=%d/%d",
		__get_str(addr), __entry->avail, __entry->depth
	)
);
2085 
/* Instantiates svcrdma_sq_<name> from the send-queue class above */
#define DEFINE_SQ_EVENT(name)						\
		DEFINE_EVENT(svcrdma_sendqueue_event, svcrdma_sq_##name,\
				TP_PROTO(				\
					const struct svcxprt_rdma *rdma \
				),					\
				TP_ARGS(rdma))

DEFINE_SQ_EVENT(full);
DEFINE_SQ_EVENT(retry);
2095 
/*
 * Records a failure to post to the Send Queue, including the SQ
 * availability snapshot at the time of the failure.
 */
TRACE_EVENT(svcrdma_sq_post_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		int status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(int, avail)
		__field(int, depth)
		__field(int, status)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->avail = atomic_read(&rdma->sc_sq_avail);
		__entry->depth = rdma->sc_sq_depth;
		__entry->status = status;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s sc_sq_avail=%d/%d status=%d",
		__get_str(addr), __entry->avail, __entry->depth,
		__entry->status
	)
);
2123 
2124 #endif /* _TRACE_RPCRDMA_H */
2125 
2126 #include <trace/define_trace.h>
2127