/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2017, 2018 Oracle.  All rights reserved.
 *
 * Trace point definitions for the "rpcrdma" subsystem.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM rpcrdma

#if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_RPCRDMA_H

#include <linux/scatterlist.h>
#include <linux/sunrpc/rpc_rdma_cid.h>
#include <linux/tracepoint.h>
#include <rdma/ib_cm.h>
#include <trace/events/rdma.h>

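/*
 * How this header is consumed (illustrative note): instrumented code
 * includes it directly to get the trace_*() inline hooks, while exactly
 * one source file defines CREATE_TRACE_POINTS before the include so
 * that <trace/define_trace.h> (pulled in at the bottom of this file)
 * re-reads the header and emits the tracepoint bodies:
 *
 *	#define CREATE_TRACE_POINTS
 *	#include <trace/events/rpcrdma.h>
 *
 * At run time the events appear under
 * /sys/kernel/tracing/events/rpcrdma/ and can be enabled individually
 * or as a group from there.
 */
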
/**
 ** Event classes
 **/

DECLARE_EVENT_CLASS(rpcrdma_completion_class,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid
	),

	TP_ARGS(wc, cid),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned long, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->status = wc->status;
		if (wc->status)
			__entry->vendor_err = wc->vendor_err;
		else
			__entry->vendor_err = 0;
	),

	TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
		__entry->cq_id, __entry->completion_id,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

#define DEFINE_COMPLETION_EVENT(name)					\
		DEFINE_EVENT(rpcrdma_completion_class, name,		\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpc_rdma_cid *cid	\
				),					\
				TP_ARGS(wc, cid))

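/*
 * Illustrative note: DECLARE_EVENT_CLASS() defines the record layout,
 * assignment, and formatting once; each DEFINE_EVENT() wrapper then
 * stamps out a named event sharing that class. For example,
 *
 *	DEFINE_COMPLETION_EVENT(xprtrdma_wc_send);
 *
 * yields an event invoked from C as
 *
 *	trace_xprtrdma_wc_send(wc, cid);
 *
 * at a fraction of the object size of a standalone TRACE_EVENT().
 */
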
DECLARE_EVENT_CLASS(rpcrdma_send_completion_class,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid
	),

	TP_ARGS(wc, cid),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
	),

	TP_printk("cq.id=%u cid=%d",
		__entry->cq_id, __entry->completion_id
	)
);

#define DEFINE_SEND_COMPLETION_EVENT(name)				\
		DEFINE_EVENT(rpcrdma_send_completion_class, name,	\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpc_rdma_cid *cid	\
				),					\
				TP_ARGS(wc, cid))

DECLARE_EVENT_CLASS(rpcrdma_send_flush_class,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid
	),

	TP_ARGS(wc, cid),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned long, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->status = wc->status;
		__entry->vendor_err = wc->vendor_err;
	),

	TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
		__entry->cq_id, __entry->completion_id,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

#define DEFINE_SEND_FLUSH_EVENT(name)					\
		DEFINE_EVENT(rpcrdma_send_flush_class, name,		\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpc_rdma_cid *cid	\
				),					\
				TP_ARGS(wc, cid))

DECLARE_EVENT_CLASS(rpcrdma_mr_completion_class,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid
	),

	TP_ARGS(wc, cid),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned long, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->status = wc->status;
		if (wc->status)
			__entry->vendor_err = wc->vendor_err;
		else
			__entry->vendor_err = 0;
	),

	TP_printk("cq.id=%u mr.id=%d status=%s (%lu/0x%x)",
		__entry->cq_id, __entry->completion_id,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

#define DEFINE_MR_COMPLETION_EVENT(name)				\
		DEFINE_EVENT(rpcrdma_mr_completion_class, name,		\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpc_rdma_cid *cid	\
				),					\
				TP_ARGS(wc, cid))

DECLARE_EVENT_CLASS(rpcrdma_receive_completion_class,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid
	),

	TP_ARGS(wc, cid),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, received)
		__field(unsigned long, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->status = wc->status;
		if (wc->status) {
			__entry->received = 0;
			__entry->vendor_err = wc->vendor_err;
		} else {
			__entry->received = wc->byte_len;
			__entry->vendor_err = 0;
		}
	),

	TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x) received=%u",
		__entry->cq_id, __entry->completion_id,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err,
		__entry->received
	)
);

#define DEFINE_RECEIVE_COMPLETION_EVENT(name)				\
		DEFINE_EVENT(rpcrdma_receive_completion_class, name,	\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpc_rdma_cid *cid	\
				),					\
				TP_ARGS(wc, cid))

DECLARE_EVENT_CLASS(rpcrdma_receive_success_class,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid
	),

	TP_ARGS(wc, cid),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, received)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->received = wc->byte_len;
	),

	TP_printk("cq.id=%u cid=%d received=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->received
	)
);

#define DEFINE_RECEIVE_SUCCESS_EVENT(name)				\
		DEFINE_EVENT(rpcrdma_receive_success_class, name,	\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpc_rdma_cid *cid	\
				),					\
				TP_ARGS(wc, cid))

DECLARE_EVENT_CLASS(rpcrdma_receive_flush_class,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid
	),

	TP_ARGS(wc, cid),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned long, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->status = wc->status;
		__entry->vendor_err = wc->vendor_err;
	),

	TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
		__entry->cq_id, __entry->completion_id,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

#define DEFINE_RECEIVE_FLUSH_EVENT(name)				\
		DEFINE_EVENT(rpcrdma_receive_flush_class, name,		\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpc_rdma_cid *cid	\
				),					\
				TP_ARGS(wc, cid))

DECLARE_EVENT_CLASS(xprtrdma_reply_class,
	TP_PROTO(
		const struct rpcrdma_rep *rep
	),

	TP_ARGS(rep),

	TP_STRUCT__entry(
		__field(u32, xid)
		__field(u32, version)
		__field(u32, proc)
		__string(addr, rpcrdma_addrstr(rep->rr_rxprt))
		__string(port, rpcrdma_portstr(rep->rr_rxprt))
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpu(rep->rr_xid);
		__entry->version = be32_to_cpu(rep->rr_vers);
		__entry->proc = be32_to_cpu(rep->rr_proc);
		__assign_str(addr, rpcrdma_addrstr(rep->rr_rxprt));
		__assign_str(port, rpcrdma_portstr(rep->rr_rxprt));
	),

	TP_printk("peer=[%s]:%s xid=0x%08x version=%u proc=%u",
		__get_str(addr), __get_str(port),
		__entry->xid, __entry->version, __entry->proc
	)
);

#define DEFINE_REPLY_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_reply_class,			\
				xprtrdma_reply_##name##_err,		\
				TP_PROTO(				\
					const struct rpcrdma_rep *rep	\
				),					\
				TP_ARGS(rep))

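/*
 * Illustrative note: this wrapper mangles the event name, so
 * DEFINE_REPLY_EVENT(vers), for instance, defines the event
 * xprtrdma_reply_vers_err, invoked as trace_xprtrdma_reply_vers_err(rep).
 */
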
DECLARE_EVENT_CLASS(xprtrdma_rxprt,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt
	),

	TP_ARGS(r_xprt),

	TP_STRUCT__entry(
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s",
		__get_str(addr), __get_str(port)
	)
);

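/*
 * Illustrative note: __string() reserves variable-length space in the
 * trace record for the peer address and port strings, and
 * __assign_str() copies them at trace time, so the buffers returned by
 * rpcrdma_addrstr()/rpcrdma_portstr() need not outlive the event.
 */
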
#define DEFINE_RXPRT_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_rxprt, name,			\
				TP_PROTO(				\
					const struct rpcrdma_xprt *r_xprt \
				),					\
				TP_ARGS(r_xprt))

DECLARE_EVENT_CLASS(xprtrdma_connect_class,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		int rc
	),

	TP_ARGS(r_xprt, rc),

	TP_STRUCT__entry(
		__field(int, rc)
		__field(int, connect_status)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->rc = rc;
		__entry->connect_status = r_xprt->rx_ep->re_connect_status;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s rc=%d connection status=%d",
		__get_str(addr), __get_str(port),
		__entry->rc, __entry->connect_status
	)
);

#define DEFINE_CONN_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_connect_class, xprtrdma_##name,	\
				TP_PROTO(				\
					const struct rpcrdma_xprt *r_xprt, \
					int rc				\
				),					\
				TP_ARGS(r_xprt, rc))

DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
	TP_PROTO(
		const struct rpc_task *task,
		unsigned int pos,
		struct rpcrdma_mr *mr,
		int nsegs
	),

	TP_ARGS(task, pos, mr, nsegs),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(unsigned int, pos)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(int, nsegs)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->pos = pos;
		__entry->nents = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->nsegs = nsegs;
	),

	TP_printk("task:%u@%u pos=%u %u@0x%016llx:0x%08x (%s)",
		__entry->task_id, __entry->client_id,
		__entry->pos, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		__entry->nents < __entry->nsegs ? "more" : "last"
	)
);

#define DEFINE_RDCH_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_rdch_event, xprtrdma_chunk_##name,\
				TP_PROTO(				\
					const struct rpc_task *task,	\
					unsigned int pos,		\
					struct rpcrdma_mr *mr,		\
					int nsegs			\
				),					\
				TP_ARGS(task, pos, mr, nsegs))

DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
	TP_PROTO(
		const struct rpc_task *task,
		struct rpcrdma_mr *mr,
		int nsegs
	),

	TP_ARGS(task, mr, nsegs),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(int, nsegs)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->nents = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->nsegs = nsegs;
	),

	TP_printk("task:%u@%u %u@0x%016llx:0x%08x (%s)",
		__entry->task_id, __entry->client_id,
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle,
		__entry->nents < __entry->nsegs ? "more" : "last"
	)
);

#define DEFINE_WRCH_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_wrch_event, xprtrdma_chunk_##name,\
				TP_PROTO(				\
					const struct rpc_task *task,	\
					struct rpcrdma_mr *mr,		\
					int nsegs			\
				),					\
				TP_ARGS(task, mr, nsegs))

TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL);
TRACE_DEFINE_ENUM(DMA_TO_DEVICE);
TRACE_DEFINE_ENUM(DMA_FROM_DEVICE);
TRACE_DEFINE_ENUM(DMA_NONE);

#define xprtrdma_show_direction(x)					\
		__print_symbolic(x,					\
				{ DMA_BIDIRECTIONAL, "BIDIR" },		\
				{ DMA_TO_DEVICE, "TO_DEVICE" },		\
				{ DMA_FROM_DEVICE, "FROM_DEVICE" },	\
				{ DMA_NONE, "NONE" })

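/*
 * Illustrative note: TRACE_DEFINE_ENUM() exports the enum values so
 * user-space tools can decode the __print_symbolic() mapping above.
 * A DMA_TO_DEVICE registration thus renders as, e.g.:
 *
 *	mr.id=3 nents=1 4096@0x00000000abcd0000:0x00000500 (TO_DEVICE)
 */
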
DECLARE_EVENT_CLASS(xprtrdma_mr_class,
	TP_PROTO(
		const struct rpcrdma_mr *mr
	),

	TP_ARGS(mr),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, mr_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(u32, dir)
	),

	TP_fast_assign(
		const struct rpcrdma_req *req = mr->mr_req;
		const struct rpc_task *task = req->rl_slot.rq_task;

		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->mr_id  = mr->mr_ibmr->res.id;
		__entry->nents  = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->dir    = mr->mr_dir;
	),

	TP_printk("task:%u@%u mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)",
		__entry->task_id, __entry->client_id,
		__entry->mr_id, __entry->nents, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		xprtrdma_show_direction(__entry->dir)
	)
);

#define DEFINE_MR_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_mr_class,				\
				xprtrdma_mr_##name,			\
				TP_PROTO(				\
					const struct rpcrdma_mr *mr	\
				),					\
				TP_ARGS(mr))

DECLARE_EVENT_CLASS(xprtrdma_anonymous_mr_class,
	TP_PROTO(
		const struct rpcrdma_mr *mr
	),

	TP_ARGS(mr),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(u32, dir)
	),

	TP_fast_assign(
		__entry->mr_id  = mr->mr_ibmr->res.id;
		__entry->nents  = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->dir    = mr->mr_dir;
	),

	TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)",
		__entry->mr_id, __entry->nents, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		xprtrdma_show_direction(__entry->dir)
	)
);

#define DEFINE_ANON_MR_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_anonymous_mr_class,		\
				xprtrdma_mr_##name,			\
				TP_PROTO(				\
					const struct rpcrdma_mr *mr	\
				),					\
				TP_ARGS(mr))

DECLARE_EVENT_CLASS(xprtrdma_callback_class,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		const struct rpc_rqst *rqst
	),

	TP_ARGS(r_xprt, rqst),

	TP_STRUCT__entry(
		__field(u32, xid)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s xid=0x%08x",
		__get_str(addr), __get_str(port), __entry->xid
	)
);

#define DEFINE_CALLBACK_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_callback_class,			\
				xprtrdma_cb_##name,			\
				TP_PROTO(				\
					const struct rpcrdma_xprt *r_xprt, \
					const struct rpc_rqst *rqst	\
				),					\
				TP_ARGS(r_xprt, rqst))

/**
 ** Connection events
 **/

TRACE_EVENT(xprtrdma_inline_thresh,
	TP_PROTO(
		const struct rpcrdma_ep *ep
	),

	TP_ARGS(ep),

	TP_STRUCT__entry(
		__field(unsigned int, inline_send)
		__field(unsigned int, inline_recv)
		__field(unsigned int, max_send)
		__field(unsigned int, max_recv)
		__array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
		__array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
	),

	TP_fast_assign(
		const struct rdma_cm_id *id = ep->re_id;

		__entry->inline_send = ep->re_inline_send;
		__entry->inline_recv = ep->re_inline_recv;
		__entry->max_send = ep->re_max_inline_send;
		__entry->max_recv = ep->re_max_inline_recv;
		memcpy(__entry->srcaddr, &id->route.addr.src_addr,
		       sizeof(struct sockaddr_in6));
		memcpy(__entry->dstaddr, &id->route.addr.dst_addr,
		       sizeof(struct sockaddr_in6));
	),

	TP_printk("%pISpc -> %pISpc neg send/recv=%u/%u, calc send/recv=%u/%u",
		__entry->srcaddr, __entry->dstaddr,
		__entry->inline_send, __entry->inline_recv,
		__entry->max_send, __entry->max_recv
	)
);

DEFINE_CONN_EVENT(connect);
DEFINE_CONN_EVENT(disconnect);

DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc);

TRACE_EVENT(xprtrdma_op_connect,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned long delay
	),

	TP_ARGS(r_xprt, delay),

	TP_STRUCT__entry(
		__field(unsigned long, delay)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->delay = delay;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s delay=%lu",
		__get_str(addr), __get_str(port), __entry->delay
	)
);

TRACE_EVENT(xprtrdma_op_set_cto,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned long connect,
		unsigned long reconnect
	),

	TP_ARGS(r_xprt, connect, reconnect),

	TP_STRUCT__entry(
		__field(unsigned long, connect)
		__field(unsigned long, reconnect)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->connect = connect;
		__entry->reconnect = reconnect;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s connect=%lu reconnect=%lu",
		__get_str(addr), __get_str(port),
		__entry->connect / HZ, __entry->reconnect / HZ
	)
);

/**
 ** Call events
 **/

TRACE_EVENT(xprtrdma_createmrs,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int count
	),

	TP_ARGS(r_xprt, count),

	TP_STRUCT__entry(
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
		__field(unsigned int, count)
	),

	TP_fast_assign(
		__entry->count = count;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s created %u MRs",
		__get_str(addr), __get_str(port), __entry->count
	)
);

TRACE_EVENT(xprtrdma_nomrs_err,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		const struct rpcrdma_req *req
	),

	TP_ARGS(r_xprt, req),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;

		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s task:%u@%u",
		__get_str(addr), __get_str(port),
		__entry->task_id, __entry->client_id
	)
);

DEFINE_RDCH_EVENT(read);
DEFINE_WRCH_EVENT(write);
DEFINE_WRCH_EVENT(reply);

TRACE_DEFINE_ENUM(rpcrdma_noch);
TRACE_DEFINE_ENUM(rpcrdma_noch_pullup);
TRACE_DEFINE_ENUM(rpcrdma_noch_mapped);
TRACE_DEFINE_ENUM(rpcrdma_readch);
TRACE_DEFINE_ENUM(rpcrdma_areadch);
TRACE_DEFINE_ENUM(rpcrdma_writech);
TRACE_DEFINE_ENUM(rpcrdma_replych);

#define xprtrdma_show_chunktype(x)					\
		__print_symbolic(x,					\
				{ rpcrdma_noch, "inline" },		\
				{ rpcrdma_noch_pullup, "pullup" },	\
				{ rpcrdma_noch_mapped, "mapped" },	\
				{ rpcrdma_readch, "read list" },	\
				{ rpcrdma_areadch, "*read list" },	\
				{ rpcrdma_writech, "write list" },	\
				{ rpcrdma_replych, "reply chunk" })

TRACE_EVENT(xprtrdma_marshal,
	TP_PROTO(
		const struct rpcrdma_req *req,
		unsigned int rtype,
		unsigned int wtype
	),

	TP_ARGS(req, rtype, wtype),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(unsigned int, hdrlen)
		__field(unsigned int, headlen)
		__field(unsigned int, pagelen)
		__field(unsigned int, taillen)
		__field(unsigned int, rtype)
		__field(unsigned int, wtype)
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;

		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->hdrlen = req->rl_hdrbuf.len;
		__entry->headlen = rqst->rq_snd_buf.head[0].iov_len;
		__entry->pagelen = rqst->rq_snd_buf.page_len;
		__entry->taillen = rqst->rq_snd_buf.tail[0].iov_len;
		__entry->rtype = rtype;
		__entry->wtype = wtype;
	),

	TP_printk("task:%u@%u xid=0x%08x: hdr=%u xdr=%u/%u/%u %s/%s",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->hdrlen,
		__entry->headlen, __entry->pagelen, __entry->taillen,
		xprtrdma_show_chunktype(__entry->rtype),
		xprtrdma_show_chunktype(__entry->wtype)
	)
);

TRACE_EVENT(xprtrdma_marshal_failed,
	TP_PROTO(const struct rpc_rqst *rqst,
		 int ret
	),

	TP_ARGS(rqst, ret),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->ret = ret;
	),

	TP_printk("task:%u@%u xid=0x%08x: ret=%d",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->ret
	)
);

TRACE_EVENT(xprtrdma_prepsend_failed,
	TP_PROTO(const struct rpc_rqst *rqst,
		 int ret
	),

	TP_ARGS(rqst, ret),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->ret = ret;
	),

	TP_printk("task:%u@%u xid=0x%08x: ret=%d",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->ret
	)
);

TRACE_EVENT(xprtrdma_post_send,
	TP_PROTO(
		const struct rpcrdma_req *req
	),

	TP_ARGS(req),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, num_sge)
		__field(int, signaled)
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;
		const struct rpcrdma_sendctx *sc = req->rl_sendctx;

		__entry->cq_id = sc->sc_cid.ci_queue_id;
		__entry->completion_id = sc->sc_cid.ci_completion_id;
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client ?
				     rqst->rq_task->tk_client->cl_clid : -1;
		__entry->num_sge = req->rl_wr.num_sge;
		__entry->signaled = req->rl_wr.send_flags & IB_SEND_SIGNALED;
	),

	TP_printk("task:%u@%u cq.id=%u cid=%d (%d SGE%s) %s",
		__entry->task_id, __entry->client_id,
		__entry->cq_id, __entry->completion_id,
		__entry->num_sge, (__entry->num_sge == 1 ? "" : "s"),
		(__entry->signaled ? "signaled" : "")
	)
);

TRACE_EVENT(xprtrdma_post_send_err,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		const struct rpcrdma_req *req,
		int rc
	),

	TP_ARGS(r_xprt, req, rc),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, rc)
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;
		const struct rpcrdma_ep *ep = r_xprt->rx_ep;

		__entry->cq_id = ep ? ep->re_attr.recv_cq->res.id : 0;
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client ?
				     rqst->rq_task->tk_client->cl_clid : -1;
		__entry->rc = rc;
	),

	TP_printk("task:%u@%u cq.id=%u rc=%d",
		__entry->task_id, __entry->client_id,
		__entry->cq_id, __entry->rc
	)
);

TRACE_EVENT(xprtrdma_post_recv,
	TP_PROTO(
		const struct rpcrdma_rep *rep
	),

	TP_ARGS(rep),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
	),

	TP_fast_assign(
		__entry->cq_id = rep->rr_cid.ci_queue_id;
		__entry->completion_id = rep->rr_cid.ci_completion_id;
	),
	TP_printk("cq.id=%u cid=%d",
		__entry->cq_id, __entry->completion_id
	)
);

TRACE_EVENT(xprtrdma_post_recvs,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int count
	),

	TP_ARGS(r_xprt, count),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(unsigned int, count)
		__field(int, posted)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		const struct rpcrdma_ep *ep = r_xprt->rx_ep;

		__entry->cq_id = ep->re_attr.recv_cq->res.id;
		__entry->count = count;
		__entry->posted = ep->re_receive_count;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s cq.id=%u %u new recvs, %d active",
		__get_str(addr), __get_str(port), __entry->cq_id,
		__entry->count, __entry->posted
	)
);

TRACE_EVENT(xprtrdma_post_recvs_err,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		int status
	),

	TP_ARGS(r_xprt, status),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, status)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		const struct rpcrdma_ep *ep = r_xprt->rx_ep;

		__entry->cq_id = ep->re_attr.recv_cq->res.id;
		__entry->status = status;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s cq.id=%u rc=%d",
		__get_str(addr), __get_str(port), __entry->cq_id,
		__entry->status
	)
);

TRACE_EVENT(xprtrdma_post_linv_err,
	TP_PROTO(
		const struct rpcrdma_req *req,
		int status
	),

	TP_ARGS(req, status),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, status)
	),

	TP_fast_assign(
		const struct rpc_task *task = req->rl_slot.rq_task;

		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->status = status;
	),

	TP_printk("task:%u@%u status=%d",
		__entry->task_id, __entry->client_id, __entry->status
	)
);

/**
 ** Completion events
 **/

DEFINE_RECEIVE_COMPLETION_EVENT(xprtrdma_wc_receive);

DEFINE_COMPLETION_EVENT(xprtrdma_wc_send);
DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_fastreg);
DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li);
DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li_wake);
DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li_done);

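/*
 * Illustrative call-site sketch (field and function names are
 * assumptions, not verbatim from the xprtrdma source): a completion
 * handler recovers its context from the posted work request and hands
 * the wc plus its pre-built completion ID to the tracepoint:
 *
 *	static void frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
 *	{
 *		struct ib_cqe *cqe = wc->wr_cqe;
 *		struct rpcrdma_mr *mr =
 *			container_of(cqe, struct rpcrdma_mr, mr_cqe);
 *
 *		trace_xprtrdma_wc_fastreg(wc, &mr->mr_cid);
 *	}
 */
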
TRACE_EVENT(xprtrdma_frwr_alloc,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int rc
	),

	TP_ARGS(mr, rc),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(int, rc)
	),

	TP_fast_assign(
		__entry->mr_id = mr->mr_ibmr->res.id;
		__entry->rc = rc;
	),

	TP_printk("mr.id=%u: rc=%d",
		__entry->mr_id, __entry->rc
	)
);

TRACE_EVENT(xprtrdma_frwr_dereg,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int rc
	),

	TP_ARGS(mr, rc),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(u32, dir)
		__field(int, rc)
	),

	TP_fast_assign(
		__entry->mr_id  = mr->mr_ibmr->res.id;
		__entry->nents  = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->dir    = mr->mr_dir;
		__entry->rc	= rc;
	),

	TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s): rc=%d",
		__entry->mr_id, __entry->nents, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		xprtrdma_show_direction(__entry->dir),
		__entry->rc
	)
);

TRACE_EVENT(xprtrdma_frwr_sgerr,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int sg_nents
	),

	TP_ARGS(mr, sg_nents),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(u64, addr)
		__field(u32, dir)
		__field(int, nents)
	),

	TP_fast_assign(
		__entry->mr_id = mr->mr_ibmr->res.id;
		__entry->addr = mr->mr_sg->dma_address;
		__entry->dir = mr->mr_dir;
		__entry->nents = sg_nents;
	),

	TP_printk("mr.id=%u DMA addr=0x%llx (%s) sg_nents=%d",
		__entry->mr_id, __entry->addr,
		xprtrdma_show_direction(__entry->dir),
		__entry->nents
	)
);

TRACE_EVENT(xprtrdma_frwr_maperr,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int num_mapped
	),

	TP_ARGS(mr, num_mapped),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(u64, addr)
		__field(u32, dir)
		__field(int, num_mapped)
		__field(int, nents)
	),

	TP_fast_assign(
		__entry->mr_id = mr->mr_ibmr->res.id;
		__entry->addr = mr->mr_sg->dma_address;
		__entry->dir = mr->mr_dir;
		__entry->num_mapped = num_mapped;
		__entry->nents = mr->mr_nents;
	),

	TP_printk("mr.id=%u DMA addr=0x%llx (%s) nents=%d of %d",
		__entry->mr_id, __entry->addr,
		xprtrdma_show_direction(__entry->dir),
		__entry->num_mapped, __entry->nents
	)
);

DEFINE_MR_EVENT(fastreg);
DEFINE_MR_EVENT(localinv);
DEFINE_MR_EVENT(reminv);
DEFINE_MR_EVENT(map);

DEFINE_ANON_MR_EVENT(unmap);

TRACE_EVENT(xprtrdma_dma_maperr,
	TP_PROTO(
		u64 addr
	),

	TP_ARGS(addr),

	TP_STRUCT__entry(
		__field(u64, addr)
	),

	TP_fast_assign(
		__entry->addr = addr;
	),
	TP_printk("dma addr=0x%llx", __entry->addr)
);

/**
 ** Reply events
 **/

TRACE_EVENT(xprtrdma_reply,
	TP_PROTO(
		const struct rpc_task *task,
		const struct rpcrdma_rep *rep,
		unsigned int credits
	),

	TP_ARGS(task, rep, credits),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(unsigned int, credits)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rep->rr_xid);
		__entry->credits = credits;
	),

	TP_printk("task:%u@%u xid=0x%08x credits=%u",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->credits
	)
);

DEFINE_REPLY_EVENT(vers);
DEFINE_REPLY_EVENT(rqst);
DEFINE_REPLY_EVENT(short);
DEFINE_REPLY_EVENT(hdr);

TRACE_EVENT(xprtrdma_err_vers,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		__be32 *min,
		__be32 *max
	),

	TP_ARGS(rqst, min, max),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(u32, min)
		__field(u32, max)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->min = be32_to_cpup(min);
		__entry->max = be32_to_cpup(max);
	),

	TP_printk("task:%u@%u xid=0x%08x versions=[%u, %u]",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->min, __entry->max
	)
);

TRACE_EVENT(xprtrdma_err_chunk,
	TP_PROTO(
		const struct rpc_rqst *rqst
	),

	TP_ARGS(rqst),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
	),

	TP_printk("task:%u@%u xid=0x%08x",
		__entry->task_id, __entry->client_id, __entry->xid
	)
);

TRACE_EVENT(xprtrdma_err_unrecognized,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		__be32 *procedure
	),

	TP_ARGS(rqst, procedure),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(u32, procedure)
	),
	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->procedure = be32_to_cpup(procedure);
	),

	TP_printk("task:%u@%u xid=0x%08x procedure=%u",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->procedure
	)
);

TRACE_EVENT(xprtrdma_fixup,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		unsigned long fixup
	),

	TP_ARGS(rqst, fixup),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(unsigned long, fixup)
		__field(size_t, headlen)
		__field(unsigned int, pagelen)
		__field(size_t, taillen)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->fixup = fixup;
		__entry->headlen = rqst->rq_rcv_buf.head[0].iov_len;
		__entry->pagelen = rqst->rq_rcv_buf.page_len;
		__entry->taillen = rqst->rq_rcv_buf.tail[0].iov_len;
	),

	TP_printk("task:%u@%u fixup=%lu xdr=%zu/%u/%zu",
		__entry->task_id, __entry->client_id, __entry->fixup,
		__entry->headlen, __entry->pagelen, __entry->taillen
	)
);

TRACE_EVENT(xprtrdma_decode_seg,
	TP_PROTO(
		u32 handle,
		u32 length,
		u64 offset
	),

	TP_ARGS(handle, length, offset),

	TP_STRUCT__entry(
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->handle = handle;
		__entry->length = length;
		__entry->offset = offset;
	),

	TP_printk("%u@0x%016llx:0x%08x",
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle
	)
);

TRACE_EVENT(xprtrdma_mrs_zap,
	TP_PROTO(
		const struct rpc_task *task
	),

	TP_ARGS(task),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
	),

	TP_printk("task:%u@%u",
		__entry->task_id, __entry->client_id
	)
);

/**
 ** Callback events
 **/

TRACE_EVENT(xprtrdma_cb_setup,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int reqs
	),

	TP_ARGS(r_xprt, reqs),

	TP_STRUCT__entry(
		__field(unsigned int, reqs)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->reqs = reqs;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s %u reqs",
		__get_str(addr), __get_str(port), __entry->reqs
	)
);

DEFINE_CALLBACK_EVENT(call);
DEFINE_CALLBACK_EVENT(reply);

/**
 ** Server-side RPC/RDMA events
 **/

DECLARE_EVENT_CLASS(svcrdma_accept_class,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		long status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(long, status)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s status=%ld",
		__get_str(addr), __entry->status
	)
);

#define DEFINE_ACCEPT_EVENT(name) \
		DEFINE_EVENT(svcrdma_accept_class, svcrdma_##name##_err, \
				TP_PROTO( \
					const struct svcxprt_rdma *rdma, \
					long status \
				), \
				TP_ARGS(rdma, status))

DEFINE_ACCEPT_EVENT(pd);
DEFINE_ACCEPT_EVENT(qp);
DEFINE_ACCEPT_EVENT(fabric);
DEFINE_ACCEPT_EVENT(initdepth);
DEFINE_ACCEPT_EVENT(accept);

TRACE_DEFINE_ENUM(RDMA_MSG);
TRACE_DEFINE_ENUM(RDMA_NOMSG);
TRACE_DEFINE_ENUM(RDMA_MSGP);
TRACE_DEFINE_ENUM(RDMA_DONE);
TRACE_DEFINE_ENUM(RDMA_ERROR);

#define show_rpcrdma_proc(x)						\
		__print_symbolic(x,					\
				{ RDMA_MSG, "RDMA_MSG" },		\
				{ RDMA_NOMSG, "RDMA_NOMSG" },		\
				{ RDMA_MSGP, "RDMA_MSGP" },		\
				{ RDMA_DONE, "RDMA_DONE" },		\
				{ RDMA_ERROR, "RDMA_ERROR" })

TRACE_EVENT(svcrdma_decode_rqst,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		__be32 *p,
		unsigned int hdrlen
	),

	TP_ARGS(ctxt, p, hdrlen),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
		__field(unsigned int, hdrlen)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
		__entry->hdrlen = hdrlen;
	),

	TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%s hdrlen=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->xid, __entry->vers, __entry->credits,
		show_rpcrdma_proc(__entry->proc), __entry->hdrlen)
);

TRACE_EVENT(svcrdma_decode_short_err,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		unsigned int hdrlen
	),

	TP_ARGS(ctxt, hdrlen),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned int, hdrlen)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		__entry->hdrlen = hdrlen;
	),

	TP_printk("cq.id=%u cid=%d hdrlen=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->hdrlen)
);

DECLARE_EVENT_CLASS(svcrdma_badreq_event,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		__be32 *p
	),

	TP_ARGS(ctxt, p),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
	),

	TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->xid, __entry->vers, __entry->credits, __entry->proc)
);

#define DEFINE_BADREQ_EVENT(name)					\
		DEFINE_EVENT(svcrdma_badreq_event,			\
			     svcrdma_decode_##name##_err,		\
				TP_PROTO(				\
					const struct svc_rdma_recv_ctxt *ctxt,	\
					__be32 *p			\
				),					\
				TP_ARGS(ctxt, p))

DEFINE_BADREQ_EVENT(badvers);
DEFINE_BADREQ_EVENT(drop);
DEFINE_BADREQ_EVENT(badproc);
DEFINE_BADREQ_EVENT(parse);

TRACE_EVENT(svcrdma_encode_wseg,
	TP_PROTO(
		const struct svc_rdma_send_ctxt *ctxt,
		u32 segno,
		u32 handle,
		u32 length,
		u64 offset
	),

	TP_ARGS(ctxt, segno, handle, length, offset),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, segno)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->sc_cid.ci_queue_id;
		__entry->completion_id = ctxt->sc_cid.ci_completion_id;
		__entry->segno = segno;
		__entry->handle = handle;
		__entry->length = length;
		__entry->offset = offset;
	),

	TP_printk("cq_id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
		__entry->cq_id, __entry->completion_id,
		__entry->segno, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle
	)
);

TRACE_EVENT(svcrdma_decode_rseg,
	TP_PROTO(
		const struct rpc_rdma_cid *cid,
		const struct svc_rdma_chunk *chunk,
		const struct svc_rdma_segment *segment
	),

	TP_ARGS(cid, chunk, segment),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, segno)
		__field(u32, position)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->segno = chunk->ch_segcount;
		__entry->position = chunk->ch_position;
		__entry->handle = segment->rs_handle;
		__entry->length = segment->rs_length;
		__entry->offset = segment->rs_offset;
	),

	TP_printk("cq_id=%u cid=%d segno=%u position=%u %u@0x%016llx:0x%08x",
		__entry->cq_id, __entry->completion_id,
		__entry->segno, __entry->position, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle
	)
);

TRACE_EVENT(svcrdma_decode_wseg,
	TP_PROTO(
		const struct rpc_rdma_cid *cid,
		const struct svc_rdma_chunk *chunk,
		u32 segno
	),

	TP_ARGS(cid, chunk, segno),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, segno)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		const struct svc_rdma_segment *segment =
			&chunk->ch_segments[segno];

		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->segno = segno;
		__entry->handle = segment->rs_handle;
		__entry->length = segment->rs_length;
		__entry->offset = segment->rs_offset;
	),

	TP_printk("cq_id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
		__entry->cq_id, __entry->completion_id,
		__entry->segno, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle
	)
);

DECLARE_EVENT_CLASS(svcrdma_error_event,
	TP_PROTO(
		__be32 xid
	),

	TP_ARGS(xid),

	TP_STRUCT__entry(
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpu(xid);
	),

	TP_printk("xid=0x%08x",
		__entry->xid
	)
);

#define DEFINE_ERROR_EVENT(name)					\
		DEFINE_EVENT(svcrdma_error_event, svcrdma_err_##name,	\
				TP_PROTO(				\
					__be32 xid			\
				),					\
				TP_ARGS(xid))

DEFINE_ERROR_EVENT(vers);
DEFINE_ERROR_EVENT(chunk);

/**
 ** Server-side RDMA API events
 **/

DECLARE_EVENT_CLASS(svcrdma_dma_map_class,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		u64 dma_addr,
		u32 length
	),

	TP_ARGS(rdma, dma_addr, length),

	TP_STRUCT__entry(
		__field(u64, dma_addr)
		__field(u32, length)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->dma_addr = dma_addr;
		__entry->length = length;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s dma_addr=%llu length=%u",
		__get_str(addr), __get_str(device),
		__entry->dma_addr, __entry->length
	)
);

#define DEFINE_SVC_DMA_EVENT(name)					\
		DEFINE_EVENT(svcrdma_dma_map_class, svcrdma_##name,	\
				TP_PROTO(				\
					const struct svcxprt_rdma *rdma,\
					u64 dma_addr,			\
					u32 length			\
				),					\
				TP_ARGS(rdma, dma_addr, length))

DEFINE_SVC_DMA_EVENT(dma_map_page);
DEFINE_SVC_DMA_EVENT(dma_map_err);
DEFINE_SVC_DMA_EVENT(dma_unmap_page);

TRACE_EVENT(svcrdma_dma_map_rw_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		unsigned int nents,
		int status
	),

	TP_ARGS(rdma, nents, status),

	TP_STRUCT__entry(
		__field(int, status)
		__field(unsigned int, nents)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__entry->nents = nents;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s nents=%u status=%d",
		__get_str(addr), __get_str(device), __entry->nents,
		__entry->status
	)
);

TRACE_EVENT(svcrdma_no_rwctx_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		unsigned int num_sges
	),

	TP_ARGS(rdma, num_sges),

	TP_STRUCT__entry(
		__field(unsigned int, num_sges)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->num_sges = num_sges;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s num_sges=%d",
		__get_str(addr), __get_str(device), __entry->num_sges
	)
);

TRACE_EVENT(svcrdma_page_overrun_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		const struct svc_rqst *rqst,
		unsigned int pageno
	),

	TP_ARGS(rdma, rqst, pageno),

	TP_STRUCT__entry(
		__field(unsigned int, pageno)
		__field(u32, xid)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->pageno = pageno;
		__entry->xid = __be32_to_cpu(rqst->rq_xid);
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s xid=0x%08x pageno=%u", __get_str(addr),
		__get_str(device), __entry->xid, __entry->pageno
	)
);

TRACE_EVENT(svcrdma_small_wrch_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		unsigned int remaining,
		unsigned int seg_no,
		unsigned int num_segs
	),

	TP_ARGS(rdma, remaining, seg_no, num_segs),

	TP_STRUCT__entry(
		__field(unsigned int, remaining)
		__field(unsigned int, seg_no)
		__field(unsigned int, num_segs)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->remaining = remaining;
		__entry->seg_no = seg_no;
		__entry->num_segs = num_segs;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s remaining=%u seg_no=%u num_segs=%u",
		__get_str(addr), __get_str(device), __entry->remaining,
		__entry->seg_no, __entry->num_segs
	)
);

TRACE_EVENT(svcrdma_send_pullup,
	TP_PROTO(
		const struct svc_rdma_send_ctxt *ctxt,
		unsigned int msglen
	),

	TP_ARGS(ctxt, msglen),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned int, hdrlen)
		__field(unsigned int, msglen)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->sc_cid.ci_queue_id;
		__entry->completion_id = ctxt->sc_cid.ci_completion_id;
		__entry->hdrlen = ctxt->sc_hdrbuf.len;
		__entry->msglen = msglen;
	),

	TP_printk("cq_id=%u cid=%d hdr=%u msg=%u (total %u)",
		__entry->cq_id, __entry->completion_id,
		__entry->hdrlen, __entry->msglen,
		__entry->hdrlen + __entry->msglen)
);

TRACE_EVENT(svcrdma_send_err,
	TP_PROTO(
		const struct svc_rqst *rqst,
		int status
	),

	TP_ARGS(rqst, status),

	TP_STRUCT__entry(
		__field(int, status)
		__field(u32, xid)
		__string(addr, rqst->rq_xprt->xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__entry->xid = __be32_to_cpu(rqst->rq_xid);
		__assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
	),

	TP_printk("addr=%s xid=0x%08x status=%d", __get_str(addr),
		__entry->xid, __entry->status
	)
);

TRACE_EVENT(svcrdma_post_send,
	TP_PROTO(
		const struct svc_rdma_send_ctxt *ctxt
	),

	TP_ARGS(ctxt),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned int, num_sge)
		__field(u32, inv_rkey)
	),

	TP_fast_assign(
		const struct ib_send_wr *wr = &ctxt->sc_send_wr;

		__entry->cq_id = ctxt->sc_cid.ci_queue_id;
		__entry->completion_id = ctxt->sc_cid.ci_completion_id;
		__entry->num_sge = wr->num_sge;
		__entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ?
					wr->ex.invalidate_rkey : 0;
	),

	TP_printk("cq_id=%u cid=%d num_sge=%u inv_rkey=0x%08x",
		__entry->cq_id, __entry->completion_id,
		__entry->num_sge, __entry->inv_rkey
	)
);

DEFINE_SEND_COMPLETION_EVENT(svcrdma_wc_send);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_send_flush);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_send_err);

TRACE_EVENT(svcrdma_post_recv,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt
	),

	TP_ARGS(ctxt),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
	),
	TP_printk("cq.id=%u cid=%d",
		__entry->cq_id, __entry->completion_id
	)
);

DEFINE_RECEIVE_SUCCESS_EVENT(svcrdma_wc_recv);
DEFINE_RECEIVE_FLUSH_EVENT(svcrdma_wc_recv_flush);
DEFINE_RECEIVE_FLUSH_EVENT(svcrdma_wc_recv_err);

TRACE_EVENT(svcrdma_rq_post_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		int status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(int, status)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s status=%d",
		__get_str(addr), __entry->status
	)
);

DECLARE_EVENT_CLASS(svcrdma_post_chunk_class,
	TP_PROTO(
		const struct rpc_rdma_cid *cid,
		int sqecount
	),

	TP_ARGS(cid, sqecount),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(int, sqecount)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->sqecount = sqecount;
	),

	TP_printk("cq.id=%u cid=%d sqecount=%d",
		__entry->cq_id, __entry->completion_id,
		__entry->sqecount
	)
);

#define DEFINE_POST_CHUNK_EVENT(name)					\
		DEFINE_EVENT(svcrdma_post_chunk_class,			\
				svcrdma_post_##name##_chunk,		\
				TP_PROTO(				\
					const struct rpc_rdma_cid *cid,	\
					int sqecount			\
				),					\
				TP_ARGS(cid, sqecount))

DEFINE_POST_CHUNK_EVENT(read);
DEFINE_POST_CHUNK_EVENT(write);
DEFINE_POST_CHUNK_EVENT(reply);

DEFINE_COMPLETION_EVENT(svcrdma_wc_read);
DEFINE_COMPLETION_EVENT(svcrdma_wc_write);

TRACE_EVENT(svcrdma_qp_error,
	TP_PROTO(
		const struct ib_event *event,
		const struct sockaddr *sap
	),

	TP_ARGS(event, sap),

	TP_STRUCT__entry(
		__field(unsigned int, event)
		__string(device, event->device->name)
		__array(__u8, addr, INET6_ADDRSTRLEN + 10)
	),

	TP_fast_assign(
		__entry->event = event->event;
		__assign_str(device, event->device->name);
		snprintf(__entry->addr, sizeof(__entry->addr) - 1,
			 "%pISpc", sap);
	),

	TP_printk("addr=%s dev=%s event=%s (%u)",
		__entry->addr, __get_str(device),
		rdma_show_ib_event(__entry->event), __entry->event
	)
);

DECLARE_EVENT_CLASS(svcrdma_sendqueue_event,
	TP_PROTO(
		const struct svcxprt_rdma *rdma
	),

	TP_ARGS(rdma),

	TP_STRUCT__entry(
		__field(int, avail)
		__field(int, depth)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->avail = atomic_read(&rdma->sc_sq_avail);
		__entry->depth = rdma->sc_sq_depth;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s sc_sq_avail=%d/%d",
		__get_str(addr), __entry->avail, __entry->depth
	)
);

#define DEFINE_SQ_EVENT(name)						\
		DEFINE_EVENT(svcrdma_sendqueue_event, svcrdma_sq_##name,\
				TP_PROTO(				\
					const struct svcxprt_rdma *rdma \
				),					\
				TP_ARGS(rdma))

DEFINE_SQ_EVENT(full);
DEFINE_SQ_EVENT(retry);

TRACE_EVENT(svcrdma_sq_post_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		int status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(int, avail)
		__field(int, depth)
		__field(int, status)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->avail = atomic_read(&rdma->sc_sq_avail);
		__entry->depth = rdma->sc_sq_depth;
		__entry->status = status;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s sc_sq_avail=%d/%d status=%d",
		__get_str(addr), __entry->avail, __entry->depth,
		__entry->status
	)
);

#endif /* _TRACE_RPCRDMA_H */

#include <trace/define_trace.h>