xref: /openbmc/linux/include/trace/events/rpcrdma.h (revision 6b147ea7)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Copyright (c) 2017, 2018 Oracle.  All rights reserved.
4  *
5  * Trace point definitions for the "rpcrdma" subsystem.
6  */
7 #undef TRACE_SYSTEM
8 #define TRACE_SYSTEM rpcrdma
9 
10 #if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ)
11 #define _TRACE_RPCRDMA_H
12 
13 #include <linux/scatterlist.h>
14 #include <linux/sunrpc/rpc_rdma_cid.h>
15 #include <linux/tracepoint.h>
16 #include <rdma/ib_cm.h>
17 #include <trace/events/rdma.h>
18 
19 /**
20  ** Event classes
21  **/
22 
/*
 * Completion of a Send-side work request: records the issuing CQ id and
 * per-WR completion id from the rpc_rdma_cid, plus the wc status.
 * vendor_err is captured only for failed completions; otherwise 0.
 */
DECLARE_EVENT_CLASS(rpcrdma_completion_class,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid
	),

	TP_ARGS(wc, cid),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned long, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->status = wc->status;
		if (wc->status)
			__entry->vendor_err = wc->vendor_err;
		else
			__entry->vendor_err = 0;
	),

	TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
		__entry->cq_id, __entry->completion_id,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

/* Instantiate one tracepoint from rpcrdma_completion_class. */
#define DEFINE_COMPLETION_EVENT(name)					\
		DEFINE_EVENT(rpcrdma_completion_class, name,		\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpc_rdma_cid *cid	\
				),					\
				TP_ARGS(wc, cid))

/*
 * Identical capture to rpcrdma_completion_class, but here the
 * completion id identifies an MR, so it is printed as "mr.id=".
 */
DECLARE_EVENT_CLASS(rpcrdma_mr_completion_class,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid
	),

	TP_ARGS(wc, cid),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned long, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->status = wc->status;
		if (wc->status)
			__entry->vendor_err = wc->vendor_err;
		else
			__entry->vendor_err = 0;
	),

	TP_printk("cq.id=%u mr.id=%d status=%s (%lu/0x%x)",
		__entry->cq_id, __entry->completion_id,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

/* Instantiate one tracepoint from rpcrdma_mr_completion_class. */
#define DEFINE_MR_COMPLETION_EVENT(name)				\
		DEFINE_EVENT(rpcrdma_mr_completion_class, name,		\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpc_rdma_cid *cid	\
				),					\
				TP_ARGS(wc, cid))

/*
 * Completion of a Receive work request. On success the received byte
 * count comes from wc->byte_len; on failure the byte count is recorded
 * as 0 and vendor_err is captured instead.
 */
DECLARE_EVENT_CLASS(rpcrdma_receive_completion_class,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid
	),

	TP_ARGS(wc, cid),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, received)
		__field(unsigned long, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->status = wc->status;
		if (wc->status) {
			__entry->received = 0;
			__entry->vendor_err = wc->vendor_err;
		} else {
			__entry->received = wc->byte_len;
			__entry->vendor_err = 0;
		}
	),

	TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x) received=%u",
		__entry->cq_id, __entry->completion_id,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err,
		__entry->received
	)
);

/* Instantiate one tracepoint from rpcrdma_receive_completion_class. */
#define DEFINE_RECEIVE_COMPLETION_EVENT(name)				\
		DEFINE_EVENT(rpcrdma_receive_completion_class, name,	\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpc_rdma_cid *cid	\
				),					\
				TP_ARGS(wc, cid))
147 
/*
 * Snapshot of an incoming RPC/RDMA reply header (xid, version, proc)
 * plus the peer address/port of the transport it arrived on.
 */
DECLARE_EVENT_CLASS(xprtrdma_reply_class,
	TP_PROTO(
		const struct rpcrdma_rep *rep
	),

	TP_ARGS(rep),

	TP_STRUCT__entry(
		__field(u32, xid)
		__field(u32, version)
		__field(u32, proc)
		__string(addr, rpcrdma_addrstr(rep->rr_rxprt))
		__string(port, rpcrdma_portstr(rep->rr_rxprt))
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpu(rep->rr_xid);
		__entry->version = be32_to_cpu(rep->rr_vers);
		__entry->proc = be32_to_cpu(rep->rr_proc);
		__assign_str(addr, rpcrdma_addrstr(rep->rr_rxprt));
		__assign_str(port, rpcrdma_portstr(rep->rr_rxprt));
	),

	TP_printk("peer=[%s]:%s xid=0x%08x version=%u proc=%u",
		__get_str(addr), __get_str(port),
		__entry->xid, __entry->version, __entry->proc
	)
);

/* Instantiate a reply-parsing error tracepoint: xprtrdma_reply_<name>_err. */
#define DEFINE_REPLY_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_reply_class,			\
				xprtrdma_reply_##name##_err,		\
				TP_PROTO(				\
					const struct rpcrdma_rep *rep	\
				),					\
				TP_ARGS(rep))

/*
 * Generic transport-level event: just the r_xprt pointer and the
 * peer's presentation address and port.
 */
DECLARE_EVENT_CLASS(xprtrdma_rxprt,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt
	),

	TP_ARGS(r_xprt),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p",
		__get_str(addr), __get_str(port), __entry->r_xprt
	)
);

/* Instantiate one tracepoint from xprtrdma_rxprt. */
#define DEFINE_RXPRT_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_rxprt, name,			\
				TP_PROTO(				\
					const struct rpcrdma_xprt *r_xprt \
				),					\
				TP_ARGS(r_xprt))

/*
 * Connect/disconnect event: records the caller-supplied return code
 * and the endpoint's current re_connect_status at trace time.
 */
DECLARE_EVENT_CLASS(xprtrdma_connect_class,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		int rc
	),

	TP_ARGS(r_xprt, rc),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(int, rc)
		__field(int, connect_status)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->rc = rc;
		__entry->connect_status = r_xprt->rx_ep->re_connect_status;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: rc=%d connection status=%d",
		__get_str(addr), __get_str(port), __entry->r_xprt,
		__entry->rc, __entry->connect_status
	)
);

/* Instantiate one connection tracepoint: xprtrdma_<name>. */
#define DEFINE_CONN_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_connect_class, xprtrdma_##name,	\
				TP_PROTO(				\
					const struct rpcrdma_xprt *r_xprt, \
					int rc				\
				),					\
				TP_ARGS(r_xprt, rc))
253 
/*
 * Read-chunk registration event: one MR segment (handle/length/offset)
 * being added to a Read list at XDR position @pos. The "more"/"last"
 * tag shows whether further segments follow (nents < nsegs).
 */
DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
	TP_PROTO(
		const struct rpc_task *task,
		unsigned int pos,
		struct rpcrdma_mr *mr,
		int nsegs
	),

	TP_ARGS(task, pos, mr, nsegs),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(unsigned int, pos)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(int, nsegs)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->pos = pos;
		__entry->nents = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->nsegs = nsegs;
	),

	TP_printk("task:%u@%u pos=%u %u@0x%016llx:0x%08x (%s)",
		__entry->task_id, __entry->client_id,
		__entry->pos, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		__entry->nents < __entry->nsegs ? "more" : "last"
	)
);

/* Instantiate a Read-chunk tracepoint: xprtrdma_chunk_<name>. */
#define DEFINE_RDCH_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_rdch_event, xprtrdma_chunk_##name,\
				TP_PROTO(				\
					const struct rpc_task *task,	\
					unsigned int pos,		\
					struct rpcrdma_mr *mr,		\
					int nsegs			\
				),					\
				TP_ARGS(task, pos, mr, nsegs))

/*
 * Write/Reply-chunk registration event: same as the Read-chunk class
 * but without an XDR position field.
 */
DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
	TP_PROTO(
		const struct rpc_task *task,
		struct rpcrdma_mr *mr,
		int nsegs
	),

	TP_ARGS(task, mr, nsegs),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(int, nsegs)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->nents = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->nsegs = nsegs;
	),

	TP_printk("task:%u@%u %u@0x%016llx:0x%08x (%s)",
		__entry->task_id, __entry->client_id,
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle,
		__entry->nents < __entry->nsegs ? "more" : "last"
	)
);

/* Instantiate a Write/Reply-chunk tracepoint: xprtrdma_chunk_<name>. */
#define DEFINE_WRCH_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_wrch_event, xprtrdma_chunk_##name,\
				TP_PROTO(				\
					const struct rpc_task *task,	\
					struct rpcrdma_mr *mr,		\
					int nsegs			\
				),					\
				TP_ARGS(task, mr, nsegs))

/* Export the DMA direction enum values so user space can decode them. */
TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL);
TRACE_DEFINE_ENUM(DMA_TO_DEVICE);
TRACE_DEFINE_ENUM(DMA_FROM_DEVICE);
TRACE_DEFINE_ENUM(DMA_NONE);

/* Pretty-print a dma_data_direction value. */
#define xprtrdma_show_direction(x)					\
		__print_symbolic(x,					\
				{ DMA_BIDIRECTIONAL, "BIDIR" },		\
				{ DMA_TO_DEVICE, "TO_DEVICE" },		\
				{ DMA_FROM_DEVICE, "FROM_DEVICE" },	\
				{ DMA_NONE, "NONE" })
361 
/*
 * MR state-change event for an MR attached to a request: identifies
 * the owning RPC task via mr->mr_req->rl_slot, plus the MR's id,
 * sg entry count, RDMA handle/length/offset, and DMA direction.
 */
DECLARE_EVENT_CLASS(xprtrdma_mr_class,
	TP_PROTO(
		const struct rpcrdma_mr *mr
	),

	TP_ARGS(mr),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, mr_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(u32, dir)
	),

	TP_fast_assign(
		const struct rpcrdma_req *req = mr->mr_req;
		const struct rpc_task *task = req->rl_slot.rq_task;

		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->mr_id  = mr->frwr.fr_mr->res.id;
		__entry->nents  = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->dir    = mr->mr_dir;
	),

	TP_printk("task:%u@%u mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)",
		__entry->task_id, __entry->client_id,
		__entry->mr_id, __entry->nents, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		xprtrdma_show_direction(__entry->dir)
	)
);

/* Instantiate an MR tracepoint: xprtrdma_mr_<name>. */
#define DEFINE_MR_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_mr_class,				\
				xprtrdma_mr_##name,			\
				TP_PROTO(				\
					const struct rpcrdma_mr *mr	\
				),					\
				TP_ARGS(mr))

/*
 * Like xprtrdma_mr_class, but for MRs not (or no longer) associated
 * with an rpc_task, so no task/client ids are recorded.
 */
DECLARE_EVENT_CLASS(xprtrdma_anonymous_mr_class,
	TP_PROTO(
		const struct rpcrdma_mr *mr
	),

	TP_ARGS(mr),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(u32, dir)
	),

	TP_fast_assign(
		__entry->mr_id  = mr->frwr.fr_mr->res.id;
		__entry->nents  = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->dir    = mr->mr_dir;
	),

	TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)",
		__entry->mr_id, __entry->nents, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		xprtrdma_show_direction(__entry->dir)
	)
);

/* Instantiate an anonymous-MR tracepoint: xprtrdma_mr_<name>. */
#define DEFINE_ANON_MR_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_anonymous_mr_class,		\
				xprtrdma_mr_##name,			\
				TP_PROTO(				\
					const struct rpcrdma_mr *mr	\
				),					\
				TP_ARGS(mr))

/*
 * Backchannel callback event: the request's xid and the peer's
 * address/port.
 */
DECLARE_EVENT_CLASS(xprtrdma_callback_class,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		const struct rpc_rqst *rqst
	),

	TP_ARGS(r_xprt, rqst),

	TP_STRUCT__entry(
		__field(u32, xid)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s xid=0x%08x",
		__get_str(addr), __get_str(port), __entry->xid
	)
);

/* Instantiate a backchannel tracepoint: xprtrdma_cb_<name>. */
#define DEFINE_CALLBACK_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_callback_class,			\
				xprtrdma_cb_##name,			\
				TP_PROTO(				\
					const struct rpcrdma_xprt *r_xprt, \
					const struct rpc_rqst *rqst	\
				),					\
				TP_ARGS(r_xprt, rqst))
483 
484 /**
485  ** Connection events
486  **/
487 
/*
 * Reports the negotiated vs. calculated inline send/receive thresholds
 * for an endpoint, along with the source and destination addresses of
 * its rdma_cm_id (copied raw; large enough for sockaddr_in6).
 */
TRACE_EVENT(xprtrdma_inline_thresh,
	TP_PROTO(
		const struct rpcrdma_ep *ep
	),

	TP_ARGS(ep),

	TP_STRUCT__entry(
		__field(unsigned int, inline_send)
		__field(unsigned int, inline_recv)
		__field(unsigned int, max_send)
		__field(unsigned int, max_recv)
		__array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
		__array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
	),

	TP_fast_assign(
		const struct rdma_cm_id *id = ep->re_id;

		__entry->inline_send = ep->re_inline_send;
		__entry->inline_recv = ep->re_inline_recv;
		__entry->max_send = ep->re_max_inline_send;
		__entry->max_recv = ep->re_max_inline_recv;
		memcpy(__entry->srcaddr, &id->route.addr.src_addr,
		       sizeof(struct sockaddr_in6));
		memcpy(__entry->dstaddr, &id->route.addr.dst_addr,
		       sizeof(struct sockaddr_in6));
	),

	TP_printk("%pISpc -> %pISpc neg send/recv=%u/%u, calc send/recv=%u/%u",
		__entry->srcaddr, __entry->dstaddr,
		__entry->inline_send, __entry->inline_recv,
		__entry->max_send, __entry->max_recv
	)
);

/* xprtrdma_connect / xprtrdma_disconnect */
DEFINE_CONN_EVENT(connect);
DEFINE_CONN_EVENT(disconnect);

DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc);
528 
/*
 * A connect worker has been scheduled; @delay is the requeue delay
 * in jiffies.
 */
TRACE_EVENT(xprtrdma_op_connect,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned long delay
	),

	TP_ARGS(r_xprt, delay),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned long, delay)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->delay = delay;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p delay=%lu",
		__get_str(addr), __get_str(port), __entry->r_xprt,
		__entry->delay
	)
);


/*
 * Connect/reconnect timeouts were updated. Values are stored in
 * jiffies and printed in seconds (divided by HZ).
 */
TRACE_EVENT(xprtrdma_op_set_cto,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned long connect,
		unsigned long reconnect
	),

	TP_ARGS(r_xprt, connect, reconnect),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned long, connect)
		__field(unsigned long, reconnect)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->connect = connect;
		__entry->reconnect = reconnect;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: connect=%lu reconnect=%lu",
		__get_str(addr), __get_str(port), __entry->r_xprt,
		__entry->connect / HZ, __entry->reconnect / HZ
	)
);

/*
 * An IB queue pair event was reported for the endpoint's device;
 * decoded via rdma_show_ib_event().
 */
TRACE_EVENT(xprtrdma_qp_event,
	TP_PROTO(
		const struct rpcrdma_ep *ep,
		const struct ib_event *event
	),

	TP_ARGS(ep, event),

	TP_STRUCT__entry(
		__field(unsigned long, event)
		__string(name, event->device->name)
		__array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
		__array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
	),

	TP_fast_assign(
		const struct rdma_cm_id *id = ep->re_id;

		__entry->event = event->event;
		__assign_str(name, event->device->name);
		memcpy(__entry->srcaddr, &id->route.addr.src_addr,
		       sizeof(struct sockaddr_in6));
		memcpy(__entry->dstaddr, &id->route.addr.dst_addr,
		       sizeof(struct sockaddr_in6));
	),

	TP_printk("%pISpc -> %pISpc device=%s %s (%lu)",
		__entry->srcaddr, __entry->dstaddr, __get_str(name),
		rdma_show_ib_event(__entry->event), __entry->event
	)
);
620 
621 /**
622  ** Call events
623  **/
624 
/* @count MRs were allocated for the transport. */
TRACE_EVENT(xprtrdma_createmrs,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int count
	),

	TP_ARGS(r_xprt, count),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
		__field(unsigned int, count)
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->count = count;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: created %u MRs",
		__get_str(addr), __get_str(port), __entry->r_xprt,
		__entry->count
	)
);

/* Marshaling failed because no MR was available for @req's task. */
TRACE_EVENT(xprtrdma_nomrs_err,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		const struct rpcrdma_req *req
	),

	TP_ARGS(r_xprt, req),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;

		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s task:%u@%u",
		__get_str(addr), __get_str(port),
		__entry->task_id, __entry->client_id
	)
);

/* xprtrdma_chunk_read / xprtrdma_chunk_write / xprtrdma_chunk_reply */
DEFINE_RDCH_EVENT(read);
DEFINE_WRCH_EVENT(write);
DEFINE_WRCH_EVENT(reply);

/* Export the chunk type enum values so user space can decode them. */
TRACE_DEFINE_ENUM(rpcrdma_noch);
TRACE_DEFINE_ENUM(rpcrdma_noch_pullup);
TRACE_DEFINE_ENUM(rpcrdma_noch_mapped);
TRACE_DEFINE_ENUM(rpcrdma_readch);
TRACE_DEFINE_ENUM(rpcrdma_areadch);
TRACE_DEFINE_ENUM(rpcrdma_writech);
TRACE_DEFINE_ENUM(rpcrdma_replych);

/* Pretty-print an rpcrdma chunk type. */
#define xprtrdma_show_chunktype(x)					\
		__print_symbolic(x,					\
				{ rpcrdma_noch, "inline" },		\
				{ rpcrdma_noch_pullup, "pullup" },	\
				{ rpcrdma_noch_mapped, "mapped" },	\
				{ rpcrdma_readch, "read list" },	\
				{ rpcrdma_areadch, "*read list" },	\
				{ rpcrdma_writech, "write list" },	\
				{ rpcrdma_replych, "reply chunk" })
704 
/*
 * A request was marshaled: records the transport header length, the
 * XDR send buffer geometry (head/page/tail lengths), and the chosen
 * Read and Write chunk types.
 */
TRACE_EVENT(xprtrdma_marshal,
	TP_PROTO(
		const struct rpcrdma_req *req,
		unsigned int rtype,
		unsigned int wtype
	),

	TP_ARGS(req, rtype, wtype),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(unsigned int, hdrlen)
		__field(unsigned int, headlen)
		__field(unsigned int, pagelen)
		__field(unsigned int, taillen)
		__field(unsigned int, rtype)
		__field(unsigned int, wtype)
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;

		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->hdrlen = req->rl_hdrbuf.len;
		__entry->headlen = rqst->rq_snd_buf.head[0].iov_len;
		__entry->pagelen = rqst->rq_snd_buf.page_len;
		__entry->taillen = rqst->rq_snd_buf.tail[0].iov_len;
		__entry->rtype = rtype;
		__entry->wtype = wtype;
	),

	TP_printk("task:%u@%u xid=0x%08x: hdr=%u xdr=%u/%u/%u %s/%s",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->hdrlen,
		__entry->headlen, __entry->pagelen, __entry->taillen,
		xprtrdma_show_chunktype(__entry->rtype),
		xprtrdma_show_chunktype(__entry->wtype)
	)
);

/* Marshaling @rqst failed with @ret. */
TRACE_EVENT(xprtrdma_marshal_failed,
	TP_PROTO(const struct rpc_rqst *rqst,
		 int ret
	),

	TP_ARGS(rqst, ret),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->ret = ret;
	),

	TP_printk("task:%u@%u xid=0x%08x: ret=%d",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->ret
	)
);

/* Preparing the Send WR for @rqst failed with @ret. */
TRACE_EVENT(xprtrdma_prepsend_failed,
	TP_PROTO(const struct rpc_rqst *rqst,
		 int ret
	),

	TP_ARGS(rqst, ret),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->ret = ret;
	),

	TP_printk("task:%u@%u xid=0x%08x: ret=%d",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->ret
	)
);

/*
 * A Send WR was posted. client_id is -1 for tasks with no rpc_clnt
 * (e.g. backchannel sends). "signaled" indicates IB_SEND_SIGNALED
 * was set on the WR.
 */
TRACE_EVENT(xprtrdma_post_send,
	TP_PROTO(
		const struct rpcrdma_req *req
	),

	TP_ARGS(req),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, num_sge)
		__field(int, signaled)
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;
		const struct rpcrdma_sendctx *sc = req->rl_sendctx;

		__entry->cq_id = sc->sc_cid.ci_queue_id;
		__entry->completion_id = sc->sc_cid.ci_completion_id;
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client ?
				     rqst->rq_task->tk_client->cl_clid : -1;
		__entry->num_sge = req->rl_wr.num_sge;
		__entry->signaled = req->rl_wr.send_flags & IB_SEND_SIGNALED;
	),

	TP_printk("task:%u@%u cq.id=%u cid=%d (%d SGE%s) %s",
		__entry->task_id, __entry->client_id,
		__entry->cq_id, __entry->completion_id,
		__entry->num_sge, (__entry->num_sge == 1 ? "" : "s"),
		(__entry->signaled ? "signaled" : "")
	)
);
839 
840 TRACE_EVENT(xprtrdma_post_recv,
841 	TP_PROTO(
842 		const struct rpcrdma_rep *rep
843 	),
844 
845 	TP_ARGS(rep),
846 
847 	TP_STRUCT__entry(
848 		__field(u32, cq_id)
849 		__field(int, completion_id)
850 	),
851 
852 	TP_fast_assign(
853 		__entry->cq_id = rep->rr_cid.ci_queue_id;
854 		__entry->completion_id = rep->rr_cid.ci_completion_id;
855 	),
856 
857 	TP_printk("cq.id=%d cid=%d",
858 		__entry->cq_id, __entry->completion_id
859 	)
860 );
861 
/*
 * A batch of Receive WRs was posted: @count newly posted, "posted"
 * is the endpoint's resulting re_receive_count, and @status is the
 * posting return code.
 */
TRACE_EVENT(xprtrdma_post_recvs,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int count,
		int status
	),

	TP_ARGS(r_xprt, count, status),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned int, count)
		__field(int, status)
		__field(int, posted)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->count = count;
		__entry->status = status;
		__entry->posted = r_xprt->rx_ep->re_receive_count;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: %u new recvs, %d active (rc %d)",
		__get_str(addr), __get_str(port), __entry->r_xprt,
		__entry->count, __entry->posted, __entry->status
	)
);

/* Posting LOCAL_INV WRs for @req's MRs failed with @status. */
TRACE_EVENT(xprtrdma_post_linv_err,
	TP_PROTO(
		const struct rpcrdma_req *req,
		int status
	),

	TP_ARGS(req, status),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, status)
	),

	TP_fast_assign(
		const struct rpc_task *task = req->rl_slot.rq_task;

		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->status = status;
	),

	TP_printk("task:%u@%u status=%d",
		__entry->task_id, __entry->client_id, __entry->status
	)
);

/**
 ** Completion events
 **/

DEFINE_RECEIVE_COMPLETION_EVENT(xprtrdma_wc_receive);

DEFINE_COMPLETION_EVENT(xprtrdma_wc_send);
DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_fastreg);
DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li);
DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li_wake);
DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li_done);
933 
/* FRWR MR allocation completed with @rc. */
TRACE_EVENT(xprtrdma_frwr_alloc,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int rc
	),

	TP_ARGS(mr, rc),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(int, rc)
	),

	TP_fast_assign(
		__entry->mr_id = mr->frwr.fr_mr->res.id;
		__entry->rc = rc;
	),

	TP_printk("mr.id=%u: rc=%d",
		__entry->mr_id, __entry->rc
	)
);

/* An FRWR MR was deregistered; @rc is the ib_dereg_mr result. */
TRACE_EVENT(xprtrdma_frwr_dereg,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int rc
	),

	TP_ARGS(mr, rc),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(u32, dir)
		__field(int, rc)
	),

	TP_fast_assign(
		__entry->mr_id  = mr->frwr.fr_mr->res.id;
		__entry->nents  = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->dir    = mr->mr_dir;
		__entry->rc	= rc;
	),

	TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s): rc=%d",
		__entry->mr_id, __entry->nents, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		xprtrdma_show_direction(__entry->dir),
		__entry->rc
	)
);

/*
 * ib_map_mr_sg was given a bad scatterlist: records the first sg
 * element's DMA address and the sg_nents that was requested.
 */
TRACE_EVENT(xprtrdma_frwr_sgerr,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int sg_nents
	),

	TP_ARGS(mr, sg_nents),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(u64, addr)
		__field(u32, dir)
		__field(int, nents)
	),

	TP_fast_assign(
		__entry->mr_id = mr->frwr.fr_mr->res.id;
		__entry->addr = mr->mr_sg->dma_address;
		__entry->dir = mr->mr_dir;
		__entry->nents = sg_nents;
	),

	TP_printk("mr.id=%u DMA addr=0x%llx (%s) sg_nents=%d",
		__entry->mr_id, __entry->addr,
		xprtrdma_show_direction(__entry->dir),
		__entry->nents
	)
);

/*
 * ib_map_mr_sg mapped fewer elements (@num_mapped) than the MR's
 * mr_nents.
 */
TRACE_EVENT(xprtrdma_frwr_maperr,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int num_mapped
	),

	TP_ARGS(mr, num_mapped),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(u64, addr)
		__field(u32, dir)
		__field(int, num_mapped)
		__field(int, nents)
	),

	TP_fast_assign(
		__entry->mr_id = mr->frwr.fr_mr->res.id;
		__entry->addr = mr->mr_sg->dma_address;
		__entry->dir = mr->mr_dir;
		__entry->num_mapped = num_mapped;
		__entry->nents = mr->mr_nents;
	),

	TP_printk("mr.id=%u DMA addr=0x%llx (%s) nents=%d of %d",
		__entry->mr_id, __entry->addr,
		xprtrdma_show_direction(__entry->dir),
		__entry->num_mapped, __entry->nents
	)
);

/* xprtrdma_mr_fastreg / _localinv / _reminv / _map */
DEFINE_MR_EVENT(fastreg);
DEFINE_MR_EVENT(localinv);
DEFINE_MR_EVENT(reminv);
DEFINE_MR_EVENT(map);

DEFINE_ANON_MR_EVENT(unmap);
1059 
1060 TRACE_EVENT(xprtrdma_dma_maperr,
1061 	TP_PROTO(
1062 		u64 addr
1063 	),
1064 
1065 	TP_ARGS(addr),
1066 
1067 	TP_STRUCT__entry(
1068 		__field(u64, addr)
1069 	),
1070 
1071 	TP_fast_assign(
1072 		__entry->addr = addr;
1073 	),
1074 
1075 	TP_printk("dma addr=0x%llx\n", __entry->addr)
1076 );
1077 
1078 /**
1079  ** Reply events
1080  **/
1081 
/*
 * A reply was matched to @task; @credits is the credit grant carried
 * in the reply.
 */
TRACE_EVENT(xprtrdma_reply,
	TP_PROTO(
		const struct rpc_task *task,
		const struct rpcrdma_rep *rep,
		unsigned int credits
	),

	TP_ARGS(task, rep, credits),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(unsigned int, credits)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rep->rr_xid);
		__entry->credits = credits;
	),

	TP_printk("task:%u@%u xid=0x%08x credits=%u",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->credits
	)
);

/* xprtrdma_reply_vers_err / _rqst_err / _short_err / _hdr_err */
DEFINE_REPLY_EVENT(vers);
DEFINE_REPLY_EVENT(rqst);
DEFINE_REPLY_EVENT(short);
DEFINE_REPLY_EVENT(hdr);

/*
 * The server rejected our RPC/RDMA protocol version; @min and @max
 * are the versions it supports, taken from the error reply.
 */
TRACE_EVENT(xprtrdma_err_vers,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		__be32 *min,
		__be32 *max
	),

	TP_ARGS(rqst, min, max),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(u32, min)
		__field(u32, max)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->min = be32_to_cpup(min);
		__entry->max = be32_to_cpup(max);
	),

	TP_printk("task:%u@%u xid=0x%08x versions=[%u, %u]",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->min, __entry->max
	)
);

/* The server reported a chunk-related error for @rqst. */
TRACE_EVENT(xprtrdma_err_chunk,
	TP_PROTO(
		const struct rpc_rqst *rqst
	),

	TP_ARGS(rqst),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
	),

	TP_printk("task:%u@%u xid=0x%08x",
		__entry->task_id, __entry->client_id, __entry->xid
	)
);
1170 
1171 TRACE_EVENT(xprtrdma_err_unrecognized,
1172 	TP_PROTO(
1173 		const struct rpc_rqst *rqst,
1174 		__be32 *procedure
1175 	),
1176 
1177 	TP_ARGS(rqst, procedure),
1178 
1179 	TP_STRUCT__entry(
1180 		__field(unsigned int, task_id)
1181 		__field(unsigned int, client_id)
1182 		__field(u32, xid)
1183 		__field(u32, procedure)
1184 	),
1185 
1186 	TP_fast_assign(
1187 		__entry->task_id = rqst->rq_task->tk_pid;
1188 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
1189 		__entry->procedure = be32_to_cpup(procedure);
1190 	),
1191 
1192 	TP_printk("task:%u@%u xid=0x%08x procedure=%u",
1193 		__entry->task_id, __entry->client_id, __entry->xid,
1194 		__entry->procedure
1195 	)
1196 );
1197 
/* Records how many bytes (@fixup) were copied when re-aligning a
 * received reply, along with the receive buffer's head/page/tail
 * geometry taken from rq_rcv_buf at the time of the event.
 */
TRACE_EVENT(xprtrdma_fixup,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		unsigned long fixup
	),

	TP_ARGS(rqst, fixup),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(unsigned long, fixup)
		__field(size_t, headlen)
		__field(unsigned int, pagelen)
		__field(size_t, taillen)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->fixup = fixup;
		__entry->headlen = rqst->rq_rcv_buf.head[0].iov_len;
		__entry->pagelen = rqst->rq_rcv_buf.page_len;
		__entry->taillen = rqst->rq_rcv_buf.tail[0].iov_len;
	),

	TP_printk("task:%u@%u fixup=%lu xdr=%zu/%u/%zu",
		__entry->task_id, __entry->client_id, __entry->fixup,
		__entry->headlen, __entry->pagelen, __entry->taillen
	)
);
1229 
/* Records one decoded RDMA segment as a (handle, length, offset)
 * triple; printed in length@offset:handle form.
 */
TRACE_EVENT(xprtrdma_decode_seg,
	TP_PROTO(
		u32 handle,
		u32 length,
		u64 offset
	),

	TP_ARGS(handle, length, offset),

	TP_STRUCT__entry(
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->handle = handle;
		__entry->length = length;
		__entry->offset = offset;
	),

	TP_printk("%u@0x%016llx:0x%08x",
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle
	)
);
1256 
/* Fires when MRs are zapped on behalf of @task; records only the
 * task/client identity.
 */
TRACE_EVENT(xprtrdma_mrs_zap,
	TP_PROTO(
		const struct rpc_task *task
	),

	TP_ARGS(task),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
	),

	TP_printk("task:%u@%u",
		__entry->task_id, __entry->client_id
	)
);
1278 
1279 /**
1280  ** Callback events
1281  **/
1282 
/* Fires when backchannel resources are set up on @r_xprt; records the
 * number of callback requests provisioned and the peer address/port
 * strings for correlation.
 */
TRACE_EVENT(xprtrdma_cb_setup,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int reqs
	),

	TP_ARGS(r_xprt, reqs),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned int, reqs)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->reqs = reqs;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: %u reqs",
		__get_str(addr), __get_str(port),
		__entry->r_xprt, __entry->reqs
	)
);

/* Backchannel call/reply tracepoints; the class and the
 * DEFINE_CALLBACK_EVENT macro are defined earlier in this file.
 */
DEFINE_CALLBACK_EVENT(call);
DEFINE_CALLBACK_EVENT(reply);
1313 
1314 /**
1315  ** Server-side RPC/RDMA events
1316  **/
1317 
/* Event class for failures during server-side connection accept:
 * records the failing status code and the remote peer address.
 */
DECLARE_EVENT_CLASS(svcrdma_accept_class,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		long status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(long, status)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s status=%ld",
		__get_str(addr), __entry->status
	)
);

/* Instantiate an svcrdma_<name>_err tracepoint from the accept class. */
#define DEFINE_ACCEPT_EVENT(name) \
		DEFINE_EVENT(svcrdma_accept_class, svcrdma_##name##_err, \
				TP_PROTO( \
					const struct svcxprt_rdma *rdma, \
					long status \
				), \
				TP_ARGS(rdma, status))

DEFINE_ACCEPT_EVENT(pd);
DEFINE_ACCEPT_EVENT(qp);
DEFINE_ACCEPT_EVENT(fabric);
DEFINE_ACCEPT_EVENT(initdepth);
DEFINE_ACCEPT_EVENT(accept);
1354 
/* Export the RPC/RDMA procedure constants so user-space trace tools
 * can resolve the symbolic names used by show_rpcrdma_proc() below.
 */
TRACE_DEFINE_ENUM(RDMA_MSG);
TRACE_DEFINE_ENUM(RDMA_NOMSG);
TRACE_DEFINE_ENUM(RDMA_MSGP);
TRACE_DEFINE_ENUM(RDMA_DONE);
TRACE_DEFINE_ENUM(RDMA_ERROR);

/* Render an rdma_proc value as its symbolic name in trace output. */
#define show_rpcrdma_proc(x)						\
		__print_symbolic(x,					\
				{ RDMA_MSG, "RDMA_MSG" },		\
				{ RDMA_NOMSG, "RDMA_NOMSG" },		\
				{ RDMA_MSGP, "RDMA_MSGP" },		\
				{ RDMA_DONE, "RDMA_DONE" },		\
				{ RDMA_ERROR, "RDMA_ERROR" })
1368 
/* Fires after a received RPC/RDMA transport header has been decoded;
 * @p points at the header words and @hdrlen is the decoded length.
 */
TRACE_EVENT(svcrdma_decode_rqst,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		__be32 *p,
		unsigned int hdrlen
	),

	TP_ARGS(ctxt, p, hdrlen),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
		__field(unsigned int, hdrlen)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		/* Header words are consumed in wire order:
		 * xid, vers, credits, proc. The p++ sequence matters.
		 */
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
		__entry->hdrlen = hdrlen;
	),

	TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%s hdrlen=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->xid, __entry->vers, __entry->credits,
		show_rpcrdma_proc(__entry->proc), __entry->hdrlen)
);
1403 
/* Fires when a received message is too short to contain a complete
 * RPC/RDMA transport header; records the observed header length.
 */
TRACE_EVENT(svcrdma_decode_short_err,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		unsigned int hdrlen
	),

	TP_ARGS(ctxt, hdrlen),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned int, hdrlen)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		__entry->hdrlen = hdrlen;
	),

	TP_printk("cq.id=%u cid=%d hdrlen=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->hdrlen)
);
1428 
/* Event class for requests rejected during header decoding; captures
 * the four fixed header words (@p points at the start of the header).
 */
DECLARE_EVENT_CLASS(svcrdma_badreq_event,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		__be32 *p
	),

	TP_ARGS(ctxt, p),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		/* Wire order: xid, vers, credits, proc. */
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
	),

	TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->xid, __entry->vers, __entry->credits, __entry->proc)
);

/* Instantiate an svcrdma_decode_<name>_err tracepoint. */
#define DEFINE_BADREQ_EVENT(name)					\
		DEFINE_EVENT(svcrdma_badreq_event,			\
			     svcrdma_decode_##name##_err,		\
				TP_PROTO(				\
					const struct svc_rdma_recv_ctxt *ctxt,	\
					__be32 *p			\
				),					\
				TP_ARGS(ctxt, p))

DEFINE_BADREQ_EVENT(badvers);
DEFINE_BADREQ_EVENT(drop);
DEFINE_BADREQ_EVENT(badproc);
DEFINE_BADREQ_EVENT(parse);
1473 
/* Records one Write segment as it is encoded into a reply: segment
 * index plus its (handle, length, offset) triple, correlated by the
 * send context's completion ID.
 */
TRACE_EVENT(svcrdma_encode_wseg,
	TP_PROTO(
		const struct svc_rdma_send_ctxt *ctxt,
		u32 segno,
		u32 handle,
		u32 length,
		u64 offset
	),

	TP_ARGS(ctxt, segno, handle, length, offset),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, segno)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->sc_cid.ci_queue_id;
		__entry->completion_id = ctxt->sc_cid.ci_completion_id;
		__entry->segno = segno;
		__entry->handle = handle;
		__entry->length = length;
		__entry->offset = offset;
	),

	TP_printk("cq_id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
		__entry->cq_id, __entry->completion_id,
		__entry->segno, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle
	)
);
1509 
/* Records one decoded Read segment along with its chunk's position
 * field and the receive completion ID it belongs to.
 */
TRACE_EVENT(svcrdma_decode_rseg,
	TP_PROTO(
		const struct rpc_rdma_cid *cid,
		const struct svc_rdma_chunk *chunk,
		const struct svc_rdma_segment *segment
	),

	TP_ARGS(cid, chunk, segment),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, segno)
		__field(u32, position)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		/* NOTE(review): "segno" is filled from ch_segcount here,
		 * while svcrdma_decode_wseg uses an explicit index —
		 * presumably the running count doubles as the index at
		 * decode time; confirm against the caller.
		 */
		__entry->segno = chunk->ch_segcount;
		__entry->position = chunk->ch_position;
		__entry->handle = segment->rs_handle;
		__entry->length = segment->rs_length;
		__entry->offset = segment->rs_offset;
	),

	TP_printk("cq_id=%u cid=%d segno=%u position=%u %u@0x%016llx:0x%08x",
		__entry->cq_id, __entry->completion_id,
		__entry->segno, __entry->position, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle
	)
);
1545 
/* Records segment @segno of a decoded Write chunk, looked up from
 * chunk->ch_segments[], with the receive completion ID.
 */
TRACE_EVENT(svcrdma_decode_wseg,
	TP_PROTO(
		const struct rpc_rdma_cid *cid,
		const struct svc_rdma_chunk *chunk,
		u32 segno
	),

	TP_ARGS(cid, chunk, segno),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, segno)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		const struct svc_rdma_segment *segment =
			&chunk->ch_segments[segno];

		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->segno = segno;
		__entry->handle = segment->rs_handle;
		__entry->length = segment->rs_length;
		__entry->offset = segment->rs_offset;
	),

	TP_printk("cq_id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
		__entry->cq_id, __entry->completion_id,
		__entry->segno, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle
	)
);
1582 
/* Event class for server-side error replies, keyed only by the XID
 * of the request being answered.
 */
DECLARE_EVENT_CLASS(svcrdma_error_event,
	TP_PROTO(
		__be32 xid
	),

	TP_ARGS(xid),

	TP_STRUCT__entry(
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpu(xid);
	),

	TP_printk("xid=0x%08x",
		__entry->xid
	)
);

/* Instantiate an svcrdma_err_<name> tracepoint from the error class. */
#define DEFINE_ERROR_EVENT(name)					\
		DEFINE_EVENT(svcrdma_error_event, svcrdma_err_##name,	\
				TP_PROTO(				\
					__be32 xid			\
				),					\
				TP_ARGS(xid))

DEFINE_ERROR_EVENT(vers);
DEFINE_ERROR_EVENT(chunk);
1612 
1613 /**
1614  ** Server-side RDMA API events
1615  **/
1616 
/* Event class for server-side DMA mapping activity: records the DMA
 * address and length plus the device name and peer address strings.
 */
DECLARE_EVENT_CLASS(svcrdma_dma_map_class,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		u64 dma_addr,
		u32 length
	),

	TP_ARGS(rdma, dma_addr, length),

	TP_STRUCT__entry(
		__field(u64, dma_addr)
		__field(u32, length)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->dma_addr = dma_addr;
		__entry->length = length;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s dma_addr=%llu length=%u",
		__get_str(addr), __get_str(device),
		__entry->dma_addr, __entry->length
	)
);

/* Instantiate an svcrdma_<name> tracepoint from the DMA map class. */
#define DEFINE_SVC_DMA_EVENT(name)					\
		DEFINE_EVENT(svcrdma_dma_map_class, svcrdma_##name,	\
				TP_PROTO(				\
					const struct svcxprt_rdma *rdma,\
					u64 dma_addr,			\
					u32 length			\
				),					\
				TP_ARGS(rdma, dma_addr, length))

DEFINE_SVC_DMA_EVENT(dma_map_page);
DEFINE_SVC_DMA_EVENT(dma_map_err);
DEFINE_SVC_DMA_EVENT(dma_unmap_page);
1658 
/* Fires when DMA-mapping a scatterlist for an RDMA Read/Write fails;
 * records the element count and the failing status.
 */
TRACE_EVENT(svcrdma_dma_map_rw_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		unsigned int nents,
		int status
	),

	TP_ARGS(rdma, nents, status),

	TP_STRUCT__entry(
		__field(int, status)
		__field(unsigned int, nents)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__entry->nents = nents;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s nents=%u status=%d",
		__get_str(addr), __get_str(device), __entry->nents,
		__entry->status
	)
);
1687 
/* Fires when an R/W context could not be obtained; records the SGE
 * count that was requested.
 */
TRACE_EVENT(svcrdma_no_rwctx_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		unsigned int num_sges
	),

	TP_ARGS(rdma, num_sges),

	TP_STRUCT__entry(
		__field(unsigned int, num_sges)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->num_sges = num_sges;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s num_sges=%d",
		__get_str(addr), __get_str(device), __entry->num_sges
	)
);
1712 
/* Fires when page index @pageno overruns the available pages while
 * servicing @rqst; records the request's XID for correlation.
 */
TRACE_EVENT(svcrdma_page_overrun_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		const struct svc_rqst *rqst,
		unsigned int pageno
	),

	TP_ARGS(rdma, rqst, pageno),

	TP_STRUCT__entry(
		__field(unsigned int, pageno)
		__field(u32, xid)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->pageno = pageno;
		__entry->xid = __be32_to_cpu(rqst->rq_xid);
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s xid=0x%08x pageno=%u", __get_str(addr),
		__get_str(device), __entry->xid, __entry->pageno
	)
);
1740 
/* Fires when a Write chunk is too small for the payload: records the
 * bytes still unwritten, the segment where the chunk ran out, and the
 * chunk's total segment count.
 */
TRACE_EVENT(svcrdma_small_wrch_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		unsigned int remaining,
		unsigned int seg_no,
		unsigned int num_segs
	),

	TP_ARGS(rdma, remaining, seg_no, num_segs),

	TP_STRUCT__entry(
		__field(unsigned int, remaining)
		__field(unsigned int, seg_no)
		__field(unsigned int, num_segs)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->remaining = remaining;
		__entry->seg_no = seg_no;
		__entry->num_segs = num_segs;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s remaining=%u seg_no=%u num_segs=%u",
		__get_str(addr), __get_str(device), __entry->remaining,
		__entry->seg_no, __entry->num_segs
	)
);
1772 
1773 TRACE_EVENT(svcrdma_send_pullup,
1774 	TP_PROTO(
1775 		const struct svc_rdma_send_ctxt *ctxt,
1776 		unsigned int msglen
1777 	),
1778 
1779 	TP_ARGS(ctxt, msglen),
1780 
1781 	TP_STRUCT__entry(
1782 		__field(u32, cq_id)
1783 		__field(int, completion_id)
1784 		__field(unsigned int, hdrlen)
1785 		__field(unsigned int, msglen)
1786 	),
1787 
1788 	TP_fast_assign(
1789 		__entry->cq_id = ctxt->sc_cid.ci_queue_id;
1790 		__entry->completion_id = ctxt->sc_cid.ci_completion_id;
1791 		__entry->hdrlen = ctxt->sc_hdrbuf.len,
1792 		__entry->msglen = msglen;
1793 	),
1794 
1795 	TP_printk("cq_id=%u cid=%d hdr=%u msg=%u (total %u)",
1796 		__entry->cq_id, __entry->completion_id,
1797 		__entry->hdrlen, __entry->msglen,
1798 		__entry->hdrlen + __entry->msglen)
1799 );
1800 
/* Fires when sending a reply for @rqst fails; records the status and
 * the request's XID and peer address.
 */
TRACE_EVENT(svcrdma_send_err,
	TP_PROTO(
		const struct svc_rqst *rqst,
		int status
	),

	TP_ARGS(rqst, status),

	TP_STRUCT__entry(
		__field(int, status)
		__field(u32, xid)
		__string(addr, rqst->rq_xprt->xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__entry->xid = __be32_to_cpu(rqst->rq_xid);
		__assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
	),

	TP_printk("addr=%s xid=0x%08x status=%d", __get_str(addr),
		__entry->xid, __entry->status
	)
);
1825 
/* Fires when a Send WR is posted; records the SGE count and, for
 * Send-with-Invalidate, the rkey being invalidated (0 otherwise).
 */
TRACE_EVENT(svcrdma_post_send,
	TP_PROTO(
		const struct svc_rdma_send_ctxt *ctxt
	),

	TP_ARGS(ctxt),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned int, num_sge)
		__field(u32, inv_rkey)
	),

	TP_fast_assign(
		const struct ib_send_wr *wr = &ctxt->sc_send_wr;

		__entry->cq_id = ctxt->sc_cid.ci_queue_id;
		__entry->completion_id = ctxt->sc_cid.ci_completion_id;
		__entry->num_sge = wr->num_sge;
		/* invalidate_rkey is only meaningful for this opcode */
		__entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ?
					wr->ex.invalidate_rkey : 0;
	),

	TP_printk("cq_id=%u cid=%d num_sge=%u inv_rkey=0x%08x",
		__entry->cq_id, __entry->completion_id,
		__entry->num_sge, __entry->inv_rkey
	)
);

/* Send completion tracepoint (class defined earlier in this file). */
DEFINE_COMPLETION_EVENT(svcrdma_wc_send);
1857 
1858 TRACE_EVENT(svcrdma_post_recv,
1859 	TP_PROTO(
1860 		const struct svc_rdma_recv_ctxt *ctxt
1861 	),
1862 
1863 	TP_ARGS(ctxt),
1864 
1865 	TP_STRUCT__entry(
1866 		__field(u32, cq_id)
1867 		__field(int, completion_id)
1868 	),
1869 
1870 	TP_fast_assign(
1871 		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
1872 		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
1873 	),
1874 
1875 	TP_printk("cq.id=%d cid=%d",
1876 		__entry->cq_id, __entry->completion_id
1877 	)
1878 );
1879 
/* Receive completion tracepoint (macro defined earlier in this file). */
DEFINE_RECEIVE_COMPLETION_EVENT(svcrdma_wc_receive);
1881 
/* Fires when posting to the Receive queue fails; records the failing
 * status and peer address.
 */
TRACE_EVENT(svcrdma_rq_post_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		int status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(int, status)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s status=%d",
		__get_str(addr), __entry->status
	)
);
1904 
/* Event class for posting a chain of chunk WRs; records how many send
 * queue entries the chain consumes.
 */
DECLARE_EVENT_CLASS(svcrdma_post_chunk_class,
	TP_PROTO(
		const struct rpc_rdma_cid *cid,
		int sqecount
	),

	TP_ARGS(cid, sqecount),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(int, sqecount)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->sqecount = sqecount;
	),

	TP_printk("cq.id=%u cid=%d sqecount=%d",
		__entry->cq_id, __entry->completion_id,
		__entry->sqecount
	)
);

/* Instantiate an svcrdma_post_<name>_chunk tracepoint. */
#define DEFINE_POST_CHUNK_EVENT(name)					\
		DEFINE_EVENT(svcrdma_post_chunk_class,			\
				svcrdma_post_##name##_chunk,		\
				TP_PROTO(				\
					const struct rpc_rdma_cid *cid,	\
					int sqecount			\
				),					\
				TP_ARGS(cid, sqecount))

DEFINE_POST_CHUNK_EVENT(read);
DEFINE_POST_CHUNK_EVENT(write);
DEFINE_POST_CHUNK_EVENT(reply);

/* Read/Write completion tracepoints (class defined earlier in file). */
DEFINE_COMPLETION_EVENT(svcrdma_wc_read);
DEFINE_COMPLETION_EVENT(svcrdma_wc_write);
1946 
/* Fires on an asynchronous QP event; formats the peer sockaddr into a
 * fixed-size string at assign time.
 */
TRACE_EVENT(svcrdma_qp_error,
	TP_PROTO(
		const struct ib_event *event,
		const struct sockaddr *sap
	),

	TP_ARGS(event, sap),

	TP_STRUCT__entry(
		__field(unsigned int, event)
		__string(device, event->device->name)
		__array(__u8, addr, INET6_ADDRSTRLEN + 10)
	),

	TP_fast_assign(
		__entry->event = event->event;
		__assign_str(device, event->device->name);
		/* NOTE(review): the "- 1" is extra-conservative;
		 * snprintf() already NUL-terminates within the given
		 * size, so it only sacrifices one usable byte.
		 */
		snprintf(__entry->addr, sizeof(__entry->addr) - 1,
			 "%pISpc", sap);
	),

	TP_printk("addr=%s dev=%s event=%s (%u)",
		__entry->addr, __get_str(device),
		rdma_show_ib_event(__entry->event), __entry->event
	)
);
1973 
/* Event class for send-queue pressure: snapshots the available SQ
 * entry count against the configured SQ depth.
 */
DECLARE_EVENT_CLASS(svcrdma_sendqueue_event,
	TP_PROTO(
		const struct svcxprt_rdma *rdma
	),

	TP_ARGS(rdma),

	TP_STRUCT__entry(
		__field(int, avail)
		__field(int, depth)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->avail = atomic_read(&rdma->sc_sq_avail);
		__entry->depth = rdma->sc_sq_depth;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s sc_sq_avail=%d/%d",
		__get_str(addr), __entry->avail, __entry->depth
	)
);

/* Instantiate an svcrdma_sq_<name> tracepoint from the SQ class. */
#define DEFINE_SQ_EVENT(name)						\
		DEFINE_EVENT(svcrdma_sendqueue_event, svcrdma_sq_##name,\
				TP_PROTO(				\
					const struct svcxprt_rdma *rdma \
				),					\
				TP_ARGS(rdma))

DEFINE_SQ_EVENT(full);
DEFINE_SQ_EVENT(retry);
2007 
/* Fires when posting to the send queue fails; records the failing
 * status plus the SQ availability/depth snapshot at that moment.
 */
TRACE_EVENT(svcrdma_sq_post_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		int status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(int, avail)
		__field(int, depth)
		__field(int, status)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->avail = atomic_read(&rdma->sc_sq_avail);
		__entry->depth = rdma->sc_sq_depth;
		__entry->status = status;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s sc_sq_avail=%d/%d status=%d",
		__get_str(addr), __entry->avail, __entry->depth,
		__entry->status
	)
);
2035 
2036 #endif /* _TRACE_RPCRDMA_H */
2037 
2038 #include <trace/define_trace.h>
2039