xref: /openbmc/linux/include/trace/events/rpcrdma.h (revision 83189d15)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Copyright (c) 2017, 2018 Oracle.  All rights reserved.
4  *
5  * Trace point definitions for the "rpcrdma" subsystem.
6  */
7 #undef TRACE_SYSTEM
8 #define TRACE_SYSTEM rpcrdma
9 
10 #if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ)
11 #define _TRACE_RPCRDMA_H
12 
13 #include <linux/scatterlist.h>
14 #include <linux/sunrpc/rpc_rdma_cid.h>
15 #include <linux/tracepoint.h>
16 #include <rdma/ib_cm.h>
17 #include <trace/events/rdma.h>
18 
19 /**
20  ** Event classes
21  **/
22 
/*
 * Generic work-request completion: records the CQ and completion IDs
 * plus the ib_wc status; vendor_err is captured only on failure.
 */
DECLARE_EVENT_CLASS(rpcrdma_completion_class,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid
	),

	TP_ARGS(wc, cid),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned long, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->status = wc->status;
		/* wc->vendor_err is meaningful only when status != IB_WC_SUCCESS */
		if (wc->status)
			__entry->vendor_err = wc->vendor_err;
		else
			__entry->vendor_err = 0;
	),

	TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
		__entry->cq_id, __entry->completion_id,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);
54 
/* Instantiate a trace event from rpcrdma_completion_class. */
#define DEFINE_COMPLETION_EVENT(name)					\
		DEFINE_EVENT(rpcrdma_completion_class, name,		\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpc_rdma_cid *cid	\
				),					\
				TP_ARGS(wc, cid))
62 
/*
 * MR-related work-request completion (FastReg / LocalInv). Same fields
 * as rpcrdma_completion_class, but the completion ID identifies an MR,
 * so the output labels it "mr.id=" rather than "cid=".
 */
DECLARE_EVENT_CLASS(rpcrdma_mr_completion_class,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid
	),

	TP_ARGS(wc, cid),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned long, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->status = wc->status;
		/* wc->vendor_err is meaningful only when status != IB_WC_SUCCESS */
		if (wc->status)
			__entry->vendor_err = wc->vendor_err;
		else
			__entry->vendor_err = 0;
	),

	TP_printk("cq.id=%u mr.id=%d status=%s (%lu/0x%x)",
		__entry->cq_id, __entry->completion_id,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);
94 
/* Instantiate a trace event from rpcrdma_mr_completion_class. */
#define DEFINE_MR_COMPLETION_EVENT(name)				\
		DEFINE_EVENT(rpcrdma_mr_completion_class, name,		\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpc_rdma_cid *cid	\
				),					\
				TP_ARGS(wc, cid))
102 
/*
 * Receive completion: like rpcrdma_completion_class but also records
 * the number of bytes received (wc->byte_len), which is valid only
 * when the completion succeeded.
 */
DECLARE_EVENT_CLASS(rpcrdma_receive_completion_class,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid
	),

	TP_ARGS(wc, cid),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, received)
		__field(unsigned long, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->status = wc->status;
		/* byte_len is undefined on failure; vendor_err on success */
		if (wc->status) {
			__entry->received = 0;
			__entry->vendor_err = wc->vendor_err;
		} else {
			__entry->received = wc->byte_len;
			__entry->vendor_err = 0;
		}
	),

	TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x) received=%u",
		__entry->cq_id, __entry->completion_id,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err,
		__entry->received
	)
);
139 
/* Instantiate a trace event from rpcrdma_receive_completion_class. */
#define DEFINE_RECEIVE_COMPLETION_EVENT(name)				\
		DEFINE_EVENT(rpcrdma_receive_completion_class, name,	\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpc_rdma_cid *cid	\
				),					\
				TP_ARGS(wc, cid))
147 
/*
 * Incoming RPC/RDMA reply: captures the decoded transport header
 * fields (xid, version, proc) and the peer address of the transport
 * the reply arrived on.
 */
DECLARE_EVENT_CLASS(xprtrdma_reply_class,
	TP_PROTO(
		const struct rpcrdma_rep *rep
	),

	TP_ARGS(rep),

	TP_STRUCT__entry(
		__field(u32, xid)
		__field(u32, version)
		__field(u32, proc)
		__string(addr, rpcrdma_addrstr(rep->rr_rxprt))
		__string(port, rpcrdma_portstr(rep->rr_rxprt))
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpu(rep->rr_xid);
		__entry->version = be32_to_cpu(rep->rr_vers);
		__entry->proc = be32_to_cpu(rep->rr_proc);
		__assign_str(addr, rpcrdma_addrstr(rep->rr_rxprt));
		__assign_str(port, rpcrdma_portstr(rep->rr_rxprt));
	),

	TP_printk("peer=[%s]:%s xid=0x%08x version=%u proc=%u",
		__get_str(addr), __get_str(port),
		__entry->xid, __entry->version, __entry->proc
	)
);
176 
/* Instantiate xprtrdma_reply_<name>_err from xprtrdma_reply_class. */
#define DEFINE_REPLY_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_reply_class,			\
				xprtrdma_reply_##name##_err,		\
				TP_PROTO(				\
					const struct rpcrdma_rep *rep	\
				),					\
				TP_ARGS(rep))
184 
/* Transport-scoped event: records only the peer's address and port. */
DECLARE_EVENT_CLASS(xprtrdma_rxprt,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt
	),

	TP_ARGS(r_xprt),

	TP_STRUCT__entry(
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s",
		__get_str(addr), __get_str(port)
	)
);
206 
/* Instantiate a trace event from xprtrdma_rxprt. */
#define DEFINE_RXPRT_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_rxprt, name,			\
				TP_PROTO(				\
					const struct rpcrdma_xprt *r_xprt \
				),					\
				TP_ARGS(r_xprt))
213 
/*
 * Connection state change: records the operation's return code and
 * the endpoint's current connect status alongside the peer address.
 */
DECLARE_EVENT_CLASS(xprtrdma_connect_class,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		int rc
	),

	TP_ARGS(r_xprt, rc),

	TP_STRUCT__entry(
		__field(int, rc)
		__field(int, connect_status)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->rc = rc;
		__entry->connect_status = r_xprt->rx_ep->re_connect_status;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s rc=%d connection status=%d",
		__get_str(addr), __get_str(port),
		__entry->rc, __entry->connect_status
	)
);
241 
/* Instantiate xprtrdma_<name> from xprtrdma_connect_class. */
#define DEFINE_CONN_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_connect_class, xprtrdma_##name,	\
				TP_PROTO(				\
					const struct rpcrdma_xprt *r_xprt, \
					int rc				\
				),					\
				TP_ARGS(r_xprt, rc))
249 
/*
 * Read chunk registration: records the RPC task, the chunk's XDR
 * position, and the MR's handle/length/offset. The trailing tag shows
 * whether more segments follow (nents < nsegs) or this is the last.
 */
DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
	TP_PROTO(
		const struct rpc_task *task,
		unsigned int pos,
		struct rpcrdma_mr *mr,
		int nsegs
	),

	TP_ARGS(task, pos, mr, nsegs),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(unsigned int, pos)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(int, nsegs)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->pos = pos;
		__entry->nents = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->nsegs = nsegs;
	),

	TP_printk("task:%u@%u pos=%u %u@0x%016llx:0x%08x (%s)",
		__entry->task_id, __entry->client_id,
		__entry->pos, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		__entry->nents < __entry->nsegs ? "more" : "last"
	)
);
289 
/* Instantiate xprtrdma_chunk_<name> from xprtrdma_rdch_event. */
#define DEFINE_RDCH_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_rdch_event, xprtrdma_chunk_##name,\
				TP_PROTO(				\
					const struct rpc_task *task,	\
					unsigned int pos,		\
					struct rpcrdma_mr *mr,		\
					int nsegs			\
				),					\
				TP_ARGS(task, pos, mr, nsegs))
299 
/*
 * Write/Reply chunk registration: same as xprtrdma_rdch_event but
 * without an XDR position (write/reply chunks have none).
 */
DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
	TP_PROTO(
		const struct rpc_task *task,
		struct rpcrdma_mr *mr,
		int nsegs
	),

	TP_ARGS(task, mr, nsegs),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(int, nsegs)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->nents = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->nsegs = nsegs;
	),

	TP_printk("task:%u@%u %u@0x%016llx:0x%08x (%s)",
		__entry->task_id, __entry->client_id,
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle,
		__entry->nents < __entry->nsegs ? "more" : "last"
	)
);
336 
/* Instantiate xprtrdma_chunk_<name> from xprtrdma_wrch_event. */
#define DEFINE_WRCH_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_wrch_event, xprtrdma_chunk_##name,\
				TP_PROTO(				\
					const struct rpc_task *task,	\
					struct rpcrdma_mr *mr,		\
					int nsegs			\
				),					\
				TP_ARGS(task, mr, nsegs))
345 
/* Export DMA direction values so user space can decode them. */
TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL);
TRACE_DEFINE_ENUM(DMA_TO_DEVICE);
TRACE_DEFINE_ENUM(DMA_FROM_DEVICE);
TRACE_DEFINE_ENUM(DMA_NONE);

/* Render a DMA data direction as a human-readable string. */
#define xprtrdma_show_direction(x)					\
		__print_symbolic(x,					\
				{ DMA_BIDIRECTIONAL, "BIDIR" },		\
				{ DMA_TO_DEVICE, "TO_DEVICE" },		\
				{ DMA_FROM_DEVICE, "FROM_DEVICE" },	\
				{ DMA_NONE, "NONE" })
357 
/*
 * MR event tied to an RPC: records the owning task (via mr->mr_req)
 * plus the MR's resource id, sge count, handle/length/offset, and DMA
 * direction. Requires mr->mr_req to be valid at trace time.
 */
DECLARE_EVENT_CLASS(xprtrdma_mr_class,
	TP_PROTO(
		const struct rpcrdma_mr *mr
	),

	TP_ARGS(mr),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, mr_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(u32, dir)
	),

	TP_fast_assign(
		const struct rpcrdma_req *req = mr->mr_req;
		const struct rpc_task *task = req->rl_slot.rq_task;

		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->mr_id  = mr->frwr.fr_mr->res.id;
		__entry->nents  = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->dir    = mr->mr_dir;
	),

	TP_printk("task:%u@%u mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)",
		__entry->task_id, __entry->client_id,
		__entry->mr_id, __entry->nents, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		xprtrdma_show_direction(__entry->dir)
	)
);
397 
/* Instantiate xprtrdma_mr_<name> from xprtrdma_mr_class. */
#define DEFINE_MR_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_mr_class,				\
				xprtrdma_mr_##name,			\
				TP_PROTO(				\
					const struct rpcrdma_mr *mr	\
				),					\
				TP_ARGS(mr))
405 
/*
 * MR event with no associated RPC task: same MR fields as
 * xprtrdma_mr_class, but safe when mr->mr_req is not valid.
 */
DECLARE_EVENT_CLASS(xprtrdma_anonymous_mr_class,
	TP_PROTO(
		const struct rpcrdma_mr *mr
	),

	TP_ARGS(mr),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(u32, dir)
	),

	TP_fast_assign(
		__entry->mr_id  = mr->frwr.fr_mr->res.id;
		__entry->nents  = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->dir    = mr->mr_dir;
	),

	TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)",
		__entry->mr_id, __entry->nents, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		xprtrdma_show_direction(__entry->dir)
	)
);
437 
/* Instantiate xprtrdma_mr_<name> from xprtrdma_anonymous_mr_class. */
#define DEFINE_ANON_MR_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_anonymous_mr_class,		\
				xprtrdma_mr_##name,			\
				TP_PROTO(				\
					const struct rpcrdma_mr *mr	\
				),					\
				TP_ARGS(mr))
445 
/* Backchannel callback event: records the request XID and peer address. */
DECLARE_EVENT_CLASS(xprtrdma_callback_class,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		const struct rpc_rqst *rqst
	),

	TP_ARGS(r_xprt, rqst),

	TP_STRUCT__entry(
		__field(u32, xid)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s xid=0x%08x",
		__get_str(addr), __get_str(port), __entry->xid
	)
);
470 
/* Instantiate xprtrdma_cb_<name> from xprtrdma_callback_class. */
#define DEFINE_CALLBACK_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_callback_class,			\
				xprtrdma_cb_##name,			\
				TP_PROTO(				\
					const struct rpcrdma_xprt *r_xprt, \
					const struct rpc_rqst *rqst	\
				),					\
				TP_ARGS(r_xprt, rqst))
479 
480 /**
481  ** Connection events
482  **/
483 
/*
 * Records the negotiated vs. calculated inline send/receive thresholds
 * for a connection, plus both endpoint addresses (stored as raw
 * sockaddr bytes and rendered with %pISpc).
 */
TRACE_EVENT(xprtrdma_inline_thresh,
	TP_PROTO(
		const struct rpcrdma_ep *ep
	),

	TP_ARGS(ep),

	TP_STRUCT__entry(
		__field(unsigned int, inline_send)
		__field(unsigned int, inline_recv)
		__field(unsigned int, max_send)
		__field(unsigned int, max_recv)
		__array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
		__array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
	),

	TP_fast_assign(
		const struct rdma_cm_id *id = ep->re_id;

		__entry->inline_send = ep->re_inline_send;
		__entry->inline_recv = ep->re_inline_recv;
		__entry->max_send = ep->re_max_inline_send;
		__entry->max_recv = ep->re_max_inline_recv;
		memcpy(__entry->srcaddr, &id->route.addr.src_addr,
		       sizeof(struct sockaddr_in6));
		memcpy(__entry->dstaddr, &id->route.addr.dst_addr,
		       sizeof(struct sockaddr_in6));
	),

	TP_printk("%pISpc -> %pISpc neg send/recv=%u/%u, calc send/recv=%u/%u",
		__entry->srcaddr, __entry->dstaddr,
		__entry->inline_send, __entry->inline_recv,
		__entry->max_send, __entry->max_recv
	)
);
519 
/* Connection lifetime events. */
DEFINE_CONN_EVENT(connect);
DEFINE_CONN_EVENT(disconnect);

DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc);
524 
/* Connect worker scheduled: records the requested delay (in jiffies). */
TRACE_EVENT(xprtrdma_op_connect,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned long delay
	),

	TP_ARGS(r_xprt, delay),

	TP_STRUCT__entry(
		__field(unsigned long, delay)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->delay = delay;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s delay=%lu",
		__get_str(addr), __get_str(port), __entry->delay
	)
);
549 
550 
/*
 * Connect timeouts updated: values are stored in jiffies but printed
 * in seconds (divided by HZ).
 */
TRACE_EVENT(xprtrdma_op_set_cto,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned long connect,
		unsigned long reconnect
	),

	TP_ARGS(r_xprt, connect, reconnect),

	TP_STRUCT__entry(
		__field(unsigned long, connect)
		__field(unsigned long, reconnect)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->connect = connect;
		__entry->reconnect = reconnect;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s connect=%lu reconnect=%lu",
		__get_str(addr), __get_str(port),
		__entry->connect / HZ, __entry->reconnect / HZ
	)
);
579 
/*
 * Queue pair asynchronous event: records the ib_event type, the
 * device name, and both endpoint addresses.
 */
TRACE_EVENT(xprtrdma_qp_event,
	TP_PROTO(
		const struct rpcrdma_ep *ep,
		const struct ib_event *event
	),

	TP_ARGS(ep, event),

	TP_STRUCT__entry(
		__field(unsigned long, event)
		__string(name, event->device->name)
		__array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
		__array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
	),

	TP_fast_assign(
		const struct rdma_cm_id *id = ep->re_id;

		__entry->event = event->event;
		__assign_str(name, event->device->name);
		memcpy(__entry->srcaddr, &id->route.addr.src_addr,
		       sizeof(struct sockaddr_in6));
		memcpy(__entry->dstaddr, &id->route.addr.dst_addr,
		       sizeof(struct sockaddr_in6));
	),

	TP_printk("%pISpc -> %pISpc device=%s %s (%lu)",
		__entry->srcaddr, __entry->dstaddr, __get_str(name),
		rdma_show_ib_event(__entry->event), __entry->event
	)
);
611 
612 /**
613  ** Call events
614  **/
615 
/* Records how many MRs were just allocated for this transport. */
TRACE_EVENT(xprtrdma_createmrs,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int count
	),

	TP_ARGS(r_xprt, count),

	TP_STRUCT__entry(
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
		__field(unsigned int, count)
	),

	TP_fast_assign(
		__entry->count = count;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s created %u MRs",
		__get_str(addr), __get_str(port), __entry->count
	)
);
640 
/* No MRs were available to marshal this request. */
TRACE_EVENT(xprtrdma_nomrs_err,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		const struct rpcrdma_req *req
	),

	TP_ARGS(r_xprt, req),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;

		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s task:%u@%u",
		__get_str(addr), __get_str(port),
		__entry->task_id, __entry->client_id
	)
);
670 
/* Chunk registration events, one per chunk list type. */
DEFINE_RDCH_EVENT(read);
DEFINE_WRCH_EVENT(write);
DEFINE_WRCH_EVENT(reply);

/* Export chunk type values so user space can decode them. */
TRACE_DEFINE_ENUM(rpcrdma_noch);
TRACE_DEFINE_ENUM(rpcrdma_noch_pullup);
TRACE_DEFINE_ENUM(rpcrdma_noch_mapped);
TRACE_DEFINE_ENUM(rpcrdma_readch);
TRACE_DEFINE_ENUM(rpcrdma_areadch);
TRACE_DEFINE_ENUM(rpcrdma_writech);
TRACE_DEFINE_ENUM(rpcrdma_replych);

/* Render an rpcrdma chunk type as a human-readable string. */
#define xprtrdma_show_chunktype(x)					\
		__print_symbolic(x,					\
				{ rpcrdma_noch, "inline" },		\
				{ rpcrdma_noch_pullup, "pullup" },	\
				{ rpcrdma_noch_mapped, "mapped" },	\
				{ rpcrdma_readch, "read list" },	\
				{ rpcrdma_areadch, "*read list" },	\
				{ rpcrdma_writech, "write list" },	\
				{ rpcrdma_replych, "reply chunk" })
692 
/*
 * Request marshaled: records the transport header length, the XDR
 * buffer's head/page/tail lengths, and the chosen read/write chunk
 * types.
 */
TRACE_EVENT(xprtrdma_marshal,
	TP_PROTO(
		const struct rpcrdma_req *req,
		unsigned int rtype,
		unsigned int wtype
	),

	TP_ARGS(req, rtype, wtype),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(unsigned int, hdrlen)
		__field(unsigned int, headlen)
		__field(unsigned int, pagelen)
		__field(unsigned int, taillen)
		__field(unsigned int, rtype)
		__field(unsigned int, wtype)
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;

		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->hdrlen = req->rl_hdrbuf.len;
		__entry->headlen = rqst->rq_snd_buf.head[0].iov_len;
		__entry->pagelen = rqst->rq_snd_buf.page_len;
		__entry->taillen = rqst->rq_snd_buf.tail[0].iov_len;
		__entry->rtype = rtype;
		__entry->wtype = wtype;
	),

	TP_printk("task:%u@%u xid=0x%08x: hdr=%u xdr=%u/%u/%u %s/%s",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->hdrlen,
		__entry->headlen, __entry->pagelen, __entry->taillen,
		xprtrdma_show_chunktype(__entry->rtype),
		xprtrdma_show_chunktype(__entry->wtype)
	)
);
736 
/* Marshaling the request failed; ret is the error code. */
TRACE_EVENT(xprtrdma_marshal_failed,
	TP_PROTO(const struct rpc_rqst *rqst,
		 int ret
	),

	TP_ARGS(rqst, ret),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->ret = ret;
	),

	TP_printk("task:%u@%u xid=0x%08x: ret=%d",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->ret
	)
);
763 
/* Preparing the Send WR failed; ret is the error code. */
TRACE_EVENT(xprtrdma_prepsend_failed,
	TP_PROTO(const struct rpc_rqst *rqst,
		 int ret
	),

	TP_ARGS(rqst, ret),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->ret = ret;
	),

	TP_printk("task:%u@%u xid=0x%08x: ret=%d",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->ret
	)
);
790 
/*
 * Send WR posted: records the send context's CQ/completion IDs, the
 * SGE count, and whether the WR requested a signaled completion.
 * client_id is -1 for requests with no rpc_clnt (e.g. backchannel).
 */
TRACE_EVENT(xprtrdma_post_send,
	TP_PROTO(
		const struct rpcrdma_req *req
	),

	TP_ARGS(req),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, num_sge)
		__field(int, signaled)
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;
		const struct rpcrdma_sendctx *sc = req->rl_sendctx;

		__entry->cq_id = sc->sc_cid.ci_queue_id;
		__entry->completion_id = sc->sc_cid.ci_completion_id;
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client ?
				     rqst->rq_task->tk_client->cl_clid : -1;
		__entry->num_sge = req->rl_wr.num_sge;
		__entry->signaled = req->rl_wr.send_flags & IB_SEND_SIGNALED;
	),

	TP_printk("task:%u@%u cq.id=%u cid=%d (%d SGE%s) %s",
		__entry->task_id, __entry->client_id,
		__entry->cq_id, __entry->completion_id,
		__entry->num_sge, (__entry->num_sge == 1 ? "" : "s"),
		(__entry->signaled ? "signaled" : "")
	)
);
827 
828 TRACE_EVENT(xprtrdma_post_recv,
829 	TP_PROTO(
830 		const struct rpcrdma_rep *rep
831 	),
832 
833 	TP_ARGS(rep),
834 
835 	TP_STRUCT__entry(
836 		__field(u32, cq_id)
837 		__field(int, completion_id)
838 	),
839 
840 	TP_fast_assign(
841 		__entry->cq_id = rep->rr_cid.ci_queue_id;
842 		__entry->completion_id = rep->rr_cid.ci_completion_id;
843 	),
844 
845 	TP_printk("cq.id=%d cid=%d",
846 		__entry->cq_id, __entry->completion_id
847 	)
848 );
849 
850 TRACE_EVENT(xprtrdma_post_recvs,
851 	TP_PROTO(
852 		const struct rpcrdma_xprt *r_xprt,
853 		unsigned int count,
854 		int status
855 	),
856 
857 	TP_ARGS(r_xprt, count, status),
858 
859 	TP_STRUCT__entry(
860 		__field(u32, cq_id)
861 		__field(unsigned int, count)
862 		__field(int, status)
863 		__field(int, posted)
864 		__string(addr, rpcrdma_addrstr(r_xprt))
865 		__string(port, rpcrdma_portstr(r_xprt))
866 	),
867 
868 	TP_fast_assign(
869 		const struct rpcrdma_ep *ep = r_xprt->rx_ep;
870 
871 		__entry->cq_id = ep->re_attr.recv_cq->res.id;
872 		__entry->count = count;
873 		__entry->status = status;
874 		__entry->posted = ep->re_receive_count;
875 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
876 		__assign_str(port, rpcrdma_portstr(r_xprt));
877 	),
878 
879 	TP_printk("peer=[%s]:%s cq.id=%d %u new recvs, %d active (rc %d)",
880 		__get_str(addr), __get_str(port), __entry->cq_id,
881 		__entry->count, __entry->posted, __entry->status
882 	)
883 );
884 
/* Posting a LocalInv WR failed; status is the error code. */
TRACE_EVENT(xprtrdma_post_linv_err,
	TP_PROTO(
		const struct rpcrdma_req *req,
		int status
	),

	TP_ARGS(req, status),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, status)
	),

	TP_fast_assign(
		const struct rpc_task *task = req->rl_slot.rq_task;

		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->status = status;
	),

	TP_printk("task:%u@%u status=%d",
		__entry->task_id, __entry->client_id, __entry->status
	)
);
911 
912 /**
913  ** Completion events
914  **/
915 
/* Work-completion events for Receive, Send, and MR operations. */
DEFINE_RECEIVE_COMPLETION_EVENT(xprtrdma_wc_receive);

DEFINE_COMPLETION_EVENT(xprtrdma_wc_send);
DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_fastreg);
DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li);
DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li_wake);
DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li_done);
923 
/* FRWR MR allocation result; rc is the error code (0 on success). */
TRACE_EVENT(xprtrdma_frwr_alloc,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int rc
	),

	TP_ARGS(mr, rc),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(int, rc)
	),

	TP_fast_assign(
		__entry->mr_id = mr->frwr.fr_mr->res.id;
		__entry->rc = rc;
	),

	TP_printk("mr.id=%u: rc=%d",
		__entry->mr_id, __entry->rc
	)
);
946 
/* FRWR MR deregistration result: MR identity fields plus rc. */
TRACE_EVENT(xprtrdma_frwr_dereg,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int rc
	),

	TP_ARGS(mr, rc),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(u32, dir)
		__field(int, rc)
	),

	TP_fast_assign(
		__entry->mr_id  = mr->frwr.fr_mr->res.id;
		__entry->nents  = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->dir    = mr->mr_dir;
		__entry->rc	= rc;
	),

	TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s): rc=%d",
		__entry->mr_id, __entry->nents, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		xprtrdma_show_direction(__entry->dir),
		__entry->rc
	)
);
982 
/* ib_map_mr_sg() returned an unexpected scatterlist element count. */
TRACE_EVENT(xprtrdma_frwr_sgerr,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int sg_nents
	),

	TP_ARGS(mr, sg_nents),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(u64, addr)
		__field(u32, dir)
		__field(int, nents)
	),

	TP_fast_assign(
		__entry->mr_id = mr->frwr.fr_mr->res.id;
		__entry->addr = mr->mr_sg->dma_address;
		__entry->dir = mr->mr_dir;
		__entry->nents = sg_nents;
	),

	TP_printk("mr.id=%u DMA addr=0x%llx (%s) sg_nents=%d",
		__entry->mr_id, __entry->addr,
		xprtrdma_show_direction(__entry->dir),
		__entry->nents
	)
);
1011 
/* DMA mapping mapped fewer elements (num_mapped) than requested (nents). */
TRACE_EVENT(xprtrdma_frwr_maperr,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int num_mapped
	),

	TP_ARGS(mr, num_mapped),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(u64, addr)
		__field(u32, dir)
		__field(int, num_mapped)
		__field(int, nents)
	),

	TP_fast_assign(
		__entry->mr_id = mr->frwr.fr_mr->res.id;
		__entry->addr = mr->mr_sg->dma_address;
		__entry->dir = mr->mr_dir;
		__entry->num_mapped = num_mapped;
		__entry->nents = mr->mr_nents;
	),

	TP_printk("mr.id=%u DMA addr=0x%llx (%s) nents=%d of %d",
		__entry->mr_id, __entry->addr,
		xprtrdma_show_direction(__entry->dir),
		__entry->num_mapped, __entry->nents
	)
);
1042 
/* MR lifecycle events (task-associated and anonymous). */
DEFINE_MR_EVENT(fastreg);
DEFINE_MR_EVENT(localinv);
DEFINE_MR_EVENT(reminv);
DEFINE_MR_EVENT(map);

DEFINE_ANON_MR_EVENT(unmap);
1049 
1050 TRACE_EVENT(xprtrdma_dma_maperr,
1051 	TP_PROTO(
1052 		u64 addr
1053 	),
1054 
1055 	TP_ARGS(addr),
1056 
1057 	TP_STRUCT__entry(
1058 		__field(u64, addr)
1059 	),
1060 
1061 	TP_fast_assign(
1062 		__entry->addr = addr;
1063 	),
1064 
1065 	TP_printk("dma addr=0x%llx\n", __entry->addr)
1066 );
1067 
1068 /**
1069  ** Reply events
1070  **/
1071 
/* Reply matched to a task: records the xid and granted credits. */
TRACE_EVENT(xprtrdma_reply,
	TP_PROTO(
		const struct rpc_task *task,
		const struct rpcrdma_rep *rep,
		unsigned int credits
	),

	TP_ARGS(task, rep, credits),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(unsigned int, credits)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rep->rr_xid);
		__entry->credits = credits;
	),

	TP_printk("task:%u@%u xid=0x%08x credits=%u",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->credits
	)
);
1100 
/* Reply-parsing error events (xprtrdma_reply_<name>_err). */
DEFINE_REPLY_EVENT(vers);
DEFINE_REPLY_EVENT(rqst);
DEFINE_REPLY_EVENT(short);
DEFINE_REPLY_EVENT(hdr);
1105 
/* Server reported an unsupported RPC/RDMA version; min/max supported. */
TRACE_EVENT(xprtrdma_err_vers,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		__be32 *min,
		__be32 *max
	),

	TP_ARGS(rqst, min, max),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(u32, min)
		__field(u32, max)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->min = be32_to_cpup(min);
		__entry->max = be32_to_cpup(max);
	),

	TP_printk("task:%u@%u xid=0x%08x versions=[%u, %u]",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->min, __entry->max
	)
);
1136 
/* Server reported a chunk-related error for this request. */
TRACE_EVENT(xprtrdma_err_chunk,
	TP_PROTO(
		const struct rpc_rqst *rqst
	),

	TP_ARGS(rqst),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
	),

	TP_printk("task:%u@%u xid=0x%08x",
		__entry->task_id, __entry->client_id, __entry->xid
	)
);
1160 
1161 TRACE_EVENT(xprtrdma_err_unrecognized,
1162 	TP_PROTO(
1163 		const struct rpc_rqst *rqst,
1164 		__be32 *procedure
1165 	),
1166 
1167 	TP_ARGS(rqst, procedure),
1168 
1169 	TP_STRUCT__entry(
1170 		__field(unsigned int, task_id)
1171 		__field(unsigned int, client_id)
1172 		__field(u32, xid)
1173 		__field(u32, procedure)
1174 	),
1175 
1176 	TP_fast_assign(
1177 		__entry->task_id = rqst->rq_task->tk_pid;
1178 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
1179 		__entry->procedure = be32_to_cpup(procedure);
1180 	),
1181 
1182 	TP_printk("task:%u@%u xid=0x%08x procedure=%u",
1183 		__entry->task_id, __entry->client_id, __entry->xid,
1184 		__entry->procedure
1185 	)
1186 );
1187 
/*
 * Reply data had to be copied (fixed up) into the receive buffer:
 * records the byte count copied and the rcv_buf head/page/tail sizes.
 */
TRACE_EVENT(xprtrdma_fixup,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		unsigned long fixup
	),

	TP_ARGS(rqst, fixup),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(unsigned long, fixup)
		__field(size_t, headlen)
		__field(unsigned int, pagelen)
		__field(size_t, taillen)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->fixup = fixup;
		__entry->headlen = rqst->rq_rcv_buf.head[0].iov_len;
		__entry->pagelen = rqst->rq_rcv_buf.page_len;
		__entry->taillen = rqst->rq_rcv_buf.tail[0].iov_len;
	),

	TP_printk("task:%u@%u fixup=%lu xdr=%zu/%u/%zu",
		__entry->task_id, __entry->client_id, __entry->fixup,
		__entry->headlen, __entry->pagelen, __entry->taillen
	)
);
1219 
/*
 * Records one decoded RDMA segment: its steering tag (handle),
 * byte length, and remote offset. Values are already CPU-endian
 * when passed in.
 */
TRACE_EVENT(xprtrdma_decode_seg,
	TP_PROTO(
		u32 handle,
		u32 length,
		u64 offset
	),

	TP_ARGS(handle, length, offset),

	TP_STRUCT__entry(
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->handle = handle;
		__entry->length = length;
		__entry->offset = offset;
	),

	/* Conventional "length@offset:handle" segment notation */
	TP_printk("%u@0x%016llx:0x%08x",
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle
	)
);
1246 
/*
 * Marks the point where the MRs associated with @task are zapped.
 * Only the task/client identity is recorded.
 */
TRACE_EVENT(xprtrdma_mrs_zap,
	TP_PROTO(
		const struct rpc_task *task
	),

	TP_ARGS(task),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
	),

	TP_printk("task:%u@%u",
		__entry->task_id, __entry->client_id
	)
);
1268 
1269 /**
1270  ** Callback events
1271  **/
1272 
/*
 * Backchannel setup: records how many callback requests were
 * provisioned for the peer identified by addr/port.
 */
TRACE_EVENT(xprtrdma_cb_setup,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int reqs
	),

	TP_ARGS(r_xprt, reqs),

	TP_STRUCT__entry(
		__field(unsigned int, reqs)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->reqs = reqs;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s %u reqs",
		__get_str(addr), __get_str(port), __entry->reqs
	)
);
1297 
/* Backchannel call/reply events (class defined earlier in this file) */
DEFINE_CALLBACK_EVENT(call);
DEFINE_CALLBACK_EVENT(reply);
1300 
1301 /**
1302  ** Server-side RPC/RDMA events
1303  **/
1304 
/*
 * Common shape for server-side accept-path failures: the remote
 * address plus the failing status code.
 */
DECLARE_EVENT_CLASS(svcrdma_accept_class,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		long status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(long, status)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s status=%ld",
		__get_str(addr), __entry->status
	)
);
1327 
/* Instantiate svcrdma_<name>_err events from svcrdma_accept_class */
#define DEFINE_ACCEPT_EVENT(name) \
		DEFINE_EVENT(svcrdma_accept_class, svcrdma_##name##_err, \
				TP_PROTO( \
					const struct svcxprt_rdma *rdma, \
					long status \
				), \
				TP_ARGS(rdma, status))

/* One event per accept-path failure site */
DEFINE_ACCEPT_EVENT(pd);
DEFINE_ACCEPT_EVENT(qp);
DEFINE_ACCEPT_EVENT(fabric);
DEFINE_ACCEPT_EVENT(initdepth);
DEFINE_ACCEPT_EVENT(accept);
1341 
/* Export the RPC/RDMA header types so user space can resolve them */
TRACE_DEFINE_ENUM(RDMA_MSG);
TRACE_DEFINE_ENUM(RDMA_NOMSG);
TRACE_DEFINE_ENUM(RDMA_MSGP);
TRACE_DEFINE_ENUM(RDMA_DONE);
TRACE_DEFINE_ENUM(RDMA_ERROR);

/* Map an RPC/RDMA proc value to its symbolic name in trace output */
#define show_rpcrdma_proc(x)						\
		__print_symbolic(x,					\
				{ RDMA_MSG, "RDMA_MSG" },		\
				{ RDMA_NOMSG, "RDMA_NOMSG" },		\
				{ RDMA_MSGP, "RDMA_MSGP" },		\
				{ RDMA_DONE, "RDMA_DONE" },		\
				{ RDMA_ERROR, "RDMA_ERROR" })
1355 
/*
 * Successful decode of an ingress RPC/RDMA transport header.
 * @p points at the start of the header; the four leading words are
 * consumed in wire order: XID, version, credits, then proc.
 */
TRACE_EVENT(svcrdma_decode_rqst,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		__be32 *p,
		unsigned int hdrlen
	),

	TP_ARGS(ctxt, p, hdrlen),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
		__field(unsigned int, hdrlen)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		/* Word order here matches the RPC/RDMA header layout */
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
		__entry->hdrlen = hdrlen;
	),

	TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%s hdrlen=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->xid, __entry->vers, __entry->credits,
		show_rpcrdma_proc(__entry->proc), __entry->hdrlen)
);
1390 
/*
 * Ingress message was too short to contain a complete transport
 * header; only the observed length can be reported.
 */
TRACE_EVENT(svcrdma_decode_short_err,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		unsigned int hdrlen
	),

	TP_ARGS(ctxt, hdrlen),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned int, hdrlen)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		__entry->hdrlen = hdrlen;
	),

	TP_printk("cq.id=%u cid=%d hdrlen=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->hdrlen)
);
1415 
/*
 * Common shape for transport-header decode failures. @p points at
 * the start of the header; the four leading words are consumed in
 * wire order (XID, version, credits, proc), same as the successful
 * decode event above.
 */
DECLARE_EVENT_CLASS(svcrdma_badreq_event,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		__be32 *p
	),

	TP_ARGS(ctxt, p),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
	),

	/* proc printed numerically: it may be an unrecognized value */
	TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->xid, __entry->vers, __entry->credits, __entry->proc)
);
1446 
/* Instantiate svcrdma_decode_<name>_err events from the class above */
#define DEFINE_BADREQ_EVENT(name)					\
		DEFINE_EVENT(svcrdma_badreq_event,			\
			     svcrdma_decode_##name##_err,		\
				TP_PROTO(				\
					const struct svc_rdma_recv_ctxt *ctxt,	\
					__be32 *p			\
				),					\
				TP_ARGS(ctxt, p))

/* One event per decode-failure reason */
DEFINE_BADREQ_EVENT(badvers);
DEFINE_BADREQ_EVENT(drop);
DEFINE_BADREQ_EVENT(badproc);
DEFINE_BADREQ_EVENT(parse);
1460 
/*
 * One Write segment encoded into an egress transport header:
 * its position in the chunk (segno) plus the handle/length/offset
 * triplet, tagged with the sending completion identity.
 */
TRACE_EVENT(svcrdma_encode_wseg,
	TP_PROTO(
		const struct svc_rdma_send_ctxt *ctxt,
		u32 segno,
		u32 handle,
		u32 length,
		u64 offset
	),

	TP_ARGS(ctxt, segno, handle, length, offset),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, segno)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->sc_cid.ci_queue_id;
		__entry->completion_id = ctxt->sc_cid.ci_completion_id;
		__entry->segno = segno;
		__entry->handle = handle;
		__entry->length = length;
		__entry->offset = offset;
	),

	TP_printk("cq_id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
		__entry->cq_id, __entry->completion_id,
		__entry->segno, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle
	)
);
1496 
/*
 * One Read segment decoded from an ingress transport header,
 * including the chunk's Position field.
 */
TRACE_EVENT(svcrdma_decode_rseg,
	TP_PROTO(
		const struct rpc_rdma_cid *cid,
		const struct svc_rdma_chunk *chunk,
		const struct svc_rdma_segment *segment
	),

	TP_ARGS(cid, chunk, segment),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, segno)
		__field(u32, position)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		/* NOTE(review): "segno" is filled from ch_segcount —
		 * presumably the count of segments decoded so far acts
		 * as this segment's ordinal; confirm against callers.
		 */
		__entry->segno = chunk->ch_segcount;
		__entry->position = chunk->ch_position;
		__entry->handle = segment->rs_handle;
		__entry->length = segment->rs_length;
		__entry->offset = segment->rs_offset;
	),

	TP_printk("cq_id=%u cid=%d segno=%u position=%u %u@0x%016llx:0x%08x",
		__entry->cq_id, __entry->completion_id,
		__entry->segno, __entry->position, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle
	)
);
1532 
/*
 * One Write segment decoded from an ingress transport header;
 * the segment is looked up by index in the chunk's segment array.
 */
TRACE_EVENT(svcrdma_decode_wseg,
	TP_PROTO(
		const struct rpc_rdma_cid *cid,
		const struct svc_rdma_chunk *chunk,
		u32 segno
	),

	TP_ARGS(cid, chunk, segno),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, segno)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		/* Caller guarantees segno indexes a valid segment */
		const struct svc_rdma_segment *segment =
			&chunk->ch_segments[segno];

		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->segno = segno;
		__entry->handle = segment->rs_handle;
		__entry->length = segment->rs_length;
		__entry->offset = segment->rs_offset;
	),

	TP_printk("cq_id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
		__entry->cq_id, __entry->completion_id,
		__entry->segno, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle
	)
);
1569 
/*
 * Common shape for events that report sending an RDMA_ERROR reply;
 * only the XID of the failing request is recorded.
 */
DECLARE_EVENT_CLASS(svcrdma_error_event,
	TP_PROTO(
		__be32 xid
	),

	TP_ARGS(xid),

	TP_STRUCT__entry(
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpu(xid);
	),

	TP_printk("xid=0x%08x",
		__entry->xid
	)
);
1589 
/* Instantiate svcrdma_err_<name> events from svcrdma_error_event */
#define DEFINE_ERROR_EVENT(name)					\
		DEFINE_EVENT(svcrdma_error_event, svcrdma_err_##name,	\
				TP_PROTO(				\
					__be32 xid			\
				),					\
				TP_ARGS(xid))

DEFINE_ERROR_EVENT(vers);
DEFINE_ERROR_EVENT(chunk);
1599 
1600 /**
1601  ** Server-side RDMA API events
1602  **/
1603 
/*
 * Common shape for per-page DMA map/unmap events: the mapped
 * address and length, plus device name and remote peer address.
 */
DECLARE_EVENT_CLASS(svcrdma_dma_map_class,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		u64 dma_addr,
		u32 length
	),

	TP_ARGS(rdma, dma_addr, length),

	TP_STRUCT__entry(
		__field(u64, dma_addr)
		__field(u32, length)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->dma_addr = dma_addr;
		__entry->length = length;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s dma_addr=%llu length=%u",
		__get_str(addr), __get_str(device),
		__entry->dma_addr, __entry->length
	)
);
1632 
/* Instantiate svcrdma_<name> events from svcrdma_dma_map_class */
#define DEFINE_SVC_DMA_EVENT(name)					\
		DEFINE_EVENT(svcrdma_dma_map_class, svcrdma_##name,	\
				TP_PROTO(				\
					const struct svcxprt_rdma *rdma,\
					u64 dma_addr,			\
					u32 length			\
				),					\
				TP_ARGS(rdma, dma_addr, length))

DEFINE_SVC_DMA_EVENT(dma_map_page);
DEFINE_SVC_DMA_EVENT(dma_map_err);
DEFINE_SVC_DMA_EVENT(dma_unmap_page);
1645 
/*
 * DMA mapping of a scatterlist for an RDMA Read/Write failed;
 * records the entry count and the failing status.
 */
TRACE_EVENT(svcrdma_dma_map_rw_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		unsigned int nents,
		int status
	),

	TP_ARGS(rdma, nents, status),

	TP_STRUCT__entry(
		__field(int, status)
		__field(unsigned int, nents)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__entry->nents = nents;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s nents=%u status=%d",
		__get_str(addr), __get_str(device), __entry->nents,
		__entry->status
	)
);
1674 
1675 TRACE_EVENT(svcrdma_no_rwctx_err,
1676 	TP_PROTO(
1677 		const struct svcxprt_rdma *rdma,
1678 		unsigned int num_sges
1679 	),
1680 
1681 	TP_ARGS(rdma, num_sges),
1682 
1683 	TP_STRUCT__entry(
1684 		__field(unsigned int, num_sges)
1685 		__string(device, rdma->sc_cm_id->device->name)
1686 		__string(addr, rdma->sc_xprt.xpt_remotebuf)
1687 	),
1688 
1689 	TP_fast_assign(
1690 		__entry->num_sges = num_sges;
1691 		__assign_str(device, rdma->sc_cm_id->device->name);
1692 		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1693 	),
1694 
1695 	TP_printk("addr=%s device=%s num_sges=%d",
1696 		__get_str(addr), __get_str(device), __entry->num_sges
1697 	)
1698 );
1699 
/*
 * A received payload ran past the pages available in the svc_rqst;
 * records the offending page index and the request's XID.
 */
TRACE_EVENT(svcrdma_page_overrun_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		const struct svc_rqst *rqst,
		unsigned int pageno
	),

	TP_ARGS(rdma, rqst, pageno),

	TP_STRUCT__entry(
		__field(unsigned int, pageno)
		__field(u32, xid)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->pageno = pageno;
		__entry->xid = __be32_to_cpu(rqst->rq_xid);
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s xid=0x%08x pageno=%u", __get_str(addr),
		__get_str(device), __entry->xid, __entry->pageno
	)
);
1727 
/*
 * A Write chunk was too small for the payload: @remaining bytes
 * were left over after consuming segment @seg_no of @num_segs.
 */
TRACE_EVENT(svcrdma_small_wrch_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		unsigned int remaining,
		unsigned int seg_no,
		unsigned int num_segs
	),

	TP_ARGS(rdma, remaining, seg_no, num_segs),

	TP_STRUCT__entry(
		__field(unsigned int, remaining)
		__field(unsigned int, seg_no)
		__field(unsigned int, num_segs)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->remaining = remaining;
		__entry->seg_no = seg_no;
		__entry->num_segs = num_segs;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s remaining=%u seg_no=%u num_segs=%u",
		__get_str(addr), __get_str(device), __entry->remaining,
		__entry->seg_no, __entry->num_segs
	)
);
1759 
1760 TRACE_EVENT(svcrdma_send_pullup,
1761 	TP_PROTO(
1762 		const struct svc_rdma_send_ctxt *ctxt,
1763 		unsigned int msglen
1764 	),
1765 
1766 	TP_ARGS(ctxt, msglen),
1767 
1768 	TP_STRUCT__entry(
1769 		__field(u32, cq_id)
1770 		__field(int, completion_id)
1771 		__field(unsigned int, hdrlen)
1772 		__field(unsigned int, msglen)
1773 	),
1774 
1775 	TP_fast_assign(
1776 		__entry->cq_id = ctxt->sc_cid.ci_queue_id;
1777 		__entry->completion_id = ctxt->sc_cid.ci_completion_id;
1778 		__entry->hdrlen = ctxt->sc_hdrbuf.len,
1779 		__entry->msglen = msglen;
1780 	),
1781 
1782 	TP_printk("cq_id=%u cid=%d hdr=%u msg=%u (total %u)",
1783 		__entry->cq_id, __entry->completion_id,
1784 		__entry->hdrlen, __entry->msglen,
1785 		__entry->hdrlen + __entry->msglen)
1786 );
1787 
/*
 * Sending a reply failed; records the status and the XID of the
 * request whose reply could not be sent.
 */
TRACE_EVENT(svcrdma_send_err,
	TP_PROTO(
		const struct svc_rqst *rqst,
		int status
	),

	TP_ARGS(rqst, status),

	TP_STRUCT__entry(
		__field(int, status)
		__field(u32, xid)
		__string(addr, rqst->rq_xprt->xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__entry->xid = __be32_to_cpu(rqst->rq_xid);
		__assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
	),

	TP_printk("addr=%s xid=0x%08x status=%d", __get_str(addr),
		__entry->xid, __entry->status
	)
);
1812 
/*
 * A Send WR is about to be posted; records its SGE count and, for
 * Send-with-Invalidate, the rkey being invalidated (0 otherwise).
 */
TRACE_EVENT(svcrdma_post_send,
	TP_PROTO(
		const struct svc_rdma_send_ctxt *ctxt
	),

	TP_ARGS(ctxt),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned int, num_sge)
		__field(u32, inv_rkey)
	),

	TP_fast_assign(
		const struct ib_send_wr *wr = &ctxt->sc_send_wr;

		__entry->cq_id = ctxt->sc_cid.ci_queue_id;
		__entry->completion_id = ctxt->sc_cid.ci_completion_id;
		__entry->num_sge = wr->num_sge;
		/* ex.invalidate_rkey is only valid for this opcode */
		__entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ?
					wr->ex.invalidate_rkey : 0;
	),

	TP_printk("cq_id=%u cid=%d num_sge=%u inv_rkey=0x%08x",
		__entry->cq_id, __entry->completion_id,
		__entry->num_sge, __entry->inv_rkey
	)
);
1842 
1843 DEFINE_COMPLETION_EVENT(svcrdma_wc_send);
1844 
1845 TRACE_EVENT(svcrdma_post_recv,
1846 	TP_PROTO(
1847 		const struct svc_rdma_recv_ctxt *ctxt
1848 	),
1849 
1850 	TP_ARGS(ctxt),
1851 
1852 	TP_STRUCT__entry(
1853 		__field(u32, cq_id)
1854 		__field(int, completion_id)
1855 	),
1856 
1857 	TP_fast_assign(
1858 		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
1859 		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
1860 	),
1861 
1862 	TP_printk("cq.id=%d cid=%d",
1863 		__entry->cq_id, __entry->completion_id
1864 	)
1865 );
1866 
1867 DEFINE_RECEIVE_COMPLETION_EVENT(svcrdma_wc_receive);
1868 
/*
 * Posting to the Receive queue failed; records the failing status
 * and the remote peer address.
 */
TRACE_EVENT(svcrdma_rq_post_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		int status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(int, status)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s status=%d",
		__get_str(addr), __entry->status
	)
);
1891 
/*
 * Common shape for chunk-posting events: the completion identity
 * plus how many SQEs the posted chain consumes.
 */
DECLARE_EVENT_CLASS(svcrdma_post_chunk_class,
	TP_PROTO(
		const struct rpc_rdma_cid *cid,
		int sqecount
	),

	TP_ARGS(cid, sqecount),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(int, sqecount)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->sqecount = sqecount;
	),

	TP_printk("cq.id=%u cid=%d sqecount=%d",
		__entry->cq_id, __entry->completion_id,
		__entry->sqecount
	)
);
1917 
/* Instantiate svcrdma_post_<name>_chunk events from the class above */
#define DEFINE_POST_CHUNK_EVENT(name)					\
		DEFINE_EVENT(svcrdma_post_chunk_class,			\
				svcrdma_post_##name##_chunk,		\
				TP_PROTO(				\
					const struct rpc_rdma_cid *cid,	\
					int sqecount			\
				),					\
				TP_ARGS(cid, sqecount))

DEFINE_POST_CHUNK_EVENT(read);
DEFINE_POST_CHUNK_EVENT(write);
DEFINE_POST_CHUNK_EVENT(reply);

/* RDMA Read/Write completions (class defined at the top of this file) */
DEFINE_COMPLETION_EVENT(svcrdma_wc_read);
DEFINE_COMPLETION_EVENT(svcrdma_wc_write);
1933 
1934 TRACE_EVENT(svcrdma_qp_error,
1935 	TP_PROTO(
1936 		const struct ib_event *event,
1937 		const struct sockaddr *sap
1938 	),
1939 
1940 	TP_ARGS(event, sap),
1941 
1942 	TP_STRUCT__entry(
1943 		__field(unsigned int, event)
1944 		__string(device, event->device->name)
1945 		__array(__u8, addr, INET6_ADDRSTRLEN + 10)
1946 	),
1947 
1948 	TP_fast_assign(
1949 		__entry->event = event->event;
1950 		__assign_str(device, event->device->name);
1951 		snprintf(__entry->addr, sizeof(__entry->addr) - 1,
1952 			 "%pISpc", sap);
1953 	),
1954 
1955 	TP_printk("addr=%s dev=%s event=%s (%u)",
1956 		__entry->addr, __get_str(device),
1957 		rdma_show_ib_event(__entry->event), __entry->event
1958 	)
1959 );
1960 
/*
 * Common shape for Send Queue pressure events: available SQEs
 * versus configured SQ depth for the transport.
 */
DECLARE_EVENT_CLASS(svcrdma_sendqueue_event,
	TP_PROTO(
		const struct svcxprt_rdma *rdma
	),

	TP_ARGS(rdma),

	TP_STRUCT__entry(
		__field(int, avail)
		__field(int, depth)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->avail = atomic_read(&rdma->sc_sq_avail);
		__entry->depth = rdma->sc_sq_depth;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s sc_sq_avail=%d/%d",
		__get_str(addr), __entry->avail, __entry->depth
	)
);
1984 
/* Instantiate svcrdma_sq_<name> events from svcrdma_sendqueue_event */
#define DEFINE_SQ_EVENT(name)						\
		DEFINE_EVENT(svcrdma_sendqueue_event, svcrdma_sq_##name,\
				TP_PROTO(				\
					const struct svcxprt_rdma *rdma \
				),					\
				TP_ARGS(rdma))

DEFINE_SQ_EVENT(full);
DEFINE_SQ_EVENT(retry);
1994 
/*
 * Posting to the Send Queue failed; records the failing status
 * together with a snapshot of SQ availability versus depth.
 */
TRACE_EVENT(svcrdma_sq_post_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		int status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(int, avail)
		__field(int, depth)
		__field(int, status)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->avail = atomic_read(&rdma->sc_sq_avail);
		__entry->depth = rdma->sc_sq_depth;
		__entry->status = status;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s sc_sq_avail=%d/%d status=%d",
		__get_str(addr), __entry->avail, __entry->depth,
		__entry->status
	)
);
2022 
2023 #endif /* _TRACE_RPCRDMA_H */
2024 
2025 #include <trace/define_trace.h>
2026